From ee4273d760e642cc33f15f85e0b82eeaaa8cc375 Mon Sep 17 00:00:00 2001
From: Damian at mba
Date: Mon, 24 Oct 2022 01:23:43 +0200
Subject: [PATCH] fix step count on ddim

---
 ldm/models/diffusion/sampler.py                   | 1 +
 ldm/models/diffusion/shared_invokeai_diffusion.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/ldm/models/diffusion/sampler.py b/ldm/models/diffusion/sampler.py
index 8099997bb3..79c15717fe 100644
--- a/ldm/models/diffusion/sampler.py
+++ b/ldm/models/diffusion/sampler.py
@@ -359,6 +359,7 @@ class Sampler(object):
                 unconditional_guidance_scale=unconditional_guidance_scale,
                 unconditional_conditioning=unconditional_conditioning,
                 t_next = ts_next,
+                step_count=total_steps
             )

         x_dec, pred_x0, e_t = outs
diff --git a/ldm/models/diffusion/shared_invokeai_diffusion.py b/ldm/models/diffusion/shared_invokeai_diffusion.py
index c14e71be8d..4bf5688586 100644
--- a/ldm/models/diffusion/shared_invokeai_diffusion.py
+++ b/ldm/models/diffusion/shared_invokeai_diffusion.py
@@ -86,14 +86,14 @@ class InvokeAIDiffuserComponent:
         cross_attention_control_types_to_do = CrossAttentionControl.get_active_cross_attention_control_types_for_step(self.cross_attention_control_context, percent_through)

         if len(cross_attention_control_types_to_do)==0:
-            #print('step', step_index, ': not doing cross attention control')
+            print('pct', percent_through, ': not doing cross attention control')
             # faster batched path
             x_twice = torch.cat([x]*2)
             sigma_twice = torch.cat([sigma]*2)
             both_conditionings = torch.cat([unconditioning, conditioning])
             unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice, both_conditionings).chunk(2)
         else:
-            #print('step', step_index, ': doing cross attention control on', cross_attention_control_types_to_do)
+            print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do)
             # slower non-batched path (20% slower on mac MPS)
             # We are only interested in using attention maps for conditioned_next_x, but batching them with generation of
             # unconditioned_next_x causes attention maps to *also* be saved for the unconditioned_next_x.