From 0564397ee6d206a7ede5ff16628b4b46578339c4 Mon Sep 17 00:00:00 2001
From: Damian at mba <damian@frey.NOSPAMco.nz>
Date: Mon, 24 Oct 2022 11:16:43 +0200
Subject: [PATCH] clean up logs

Comment out the debug print statements in InvokeAIDiffuserComponent
(shared_invokeai_diffusion.py) that report percent_through and which
cross-attention control path is taken on each step.
---
 ldm/models/diffusion/shared_invokeai_diffusion.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ldm/models/diffusion/shared_invokeai_diffusion.py b/ldm/models/diffusion/shared_invokeai_diffusion.py
index f52dd46766..b8a7a04d0e 100644
--- a/ldm/models/diffusion/shared_invokeai_diffusion.py
+++ b/ldm/models/diffusion/shared_invokeai_diffusion.py
@@ -82,18 +82,18 @@ class InvokeAIDiffuserComponent:
                 # flip because sigmas[0] is for the fully denoised image
                 # percent_through must be <1
                 percent_through = 1.0 - float(sigma_index.item() + 1) / float(self.model.sigmas.shape[0])
-                print('estimated percent_through', percent_through, 'from sigma', sigma.item())
+                #print('estimated percent_through', percent_through, 'from sigma', sigma.item())
             cross_attention_control_types_to_do = CrossAttentionControl.get_active_cross_attention_control_types_for_step(self.cross_attention_control_context, percent_through)
 
         if len(cross_attention_control_types_to_do)==0:
-            print('not doing cross attention control')
+            #print('not doing cross attention control')
             # faster batched path
             x_twice = torch.cat([x]*2)
             sigma_twice = torch.cat([sigma]*2)
             both_conditionings = torch.cat([unconditioning, conditioning])
             unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice, both_conditionings).chunk(2)
         else:
-            print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do)
+            #print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do)
             # slower non-batched path (20% slower on mac MPS)
             # We are only interested in using attention maps for conditioned_next_x, but batching them with generation of
             # unconditioned_next_x causes attention maps to *also* be saved for the unconditioned_next_x.
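
Note for reviewers, not part of the patch: the branch whose print is
silenced above is the batched unconditioned+conditioned pass used for
classifier-free guidance. A minimal sketch of what that path does,
assuming model_forward_callback takes (x, sigma, conditioning) and
returns the predicted next x as it is used in the hunk:

import torch

def batched_guidance_step(model_forward_callback, x, sigma,
                          unconditioning, conditioning):
    # illustrative sketch only; mirrors the "faster batched path" above
    # duplicate the latents and sigmas so both passes run in one batch
    x_twice = torch.cat([x] * 2)
    sigma_twice = torch.cat([sigma] * 2)
    both_conditionings = torch.cat([unconditioning, conditioning])
    # one forward call, then split the result back into its two halves
    unconditioned_next_x, conditioned_next_x = model_forward_callback(
        x_twice, sigma_twice, both_conditionings
    ).chunk(2)
    return unconditioned_next_x, conditioned_next_x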