cleanup logs

commit 0564397ee6
parent 63902f3d34
@@ -82,18 +82,18 @@ class InvokeAIDiffuserComponent:
     # flip because sigmas[0] is for the fully denoised image
     # percent_through must be <1
     percent_through = 1.0 - float(sigma_index.item() + 1) / float(self.model.sigmas.shape[0])
-    print('estimated percent_through', percent_through, 'from sigma', sigma.item())
+    #print('estimated percent_through', percent_through, 'from sigma', sigma.item())
     cross_attention_control_types_to_do = CrossAttentionControl.get_active_cross_attention_control_types_for_step(self.cross_attention_control_context, percent_through)
 
     if len(cross_attention_control_types_to_do)==0:
-        print('not doing cross attention control')
+        #print('not doing cross attention control')
         # faster batched path
         x_twice = torch.cat([x]*2)
         sigma_twice = torch.cat([sigma]*2)
         both_conditionings = torch.cat([unconditioning, conditioning])
         unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice, both_conditionings).chunk(2)
     else:
-        print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do)
+        #print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do)
         # slower non-batched path (20% slower on mac MPS)
         # We are only interested in using attention maps for conditioned_next_x, but batching them with generation of
         # unconditioned_next_x causes attention maps to *also* be saved for the unconditioned_next_x.
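
For context on the first changed line: percent_through estimates how far through the noise schedule the sampler is, from the index of the current sigma. A standalone sketch, assuming sigmas is the sampler's schedule tensor with sigmas[0] belonging to the fully denoised image (the helper name is illustrative, not part of the InvokeAI API):

    import torch

    def estimate_percent_through(sigma_index: torch.Tensor, sigmas: torch.Tensor) -> float:
        # Flip the index because sigmas[0] is the fully denoised end of the
        # schedule; the +1 keeps the result strictly below 1, as the hunk notes.
        return 1.0 - float(sigma_index.item() + 1) / float(sigmas.shape[0])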
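The if/else in the hunk chooses between two ways of running the denoiser. A minimal sketch of the "faster batched path", assuming a model_forward_callback with the same (x, sigma, conditioning) call shape as in the diff; the function name here is illustrative:

    import torch

    def batched_guidance_forward(model_forward_callback, x, sigma,
                                 unconditioning, conditioning):
        # Concatenate latents, noise level, and both conditionings so the
        # unconditioned and conditioned predictions come from a single
        # forward pass, then split the result back apart with chunk(2).
        x_twice = torch.cat([x] * 2)
        sigma_twice = torch.cat([sigma] * 2)
        both_conditionings = torch.cat([unconditioning, conditioning])
        unconditioned_next_x, conditioned_next_x = model_forward_callback(
            x_twice, sigma_twice, both_conditionings
        ).chunk(2)
        return unconditioned_next_x, conditioned_next_x

The else branch gives this batching up (about 20% slower on mac MPS, per the comment) because saving attention maps from a batched call would also record maps for the unconditioned prediction, which cross attention control does not need.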
|