Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
add shape_freedom arg to .swap()
This commit is contained in:
parent 04d93f0445
commit 8f35819ddf
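
A hypothetical caller-side sketch of the new argument (the values and the exact Fragment signature are illustrative; the classes are the ones edited in the diff below, where shape_freedom is popped out of options and converted to s_end in __init__):

    swap = CrossAttentionControlSubstitute(
        original=[Fragment('cat')],
        edited=[Fragment('dog')],
        options={'shape_freedom': 0.5},  # consumed in __init__ -> s_end = 1.0 - cbrt(0.5) ≈ 0.206
    )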
@@ -1,3 +1,4 @@
+import math
 import string
 from typing import Union
 
@@ -121,15 +122,28 @@ class CrossAttentionControlSubstitute(CrossAttentionControlledFragment):
     def __init__(self, original: Union[Fragment, list], edited: Union[Fragment, list], options: dict=None):
         self.original = original
         self.edited = edited
 
         default_options = {
             's_start': 0.0,
-            's_end': 1.0,
+            's_end': 0.3,  # gives best results
             't_start': 0.0,
             't_end': 1.0
         }
         merged_options = default_options
         if options is not None:
+            shape_freedom = options.pop('shape_freedom', None)
+            if shape_freedom is not None:
+                # high shape freedom = SD can do what it wants with the shape of the object
+                # high shape freedom => s_end = 0
+                # low shape freedom => s_end = 1
+                # shape_freedom lives in a "linear" space, while the noticeable changes to s_end cluster close to 0
+                # and there is very little perceptible difference as s_end rises above 0.5,
+                # so for shape_freedom = 0.5 we probably want s_end to be about 0.2
+                # -> take the cube root and subtract it from 1.0
+                merged_options['s_end'] = 1.0 - math.cbrt(shape_freedom)
+                print('converted shape_freedom argument to', merged_options)
             merged_options.update(options)
+
         self.options = merged_options
+
     def __repr__(self):
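
The cube-root mapping above is easy to sanity-check in isolation. A minimal standalone sketch (note that math.cbrt exists only on Python 3.11+; shape_freedom ** (1.0 / 3.0) is an equivalent fallback on older interpreters):

    import math

    # shape_freedom -> s_end, exactly as computed in __init__ above
    for shape_freedom in (0.0, 0.25, 0.5, 0.75, 1.0):
        s_end = 1.0 - math.cbrt(shape_freedom)
        print(f'shape_freedom={shape_freedom:.2f} -> s_end={s_end:.3f}')

    # shape_freedom=0.00 -> s_end=1.000  (no freedom: keep applying the swap throughout)
    # shape_freedom=0.50 -> s_end=0.206  (matches the ~0.2 target named in the comments)
    # shape_freedom=1.00 -> s_end=0.000  (full freedom: stop applying the swap immediately)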
@@ -78,6 +78,7 @@ class InvokeAIDiffuserComponent:
             percent_through = float(step_index) / float(self.cross_attention_control_context.step_count)
         else:
             # find the current sigma in the sigma sequence
+            # todo: this doesn't work with k_dpm_2 because the sigma used jumps around in the sequence
             sigma_index = torch.nonzero(self.model.sigmas <= sigma)[-1]
             # flip because sigmas[0] is for the fully denoised image
             # percent_through must be <1
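
The lookup-and-flip can be illustrated standalone. The schedule below is an assumption (ascending, with sigmas[0] standing for the fully denoised end), and since the actual flip expression falls outside this hunk, the last line is only a guess at its intent:

    import torch

    sigmas = torch.tensor([0.03, 0.25, 0.5, 1.0, 2.0, 7.0])  # assumed ascending schedule
    sigma = torch.tensor(0.5)  # current noise level

    # last index whose sigma is <= the current one, as in the hunk above
    sigma_index = torch.nonzero(sigmas <= sigma)[-1]
    # flip: a low sigma means we are late in sampling (hypothetical expression)
    percent_through = 1.0 - float(sigma_index) / float(len(sigmas))
    print(percent_through)  # 0.666...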
@@ -86,14 +87,14 @@ class InvokeAIDiffuserComponent:
         cross_attention_control_types_to_do = CrossAttentionControl.get_active_cross_attention_control_types_for_step(self.cross_attention_control_context, percent_through)
 
         if len(cross_attention_control_types_to_do)==0:
-            print('step', step_index, ': not doing cross attention control')
+            #print('step', step_index, ': not doing cross attention control')
             # faster batched path
             x_twice = torch.cat([x]*2)
             sigma_twice = torch.cat([sigma]*2)
             both_conditionings = torch.cat([unconditioning, conditioning])
             unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice, both_conditionings).chunk(2)
         else:
-            print('step', step_index, ': doing cross attention control on', cross_attention_control_types_to_do)
+            #print('step', step_index, ': doing cross attention control on', cross_attention_control_types_to_do)
             # slower non-batched path (20% slower on mac MPS)
             # We are only interested in using attention maps for conditioned_next_x, but batching them with generation of
             # unconditioned_next_x causes attention maps to *also* be saved for the unconditioned_next_x.
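
The "faster batched path" is the standard classifier-free-guidance batching trick: run the unconditioned and conditioned passes through the model as a single batch of two, then split the result. A toy sketch with a stand-in forward function (the shapes and the callback body are illustrative, not InvokeAI's):

    import torch

    def model_forward(x, sigma, conditioning):
        # stand-in for model_forward_callback, just to show the tensor flow
        return x * 0.9 + conditioning.mean(dim=(1, 2), keepdim=True)

    x = torch.randn(1, 4, 8)                  # one latent
    sigma = torch.tensor([1.0])
    unconditioning = torch.randn(1, 77, 768)  # toy text-embedding shapes
    conditioning = torch.randn(1, 77, 768)

    # batch the two passes, then chunk the result back apart
    x_twice = torch.cat([x] * 2)
    sigma_twice = torch.cat([sigma] * 2)
    both_conditionings = torch.cat([unconditioning, conditioning])
    unconditioned_next_x, conditioned_next_x = \
        model_forward(x_twice, sigma_twice, both_conditionings).chunk(2)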