Mirror of https://github.com/invoke-ai/InvokeAI
inpaint model progress
- working with plain prompts, weighted prompts, and merge prompts; not tested with prompt2prompt
This commit is contained in:
parent 2daf187bdb
commit 0d0481ce75
@@ -76,4 +76,4 @@ model:
           target: torch.nn.Identity
 
     cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+      target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
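The only change in this hunk is the cond_stage_config target, which swaps the text encoder class used for conditioning. In the ldm codebase these dotted target strings are resolved at model-load time by instantiate_from_config; below is a minimal sketch of that pattern (simplified from ldm.util, so treat the body as an approximation rather than the exact library code):

import importlib

def instantiate_from_config(config: dict):
    # Resolve the dotted "target" string to a class, then construct it
    # with the accompanying "params" mapping (empty if absent).
    module_name, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_name), cls_name)
    return cls(**config.get("params", {}))

# With the config change above, the conditioning stage is built as a
# WeightedFrozenCLIPEmbedder instead of a plain FrozenCLIPEmbedder:
#   cond_stage_model = instantiate_from_config(model_config["cond_stage_config"])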
@@ -43,14 +43,7 @@ class CFGDenoiser(nn.Module):
 
 
     def forward(self, x, sigma, uncond, cond, cond_scale):
-        if isinstance(cond,dict): # hybrid model
-            x_in = torch.cat([x] * 2)
-            sigma_in = torch.cat([sigma] * 2)
-            cond_in = self.sampler.make_cond_in(uncond,cond)
-            uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
-            next_x = uncond + (cond - uncond) * cond_scale
-        else: # cross attention model
-            next_x = self.invokeai_diffuser.do_diffusion_step(x, sigma, uncond, cond, cond_scale)
+        next_x = self.invokeai_diffuser.do_diffusion_step(x, sigma, uncond, cond, cond_scale)
         if self.warmup < self.warmup_max:
             thresh = max(1, 1 + (self.threshold - 1) * (self.warmup / self.warmup_max))
             self.warmup += 1
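The deleted branch was an inlined classifier-free-guidance (CFG) step for the hybrid (dict-conditioned) model; after this change both the dict and plain-tensor cases route through InvokeAIDiffuserComponent.do_diffusion_step. For reference, here is a standalone sketch of the batched CFG math the old branch implemented (cfg_step is a hypothetical name, and the model call signature is illustrative, not InvokeAI API):

import torch

def cfg_step(model, x, sigma, uncond, cond, cond_scale):
    # Run the unconditioned and conditioned passes as one batch of 2,
    # then combine them with the classifier-free guidance formula:
    #   next_x = uncond_out + (cond_out - uncond_out) * cond_scale
    x_in = torch.cat([x] * 2)
    sigma_in = torch.cat([sigma] * 2)
    cond_in = torch.cat([uncond, cond])
    uncond_out, cond_out = model(x_in, sigma_in, cond=cond_in).chunk(2)
    return uncond_out + (cond_out - uncond_out) * cond_scale

The retained warmup lines ramp thresh linearly from 1 toward self.threshold over warmup_max steps: with threshold=3 and warmup_max=4, for example, thresh takes the values 1.0, 1.5, 2.0, 2.5 during warmup, presumably reaching the full threshold once warmed up.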
@@ -90,7 +90,19 @@ class InvokeAIDiffuserComponent:
             # faster batched path
             x_twice = torch.cat([x]*2)
             sigma_twice = torch.cat([sigma]*2)
-            both_conditionings = torch.cat([unconditioning, conditioning])
+            if isinstance(conditioning, dict):
+                assert isinstance(unconditioning, dict)
+                both_conditionings = dict()
+                for k in conditioning:
+                    if isinstance(conditioning[k], list):
+                        both_conditionings[k] = [
+                            torch.cat([unconditioning[k][i], conditioning[k][i]])
+                            for i in range(len(conditioning[k]))
+                        ]
+                    else:
+                        both_conditionings[k] = torch.cat([unconditioning[k], conditioning[k]])
+            else:
+                both_conditionings = torch.cat([unconditioning, conditioning])
             unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice, both_conditionings).chunk(2)
         else:
             #print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do)
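For an SD-style hybrid/inpainting model, conditioning arrives as a dict rather than a single tensor (typically a text embedding plus masked-image/mask latents), and each value may itself be a list of tensors. The new branch concatenates the unconditioned and conditioned halves per key, so the faster batched path still serves both halves of classifier-free guidance in one forward pass. A toy check of that behavior; the key names and tensor shapes below are illustrative assumptions:

import torch

B = 1
# Illustrative hybrid conditioning: CLIP text embedding plus concat latents.
cond = {
    "c_crossattn": [torch.randn(B, 77, 768)],
    "c_concat": [torch.randn(B, 5, 64, 64)],
}
uncond = {
    "c_crossattn": [torch.randn(B, 77, 768)],
    "c_concat": [torch.randn(B, 5, 64, 64)],
}

# Same per-key concatenation as the new branch in the hunk above.
both_conditionings = dict()
for k in cond:
    if isinstance(cond[k], list):
        both_conditionings[k] = [
            torch.cat([uncond[k][i], cond[k][i]]) for i in range(len(cond[k]))
        ]
    else:
        both_conditionings[k] = torch.cat([uncond[k], cond[k]])

# Each entry now has batch size 2*B: unconditioned half first, conditioned second.
assert both_conditionings["c_crossattn"][0].shape[0] == 2 * B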
@@ -439,7 +439,7 @@ class FrozenCLIPEmbedder(AbstractEncoder):
             param.requires_grad = False
 
     def forward(self, text, **kwargs):
+        print(f'DEBUG text={text}, max_length={self.max_length}')
         batch_encoding = self.tokenizer(
             text,
             truncation=True,
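The added print exposes what actually reaches the tokenizer. The call being truncated here is a standard Hugging Face CLIPTokenizer invocation; a self-contained sketch with the usual FrozenCLIPEmbedder settings (max_length=77 and padding to max length; the keyword arguments beyond the visible truncation=True are assumptions about the elided lines):

from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
batch_encoding = tokenizer(
    ["a photo of an astronaut riding a horse"],
    truncation=True,        # clip anything past max_length tokens
    max_length=77,          # CLIP's context length
    padding="max_length",   # pad short prompts up to max_length
    return_tensors="pt",
)
print(batch_encoding["input_ids"].shape)  # torch.Size([1, 77])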