Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
Merge branch 'main' into ui-fixes
This commit is contained in: commit 33cb6cb4d8
@@ -367,36 +367,31 @@ class DenoiseLatentsInvocation(BaseInvocation):
     # original idea by https://github.com/AmericanPresidentJimmyCarter
     # TODO: research more for second order schedulers timesteps
     def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end):
-        num_inference_steps = steps
         if scheduler.config.get("cpu_only", False):
-            scheduler.set_timesteps(num_inference_steps, device="cpu")
+            scheduler.set_timesteps(steps, device="cpu")
             timesteps = scheduler.timesteps.to(device=device)
         else:
-            scheduler.set_timesteps(num_inference_steps, device=device)
+            scheduler.set_timesteps(steps, device=device)
             timesteps = scheduler.timesteps
 
-        # apply denoising_start
+        # skip greater order timesteps
+        _timesteps = timesteps[:: scheduler.order]
+
+        # get start timestep index
         t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start)))
-        t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, timesteps)))
-        timesteps = timesteps[t_start_idx:]
-        if scheduler.order == 2 and t_start_idx > 0:
-            timesteps = timesteps[1:]
+        t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, _timesteps)))
 
-        # save start timestep to apply noise
-        init_timestep = timesteps[:1]
-
-        # apply denoising_end
+        # get end timestep index
         t_end_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_end)))
-        t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, timesteps)))
-        if scheduler.order == 2 and t_end_idx > 0:
-            t_end_idx += 1
-        timesteps = timesteps[:t_end_idx]
+        t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, _timesteps[t_start_idx:])))
 
-        # calculate step count based on scheduler order
-        num_inference_steps = len(timesteps)
-        if scheduler.order == 2:
-            num_inference_steps += num_inference_steps % 2
-            num_inference_steps = num_inference_steps // 2
+        # apply order to indexes
+        t_start_idx *= scheduler.order
+        t_end_idx *= scheduler.order
+
+        init_timestep = timesteps[t_start_idx : t_start_idx + 1]
+        timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx]
+        num_inference_steps = len(timesteps) // scheduler.order
 
         return num_inference_steps, timesteps, init_timestep
 
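
For reference, a standalone sketch (not part of the commit) of how the reworked timestep selection behaves. It mirrors the new logic with plain Python lists instead of a real diffusers scheduler; the doubled 6-entry schedule, order=2, 1000 training timesteps, and the 30% denoising_start below are illustrative assumptions.

# Toy re-implementation of the new timestep selection, for illustration only.
def select_timesteps(timesteps, order, num_train_timesteps, denoising_start, denoising_end):
    # Second-order schedulers repeat timesteps, so map denoising_start/end onto
    # every `order`-th entry, then scale the resulting indexes back up.
    _timesteps = timesteps[::order]

    t_start_val = int(round(num_train_timesteps * (1 - denoising_start)))
    t_start_idx = len([ts for ts in _timesteps if ts >= t_start_val])

    t_end_val = int(round(num_train_timesteps * (1 - denoising_end)))
    t_end_idx = len([ts for ts in _timesteps[t_start_idx:] if ts >= t_end_val])

    t_start_idx *= order
    t_end_idx *= order

    init_timestep = timesteps[t_start_idx : t_start_idx + 1]
    timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx]
    num_inference_steps = len(timesteps) // order
    return num_inference_steps, timesteps, init_timestep


# Hypothetical order-2 schedule with every timestep doubled; real schedulers in
# diffusers may lay out the repeats slightly differently.
schedule = [999, 999, 799, 799, 599, 599, 399, 399, 199, 199, 0, 0]
print(select_timesteps(schedule, 2, 1000, denoising_start=0.3, denoising_end=1.0))
# -> (4, [599, 599, 399, 399, 199, 199, 0, 0], [599])

The doubled schedule above is only a convenient shape for the arithmetic; the point is that start and end indexes are computed on the deduplicated view and then scaled by scheduler.order.
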
@@ -558,12 +558,22 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         # compute the previous noisy sample x_t -> x_t-1
         step_output = self.scheduler.step(noise_pred, timestep, latents, **conditioning_data.scheduler_args)
 
+        # TODO: issue to diffusers?
+        # undo internal counter increment done by scheduler.step, so timestep can be resolved as before call
+        # this needed to be able call scheduler.add_noise with current timestep
+        if self.scheduler.order == 2:
+            self.scheduler._index_counter[timestep.item()] -= 1
+
         # TODO: this additional_guidance extension point feels redundant with InvokeAIDiffusionComponent.
         # But the way things are now, scheduler runs _after_ that, so there was
         # no way to use it to apply an operation that happens after the last scheduler.step.
         for guidance in additional_guidance:
             step_output = guidance(step_output, timestep, conditioning_data)
 
+        # restore internal counter
+        if self.scheduler.order == 2:
+            self.scheduler._index_counter[timestep.item()] += 1
+
         return step_output
 
     def _unet_forward(
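
The counter bookkeeping in the second hunk exists because scheduler.step on second-order diffusers schedulers advances an internal per-timestep counter used to pick among repeated timesteps; once step has run, an additional_guidance callback that calls scheduler.add_noise with the current timestep would resolve it to a later occurrence in the schedule. A rough sketch of such a callback follows; it is purely illustrative, the names make_masked_guidance, orig_latents, and mask are hypothetical, and the blend is not the commit's actual guidance code.

import torch


def make_masked_guidance(scheduler, orig_latents, mask):
    # Hypothetical inpainting-style guidance factory; the returned callable
    # matches the additional_guidance signature used in the hunk above.
    def guidance(step_output, timestep, conditioning_data):
        # Re-noise the original latents to the *current* timestep and pin the
        # masked regions of the step output to them. This add_noise call is why
        # the pipeline temporarily rolls back the scheduler's internal counter:
        # after scheduler.step, a second-order scheduler would otherwise resolve
        # the same timestep to its next repeated occurrence.
        noise = torch.randn_like(orig_latents)
        noised_orig = scheduler.add_noise(orig_latents, noise, timestep.unsqueeze(0))
        step_output.prev_sample = torch.lerp(step_output.prev_sample, noised_orig, mask)
        return step_output

    return guidance

As the TODO comment notes, _index_counter is a private diffusers attribute, which is presumably why the commit flags this workaround as something to raise upstream.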