Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
Add UniPC Scheduler

commit 06b5800d28, parent b0c41b4828
@@ -54,16 +54,17 @@ class NoiseOutput(BaseInvocationOutput):

 # TODO: this seems like a hack
 scheduler_map = dict(
-    ddim=diffusers.DDIMScheduler,
-    dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-    k_dpm_2=diffusers.KDPM2DiscreteScheduler,
-    k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
-    k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-    k_euler=diffusers.EulerDiscreteScheduler,
-    k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
-    k_heun=diffusers.HeunDiscreteScheduler,
-    k_lms=diffusers.LMSDiscreteScheduler,
-    plms=diffusers.PNDMScheduler,
+    ddim=(diffusers.DDIMScheduler, dict()),
+    dpmpp_2=(diffusers.DPMSolverMultistepScheduler, dict()),
+    k_dpm_2=(diffusers.KDPM2DiscreteScheduler, dict()),
+    k_dpm_2_a=(diffusers.KDPM2AncestralDiscreteScheduler, dict()),
+    k_dpmpp_2=(diffusers.DPMSolverMultistepScheduler, dict()),
+    k_euler=(diffusers.EulerDiscreteScheduler, dict()),
+    k_euler_a=(diffusers.EulerAncestralDiscreteScheduler, dict()),
+    k_heun=(diffusers.HeunDiscreteScheduler, dict()),
+    k_lms=(diffusers.LMSDiscreteScheduler, dict()),
+    plms=(diffusers.PNDMScheduler, dict()),
+    unipc=(diffusers.UniPCMultistepScheduler, dict(cpu_only=True))
 )

@@ -73,8 +74,9 @@ SAMPLER_NAME_VALUES = Literal[


 def get_scheduler(scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
-    scheduler_class = scheduler_map.get(scheduler_name,'ddim')
-    scheduler = scheduler_class.from_config(model.scheduler.config)
+    scheduler_class, scheduler_extra_config = scheduler_map.get(scheduler_name,'ddim')
+    scheduler_config = {**model.scheduler.config, **scheduler_extra_config}
+    scheduler = scheduler_class.from_config(scheduler_config)
     # hack copied over from generate.py
     if not hasattr(scheduler, 'uses_inpainting_model'):
         scheduler.uses_inpainting_model = lambda: False
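Note: with scheduler_map entries now holding (scheduler class, extra config) pairs, get_scheduler() merges the per-scheduler overrides into the model's existing scheduler config before calling from_config. A minimal sketch of the lookup, not part of the commit, assuming `model` is a loaded StableDiffusionGeneratorPipeline as in this module:

    # scheduler_map["unipc"] -> (diffusers.UniPCMultistepScheduler, {"cpu_only": True})
    scheduler_class, extra_config = scheduler_map["unipc"]
    merged_config = {**model.scheduler.config, **extra_config}
    scheduler = scheduler_class.from_config(merged_config)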
@@ -293,11 +295,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
             latent, device=model.device, dtype=latent.dtype
         )

-        timesteps, _ = model.get_img2img_timesteps(
-            self.steps,
-            self.strength,
-            device=model.device,
-        )
+        timesteps, _ = model.get_img2img_timesteps(self.steps, self.strength)

         result_latents, result_attention_map_saver = model.latents_from_embeddings(
             latents=initial_latents,
@@ -119,6 +119,7 @@ SAMPLER_CHOICES = [
     "plms",
     # diffusers:
     "pndm",
+    "unipc"
 ]

 PRECISION_CHOICES = [
@@ -1049,27 +1049,28 @@ class Generate:

         # See https://github.com/huggingface/diffusers/issues/277#issuecomment-1371428672
         scheduler_map = dict(
-            ddim=diffusers.DDIMScheduler,
-            dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-            k_dpm_2=diffusers.KDPM2DiscreteScheduler,
-            k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
+            ddim=(diffusers.DDIMScheduler, dict()),
+            dpmpp_2=(diffusers.DPMSolverMultistepScheduler, dict()),
+            k_dpm_2=(diffusers.KDPM2DiscreteScheduler, dict()),
+            k_dpm_2_a=(diffusers.KDPM2AncestralDiscreteScheduler, dict()),
             # DPMSolverMultistepScheduler is technically not `k_` anything, as it is neither
             # the k-diffusers implementation nor included in EDM (Karras 2022), but we can
             # provide an alias for compatibility.
-            k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-            k_euler=diffusers.EulerDiscreteScheduler,
-            k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
-            k_heun=diffusers.HeunDiscreteScheduler,
-            k_lms=diffusers.LMSDiscreteScheduler,
-            plms=diffusers.PNDMScheduler,
+            k_dpmpp_2=(diffusers.DPMSolverMultistepScheduler, dict()),
+            k_euler=(diffusers.EulerDiscreteScheduler, dict()),
+            k_euler_a=(diffusers.EulerAncestralDiscreteScheduler, dict()),
+            k_heun=(diffusers.HeunDiscreteScheduler, dict()),
+            k_lms=(diffusers.LMSDiscreteScheduler, dict()),
+            plms=(diffusers.PNDMScheduler, dict()),
+            unipc=(diffusers.UniPCMultistepScheduler, dict(cpu_only=True))
         )

         if self.sampler_name in scheduler_map:
-            sampler_class = scheduler_map[self.sampler_name]
+            sampler_class, sampler_extra_config = scheduler_map[self.sampler_name]
             msg = (
                 f"Setting Sampler to {self.sampler_name} ({sampler_class.__name__})"
             )
-            self.sampler = sampler_class.from_config(self.model.scheduler.config)
+            self.sampler = sampler_class.from_config({**self.model.scheduler.config, **sampler_extra_config})
         else:
             msg = (
                 f" Unsupported Sampler: {self.sampler_name} "+
@@ -72,17 +72,18 @@ class InvokeAIGeneratorOutput:
 # old code that calls Generate will continue to work.
 class InvokeAIGenerator(metaclass=ABCMeta):
     scheduler_map = dict(
-        ddim=diffusers.DDIMScheduler,
-        dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-        k_dpm_2=diffusers.KDPM2DiscreteScheduler,
-        k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
-        k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-        k_euler=diffusers.EulerDiscreteScheduler,
-        k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
-        k_heun=diffusers.HeunDiscreteScheduler,
-        k_lms=diffusers.LMSDiscreteScheduler,
-        plms=diffusers.PNDMScheduler,
-    )
+        ddim=(diffusers.DDIMScheduler, dict()),
+        dpmpp_2=(diffusers.DPMSolverMultistepScheduler, dict()),
+        k_dpm_2=(diffusers.KDPM2DiscreteScheduler, dict()),
+        k_dpm_2_a=(diffusers.KDPM2AncestralDiscreteScheduler, dict()),
+        k_dpmpp_2=(diffusers.DPMSolverMultistepScheduler, dict()),
+        k_euler=(diffusers.EulerDiscreteScheduler, dict()),
+        k_euler_a=(diffusers.EulerAncestralDiscreteScheduler, dict()),
+        k_heun=(diffusers.HeunDiscreteScheduler, dict()),
+        k_lms=(diffusers.LMSDiscreteScheduler, dict()),
+        plms=(diffusers.PNDMScheduler, dict()),
+        unipc=(diffusers.UniPCMultistepScheduler, dict(cpu_only=True))
+    )

     def __init__(self,
                  model_info: dict,
@@ -181,8 +182,9 @@ class InvokeAIGenerator(metaclass=ABCMeta):
         return generator_class(model, self.params.precision)

     def get_scheduler(self, scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
-        scheduler_class = self.scheduler_map.get(scheduler_name,'ddim')
-        scheduler = scheduler_class.from_config(model.scheduler.config)
+        scheduler_class, scheduler_extra_config = self.scheduler_map.get(scheduler_name,'ddim')
+        scheduler_config = {**model.scheduler.config, **scheduler_extra_config}
+        scheduler = scheduler_class.from_config(scheduler_config)
         # hack copied over from generate.py
         if not hasattr(scheduler, 'uses_inpainting_model'):
             scheduler.uses_inpainting_model = lambda: False
@@ -47,6 +47,7 @@ from diffusers import (
     LDMTextToImagePipeline,
     LMSDiscreteScheduler,
     PNDMScheduler,
+    UniPCMultistepScheduler,
     StableDiffusionPipeline,
     UNet2DConditionModel,
 )
@@ -1209,6 +1210,8 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
         scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config)
     elif scheduler_type == "dpm":
         scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
+    elif scheduler_type == 'unipc':
+        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
     elif scheduler_type == "ddim":
         scheduler = scheduler
     else:
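The conversion path gains a matching `unipc` branch, so a checkpoint imported through load_pipeline_from_original_stable_diffusion_ckpt can be given a UniPC scheduler at conversion time. A hedged usage sketch, assuming the vendored function keeps the upstream checkpoint_path/scheduler_type parameters (the .ckpt path is illustrative):

    # Hypothetical call; all other parameters left at their defaults.
    pipe = load_pipeline_from_original_stable_diffusion_ckpt(
        checkpoint_path="path/to/model.ckpt",
        scheduler_type="unipc",
    )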
@@ -509,10 +509,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         run_id=None,
         callback: Callable[[PipelineIntermediateState], None] = None,
     ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
+        if self.scheduler.config.get("cpu_only", False):
+            scheduler_device = torch.device('cpu')
+        else:
+            scheduler_device = self._model_group.device_for(self.unet)
+
         if timesteps is None:
-            self.scheduler.set_timesteps(
-                num_inference_steps, device=self._model_group.device_for(self.unet)
-            )
+            self.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
             timesteps = self.scheduler.timesteps
         infer_latents_from_embeddings = GeneratorToCallbackinator(
             self.generate_latents_from_embeddings, PipelineIntermediateState
@@ -725,12 +728,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         noise: torch.Tensor,
         run_id=None,
         callback=None,
     ) -> InvokeAIStableDiffusionPipelineOutput:
-        timesteps, _ = self.get_img2img_timesteps(
-            num_inference_steps,
-            strength,
-            device=self._model_group.device_for(self.unet),
-        )
+        timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)
         result_latents, result_attention_maps = self.latents_from_embeddings(
             latents=initial_latents if strength < 1.0 else torch.zeros_like(
                 initial_latents, device=initial_latents.device, dtype=initial_latents.dtype
@@ -756,13 +755,19 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         return self.check_for_safety(output, dtype=conditioning_data.dtype)

     def get_img2img_timesteps(
-        self, num_inference_steps: int, strength: float, device
+        self, num_inference_steps: int, strength: float, device=None
     ) -> (torch.Tensor, int):
         img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components)
         assert img2img_pipeline.scheduler is self.scheduler
-        img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=device)
+
+        if self.scheduler.config.get("cpu_only", False):
+            scheduler_device = torch.device('cpu')
+        else:
+            scheduler_device = self._model_group.device_for(self.unet)
+
+        img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
         timesteps, adjusted_steps = img2img_pipeline.get_timesteps(
-            num_inference_steps, strength, device=device
+            num_inference_steps, strength, device=scheduler_device
         )
         # Workaround for low strength resulting in zero timesteps.
         # TODO: submit upstream fix for zero-step img2img
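The pipeline hunks above share one pattern: callers no longer pass a device for timestep setup. Instead, the pipeline checks the scheduler config for the cpu_only flag that the `unipc` map entry carries, generates timesteps on the CPU for that scheduler, and lets every other scheduler follow the UNet's device. A small sketch of that selection logic, not part of the commit, assuming the same attributes used in the surrounding class:

    import torch

    def _scheduler_device(pipeline) -> torch.device:
        # UniPC is registered with dict(cpu_only=True); other schedulers
        # keep using whatever device the UNet is loaded on.
        if pipeline.scheduler.config.get("cpu_only", False):
            return torch.device("cpu")
        return pipeline._model_group.device_for(pipeline.unet)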
@@ -796,9 +801,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         if init_image.dim() == 3:
             init_image = init_image.unsqueeze(0)

-        timesteps, _ = self.get_img2img_timesteps(
-            num_inference_steps, strength, device=device
-        )
+        timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)

         # 6. Prepare latent variables
         # can't quite use upstream StableDiffusionImg2ImgPipeline.prepare_latents
@@ -15,6 +15,7 @@ SAMPLER_CHOICES = [
     "plms",
     # diffusers:
     "pndm",
+    "unipc"
 ]

@@ -11,6 +11,7 @@ export const DIFFUSERS_SCHEDULERS: Array<string> = [
   'k_euler',
   'k_euler_a',
   'k_heun',
+  'unipc',
 ];

 // Valid image widths