From 9e7b470189e3f8f9cee7baace4ccb37be9f8d61e Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sun, 21 Jul 2024 20:45:55 +0300 Subject: [PATCH 01/28] Handle inpaint models --- invokeai/app/invocations/denoise_latents.py | 7 ++ .../extensions/inpaint_model.py | 66 +++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 invokeai/backend/stable_diffusion/extensions/inpaint_model.py diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index ccacc3303c..1f28252408 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -58,6 +58,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( from invokeai.backend.stable_diffusion.diffusion.custom_atttention import CustomAttnProcessor2_0 from invokeai.backend.stable_diffusion.diffusion_backend import StableDiffusionBackend from invokeai.backend.stable_diffusion.extension_callback_type import ExtensionCallbackType +from invokeai.backend.stable_diffusion.extensions.inpaint_model import InpaintModelExt from invokeai.backend.stable_diffusion.extensions.preview import PreviewExt from invokeai.backend.stable_diffusion.extensions_manager import ExtensionsManager from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP @@ -790,6 +791,12 @@ class DenoiseLatentsInvocation(BaseInvocation): ext_manager.add_extension(PreviewExt(step_callback)) + ### inpaint + # TODO: add inpainting on normal model + mask, masked_latents, is_gradient_mask = self.prep_inpaint_mask(context, latents) + if unet_config.variant == "inpaint": # ModelVariantType.Inpaint: + ext_manager.add_extension(InpaintModelExt(mask, masked_latents, is_gradient_mask)) + # ext: t2i/ip adapter ext_manager.run_callback(ExtensionCallbackType.SETUP, denoise_ctx) diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py new file mode 100644 index 0000000000..190e0fa931 --- /dev/null +++ b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py @@ -0,0 +1,66 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Optional + +import torch +from diffusers import UNet2DConditionModel + +from invokeai.backend.stable_diffusion.extension_callback_type import ExtensionCallbackType +from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase, callback + +if TYPE_CHECKING: + from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext + + +class InpaintModelExt(ExtensionBase): + def __init__( + self, + mask: Optional[torch.Tensor], + masked_latents: Optional[torch.Tensor], + is_gradient_mask: bool, + ): + super().__init__() + self.mask = mask + self.masked_latents = masked_latents + self.is_gradient_mask = is_gradient_mask + + @staticmethod + def _is_inpaint_model(unet: UNet2DConditionModel): + return unet.conv_in.in_channels == 9 + + @callback(ExtensionCallbackType.PRE_DENOISE_LOOP) + def init_tensors(self, ctx: DenoiseContext): + if not self._is_inpaint_model(ctx.unet): + raise Exception("InpaintModelExt should be used only on inpaint model!") + + if self.mask is None: + self.mask = torch.ones_like(ctx.latents[:1, :1]) + self.mask = self.mask.to(device=ctx.latents.device, dtype=ctx.latents.dtype) + + if self.masked_latents is None: + self.masked_latents = torch.zeros_like(ctx.latents[:1]) + self.masked_latents = self.masked_latents.to(device=ctx.latents.device, dtype=ctx.latents.dtype) + + # TODO: any ideas about order 
value? + # do last so that other extensions works with normal latents + @callback(ExtensionCallbackType.PRE_UNET, order=1000) + def append_inpaint_layers(self, ctx: DenoiseContext): + batch_size = ctx.unet_kwargs.sample.shape[0] + b_mask = torch.cat([self.mask] * batch_size) + b_masked_latents = torch.cat([self.masked_latents] * batch_size) + ctx.unet_kwargs.sample = torch.cat( + [ctx.unet_kwargs.sample, b_mask, b_masked_latents], + dim=1, + ) + + # TODO: should here be used order? + # restore unmasked part as inpaint model can change unmasked part slightly + @callback(ExtensionCallbackType.POST_DENOISE_LOOP) + def restore_unmasked(self, ctx: DenoiseContext): + if self.mask is None: + return + + if self.is_gradient_mask: + ctx.latents = torch.where(self.mask > 0, ctx.latents, ctx.inputs.orig_latents) + else: + ctx.latents = torch.lerp(ctx.inputs.orig_latents, ctx.latents, self.mask) From 58f3072b9154f34df1cb6c4a26de3d59882ecd5c Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sun, 21 Jul 2024 22:17:29 +0300 Subject: [PATCH 02/28] Handle inpainting on normal models --- invokeai/app/invocations/denoise_latents.py | 8 +- .../stable_diffusion/extensions/inpaint.py | 91 +++++++++++++++++++ .../extensions/inpaint_model.py | 2 +- 3 files changed, 97 insertions(+), 4 deletions(-) create mode 100644 invokeai/backend/stable_diffusion/extensions/inpaint.py diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index 1f28252408..3a9e0291af 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -37,7 +37,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.backend.ip_adapter.ip_adapter import IPAdapter from invokeai.backend.lora import LoRAModelRaw -from invokeai.backend.model_manager import BaseModelType +from invokeai.backend.model_manager import BaseModelType, ModelVariantType from invokeai.backend.model_patcher import ModelPatcher from invokeai.backend.stable_diffusion import PipelineIntermediateState, set_seamless from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext, DenoiseInputs @@ -58,6 +58,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( from invokeai.backend.stable_diffusion.diffusion.custom_atttention import CustomAttnProcessor2_0 from invokeai.backend.stable_diffusion.diffusion_backend import StableDiffusionBackend from invokeai.backend.stable_diffusion.extension_callback_type import ExtensionCallbackType +from invokeai.backend.stable_diffusion.extensions.inpaint import InpaintExt from invokeai.backend.stable_diffusion.extensions.inpaint_model import InpaintModelExt from invokeai.backend.stable_diffusion.extensions.preview import PreviewExt from invokeai.backend.stable_diffusion.extensions_manager import ExtensionsManager @@ -792,10 +793,11 @@ class DenoiseLatentsInvocation(BaseInvocation): ext_manager.add_extension(PreviewExt(step_callback)) ### inpaint - # TODO: add inpainting on normal model mask, masked_latents, is_gradient_mask = self.prep_inpaint_mask(context, latents) - if unet_config.variant == "inpaint": # ModelVariantType.Inpaint: + if unet_config.variant == ModelVariantType.Inpaint: ext_manager.add_extension(InpaintModelExt(mask, masked_latents, is_gradient_mask)) + elif mask is not None: + ext_manager.add_extension(InpaintExt(mask, is_gradient_mask)) # ext: t2i/ip adapter 
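For orientation, the extension mechanism these patches build on is a callback dispatcher: each extension registers methods decorated with @callback(ExtensionCallbackType.X, order=N), and the manager runs every handler registered for a given callback type, in ascending `order`, at the matching point in the denoise loop. The sketch below is a hypothetical, minimal stand-in for the real ExtensionsManager/ExtensionBase pair (the `callbacks()` accessor is invented for illustration; the actual API lives in extensions_manager.py and extensions/base.py):

    from collections import defaultdict

    class MiniExtensionsManager:
        # Minimal illustration of the dispatch pattern only - not the real API.
        def __init__(self):
            self._handlers = defaultdict(list)  # callback_type -> [(order, fn), ...]

        def add_extension(self, ext):
            for cb_type, order, fn in ext.callbacks():  # assumed accessor
                self._handlers[cb_type].append((order, fn))

        def run_callback(self, cb_type, ctx):
            # Lower `order` runs first; ties keep registration order (sorted is stable).
            for _, fn in sorted(self._handlers[cb_type], key=lambda pair: pair[0]):
                fn(ctx)

This is why InpaintModelExt can ask to run last on PRE_UNET with order=1000 while leaving its other callbacks at the default.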
ext_manager.run_callback(ExtensionCallbackType.SETUP, denoise_ctx) diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint.py b/invokeai/backend/stable_diffusion/extensions/inpaint.py new file mode 100644 index 0000000000..5ef81f2c03 --- /dev/null +++ b/invokeai/backend/stable_diffusion/extensions/inpaint.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import einops +import torch +from diffusers import UNet2DConditionModel + +from invokeai.backend.stable_diffusion.extension_callback_type import ExtensionCallbackType +from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase, callback + +if TYPE_CHECKING: + from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext + + +class InpaintExt(ExtensionBase): + def __init__( + self, + mask: torch.Tensor, + is_gradient_mask: bool, + ): + super().__init__() + self.mask = mask + self.is_gradient_mask = is_gradient_mask + + @staticmethod + def _is_normal_model(unet: UNet2DConditionModel): + return unet.conv_in.in_channels == 4 + + def _apply_mask(self, ctx: DenoiseContext, latents: torch.Tensor, t: torch.Tensor) -> torch.Tensor: + batch_size = latents.size(0) + mask = einops.repeat(self.mask, "b c h w -> (repeat b) c h w", repeat=batch_size) + if t.dim() == 0: + # some schedulers expect t to be one-dimensional. + # TODO: file diffusers bug about inconsistency? + t = einops.repeat(t, "-> batch", batch=batch_size) + # Noise shouldn't be re-randomized between steps here. The multistep schedulers + # get very confused about what is happening from step to step when we do that. + mask_latents = ctx.scheduler.add_noise(ctx.inputs.orig_latents, self.noise, t) + # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already? 
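+        # For reference, DDPM-style schedulers in diffusers implement add_noise as
+        #   noisy = sqrt(alpha_bar_t) * sample + sqrt(1 - alpha_bar_t) * noise,
+        # i.e. the result already lives in "sample" space, while scale_model_input
+        # only rescales the UNet *input* for schedulers that need it (roughly
+        # sample / sqrt(sigma**2 + 1) for Euler-style schedulers). So the answer to
+        # the TODO above likely depends on the scheduler family; this is a reading
+        # of the diffusers sources, not a verified conclusion.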
+ # mask_latents = self.scheduler.scale_model_input(mask_latents, t) + mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size) + if self.is_gradient_mask: + threshhold = (t.item()) / ctx.scheduler.config.num_train_timesteps + mask_bool = mask > threshhold # I don't know when mask got inverted, but it did + masked_input = torch.where(mask_bool, latents, mask_latents) + else: + masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) + return masked_input + + @callback(ExtensionCallbackType.PRE_DENOISE_LOOP) + def init_tensors(self, ctx: DenoiseContext): + if not self._is_normal_model(ctx.unet): + raise Exception("InpaintExt should be used only on normal models!") + + self.mask = self.mask.to(device=ctx.latents.device, dtype=ctx.latents.dtype) + + self.noise = ctx.inputs.noise + if self.noise is None: + self.noise = torch.randn( + ctx.latents.shape, + dtype=torch.float32, + device="cpu", + generator=torch.Generator(device="cpu").manual_seed(ctx.seed), + ).to(device=ctx.latents.device, dtype=ctx.latents.dtype) + + # TODO: order value + @callback(ExtensionCallbackType.PRE_STEP, order=-100) + def apply_mask_to_initial_latents(self, ctx: DenoiseContext): + ctx.latents = self._apply_mask(ctx, ctx.latents, ctx.timestep) + + # TODO: order value + # TODO: redo this with preview events rewrite + @callback(ExtensionCallbackType.POST_STEP, order=-100) + def apply_mask_to_step_output(self, ctx: DenoiseContext): + timestep = ctx.scheduler.timesteps[-1] + if hasattr(ctx.step_output, "denoised"): + ctx.step_output.denoised = self._apply_mask(ctx, ctx.step_output.denoised, timestep) + elif hasattr(ctx.step_output, "pred_original_sample"): + ctx.step_output.pred_original_sample = self._apply_mask(ctx, ctx.step_output.pred_original_sample, timestep) + else: + ctx.step_output.pred_original_sample = self._apply_mask(ctx, ctx.step_output.prev_sample, timestep) + + # TODO: should here be used order? 
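+        # Re: the order TODO - callbacks of the same type run in ascending `order`
+        # (lower runs earlier; InpaintModelExt uses order=1000 on PRE_UNET to run
+        # last). Nothing else in this series hooks POST_DENOISE_LOOP on normal
+        # models, so the default order appears safe here; an explicit late value
+        # would only matter once another extension also edits ctx.latents after
+        # the loop.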
+ # restore unmasked part after the last step is completed + @callback(ExtensionCallbackType.POST_DENOISE_LOOP) + def restore_unmasked(self, ctx: DenoiseContext): + if self.is_gradient_mask: + ctx.latents = torch.where(self.mask > 0, ctx.latents, ctx.inputs.orig_latents) + else: + ctx.latents = torch.lerp(ctx.inputs.orig_latents, ctx.latents, self.mask) diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py index 190e0fa931..b1cf8fa476 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py @@ -31,7 +31,7 @@ class InpaintModelExt(ExtensionBase): @callback(ExtensionCallbackType.PRE_DENOISE_LOOP) def init_tensors(self, ctx: DenoiseContext): if not self._is_inpaint_model(ctx.unet): - raise Exception("InpaintModelExt should be used only on inpaint model!") + raise Exception("InpaintModelExt should be used only on inpaint models!") if self.mask is None: self.mask = torch.ones_like(ctx.latents[:1, :1]) From 5003e5d763671d99893b63f8f7bb8d5b0b1f9aa9 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 22 Jul 2024 23:47:39 +0300 Subject: [PATCH 03/28] Same changes as in other PRs, add check for running inpainting on inpaint model without source image Co-Authored-By: Ryan Dick <14897797+RyanJDick@users.noreply.github.com> --- invokeai/app/invocations/denoise_latents.py | 2 +- .../stable_diffusion/extensions/inpaint.py | 27 ++++++++------- .../extensions/inpaint_model.py | 34 +++++++++---------- 3 files changed, 32 insertions(+), 31 deletions(-) diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index 3a9e0291af..eb1ee44bda 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -718,7 +718,7 @@ class DenoiseLatentsInvocation(BaseInvocation): return seed, noise, latents def invoke(self, context: InvocationContext) -> LatentsOutput: - if os.environ.get("USE_MODULAR_DENOISE", False): + if os.environ.get("USE_MODULAR_DENOISE", True): return self._new_invoke(context) else: return self._old_invoke(context) diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint.py b/invokeai/backend/stable_diffusion/extensions/inpaint.py index 5ef81f2c03..27ea0a4ed6 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import einops import torch @@ -20,8 +20,9 @@ class InpaintExt(ExtensionBase): is_gradient_mask: bool, ): super().__init__() - self.mask = mask - self.is_gradient_mask = is_gradient_mask + self._mask = mask + self._is_gradient_mask = is_gradient_mask + self._noise: Optional[torch.Tensor] = None @staticmethod def _is_normal_model(unet: UNet2DConditionModel): @@ -29,18 +30,18 @@ class InpaintExt(ExtensionBase): def _apply_mask(self, ctx: DenoiseContext, latents: torch.Tensor, t: torch.Tensor) -> torch.Tensor: batch_size = latents.size(0) - mask = einops.repeat(self.mask, "b c h w -> (repeat b) c h w", repeat=batch_size) + mask = einops.repeat(self._mask, "b c h w -> (repeat b) c h w", repeat=batch_size) if t.dim() == 0: # some schedulers expect t to be one-dimensional. # TODO: file diffusers bug about inconsistency? t = einops.repeat(t, "-> batch", batch=batch_size) # Noise shouldn't be re-randomized between steps here. 
The multistep schedulers # get very confused about what is happening from step to step when we do that. - mask_latents = ctx.scheduler.add_noise(ctx.inputs.orig_latents, self.noise, t) + mask_latents = ctx.scheduler.add_noise(ctx.inputs.orig_latents, self._noise, t) # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already? # mask_latents = self.scheduler.scale_model_input(mask_latents, t) mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size) - if self.is_gradient_mask: + if self._is_gradient_mask: threshhold = (t.item()) / ctx.scheduler.config.num_train_timesteps mask_bool = mask > threshhold # I don't know when mask got inverted, but it did masked_input = torch.where(mask_bool, latents, mask_latents) @@ -53,11 +54,11 @@ class InpaintExt(ExtensionBase): if not self._is_normal_model(ctx.unet): raise Exception("InpaintExt should be used only on normal models!") - self.mask = self.mask.to(device=ctx.latents.device, dtype=ctx.latents.dtype) + self._mask = self._mask.to(device=ctx.latents.device, dtype=ctx.latents.dtype) - self.noise = ctx.inputs.noise - if self.noise is None: - self.noise = torch.randn( + self._noise = ctx.inputs.noise + if self._noise is None: + self._noise = torch.randn( ctx.latents.shape, dtype=torch.float32, device="cpu", @@ -85,7 +86,7 @@ class InpaintExt(ExtensionBase): # restore unmasked part after the last step is completed @callback(ExtensionCallbackType.POST_DENOISE_LOOP) def restore_unmasked(self, ctx: DenoiseContext): - if self.is_gradient_mask: - ctx.latents = torch.where(self.mask > 0, ctx.latents, ctx.inputs.orig_latents) + if self._is_gradient_mask: + ctx.latents = torch.where(self._mask > 0, ctx.latents, ctx.inputs.orig_latents) else: - ctx.latents = torch.lerp(ctx.inputs.orig_latents, ctx.latents, self.mask) + ctx.latents = torch.lerp(ctx.inputs.orig_latents, ctx.latents, self._mask) diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py index b1cf8fa476..9be259408f 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py @@ -20,9 +20,12 @@ class InpaintModelExt(ExtensionBase): is_gradient_mask: bool, ): super().__init__() - self.mask = mask - self.masked_latents = masked_latents - self.is_gradient_mask = is_gradient_mask + if mask is not None and masked_latents is None: + raise ValueError("Source image required for inpaint mask when inpaint model used!") + + self._mask = mask + self._masked_latents = masked_latents + self._is_gradient_mask = is_gradient_mask @staticmethod def _is_inpaint_model(unet: UNet2DConditionModel): @@ -33,21 +36,21 @@ class InpaintModelExt(ExtensionBase): if not self._is_inpaint_model(ctx.unet): raise Exception("InpaintModelExt should be used only on inpaint models!") - if self.mask is None: - self.mask = torch.ones_like(ctx.latents[:1, :1]) - self.mask = self.mask.to(device=ctx.latents.device, dtype=ctx.latents.dtype) + if self._mask is None: + self._mask = torch.ones_like(ctx.latents[:1, :1]) + self._mask = self._mask.to(device=ctx.latents.device, dtype=ctx.latents.dtype) - if self.masked_latents is None: - self.masked_latents = torch.zeros_like(ctx.latents[:1]) - self.masked_latents = self.masked_latents.to(device=ctx.latents.device, dtype=ctx.latents.dtype) + if self._masked_latents is None: + self._masked_latents = torch.zeros_like(ctx.latents[:1]) + self._masked_latents = 
self._masked_latents.to(device=ctx.latents.device, dtype=ctx.latents.dtype) # TODO: any ideas about order value? # do last so that other extensions works with normal latents @callback(ExtensionCallbackType.PRE_UNET, order=1000) def append_inpaint_layers(self, ctx: DenoiseContext): batch_size = ctx.unet_kwargs.sample.shape[0] - b_mask = torch.cat([self.mask] * batch_size) - b_masked_latents = torch.cat([self.masked_latents] * batch_size) + b_mask = torch.cat([self._mask] * batch_size) + b_masked_latents = torch.cat([self._masked_latents] * batch_size) ctx.unet_kwargs.sample = torch.cat( [ctx.unet_kwargs.sample, b_mask, b_masked_latents], dim=1, @@ -57,10 +60,7 @@ class InpaintModelExt(ExtensionBase): # restore unmasked part as inpaint model can change unmasked part slightly @callback(ExtensionCallbackType.POST_DENOISE_LOOP) def restore_unmasked(self, ctx: DenoiseContext): - if self.mask is None: - return - - if self.is_gradient_mask: - ctx.latents = torch.where(self.mask > 0, ctx.latents, ctx.inputs.orig_latents) + if self._is_gradient_mask: + ctx.latents = torch.where(self._mask > 0, ctx.latents, ctx.inputs.orig_latents) else: - ctx.latents = torch.lerp(ctx.inputs.orig_latents, ctx.latents, self.mask) + ctx.latents = torch.lerp(ctx.inputs.orig_latents, ctx.latents, self._mask) From 87eb0183807b05fe4843aa6718e9a7e1e18ee320 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 22 Jul 2024 23:49:20 +0300 Subject: [PATCH 04/28] Revert debug change --- invokeai/app/invocations/denoise_latents.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index eb1ee44bda..3a9e0291af 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -718,7 +718,7 @@ class DenoiseLatentsInvocation(BaseInvocation): return seed, noise, latents def invoke(self, context: InvocationContext) -> LatentsOutput: - if os.environ.get("USE_MODULAR_DENOISE", True): + if os.environ.get("USE_MODULAR_DENOISE", False): return self._new_invoke(context) else: return self._old_invoke(context) From 62aa064e561b70f65f1a97571915399c1efc8606 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 23 Jul 2024 18:03:59 +0300 Subject: [PATCH 05/28] Handle seamless in modular denoise --- invokeai/app/invocations/denoise_latents.py | 5 ++ .../stable_diffusion/extensions/seamless.py | 75 +++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 invokeai/backend/stable_diffusion/extensions/seamless.py diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index 2787074265..5fa2068bde 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -62,6 +62,7 @@ from invokeai.backend.stable_diffusion.extensions.controlnet import ControlNetEx from invokeai.backend.stable_diffusion.extensions.freeu import FreeUExt from invokeai.backend.stable_diffusion.extensions.preview import PreviewExt from invokeai.backend.stable_diffusion.extensions.rescale_cfg import RescaleCFGExt +from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt from invokeai.backend.stable_diffusion.extensions_manager import ExtensionsManager from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES @@ -833,6 +834,10 @@ class DenoiseLatentsInvocation(BaseInvocation): if self.unet.freeu_config: 
ext_manager.add_extension(FreeUExt(self.unet.freeu_config)) + ### seamless + if self.unet.seamless_axes: + ext_manager.add_extension(SeamlessExt(self.unet.seamless_axes)) + # context for loading additional models with ExitStack() as exit_stack: # later should be smth like: diff --git a/invokeai/backend/stable_diffusion/extensions/seamless.py b/invokeai/backend/stable_diffusion/extensions/seamless.py new file mode 100644 index 0000000000..3e303bc31b --- /dev/null +++ b/invokeai/backend/stable_diffusion/extensions/seamless.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +from contextlib import contextmanager +from typing import Callable, Dict, List, Optional, Tuple + +import torch +import torch.nn as nn +from diffusers import UNet2DConditionModel +from diffusers.models.lora import LoRACompatibleConv + +from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase + + +class SeamlessExt(ExtensionBase): + def __init__( + self, + seamless_axes: List[str], + ): + super().__init__() + self._seamless_axes = seamless_axes + + @contextmanager + def patch_unet(self, unet: UNet2DConditionModel, cached_weights: Optional[Dict[str, torch.Tensor]] = None): + with self.static_patch_model( + model=unet, + seamless_axes=self._seamless_axes, + ): + yield + + @staticmethod + @contextmanager + def static_patch_model( + model: torch.nn.Module, + seamless_axes: List[str], + ): + if not seamless_axes: + yield + return + + # override conv_forward + # https://github.com/huggingface/diffusers/issues/556#issuecomment-1993287019 + def _conv_forward_asymmetric( + self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None + ): + self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0) + self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3]) + working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode) + working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode) + return torch.nn.functional.conv2d( + working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups + ) + + original_layers: List[Tuple[nn.Conv2d, Callable]] = [] + + try: + x_mode = "circular" if "x" in seamless_axes else "constant" + y_mode = "circular" if "y" in seamless_axes else "constant" + + conv_layers: List[torch.nn.Conv2d] = [] + + for module in model.modules(): + if isinstance(module, torch.nn.Conv2d): + conv_layers.append(module) + + for layer in conv_layers: + if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None: + layer.lora_layer = lambda *x: 0 + original_layers.append((layer, layer._conv_forward)) + layer._conv_forward = _conv_forward_asymmetric.__get__(layer, torch.nn.Conv2d) + + yield + + finally: + for layer, orig_conv_forward in original_layers: + layer._conv_forward = orig_conv_forward From ca21996a97f58bec1adedc7ebc3278110461a0c9 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 23 Jul 2024 18:04:33 +0300 Subject: [PATCH 06/28] Remove old seamless class --- invokeai/app/invocations/denoise_latents.py | 4 +- invokeai/app/invocations/latents_to_image.py | 4 +- invokeai/backend/stable_diffusion/__init__.py | 2 - invokeai/backend/stable_diffusion/seamless.py | 51 ------------------- 4 files changed, 4 insertions(+), 57 deletions(-) delete mode 100644 invokeai/backend/stable_diffusion/seamless.py diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index 
5fa2068bde..6d8cde8bfa 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -39,7 +39,7 @@ from invokeai.backend.ip_adapter.ip_adapter import IPAdapter from invokeai.backend.lora import LoRAModelRaw from invokeai.backend.model_manager import BaseModelType from invokeai.backend.model_patcher import ModelPatcher -from invokeai.backend.stable_diffusion import PipelineIntermediateState, set_seamless +from invokeai.backend.stable_diffusion import PipelineIntermediateState from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext, DenoiseInputs from invokeai.backend.stable_diffusion.diffusers_pipeline import ( ControlNetData, @@ -920,7 +920,7 @@ class DenoiseLatentsInvocation(BaseInvocation): ExitStack() as exit_stack, unet_info.model_on_device() as (model_state_dict, unet), ModelPatcher.apply_freeu(unet, self.unet.freeu_config), - set_seamless(unet, self.unet.seamless_axes), # FIXME + SeamlessExt.static_patch_model(unet, self.unet.seamless_axes), # FIXME # Apply the LoRA after unet has been moved to its target device for faster patching. ModelPatcher.apply_lora_unet( unet, diff --git a/invokeai/app/invocations/latents_to_image.py b/invokeai/app/invocations/latents_to_image.py index cc8a9c44a3..35b8483f2c 100644 --- a/invokeai/app/invocations/latents_to_image.py +++ b/invokeai/app/invocations/latents_to_image.py @@ -24,7 +24,7 @@ from invokeai.app.invocations.fields import ( from invokeai.app.invocations.model import VAEField from invokeai.app.invocations.primitives import ImageOutput from invokeai.app.services.shared.invocation_context import InvocationContext -from invokeai.backend.stable_diffusion import set_seamless +from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params from invokeai.backend.util.devices import TorchDevice @@ -59,7 +59,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard): vae_info = context.models.load(self.vae.vae) assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny)) - with set_seamless(vae_info.model, self.vae.seamless_axes), vae_info as vae: + with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae: assert isinstance(vae, (AutoencoderKL, AutoencoderTiny)) latents = latents.to(vae.device) if self.fp32: diff --git a/invokeai/backend/stable_diffusion/__init__.py b/invokeai/backend/stable_diffusion/__init__.py index 440cb4410b..6a6f2ebc49 100644 --- a/invokeai/backend/stable_diffusion/__init__.py +++ b/invokeai/backend/stable_diffusion/__init__.py @@ -7,11 +7,9 @@ from invokeai.backend.stable_diffusion.diffusers_pipeline import ( # noqa: F401 StableDiffusionGeneratorPipeline, ) from invokeai.backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent # noqa: F401 -from invokeai.backend.stable_diffusion.seamless import set_seamless # noqa: F401 __all__ = [ "PipelineIntermediateState", "StableDiffusionGeneratorPipeline", "InvokeAIDiffuserComponent", - "set_seamless", ] diff --git a/invokeai/backend/stable_diffusion/seamless.py b/invokeai/backend/stable_diffusion/seamless.py deleted file mode 100644 index 23ed978c6d..0000000000 --- a/invokeai/backend/stable_diffusion/seamless.py +++ /dev/null @@ -1,51 +0,0 @@ -from contextlib import contextmanager -from typing import Callable, List, Optional, Tuple, Union - -import torch -import torch.nn as nn -from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL 
-from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny -from diffusers.models.lora import LoRACompatibleConv -from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel - - -@contextmanager -def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL, AutoencoderTiny], seamless_axes: List[str]): - if not seamless_axes: - yield - return - - # override conv_forward - # https://github.com/huggingface/diffusers/issues/556#issuecomment-1993287019 - def _conv_forward_asymmetric(self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None): - self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0) - self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3]) - working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode) - working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode) - return torch.nn.functional.conv2d( - working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups - ) - - original_layers: List[Tuple[nn.Conv2d, Callable]] = [] - - try: - x_mode = "circular" if "x" in seamless_axes else "constant" - y_mode = "circular" if "y" in seamless_axes else "constant" - - conv_layers: List[torch.nn.Conv2d] = [] - - for module in model.modules(): - if isinstance(module, torch.nn.Conv2d): - conv_layers.append(module) - - for layer in conv_layers: - if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None: - layer.lora_layer = lambda *x: 0 - original_layers.append((layer, layer._conv_forward)) - layer._conv_forward = _conv_forward_asymmetric.__get__(layer, torch.nn.Conv2d) - - yield - - finally: - for layer, orig_conv_forward in original_layers: - layer._conv_forward = orig_conv_forward From 9d1fcba415d29c7f3d29c55a8f9ba1c5f9274193 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 23 Jul 2024 23:29:28 +0300 Subject: [PATCH 07/28] Fix create gradient mask node output --- invokeai/app/invocations/create_gradient_mask.py | 1 + 1 file changed, 1 insertion(+) diff --git a/invokeai/app/invocations/create_gradient_mask.py b/invokeai/app/invocations/create_gradient_mask.py index 089313463b..3b0afec197 100644 --- a/invokeai/app/invocations/create_gradient_mask.py +++ b/invokeai/app/invocations/create_gradient_mask.py @@ -93,6 +93,7 @@ class CreateGradientMaskInvocation(BaseInvocation): # redistribute blur so that the original edges are 0 and blur outwards to 1 blur_tensor = (blur_tensor - 0.5) * 2 + blur_tensor[blur_tensor < 0] = 0.0 threshold = 1 - self.minimum_denoise From c323a760a5bed1c5ecf701458b126927e66cf7b5 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 23 Jul 2024 23:34:28 +0300 Subject: [PATCH 08/28] Suggested changes Co-Authored-By: Ryan Dick <14897797+RyanJDick@users.noreply.github.com> --- invokeai/app/invocations/denoise_latents.py | 39 ++++++++++--------- .../stable_diffusion/extensions/inpaint.py | 30 ++++++++++++-- .../extensions/inpaint_model.py | 22 ++++++++++- 3 files changed, 68 insertions(+), 23 deletions(-) diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index 3a9e0291af..0d9293be02 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -732,10 +732,6 @@ class DenoiseLatentsInvocation(BaseInvocation): dtype = TorchDevice.choose_torch_dtype() seed, noise, latents = self.prepare_noise_and_latents(context, self.noise, self.latents) - 
latents = latents.to(device=device, dtype=dtype) - if noise is not None: - noise = noise.to(device=device, dtype=dtype) - _, _, latent_height, latent_width = latents.shape conditioning_data = self.get_conditioning_data( @@ -768,21 +764,6 @@ class DenoiseLatentsInvocation(BaseInvocation): denoising_end=self.denoising_end, ) - denoise_ctx = DenoiseContext( - inputs=DenoiseInputs( - orig_latents=latents, - timesteps=timesteps, - init_timestep=init_timestep, - noise=noise, - seed=seed, - scheduler_step_kwargs=scheduler_step_kwargs, - conditioning_data=conditioning_data, - attention_processor_cls=CustomAttnProcessor2_0, - ), - unet=None, - scheduler=scheduler, - ) - # get the unet's config so that we can pass the base to sd_step_callback() unet_config = context.models.get_config(self.unet.unet.key) @@ -799,6 +780,26 @@ class DenoiseLatentsInvocation(BaseInvocation): elif mask is not None: ext_manager.add_extension(InpaintExt(mask, is_gradient_mask)) + # Initialize context for modular denoise + latents = latents.to(device=device, dtype=dtype) + if noise is not None: + noise = noise.to(device=device, dtype=dtype) + + denoise_ctx = DenoiseContext( + inputs=DenoiseInputs( + orig_latents=latents, + timesteps=timesteps, + init_timestep=init_timestep, + noise=noise, + seed=seed, + scheduler_step_kwargs=scheduler_step_kwargs, + conditioning_data=conditioning_data, + attention_processor_cls=CustomAttnProcessor2_0, + ), + unet=None, + scheduler=scheduler, + ) + # ext: t2i/ip adapter ext_manager.run_callback(ExtensionCallbackType.SETUP, denoise_ctx) diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint.py b/invokeai/backend/stable_diffusion/extensions/inpaint.py index 27ea0a4ed6..fa58958b47 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint.py @@ -14,18 +14,40 @@ if TYPE_CHECKING: class InpaintExt(ExtensionBase): + """An extension for inpainting with non-inpainting models. See `InpaintModelExt` for inpainting with inpainting + models. + """ def __init__( self, mask: torch.Tensor, is_gradient_mask: bool, ): + """Initialize InpaintExt. + Args: + mask (torch.Tensor): The inpainting mask. Shape: (1, 1, latent_height, latent_width). Values are + expected to be in the range [0, 1]. A value of 0 means that the corresponding 'pixel' should not be + inpainted. + is_gradient_mask (bool): If True, mask is interpreted as a gradient mask meaning that the mask values range + from 0 to 1. If False, mask is interpreted as binary mask meaning that the mask values are either 0 or + 1. + """ super().__init__() self._mask = mask self._is_gradient_mask = is_gradient_mask + + # Noise, which used to noisify unmasked part of image + # if noise provided to context, then it will be used + # if no noise provided, then noise will be generated based on seed self._noise: Optional[torch.Tensor] = None @staticmethod def _is_normal_model(unet: UNet2DConditionModel): + """ Checks if the provided UNet belongs to a regular model. 
+ The `in_channels` of a UNet vary depending on model type: + - normal - 4 + - depth - 5 + - inpaint - 9 + """ return unet.conv_in.in_channels == 4 def _apply_mask(self, ctx: DenoiseContext, latents: torch.Tensor, t: torch.Tensor) -> torch.Tensor: @@ -42,8 +64,8 @@ class InpaintExt(ExtensionBase): # mask_latents = self.scheduler.scale_model_input(mask_latents, t) mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size) if self._is_gradient_mask: - threshhold = (t.item()) / ctx.scheduler.config.num_train_timesteps - mask_bool = mask > threshhold # I don't know when mask got inverted, but it did + threshold = (t.item()) / ctx.scheduler.config.num_train_timesteps + mask_bool = mask > threshold masked_input = torch.where(mask_bool, latents, mask_latents) else: masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) @@ -52,11 +74,13 @@ class InpaintExt(ExtensionBase): @callback(ExtensionCallbackType.PRE_DENOISE_LOOP) def init_tensors(self, ctx: DenoiseContext): if not self._is_normal_model(ctx.unet): - raise Exception("InpaintExt should be used only on normal models!") + raise ValueError("InpaintExt should be used only on normal models!") self._mask = self._mask.to(device=ctx.latents.device, dtype=ctx.latents.dtype) self._noise = ctx.inputs.noise + # 'noise' might be None if the latents have already been noised (e.g. when running the SDXL refiner). + # We still need noise for inpainting, so we generate it from the seed here. if self._noise is None: self._noise = torch.randn( ctx.latents.shape, diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py index 9be259408f..b5a08a85a8 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py @@ -13,12 +13,26 @@ if TYPE_CHECKING: class InpaintModelExt(ExtensionBase): + """An extension for inpainting with inpainting models. See `InpaintExt` for inpainting with non-inpainting + models. + """ def __init__( self, mask: Optional[torch.Tensor], masked_latents: Optional[torch.Tensor], is_gradient_mask: bool, ): + """Initialize InpaintModelExt. + Args: + mask (Optional[torch.Tensor]): The inpainting mask. Shape: (1, 1, latent_height, latent_width). Values are + expected to be in the range [0, 1]. A value of 0 means that the corresponding 'pixel' should not be + inpainted. + masked_latents (Optional[torch.Tensor]): Latents of initial image, with masked out by black color inpainted area. + If mask provided, then too should be provided. Shape: (1, 1, latent_height, latent_width) + is_gradient_mask (bool): If True, mask is interpreted as a gradient mask meaning that the mask values range + from 0 to 1. If False, mask is interpreted as binary mask meaning that the mask values are either 0 or + 1. + """ super().__init__() if mask is not None and masked_latents is None: raise ValueError("Source image required for inpaint mask when inpaint model used!") @@ -29,12 +43,18 @@ class InpaintModelExt(ExtensionBase): @staticmethod def _is_inpaint_model(unet: UNet2DConditionModel): + """ Checks if the provided UNet belongs to a regular model. 
+ The `in_channels` of a UNet vary depending on model type: + - normal - 4 + - depth - 5 + - inpaint - 9 + """ return unet.conv_in.in_channels == 9 @callback(ExtensionCallbackType.PRE_DENOISE_LOOP) def init_tensors(self, ctx: DenoiseContext): if not self._is_inpaint_model(ctx.unet): - raise Exception("InpaintModelExt should be used only on inpaint models!") + raise ValueError("InpaintModelExt should be used only on inpaint models!") if self._mask is None: self._mask = torch.ones_like(ctx.latents[:1, :1]) From 19c00241c6d5bc58ea0058aa90ffb9a22b4f30c7 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Wed, 24 Jul 2024 00:59:13 +0300 Subject: [PATCH 09/28] Use non-inverted mask generally(except inpaint model handling) --- invokeai/app/invocations/denoise_latents.py | 4 +++- .../backend/stable_diffusion/extensions/inpaint.py | 10 +++++----- .../stable_diffusion/extensions/inpaint_model.py | 7 +++++-- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index 0d9293be02..b7a296a9b4 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -674,7 +674,7 @@ class DenoiseLatentsInvocation(BaseInvocation): else: masked_latents = torch.where(mask < 0.5, 0.0, latents) - return 1 - mask, masked_latents, self.denoise_mask.gradient + return mask, masked_latents, self.denoise_mask.gradient @staticmethod def prepare_noise_and_latents( @@ -830,6 +830,8 @@ class DenoiseLatentsInvocation(BaseInvocation): seed, noise, latents = self.prepare_noise_and_latents(context, self.noise, self.latents) mask, masked_latents, gradient_mask = self.prep_inpaint_mask(context, latents) + if mask is not None: + mask = 1 - mask # TODO(ryand): I have hard-coded `do_classifier_free_guidance=True` to mirror the behaviour of ControlNets, # below. Investigate whether this is appropriate. diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint.py b/invokeai/backend/stable_diffusion/extensions/inpaint.py index fa58958b47..6bf155b44f 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint.py @@ -25,7 +25,7 @@ class InpaintExt(ExtensionBase): """Initialize InpaintExt. Args: mask (torch.Tensor): The inpainting mask. Shape: (1, 1, latent_height, latent_width). Values are - expected to be in the range [0, 1]. A value of 0 means that the corresponding 'pixel' should not be + expected to be in the range [0, 1]. A value of 1 means that the corresponding 'pixel' should not be inpainted. is_gradient_mask (bool): If True, mask is interpreted as a gradient mask meaning that the mask values range from 0 to 1. 
If False, mask is interpreted as binary mask meaning that the mask values are either 0 or @@ -65,10 +65,10 @@ class InpaintExt(ExtensionBase): mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size) if self._is_gradient_mask: threshold = (t.item()) / ctx.scheduler.config.num_train_timesteps - mask_bool = mask > threshold + mask_bool = mask < 1 - threshold masked_input = torch.where(mask_bool, latents, mask_latents) else: - masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) + masked_input = torch.lerp(latents, mask_latents.to(dtype=latents.dtype), mask.to(dtype=latents.dtype)) return masked_input @callback(ExtensionCallbackType.PRE_DENOISE_LOOP) @@ -111,6 +111,6 @@ class InpaintExt(ExtensionBase): @callback(ExtensionCallbackType.POST_DENOISE_LOOP) def restore_unmasked(self, ctx: DenoiseContext): if self._is_gradient_mask: - ctx.latents = torch.where(self._mask > 0, ctx.latents, ctx.inputs.orig_latents) + ctx.latents = torch.where(self._mask < 1, ctx.latents, ctx.inputs.orig_latents) else: - ctx.latents = torch.lerp(ctx.inputs.orig_latents, ctx.latents, self._mask) + ctx.latents = torch.lerp(ctx.latents, ctx.inputs.orig_latents, self._mask) diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py index b5a08a85a8..e1cadb0a2e 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py @@ -25,7 +25,7 @@ class InpaintModelExt(ExtensionBase): """Initialize InpaintModelExt. Args: mask (Optional[torch.Tensor]): The inpainting mask. Shape: (1, 1, latent_height, latent_width). Values are - expected to be in the range [0, 1]. A value of 0 means that the corresponding 'pixel' should not be + expected to be in the range [0, 1]. A value of 1 means that the corresponding 'pixel' should not be inpainted. masked_latents (Optional[torch.Tensor]): Latents of initial image, with masked out by black color inpainted area. If mask provided, then too should be provided. Shape: (1, 1, latent_height, latent_width) @@ -37,7 +37,10 @@ class InpaintModelExt(ExtensionBase): if mask is not None and masked_latents is None: raise ValueError("Source image required for inpaint mask when inpaint model used!") - self._mask = mask + # Inverse mask, because inpaint models treat mask as: 0 - remain same, 1 - inpaint + self._mask = None + if mask is not None: + self._mask = 1 - mask self._masked_latents = masked_latents self._is_gradient_mask = is_gradient_mask From 416d29fb839ecdae932e8f9a0907fd04fb5449ca Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Wed, 24 Jul 2024 01:17:28 +0300 Subject: [PATCH 10/28] Ruff format --- invokeai/backend/stable_diffusion/extensions/inpaint.py | 3 ++- invokeai/backend/stable_diffusion/extensions/inpaint_model.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint.py b/invokeai/backend/stable_diffusion/extensions/inpaint.py index 6bf155b44f..7bdd9238df 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint.py @@ -17,6 +17,7 @@ class InpaintExt(ExtensionBase): """An extension for inpainting with non-inpainting models. See `InpaintModelExt` for inpainting with inpainting models. 
""" + def __init__( self, mask: torch.Tensor, @@ -42,7 +43,7 @@ class InpaintExt(ExtensionBase): @staticmethod def _is_normal_model(unet: UNet2DConditionModel): - """ Checks if the provided UNet belongs to a regular model. + """Checks if the provided UNet belongs to a regular model. The `in_channels` of a UNet vary depending on model type: - normal - 4 - depth - 5 diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py index e1cadb0a2e..4a89f8223f 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py @@ -16,6 +16,7 @@ class InpaintModelExt(ExtensionBase): """An extension for inpainting with inpainting models. See `InpaintExt` for inpainting with non-inpainting models. """ + def __init__( self, mask: Optional[torch.Tensor], @@ -46,7 +47,7 @@ class InpaintModelExt(ExtensionBase): @staticmethod def _is_inpaint_model(unet: UNet2DConditionModel): - """ Checks if the provided UNet belongs to a regular model. + """Checks if the provided UNet belongs to a regular model. The `in_channels` of a UNet vary depending on model type: - normal - 4 - depth - 5 From 6af659b1da7402880325193f03ddd42369454c90 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Wed, 24 Jul 2024 02:55:33 +0300 Subject: [PATCH 11/28] Handle t2i adapter in modular denoise --- invokeai/app/invocations/denoise_latents.py | 29 +++++ .../extensions/t2i_adapter.py | 115 ++++++++++++++++++ 2 files changed, 144 insertions(+) create mode 100644 invokeai/backend/stable_diffusion/extensions/t2i_adapter.py diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index 2787074265..e9899a8289 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -62,6 +62,7 @@ from invokeai.backend.stable_diffusion.extensions.controlnet import ControlNetEx from invokeai.backend.stable_diffusion.extensions.freeu import FreeUExt from invokeai.backend.stable_diffusion.extensions.preview import PreviewExt from invokeai.backend.stable_diffusion.extensions.rescale_cfg import RescaleCFGExt +from invokeai.backend.stable_diffusion.extensions.t2i_adapter import T2IAdapterExt from invokeai.backend.stable_diffusion.extensions_manager import ExtensionsManager from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES @@ -498,6 +499,33 @@ class DenoiseLatentsInvocation(BaseInvocation): ) ) + @staticmethod + def parse_t2i_adapter_field( + exit_stack: ExitStack, + context: InvocationContext, + t2i_adapters: Optional[Union[T2IAdapterField, list[T2IAdapterField]]], + ext_manager: ExtensionsManager, + ) -> None: + if t2i_adapters is None: + return + + # Handle the possibility that t2i_adapters could be a list or a single T2IAdapterField. 
+ if isinstance(t2i_adapters, T2IAdapterField): + t2i_adapters = [t2i_adapters] + + for t2i_adapter_field in t2i_adapters: + ext_manager.add_extension( + T2IAdapterExt( + node_context=context, + model_id=t2i_adapter_field.t2i_adapter_model, + image=context.images.get_pil(t2i_adapter_field.image.image_name), + weight=t2i_adapter_field.weight, + begin_step_percent=t2i_adapter_field.begin_step_percent, + end_step_percent=t2i_adapter_field.end_step_percent, + resize_mode=t2i_adapter_field.resize_mode, + ) + ) + def prep_ip_adapter_image_prompts( self, context: InvocationContext, @@ -840,6 +868,7 @@ class DenoiseLatentsInvocation(BaseInvocation): # ext = extension_field.to_extension(exit_stack, context, ext_manager) # ext_manager.add_extension(ext) self.parse_controlnet_field(exit_stack, context, self.control, ext_manager) + self.parse_t2i_adapter_field(exit_stack, context, self.t2i_adapter, ext_manager) # ext: t2i/ip adapter ext_manager.run_callback(ExtensionCallbackType.SETUP, denoise_ctx) diff --git a/invokeai/backend/stable_diffusion/extensions/t2i_adapter.py b/invokeai/backend/stable_diffusion/extensions/t2i_adapter.py new file mode 100644 index 0000000000..6c8b4b7504 --- /dev/null +++ b/invokeai/backend/stable_diffusion/extensions/t2i_adapter.py @@ -0,0 +1,115 @@ +from __future__ import annotations + +import math +from typing import TYPE_CHECKING, List, Optional, Union + +import torch +from diffusers import T2IAdapter +from PIL.Image import Image + +from invokeai.app.util.controlnet_utils import prepare_control_image +from invokeai.backend.model_manager import BaseModelType +from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningMode +from invokeai.backend.stable_diffusion.extension_callback_type import ExtensionCallbackType +from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase, callback + +if TYPE_CHECKING: + from invokeai.app.invocations.model import ModelIdentifierField + from invokeai.app.services.shared.invocation_context import InvocationContext + from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES + from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext + + +class T2IAdapterExt(ExtensionBase): + def __init__( + self, + node_context: InvocationContext, + model_id: ModelIdentifierField, + image: Image, + weight: Union[float, List[float]], + begin_step_percent: float, + end_step_percent: float, + resize_mode: CONTROLNET_RESIZE_VALUES, + ): + super().__init__() + self._node_context = node_context + self._model_id = model_id + self._image = image + self._weight = weight + self._resize_mode = resize_mode + self._begin_step_percent = begin_step_percent + self._end_step_percent = end_step_percent + + self._adapter_state: Optional[List[torch.Tensor]] = None + + # The max_unet_downscale is the maximum amount that the UNet model downscales the latent image internally. 
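+        # Worked example with illustrative numbers: for SD1, a 512x512 image gives
+        # 64x64 latents and max_unet_downscale == 8, so the deepest UNet feature
+        # map is 64 // 8 = 8 per side. _run_model below sizes the adapter input as
+        # (latents_size // max_unet_downscale) * model.total_downscale_factor, so
+        # that the adapter's own deepest feature map lands on that same 8x8 grid.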
+ model_config = self._node_context.models.get_config(self._model_id.key) + if model_config.base == BaseModelType.StableDiffusion1: + self._max_unet_downscale = 8 + elif model_config.base == BaseModelType.StableDiffusionXL: + self._max_unet_downscale = 4 + else: + raise ValueError(f"Unexpected T2I-Adapter base model type: '{model_config.base}'.") + + @callback(ExtensionCallbackType.SETUP) + def setup(self, ctx: DenoiseContext): + t2i_model: T2IAdapter + with self._node_context.models.load(self._model_id) as t2i_model: + _, _, latents_height, latents_width = ctx.inputs.orig_latents.shape + + self._adapter_state = self._run_model( + model=t2i_model, + image=self._image, + latents_height=latents_height, + latents_width=latents_width, + max_unet_downscale=self._max_unet_downscale, + resize_mode=self._resize_mode, + ) + + def _run_model( + self, + model: T2IAdapter, + image: Image, + latents_height: int, + latents_width: int, + max_unet_downscale: int, + resize_mode: CONTROLNET_RESIZE_VALUES, + ): + input_height = latents_height // max_unet_downscale * model.total_downscale_factor + input_width = latents_width // max_unet_downscale * model.total_downscale_factor + + t2i_image = prepare_control_image( + image=image, + do_classifier_free_guidance=False, + width=input_width, + height=input_height, + num_channels=model.config["in_channels"], # mypy treats this as a FrozenDict + device=model.device, + dtype=model.dtype, + resize_mode=resize_mode, + ) + + return model(t2i_image) + + @callback(ExtensionCallbackType.PRE_UNET) + def pre_unet_step(self, ctx: DenoiseContext): + # skip if model not active in current step + total_steps = len(ctx.inputs.timesteps) + first_step = math.floor(self._begin_step_percent * total_steps) + last_step = math.ceil(self._end_step_percent * total_steps) + if ctx.step_index < first_step or ctx.step_index > last_step: + return + + weight = self._weight + if isinstance(weight, list): + weight = weight[ctx.step_index] + + adapter_state = self._adapter_state + if ctx.conditioning_mode == ConditioningMode.Both: + adapter_state = [torch.cat([v] * 2) for v in adapter_state] + + if ctx.unet_kwargs.down_intrablock_additional_residuals is None: + ctx.unet_kwargs.down_intrablock_additional_residuals = [v * weight for v in adapter_state] + else: + for i, value in enumerate(adapter_state): + ctx.unet_kwargs.down_intrablock_additional_residuals[i] += value * weight From adf1a977ea57f1fe0b0c079bcdb72995e410959e Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Fri, 26 Jul 2024 19:22:26 +0300 Subject: [PATCH 12/28] Suggested changes Co-Authored-By: Ryan Dick <14897797+RyanJDick@users.noreply.github.com> --- .../stable_diffusion/extensions/seamless.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/invokeai/backend/stable_diffusion/extensions/seamless.py b/invokeai/backend/stable_diffusion/extensions/seamless.py index 3e303bc31b..a96ea6e4d2 100644 --- a/invokeai/backend/stable_diffusion/extensions/seamless.py +++ b/invokeai/backend/stable_diffusion/extensions/seamless.py @@ -37,6 +37,9 @@ class SeamlessExt(ExtensionBase): yield return + x_mode = "circular" if "x" in seamless_axes else "constant" + y_mode = "circular" if "y" in seamless_axes else "constant" + # override conv_forward # https://github.com/huggingface/diffusers/issues/556#issuecomment-1993287019 def _conv_forward_asymmetric( @@ -51,18 +54,11 @@ class SeamlessExt(ExtensionBase): ) original_layers: List[Tuple[nn.Conv2d, Callable]] = [] - try: - x_mode = "circular" if "x" in seamless_axes 
else "constant" - y_mode = "circular" if "y" in seamless_axes else "constant" + for layer in model.modules(): + if not isinstance(layer, torch.nn.Conv2d): + continue - conv_layers: List[torch.nn.Conv2d] = [] - - for module in model.modules(): - if isinstance(module, torch.nn.Conv2d): - conv_layers.append(module) - - for layer in conv_layers: if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None: layer.lora_layer = lambda *x: 0 original_layers.append((layer, layer._conv_forward)) From bd8890be113b8cb4dae8c8bf5cf4222b057365da Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Fri, 26 Jul 2024 19:24:46 +0300 Subject: [PATCH 13/28] Revert "Fix create gradient mask node output" This reverts commit 9d1fcba415d29c7f3d29c55a8f9ba1c5f9274193. --- invokeai/app/invocations/create_gradient_mask.py | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/app/invocations/create_gradient_mask.py b/invokeai/app/invocations/create_gradient_mask.py index 3b0afec197..089313463b 100644 --- a/invokeai/app/invocations/create_gradient_mask.py +++ b/invokeai/app/invocations/create_gradient_mask.py @@ -93,7 +93,6 @@ class CreateGradientMaskInvocation(BaseInvocation): # redistribute blur so that the original edges are 0 and blur outwards to 1 blur_tensor = (blur_tensor - 0.5) * 2 - blur_tensor[blur_tensor < 0] = 0.0 threshold = 1 - self.minimum_denoise From 5810cee6c96292bef76cc7be522995eecf013a28 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Fri, 26 Jul 2024 19:47:28 +0300 Subject: [PATCH 14/28] Suggested changes Co-Authored-By: Ryan Dick <14897797+RyanJDick@users.noreply.github.com> --- invokeai/app/invocations/denoise_latents.py | 6 ++++++ invokeai/backend/stable_diffusion/extensions/inpaint.py | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index b7a296a9b4..c502234e5e 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -775,6 +775,10 @@ class DenoiseLatentsInvocation(BaseInvocation): ### inpaint mask, masked_latents, is_gradient_mask = self.prep_inpaint_mask(context, latents) + # NOTE: We used to identify inpainting models by inpecting the shape of the loaded UNet model weights. Now we + # use the ModelVariantType config. During testing, there was a report of a user with models that had an + # incorrect ModelVariantType value. Re-installing the model fixed the issue. If this issue turns out to be + # prevalent, we will have to revisit how we initialize the inpainting extensions. if unet_config.variant == ModelVariantType.Inpaint: ext_manager.add_extension(InpaintModelExt(mask, masked_latents, is_gradient_mask)) elif mask is not None: @@ -830,6 +834,8 @@ class DenoiseLatentsInvocation(BaseInvocation): seed, noise, latents = self.prepare_noise_and_latents(context, self.noise, self.latents) mask, masked_latents, gradient_mask = self.prep_inpaint_mask(context, latents) + # At this point, the mask ranges from 0 (leave unchanged) to 1 (inpaint). + # We invert the mask here for compatibility with the old backend implementation. 
if mask is not None: mask = 1 - mask diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint.py b/invokeai/backend/stable_diffusion/extensions/inpaint.py index 7bdd9238df..437e06df76 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint.py @@ -75,7 +75,11 @@ class InpaintExt(ExtensionBase): @callback(ExtensionCallbackType.PRE_DENOISE_LOOP) def init_tensors(self, ctx: DenoiseContext): if not self._is_normal_model(ctx.unet): - raise ValueError("InpaintExt should be used only on normal models!") + raise ValueError( + "InpaintExt should be used only on normal (non-inpainting) models. This could be caused by an " + "inpainting model that was incorrectly marked as a non-inpainting model. In some cases, this can be " + "fixed by removing and re-adding the model (so that it gets re-probed)." + ) self._mask = self._mask.to(device=ctx.latents.device, dtype=ctx.latents.dtype) From eb257d2d288433e164a2f3b910fc562fd4e7670d Mon Sep 17 00:00:00 2001 From: chainchompa Date: Fri, 26 Jul 2024 13:34:25 -0400 Subject: [PATCH 15/28] update delete board modal to be more descriptive --- invokeai/frontend/web/public/locales/en.json | 3 ++- .../features/gallery/components/Boards/DeleteBoardModal.tsx | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 659df78d9b..d37d38608f 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -31,7 +31,8 @@ "deleteBoard": "Delete Board", "deleteBoardAndImages": "Delete Board and Images", "deleteBoardOnly": "Delete Board Only", - "deletedBoardsCannotbeRestored": "Deleted boards cannot be restored", + "deletedBoardsCannotbeRestored": "Deleted boards cannot be restored. If only this board is deleted then the remaining images will be uncategorized.", + "deletedPrivateBoardsCannotbeRestored": "Deleted boards cannot be restored. If only this board is deleted then the remaining images will be uncategorized and return to their creator.", "hideBoards": "Hide Boards", "loading": "Loading...", "menuItemAutoAdd": "Auto-add to this Board", diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx index 377636d0d0..3707c24440 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx @@ -120,7 +120,11 @@ const DeleteBoardModal = (props: Props) => { bottomMessage={t('boards.bottomMessage')} /> )} - {t('boards.deletedBoardsCannotbeRestored')} + + {boardToDelete.is_private + ? t('boards.deletedPrivateBoardsCannotbeRestored') + : t('boards.deletedBoardsCannotbeRestored')} + {canRestoreDeletedImagesFromBin ? 
t('gallery.deleteImageBin') : t('gallery.deleteImagePermanent')} From 5b84e117b2970f8e18ac03441bc0ce7ee379b0d0 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Fri, 26 Jul 2024 20:51:12 +0300 Subject: [PATCH 16/28] Suggested changes Co-Authored-By: Ryan Dick <14897797+RyanJDick@users.noreply.github.com> --- .../extensions/t2i_adapter.py | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/invokeai/backend/stable_diffusion/extensions/t2i_adapter.py b/invokeai/backend/stable_diffusion/extensions/t2i_adapter.py index 6c8b4b7504..5c290ea4e7 100644 --- a/invokeai/backend/stable_diffusion/extensions/t2i_adapter.py +++ b/invokeai/backend/stable_diffusion/extensions/t2i_adapter.py @@ -62,8 +62,6 @@ class T2IAdapterExt(ExtensionBase): image=self._image, latents_height=latents_height, latents_width=latents_width, - max_unet_downscale=self._max_unet_downscale, - resize_mode=self._resize_mode, ) def _run_model( @@ -72,21 +70,28 @@ class T2IAdapterExt(ExtensionBase): image: Image, latents_height: int, latents_width: int, - max_unet_downscale: int, - resize_mode: CONTROLNET_RESIZE_VALUES, ): - input_height = latents_height // max_unet_downscale * model.total_downscale_factor - input_width = latents_width // max_unet_downscale * model.total_downscale_factor + # Resize the T2I-Adapter input image. + # We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is applied, the + # result will match the latent image's dimensions after max_unet_downscale is applied. + input_height = latents_height // self._max_unet_downscale * model.total_downscale_factor + input_width = latents_width // self._max_unet_downscale * model.total_downscale_factor + # Note: We have hard-coded `do_classifier_free_guidance=False`. This is because we only want to prepare + # a single image. If CFG is enabled, we will duplicate the resultant tensor after applying the + # T2I-Adapter model. + # + # Note: We re-use the `prepare_control_image(...)` from ControlNet for T2I-Adapter, because it has many + # of the same requirements (e.g. preserving binary masks during resize). 
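+ # Worked example (illustrative only, not part of the original change): with 64x64 latents and
+ # max_unet_downscale == 8, the deepest UNet feature map is 8x8; assuming a total_downscale_factor
+ # of 8, the input image becomes (64 // 8) * 8 = 64 pixels per side, which the adapter then
+ # reduces back to 8x8 to match that feature map.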
t2i_image = prepare_control_image( image=image, do_classifier_free_guidance=False, width=input_width, height=input_height, - num_channels=model.config["in_channels"], # mypy treats this as a FrozenDict + num_channels=model.config["in_channels"], device=model.device, dtype=model.dtype, - resize_mode=resize_mode, + resize_mode=self._resize_mode, ) return model(t2i_image) From daa5a88eb2f8d76e1e9305cbe50a5fa733f77a94 Mon Sep 17 00:00:00 2001 From: brandonrising Date: Fri, 26 Jul 2024 13:19:13 -0400 Subject: [PATCH 17/28] Update docker image to use pnpm version 8 --- docker/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/Dockerfile b/docker/Dockerfile index 864bc5eb60..7ea078af0d 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -55,6 +55,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \ FROM node:20-slim AS web-builder ENV PNPM_HOME="/pnpm" ENV PATH="$PNPM_HOME:$PATH" +RUN corepack use pnpm@8.x RUN corepack enable WORKDIR /build From ed0174fbc6fec3df7f770494626731840dce58c7 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sat, 27 Jul 2024 13:18:28 +0300 Subject: [PATCH 18/28] Suggested changes Co-Authored-By: Ryan Dick <14897797+RyanJDick@users.noreply.github.com> --- invokeai/backend/stable_diffusion/extensions/inpaint.py | 7 +++---- .../backend/stable_diffusion/extensions/inpaint_model.py | 6 ++---- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint.py b/invokeai/backend/stable_diffusion/extensions/inpaint.py index 437e06df76..0079359155 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint.py @@ -94,13 +94,13 @@ class InpaintExt(ExtensionBase): generator=torch.Generator(device="cpu").manual_seed(ctx.seed), ).to(device=ctx.latents.device, dtype=ctx.latents.dtype) - # TODO: order value + # Use negative order to make extensions with default order work with patched latents @callback(ExtensionCallbackType.PRE_STEP, order=-100) def apply_mask_to_initial_latents(self, ctx: DenoiseContext): ctx.latents = self._apply_mask(ctx, ctx.latents, ctx.timestep) - # TODO: order value # TODO: redo this with preview events rewrite + # Use negative order to make extensions with default order work with patched latents @callback(ExtensionCallbackType.POST_STEP, order=-100) def apply_mask_to_step_output(self, ctx: DenoiseContext): timestep = ctx.scheduler.timesteps[-1] @@ -111,8 +111,7 @@ class InpaintExt(ExtensionBase): else: ctx.step_output.pred_original_sample = self._apply_mask(ctx, ctx.step_output.prev_sample, timestep) - # TODO: should here be used order? - # restore unmasked part after the last step is completed + # Restore unmasked part after the last step is completed @callback(ExtensionCallbackType.POST_DENOISE_LOOP) def restore_unmasked(self, ctx: DenoiseContext): if self._is_gradient_mask: diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py index 4a89f8223f..98ee66c458 100644 --- a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py +++ b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py @@ -68,8 +68,7 @@ class InpaintModelExt(ExtensionBase): self._masked_latents = torch.zeros_like(ctx.latents[:1]) self._masked_latents = self._masked_latents.to(device=ctx.latents.device, dtype=ctx.latents.dtype) - # TODO: any ideas about order value? 
- # do last so that other extensions works with normal latents
+ # Use negative order to make extensions with default order work with patched latents
 @callback(ExtensionCallbackType.PRE_UNET, order=1000)
 def append_inpaint_layers(self, ctx: DenoiseContext):
 batch_size = ctx.unet_kwargs.sample.shape[0]
@@ -80,8 +79,7 @@ class InpaintModelExt(ExtensionBase):
 dim=1,
 )

- # TODO: should here be used order?
- # restore unmasked part as inpaint model can change unmasked part slightly
+ # Restore unmasked part as inpaint model can change unmasked part slightly
 @callback(ExtensionCallbackType.POST_DENOISE_LOOP)
 def restore_unmasked(self, ctx: DenoiseContext):
 if self._is_gradient_mask:
From 84d028898cf4cc59f7b15a43fa8746469534cb0e Mon Sep 17 00:00:00 2001
From: Sergey Borisov
Date: Sat, 27 Jul 2024 13:20:58 +0300
Subject: [PATCH 19/28] Revert wrong comment copy

---
 invokeai/backend/stable_diffusion/extensions/inpaint_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py
index 98ee66c458..6ee8ef6311 100644
--- a/invokeai/backend/stable_diffusion/extensions/inpaint_model.py
+++ b/invokeai/backend/stable_diffusion/extensions/inpaint_model.py
@@ -68,7 +68,7 @@ class InpaintModelExt(ExtensionBase):
 self._masked_latents = torch.zeros_like(ctx.latents[:1])
 self._masked_latents = self._masked_latents.to(device=ctx.latents.device, dtype=ctx.latents.dtype)

- # Use negative order to make extensions with default order work with patched latents
+ # Do last so that other extensions works with normal latents
 @callback(ExtensionCallbackType.PRE_UNET, order=1000)
 def append_inpaint_layers(self, ctx: DenoiseContext):
 batch_size = ctx.unet_kwargs.sample.shape[0]
From e3a75a8adf5125d7fc16126b77665b2f9c4cee3b Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Mon, 29 Jul 2024 10:01:53 +1000
Subject: [PATCH 20/28] fix(ui): fix logic to reset selected/auto-add boards
 when toggling show archived boards

The logic was incorrect in two ways:

1. We only ran the logic if we _enable_ showing archived boards. It should be run when we _disable_ showing archived boards.
2. If we couldn't find the selected board in the query cache, we didn't do the reset. This is wrong - if the board isn't in the query cache, we _should_ do the reset. This inverted logic only made sense before the fix for issue 1.
---
 .../listeners/addArchivedOrDeletedBoardListener.ts | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts
index 1581da9b37..d71ee7501b 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts
@@ -71,7 +71,7 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartLis
 const shouldShowArchivedBoards = action.payload;

 // We only need to take action if we have just hidden archived boards.
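 // (Illustrative gloss, not in the original source: `action.payload` carries the new toggle value,
 // so a payload of `true` means archived boards were just shown and no reset is needed.)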
- if (!shouldShowArchivedBoards) {
+ if (shouldShowArchivedBoards) {
 return;
 }

@@ -86,14 +86,16 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartLis

 // Handle the case where selected board is archived
 const selectedBoard = queryResult.data.find((b) => b.board_id === selectedBoardId);
- if (selectedBoard && selectedBoard.archived) {
+ if (!selectedBoard || selectedBoard.archived) {
+ // If we can't find the selected board or it's archived, we should reset the selected board to uncategorized
 dispatch(boardIdSelected({ boardId: 'none' }));
 dispatch(galleryViewChanged('images'));
 }

 // Handle the case where auto-add board is archived
 const autoAddBoard = queryResult.data.find((b) => b.board_id === autoAddBoardId);
- if (autoAddBoard && autoAddBoard.archived) {
+ if (!autoAddBoard || autoAddBoard.archived) {
+ // If we can't find the auto-add board or it's archived, we should reset the auto-add board to uncategorized
 dispatch(autoAddBoardIdChanged('none'));
 }
 },
From 171a4e6d8091b4b6354cb7b29d87e59c878659ea Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Mon, 29 Jul 2024 10:05:42 +1000
Subject: [PATCH 21/28] fix(ui): race condition when deleting a board and
 resetting selected/auto-add

We were checking the selected and auto-add board ids against the query cache to see if they still exist. If not, we reset. This only works if the query cache is updated by the time we do the check - race condition!

We already have the board id from the query args, so there's no need to check the query cache - just compare the deleted board ID directly.

Previously, this file's several listeners were all in a single one and I had adapted/split its logic up a bit wonkily, introducing these problems.
---
 .../addArchivedOrDeletedBoardListener.ts | 32 ++++++++----------
 1 file changed, 13 insertions(+), 19 deletions(-)

diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts
index d71ee7501b..23d3cbc9af 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts
@@ -10,32 +10,32 @@ import {
 import { boardsApi } from 'services/api/endpoints/boards';
 import { imagesApi } from 'services/api/endpoints/images';

+// Type inference doesn't work for this if you inline it in the listener for some reason
+const matchAnyBoardDeleted = isAnyOf(
+ imagesApi.endpoints.deleteBoard.matchFulfilled,
+ imagesApi.endpoints.deleteBoardAndImages.matchFulfilled
+);
+
 export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartListening) => {
 /**
 * The auto-add board shouldn't be set to an archived board or deleted board. When we archive a board, delete
 * a board, or change the archived board visibility flag, we may need to reset the auto-add board.
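 * For example, if the selected or auto-add board is deleted, or is archived while archived boards are
 * hidden, we fall back to the uncategorized board.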
*/ startAppListening({ - matcher: isAnyOf( - // If a board is deleted, we'll need to reset the auto-add board - imagesApi.endpoints.deleteBoard.matchFulfilled, - imagesApi.endpoints.deleteBoardAndImages.matchFulfilled - ), + matcher: matchAnyBoardDeleted, effect: async (action, { dispatch, getState }) => { const state = getState(); - const queryArgs = selectListBoardsQueryArgs(state); - const queryResult = boardsApi.endpoints.listAllBoards.select(queryArgs)(state); + const deletedBoardId = action.meta.arg.originalArgs; const { autoAddBoardId, selectedBoardId } = state.gallery; - if (!queryResult.data) { - return; - } - - if (!queryResult.data.find((board) => board.board_id === selectedBoardId)) { + // If the deleted board was currently selected, we should reset the selected board to uncategorized + if (deletedBoardId === selectedBoardId) { dispatch(boardIdSelected({ boardId: 'none' })); dispatch(galleryViewChanged('images')); } - if (!queryResult.data.find((board) => board.board_id === autoAddBoardId)) { + + // If the deleted board was selected for auto-add, we should reset the auto-add board to uncategorized + if (deletedBoardId === autoAddBoardId) { dispatch(autoAddBoardIdChanged('none')); } }, @@ -46,14 +46,8 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartLis matcher: boardsApi.endpoints.updateBoard.matchFulfilled, effect: async (action, { dispatch, getState }) => { const state = getState(); - const queryArgs = selectListBoardsQueryArgs(state); - const queryResult = boardsApi.endpoints.listAllBoards.select(queryArgs)(state); const { shouldShowArchivedBoards } = state.gallery; - if (!queryResult.data) { - return; - } - const wasArchived = action.meta.arg.originalArgs.changes.archived === true; if (wasArchived && !shouldShowArchivedBoards) { From 86a92bb6b5968219040861d0d45d27d508b349b8 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 29 Jul 2024 15:14:34 +0300 Subject: [PATCH 22/28] Add more karras schedulers --- .../stable_diffusion/schedulers/schedulers.py | 16 ++++++++++++---- .../web/src/features/nodes/types/common.ts | 4 ++++ .../web/src/features/nodes/types/v2/common.ts | 4 ++++ .../src/features/parameters/types/constants.ts | 4 ++++ invokeai/frontend/web/src/services/api/schema.ts | 10 +++++----- 5 files changed, 29 insertions(+), 9 deletions(-) diff --git a/invokeai/backend/stable_diffusion/schedulers/schedulers.py b/invokeai/backend/stable_diffusion/schedulers/schedulers.py index 7d6851e278..52e4145672 100644 --- a/invokeai/backend/stable_diffusion/schedulers/schedulers.py +++ b/invokeai/backend/stable_diffusion/schedulers/schedulers.py @@ -24,6 +24,7 @@ SCHEDULER_NAME_VALUES = Literal[ "ddim", "ddpm", "deis", + "deis_k", "lms", "lms_k", "pndm", @@ -33,7 +34,9 @@ SCHEDULER_NAME_VALUES = Literal[ "euler_k", "euler_a", "kdpm_2", + "kdpm_2_k", "kdpm_2_a", + "kdpm_2_a_k", "dpmpp_2s", "dpmpp_2s_k", "dpmpp_2m", @@ -43,6 +46,7 @@ SCHEDULER_NAME_VALUES = Literal[ "dpmpp_sde", "dpmpp_sde_k", "unipc", + "unipc_k", "lcm", "tcd", ] @@ -50,7 +54,8 @@ SCHEDULER_NAME_VALUES = Literal[ SCHEDULER_MAP: dict[SCHEDULER_NAME_VALUES, tuple[Type[SchedulerMixin], dict[str, Any]]] = { "ddim": (DDIMScheduler, {}), "ddpm": (DDPMScheduler, {}), - "deis": (DEISMultistepScheduler, {}), + "deis": (DEISMultistepScheduler, {"use_karras_sigmas": False}), + "deis_k": (DEISMultistepScheduler, {"use_karras_sigmas": True}), "lms": (LMSDiscreteScheduler, {"use_karras_sigmas": False}), "lms_k": (LMSDiscreteScheduler, {"use_karras_sigmas": True}), "pndm": (PNDMScheduler, {}), @@ 
-59,8 +64,10 @@ SCHEDULER_MAP: dict[SCHEDULER_NAME_VALUES, tuple[Type[SchedulerMixin], dict[str, "euler": (EulerDiscreteScheduler, {"use_karras_sigmas": False}), "euler_k": (EulerDiscreteScheduler, {"use_karras_sigmas": True}), "euler_a": (EulerAncestralDiscreteScheduler, {}), - "kdpm_2": (KDPM2DiscreteScheduler, {}), - "kdpm_2_a": (KDPM2AncestralDiscreteScheduler, {}), + "kdpm_2": (KDPM2DiscreteScheduler, {"use_karras_sigmas": False}), + "kdpm_2_k": (KDPM2DiscreteScheduler, {"use_karras_sigmas": True}), + "kdpm_2_a": (KDPM2AncestralDiscreteScheduler, {"use_karras_sigmas": False}), + "kdpm_2_a_k": (KDPM2AncestralDiscreteScheduler, {"use_karras_sigmas": True}), "dpmpp_2s": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": False}), "dpmpp_2s_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True}), "dpmpp_2m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False}), @@ -69,7 +76,8 @@ SCHEDULER_MAP: dict[SCHEDULER_NAME_VALUES, tuple[Type[SchedulerMixin], dict[str, "dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "algorithm_type": "sde-dpmsolver++"}), "dpmpp_sde": (DPMSolverSDEScheduler, {"use_karras_sigmas": False, "noise_sampler_seed": 0}), "dpmpp_sde_k": (DPMSolverSDEScheduler, {"use_karras_sigmas": True, "noise_sampler_seed": 0}), - "unipc": (UniPCMultistepScheduler, {"cpu_only": True}), + "unipc": (UniPCMultistepScheduler, {"use_karras_sigmas": False, "cpu_only": True}), + "unipc_k": (UniPCMultistepScheduler, {"use_karras_sigmas": True, "cpu_only": True}), "lcm": (LCMScheduler, {}), "tcd": (TCDScheduler, {}), } diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts index 2ea8900281..8f28562387 100644 --- a/invokeai/frontend/web/src/features/nodes/types/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/common.ts @@ -40,12 +40,16 @@ export const zSchedulerField = z.enum([ 'pndm', 'unipc', 'euler_k', + 'deis_k', 'dpmpp_2s_k', 'dpmpp_2m_k', 'dpmpp_2m_sde_k', 'dpmpp_sde_k', 'heun_k', + 'kdpm_2_k', + 'kdpm_2_a_k', 'lms_k', + 'unipc_k', 'euler_a', 'kdpm_2_a', 'lcm', diff --git a/invokeai/frontend/web/src/features/nodes/types/v2/common.ts b/invokeai/frontend/web/src/features/nodes/types/v2/common.ts index 8613076132..c67a0690d5 100644 --- a/invokeai/frontend/web/src/features/nodes/types/v2/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/v2/common.ts @@ -31,12 +31,16 @@ export const zSchedulerField = z.enum([ 'pndm', 'unipc', 'euler_k', + 'deis_k', 'dpmpp_2s_k', 'dpmpp_2m_k', 'dpmpp_2m_sde_k', 'dpmpp_sde_k', 'heun_k', + 'kdpm_2_k', + 'kdpm_2_a_k', 'lms_k', + 'unipc_k', 'euler_a', 'kdpm_2_a', 'lcm', diff --git a/invokeai/frontend/web/src/features/parameters/types/constants.ts b/invokeai/frontend/web/src/features/parameters/types/constants.ts index 6d7b4f9248..462ab050a8 100644 --- a/invokeai/frontend/web/src/features/parameters/types/constants.ts +++ b/invokeai/frontend/web/src/features/parameters/types/constants.ts @@ -66,12 +66,16 @@ export const SCHEDULER_OPTIONS: ComboboxOption[] = [ { value: 'pndm', label: 'PNDM' }, { value: 'unipc', label: 'UniPC' }, { value: 'euler_k', label: 'Euler Karras' }, + { value: 'deis_k', label: 'DEIS Karras' }, { value: 'dpmpp_sde_k', label: 'DPM++ SDE Karras' }, { value: 'dpmpp_2s_k', label: 'DPM++ 2S Karras' }, { value: 'dpmpp_2m_k', label: 'DPM++ 2M Karras' }, { value: 'dpmpp_2m_sde_k', label: 'DPM++ 2M SDE Karras' }, { value: 'heun_k', label: 'Heun Karras' }, + { value: 'kdpm_2_k', label: 'KDPM 2 Karras' }, + { value: 
'kdpm_2_a_k', label: 'KDPM 2 Ancestral Karras' }, { value: 'lms_k', label: 'LMS Karras' }, + { value: 'unipc_k', label: 'UniPC Karras' }, { value: 'euler_a', label: 'Euler Ancestral' }, { value: 'kdpm_2_a', label: 'KDPM 2 Ancestral' }, { value: 'lcm', label: 'LCM' }, diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 59f9897f74..0db435ab1c 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -3553,7 +3553,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * UNet * @description UNet (scheduler, LoRAs) @@ -8553,7 +8553,7 @@ export type components = { * Scheduler * @description Default scheduler for this model */ - scheduler?: ("ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd") | null; + scheduler?: ("ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd") | null; /** * Steps * @description Default number of steps for this model @@ -11467,7 +11467,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * type * @default scheduler @@ -11483,7 +11483,7 @@ export type components = { * @description Scheduler to use during inference * @enum {string} */ - scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; + scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | 
"dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * type * @default scheduler_output @@ -13261,7 +13261,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * UNet * @description UNet (scheduler, LoRAs) From ffca792d5b703fbda979ad436430fed0f42ed630 Mon Sep 17 00:00:00 2001 From: chainchompa Date: Mon, 29 Jul 2024 09:46:02 -0400 Subject: [PATCH 23/28] edited copy for deleted boards message --- invokeai/frontend/web/public/locales/en.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index d37d38608f..3300f7c7fa 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -31,8 +31,8 @@ "deleteBoard": "Delete Board", "deleteBoardAndImages": "Delete Board and Images", "deleteBoardOnly": "Delete Board Only", - "deletedBoardsCannotbeRestored": "Deleted boards cannot be restored. If only this board is deleted then the remaining images will be uncategorized.", - "deletedPrivateBoardsCannotbeRestored": "Deleted boards cannot be restored. If only this board is deleted then the remaining images will be uncategorized and return to their creator.", + "deletedBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.", + "deletedPrivateBoardsCannotbeRestored": "Deleted boards cannot be restored. 
Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.", "hideBoards": "Hide Boards", "loading": "Loading...", "menuItemAutoAdd": "Auto-add to this Board", From 55e810efa3c8c99a857afef939283aa0fccce5da Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 29 Jul 2024 22:52:15 +0300 Subject: [PATCH 24/28] Add dpmpp_3 schedulers --- .../stable_diffusion/schedulers/schedulers.py | 20 +++++++++++++------ .../web/src/features/nodes/types/common.ts | 4 ++++ .../web/src/features/nodes/types/v2/common.ts | 4 ++++ .../features/parameters/types/constants.ts | 4 ++++ .../frontend/web/src/services/api/schema.ts | 10 +++++----- 5 files changed, 31 insertions(+), 11 deletions(-) diff --git a/invokeai/backend/stable_diffusion/schedulers/schedulers.py b/invokeai/backend/stable_diffusion/schedulers/schedulers.py index 52e4145672..d5d445a79c 100644 --- a/invokeai/backend/stable_diffusion/schedulers/schedulers.py +++ b/invokeai/backend/stable_diffusion/schedulers/schedulers.py @@ -43,6 +43,10 @@ SCHEDULER_NAME_VALUES = Literal[ "dpmpp_2m_k", "dpmpp_2m_sde", "dpmpp_2m_sde_k", + "dpmpp_3s", + "dpmpp_3s_k", + "dpmpp_3m", + "dpmpp_3m_k", "dpmpp_sde", "dpmpp_sde_k", "unipc", @@ -68,12 +72,16 @@ SCHEDULER_MAP: dict[SCHEDULER_NAME_VALUES, tuple[Type[SchedulerMixin], dict[str, "kdpm_2_k": (KDPM2DiscreteScheduler, {"use_karras_sigmas": True}), "kdpm_2_a": (KDPM2AncestralDiscreteScheduler, {"use_karras_sigmas": False}), "kdpm_2_a_k": (KDPM2AncestralDiscreteScheduler, {"use_karras_sigmas": True}), - "dpmpp_2s": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": False}), - "dpmpp_2s_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True}), - "dpmpp_2m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False}), - "dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True}), - "dpmpp_2m_sde": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "algorithm_type": "sde-dpmsolver++"}), - "dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "algorithm_type": "sde-dpmsolver++"}), + "dpmpp_2s": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": False, "solver_order": 2}), + "dpmpp_2s_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True, "solver_order": 2}), + "dpmpp_2m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "solver_order": 2}), + "dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 2}), + "dpmpp_2m_sde": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "solver_order": 2, "algorithm_type": "sde-dpmsolver++"}), + "dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 2, "algorithm_type": "sde-dpmsolver++"}), + "dpmpp_3s": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": False, "solver_order": 3}), + "dpmpp_3s_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True, "solver_order": 3}), + "dpmpp_3m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "solver_order": 3}), + "dpmpp_3m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 3}), "dpmpp_sde": (DPMSolverSDEScheduler, {"use_karras_sigmas": False, "noise_sampler_seed": 0}), "dpmpp_sde_k": (DPMSolverSDEScheduler, {"use_karras_sigmas": True, "noise_sampler_seed": 0}), "unipc": (UniPCMultistepScheduler, {"use_karras_sigmas": False, "cpu_only": True}), diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts index 8f28562387..76f769f8e7 100644 --- 
a/invokeai/frontend/web/src/features/nodes/types/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/common.ts @@ -32,6 +32,8 @@ export const zSchedulerField = z.enum([ 'ddpm', 'dpmpp_2s', 'dpmpp_2m', + 'dpmpp_3s', + 'dpmpp_3m', 'dpmpp_2m_sde', 'dpmpp_sde', 'heun', @@ -43,6 +45,8 @@ export const zSchedulerField = z.enum([ 'deis_k', 'dpmpp_2s_k', 'dpmpp_2m_k', + 'dpmpp_3s_k', + 'dpmpp_3m_k', 'dpmpp_2m_sde_k', 'dpmpp_sde_k', 'heun_k', diff --git a/invokeai/frontend/web/src/features/nodes/types/v2/common.ts b/invokeai/frontend/web/src/features/nodes/types/v2/common.ts index c67a0690d5..59da18f706 100644 --- a/invokeai/frontend/web/src/features/nodes/types/v2/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/v2/common.ts @@ -23,6 +23,8 @@ export const zSchedulerField = z.enum([ 'ddpm', 'dpmpp_2s', 'dpmpp_2m', + 'dpmpp_3s', + 'dpmpp_3m', 'dpmpp_2m_sde', 'dpmpp_sde', 'heun', @@ -34,6 +36,8 @@ export const zSchedulerField = z.enum([ 'deis_k', 'dpmpp_2s_k', 'dpmpp_2m_k', + 'dpmpp_3s_k', + 'dpmpp_3m_k', 'dpmpp_2m_sde_k', 'dpmpp_sde_k', 'heun_k', diff --git a/invokeai/frontend/web/src/features/parameters/types/constants.ts b/invokeai/frontend/web/src/features/parameters/types/constants.ts index 462ab050a8..45ffa76786 100644 --- a/invokeai/frontend/web/src/features/parameters/types/constants.ts +++ b/invokeai/frontend/web/src/features/parameters/types/constants.ts @@ -59,6 +59,8 @@ export const SCHEDULER_OPTIONS: ComboboxOption[] = [ { value: 'dpmpp_sde', label: 'DPM++ SDE' }, { value: 'dpmpp_2s', label: 'DPM++ 2S' }, { value: 'dpmpp_2m', label: 'DPM++ 2M' }, + { value: 'dpmpp_3s', label: 'DPM++ 3S' }, + { value: 'dpmpp_3m', label: 'DPM++ 3M' }, { value: 'dpmpp_2m_sde', label: 'DPM++ 2M SDE' }, { value: 'heun', label: 'Heun' }, { value: 'kdpm_2', label: 'KDPM 2' }, @@ -70,6 +72,8 @@ export const SCHEDULER_OPTIONS: ComboboxOption[] = [ { value: 'dpmpp_sde_k', label: 'DPM++ SDE Karras' }, { value: 'dpmpp_2s_k', label: 'DPM++ 2S Karras' }, { value: 'dpmpp_2m_k', label: 'DPM++ 2M Karras' }, + { value: 'dpmpp_3s_k', label: 'DPM++ 3S Karras' }, + { value: 'dpmpp_3m_k', label: 'DPM++ 3M Karras' }, { value: 'dpmpp_2m_sde_k', label: 'DPM++ 2M SDE Karras' }, { value: 'heun_k', label: 'Heun Karras' }, { value: 'kdpm_2_k', label: 'KDPM 2 Karras' }, diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 0db435ab1c..de2f841f40 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -3553,7 +3553,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | "dpmpp_3m_k"; /** * UNet * @description UNet (scheduler, LoRAs) @@ -8553,7 +8553,7 @@ export type components = { * Scheduler * @description Default scheduler for this model */ - scheduler?: 
("ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd") | null; + scheduler?: ("ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | "dpmpp_3m_k") | null; /** * Steps * @description Default number of steps for this model @@ -11467,7 +11467,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | "dpmpp_3m_k"; /** * type * @default scheduler @@ -11483,7 +11483,7 @@ export type components = { * @description Scheduler to use during inference * @enum {string} */ - scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | "dpmpp_3m_k"; /** * type * @default scheduler_output @@ -13261,7 +13261,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | "dpmpp_3m_k"; /** * UNet * @description UNet (scheduler, LoRAs) From 8d16fa6a4917e37cf431cf502e550e5fff9bdc79 Mon Sep 17 00:00:00 2001 From: Sergey Borisov 
Date: Mon, 29 Jul 2024 22:55:45 +0300
Subject: [PATCH 25/28] Remove dpmpp_3s schedulers as they are currently buggy

---
 .../backend/stable_diffusion/schedulers/schedulers.py | 7 +++----
 .../frontend/web/src/features/nodes/types/common.ts | 2 --
 .../frontend/web/src/features/nodes/types/v2/common.ts | 2 --
 .../web/src/features/parameters/types/constants.ts | 2 --
 invokeai/frontend/web/src/services/api/schema.ts | 10 +++++-----
 5 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/invokeai/backend/stable_diffusion/schedulers/schedulers.py b/invokeai/backend/stable_diffusion/schedulers/schedulers.py
index d5d445a79c..fad99dac9c 100644
--- a/invokeai/backend/stable_diffusion/schedulers/schedulers.py
+++ b/invokeai/backend/stable_diffusion/schedulers/schedulers.py
@@ -20,6 +20,9 @@ from diffusers import (
 )
 from diffusers.schedulers.scheduling_utils import SchedulerMixin

+# TODO: add dpmpp_3s/dpmpp_3s_k when the fix is released
+# https://github.com/huggingface/diffusers/issues/9007
+
 SCHEDULER_NAME_VALUES = Literal[
 "ddim",
 "ddpm",
@@ -43,8 +46,6 @@ SCHEDULER_NAME_VALUES = Literal[
 "dpmpp_2m_k",
 "dpmpp_2m_sde",
 "dpmpp_2m_sde_k",
- "dpmpp_3s",
- "dpmpp_3s_k",
 "dpmpp_3m",
 "dpmpp_3m_k",
 "dpmpp_sde",
@@ -78,8 +79,6 @@ SCHEDULER_MAP: dict[SCHEDULER_NAME_VALUES, tuple[Type[SchedulerMixin], dict[str,
 "dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 2}),
 "dpmpp_2m_sde": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "solver_order": 2, "algorithm_type": "sde-dpmsolver++"}),
 "dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 2, "algorithm_type": "sde-dpmsolver++"}),
- "dpmpp_3s": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": False, "solver_order": 3}),
- "dpmpp_3s_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True, "solver_order": 3}),
 "dpmpp_3m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "solver_order": 3}),
 "dpmpp_3m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 3}),
 "dpmpp_sde": (DPMSolverSDEScheduler, {"use_karras_sigmas": False, "noise_sampler_seed": 0}),
diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts
index 76f769f8e7..c84b2dae62 100644
--- a/invokeai/frontend/web/src/features/nodes/types/common.ts
+++ b/invokeai/frontend/web/src/features/nodes/types/common.ts
@@ -32,7 +32,6 @@ export const zSchedulerField = z.enum([
 'ddpm',
 'dpmpp_2s',
 'dpmpp_2m',
- 'dpmpp_3s',
 'dpmpp_3m',
 'dpmpp_2m_sde',
 'dpmpp_sde',
@@ -45,7 +44,6 @@ export const zSchedulerField = z.enum([
 'deis_k',
 'dpmpp_2s_k',
 'dpmpp_2m_k',
- 'dpmpp_3s_k',
 'dpmpp_3m_k',
 'dpmpp_2m_sde_k',
 'dpmpp_sde_k',
diff --git a/invokeai/frontend/web/src/features/nodes/types/v2/common.ts b/invokeai/frontend/web/src/features/nodes/types/v2/common.ts
index 59da18f706..490eaf1f19 100644
--- a/invokeai/frontend/web/src/features/nodes/types/v2/common.ts
+++ b/invokeai/frontend/web/src/features/nodes/types/v2/common.ts
@@ -23,7 +23,6 @@ export const zSchedulerField = z.enum([
 'ddpm',
 'dpmpp_2s',
 'dpmpp_2m',
- 'dpmpp_3s',
 'dpmpp_3m',
 'dpmpp_2m_sde',
 'dpmpp_sde',
@@ -36,7 +35,6 @@ export const zSchedulerField = z.enum([
 'deis_k',
 'dpmpp_2s_k',
 'dpmpp_2m_k',
- 'dpmpp_3s_k',
 'dpmpp_3m_k',
 'dpmpp_2m_sde_k',
 'dpmpp_sde_k',
diff --git a/invokeai/frontend/web/src/features/parameters/types/constants.ts b/invokeai/frontend/web/src/features/parameters/types/constants.ts
index 45ffa76786..ee7d4bf781 100644
---
a/invokeai/frontend/web/src/features/parameters/types/constants.ts +++ b/invokeai/frontend/web/src/features/parameters/types/constants.ts @@ -59,7 +59,6 @@ export const SCHEDULER_OPTIONS: ComboboxOption[] = [ { value: 'dpmpp_sde', label: 'DPM++ SDE' }, { value: 'dpmpp_2s', label: 'DPM++ 2S' }, { value: 'dpmpp_2m', label: 'DPM++ 2M' }, - { value: 'dpmpp_3s', label: 'DPM++ 3S' }, { value: 'dpmpp_3m', label: 'DPM++ 3M' }, { value: 'dpmpp_2m_sde', label: 'DPM++ 2M SDE' }, { value: 'heun', label: 'Heun' }, @@ -72,7 +71,6 @@ export const SCHEDULER_OPTIONS: ComboboxOption[] = [ { value: 'dpmpp_sde_k', label: 'DPM++ SDE Karras' }, { value: 'dpmpp_2s_k', label: 'DPM++ 2S Karras' }, { value: 'dpmpp_2m_k', label: 'DPM++ 2M Karras' }, - { value: 'dpmpp_3s_k', label: 'DPM++ 3S Karras' }, { value: 'dpmpp_3m_k', label: 'DPM++ 3M Karras' }, { value: 'dpmpp_2m_sde_k', label: 'DPM++ 2M SDE Karras' }, { value: 'heun_k', label: 'Heun Karras' }, diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index de2f841f40..eebd5dad59 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -3553,7 +3553,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | "dpmpp_3m_k"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k"; /** * UNet * @description UNet (scheduler, LoRAs) @@ -8553,7 +8553,7 @@ export type components = { * Scheduler * @description Default scheduler for this model */ - scheduler?: ("ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | "dpmpp_3m_k") | null; + scheduler?: ("ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k") | null; /** * Steps * @description Default number of steps for this model @@ -11467,7 +11467,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | 
"dpmpp_3m_k"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k"; /** * type * @default scheduler @@ -11483,7 +11483,7 @@ export type components = { * @description Scheduler to use during inference * @enum {string} */ - scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | "dpmpp_3m_k"; + scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k"; /** * type * @default scheduler_output @@ -13261,7 +13261,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3s" | "dpmpp_3s_k" | "dpmpp_3m" | "dpmpp_3m_k"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k"; /** * UNet * @description UNet (scheduler, LoRAs) From 58e7ab157d5466b6c06f71eae0fab88d1c223fb4 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 29 Jul 2024 22:59:17 +0300 Subject: [PATCH 26/28] Ruff format --- .../backend/stable_diffusion/schedulers/schedulers.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/invokeai/backend/stable_diffusion/schedulers/schedulers.py b/invokeai/backend/stable_diffusion/schedulers/schedulers.py index fad99dac9c..c8836b316a 100644 --- a/invokeai/backend/stable_diffusion/schedulers/schedulers.py +++ b/invokeai/backend/stable_diffusion/schedulers/schedulers.py @@ -77,8 +77,14 @@ SCHEDULER_MAP: dict[SCHEDULER_NAME_VALUES, tuple[Type[SchedulerMixin], dict[str, "dpmpp_2s_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True, "solver_order": 2}), "dpmpp_2m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "solver_order": 2}), "dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 2}), - "dpmpp_2m_sde": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "solver_order": 2, "algorithm_type": "sde-dpmsolver++"}), - "dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 2, "algorithm_type": "sde-dpmsolver++"}), + "dpmpp_2m_sde": ( + DPMSolverMultistepScheduler, + 
{"use_karras_sigmas": False, "solver_order": 2, "algorithm_type": "sde-dpmsolver++"}, + ), + "dpmpp_2m_sde_k": ( + DPMSolverMultistepScheduler, + {"use_karras_sigmas": True, "solver_order": 2, "algorithm_type": "sde-dpmsolver++"}, + ), "dpmpp_3m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "solver_order": 3}), "dpmpp_3m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 3}), "dpmpp_sde": (DPMSolverSDEScheduler, {"use_karras_sigmas": False, "noise_sampler_seed": 0}), From 156d14c3492bba3dd6c30a39a2bfd5d958fdbe3c Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 30 Jul 2024 04:05:21 +0300 Subject: [PATCH 27/28] Run api regen --- invokeai/frontend/web/src/services/api/schema.ts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index eebd5dad59..79b82a23fa 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -3553,7 +3553,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * UNet * @description UNet (scheduler, LoRAs) @@ -8553,7 +8553,7 @@ export type components = { * Scheduler * @description Default scheduler for this model */ - scheduler?: ("ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k") | null; + scheduler?: ("ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd") | null; /** * Steps * @description Default number of steps for this model @@ -11467,7 +11467,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | 
"dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * type * @default scheduler @@ -11483,7 +11483,7 @@ export type components = { * @description Scheduler to use during inference * @enum {string} */ - scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k"; + scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * type * @default scheduler_output @@ -13261,7 +13261,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd" | "dpmpp_3m" | "dpmpp_3m_k"; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * UNet * @description UNet (scheduler, LoRAs) From cf996472b92cd519aab499da6e60872c58f33403 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 30 Jul 2024 04:50:56 +0300 Subject: [PATCH 28/28] Suggested changes Co-Authored-By: psychedelicious <4822129+psychedelicious@users.noreply.github.com> --- .../web/src/features/nodes/types/v2/common.ts | 6 --- .../features/parameters/types/constants.ts | 42 +++++++++---------- 2 files changed, 21 insertions(+), 27 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/types/v2/common.ts b/invokeai/frontend/web/src/features/nodes/types/v2/common.ts index 490eaf1f19..8613076132 100644 --- a/invokeai/frontend/web/src/features/nodes/types/v2/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/v2/common.ts @@ -23,7 +23,6 @@ export const zSchedulerField = z.enum([ 'ddpm', 'dpmpp_2s', 'dpmpp_2m', - 'dpmpp_3m', 'dpmpp_2m_sde', 'dpmpp_sde', 'heun', @@ -32,17 +31,12 @@ export const zSchedulerField = z.enum([ 'pndm', 'unipc', 'euler_k', - 'deis_k', 'dpmpp_2s_k', 'dpmpp_2m_k', - 'dpmpp_3m_k', 'dpmpp_2m_sde_k', 'dpmpp_sde_k', 'heun_k', - 'kdpm_2_k', - 'kdpm_2_a_k', 'lms_k', - 'unipc_k', 'euler_a', 'kdpm_2_a', 'lcm', diff --git a/invokeai/frontend/web/src/features/parameters/types/constants.ts b/invokeai/frontend/web/src/features/parameters/types/constants.ts index ee7d4bf781..678b2b37f3 100644 --- a/invokeai/frontend/web/src/features/parameters/types/constants.ts +++ b/invokeai/frontend/web/src/features/parameters/types/constants.ts @@ -52,34 +52,34 @@ export const CLIP_SKIP_MAP = { * Mapping of schedulers to human readable name */ export const 
SCHEDULER_OPTIONS: ComboboxOption[] = [ - { value: 'euler', label: 'Euler' }, - { value: 'deis', label: 'DEIS' }, { value: 'ddim', label: 'DDIM' }, { value: 'ddpm', label: 'DDPM' }, - { value: 'dpmpp_sde', label: 'DPM++ SDE' }, - { value: 'dpmpp_2s', label: 'DPM++ 2S' }, - { value: 'dpmpp_2m', label: 'DPM++ 2M' }, - { value: 'dpmpp_3m', label: 'DPM++ 3M' }, - { value: 'dpmpp_2m_sde', label: 'DPM++ 2M SDE' }, - { value: 'heun', label: 'Heun' }, - { value: 'kdpm_2', label: 'KDPM 2' }, - { value: 'lms', label: 'LMS' }, - { value: 'pndm', label: 'PNDM' }, - { value: 'unipc', label: 'UniPC' }, - { value: 'euler_k', label: 'Euler Karras' }, + { value: 'deis', label: 'DEIS' }, { value: 'deis_k', label: 'DEIS Karras' }, - { value: 'dpmpp_sde_k', label: 'DPM++ SDE Karras' }, + { value: 'dpmpp_2s', label: 'DPM++ 2S' }, { value: 'dpmpp_2s_k', label: 'DPM++ 2S Karras' }, + { value: 'dpmpp_2m', label: 'DPM++ 2M' }, { value: 'dpmpp_2m_k', label: 'DPM++ 2M Karras' }, - { value: 'dpmpp_3m_k', label: 'DPM++ 3M Karras' }, + { value: 'dpmpp_2m_sde', label: 'DPM++ 2M SDE' }, { value: 'dpmpp_2m_sde_k', label: 'DPM++ 2M SDE Karras' }, - { value: 'heun_k', label: 'Heun Karras' }, - { value: 'kdpm_2_k', label: 'KDPM 2 Karras' }, - { value: 'kdpm_2_a_k', label: 'KDPM 2 Ancestral Karras' }, - { value: 'lms_k', label: 'LMS Karras' }, - { value: 'unipc_k', label: 'UniPC Karras' }, + { value: 'dpmpp_3m', label: 'DPM++ 3M' }, + { value: 'dpmpp_3m_k', label: 'DPM++ 3M Karras' }, + { value: 'dpmpp_sde', label: 'DPM++ SDE' }, + { value: 'dpmpp_sde_k', label: 'DPM++ SDE Karras' }, + { value: 'euler', label: 'Euler' }, + { value: 'euler_k', label: 'Euler Karras' }, { value: 'euler_a', label: 'Euler Ancestral' }, + { value: 'heun', label: 'Heun' }, + { value: 'heun_k', label: 'Heun Karras' }, + { value: 'kdpm_2', label: 'KDPM 2' }, + { value: 'kdpm_2_k', label: 'KDPM 2 Karras' }, { value: 'kdpm_2_a', label: 'KDPM 2 Ancestral' }, + { value: 'kdpm_2_a_k', label: 'KDPM 2 Ancestral Karras' }, { value: 'lcm', label: 'LCM' }, + { value: 'lms', label: 'LMS' }, + { value: 'lms_k', label: 'LMS Karras' }, + { value: 'pndm', label: 'PNDM' }, { value: 'tcd', label: 'TCD' }, -].sort((a, b) => a.label.localeCompare(b.label)); + { value: 'unipc', label: 'UniPC' }, + { value: 'unipc_k', label: 'UniPC Karras' }, +];
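
A closing note on how the SCHEDULER_MAP entries in the patches above are consumed. The sketch below is
illustrative only and is not part of the patch series: the `get_scheduler` helper is hypothetical, and the
only diffusers API assumed is `from_config`, whose keyword arguments override matching keys in the base
config.

    from diffusers import DPMSolverMultistepScheduler

    # A one-entry subset of the SCHEDULER_MAP from schedulers.py above.
    SCHEDULER_MAP = {
        "dpmpp_3m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 3}),
    }

    def get_scheduler(name: str, base_config: dict):
        # Each entry pairs a scheduler class with the kwargs that define the variant;
        # overlaying them on the base config yields the requested scheduler.
        scheduler_class, extra_kwargs = SCHEDULER_MAP[name]
        return scheduler_class.from_config(base_config, **extra_kwargs)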