diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py
index 9970949ef6..67c7f2abca 100644
--- a/invokeai/app/invocations/denoise_latents.py
+++ b/invokeai/app/invocations/denoise_latents.py
@@ -735,7 +735,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
         # The image prompts are then passed to prep_ip_adapter_data().
         image_prompts = self.prep_ip_adapter_image_prompts(context=context, ip_adapters=ip_adapters)

-        # get the unet's config so that we can pass the base to dispatch_progress()
+        # get the unet's config so that we can pass the base to sd_step_callback()
         unet_config = context.models.get_config(self.unet.unet.key)

         def step_callback(state: PipelineIntermediateState) -> None:
diff --git a/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py b/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
index 6274204c14..717f8e1019 100644
--- a/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
+++ b/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
@@ -170,7 +170,7 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
             min_overlap=self.tile_min_overlap,
         )

-        # Get the unet's config so that we can pass the base to dispatch_progress().
+        # Get the unet's config so that we can pass the base to sd_step_callback().
         unet_config = context.models.get_config(self.unet.unet.key)

         def step_callback(state: PipelineIntermediateState) -> None:
diff --git a/invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py b/invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py
index ca68233c27..e2934247ed 100644
--- a/invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py
+++ b/invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py
@@ -56,7 +56,7 @@ class MultiDiffusionPipeline(StableDiffusionGeneratorPipeline):
         self._adjust_memory_efficient_attention(latents)

         # Populate a weighted mask that will be used to combine the results from each region after every step.
-        # For now, we assume that each regions has the same weight (1.0).
+        # For now, we assume that each region has the same weight (1.0).
         region_weight_mask = torch.zeros(
             (1, 1, latent_height, latent_width), device=latents.device, dtype=latents.dtype
         )
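The first two hunks rename a stale comment reference from dispatch_progress() to sd_step_callback(); the step_callback closure those comments describe is only partially visible in the hunks. As a rough sketch of what the comments point at, assuming InvokeAI's context.util.sd_step_callback(state, base_model) helper and that unet_config.base carries the model's base type (assumptions from surrounding code, not shown in this diff):

    from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState

    # Inside the invocation's invoke() method; `context` and `self.unet` are
    # provided by the invocation framework. This body is a sketch, not the
    # exact upstream implementation.
    unet_config = context.models.get_config(self.unet.unet.key)

    def step_callback(state: PipelineIntermediateState) -> None:
        # Report the intermediate latents for this denoising step, tagged with
        # the model base (e.g. SD-1 vs SDXL) so previews decode correctly.
        context.util.sd_step_callback(state, unet_config.base)

The config is fetched once, outside the closure, so each per-step callback only has to forward the state rather than re-query the model record.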