From ffc28176fe00b59a6ac2600bbaecd5d651ea6b7d Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Wed, 12 Jun 2024 13:39:34 -0400
Subject: [PATCH] Remove unused num_inference_steps.

---
 invokeai/app/invocations/denoise_latents.py  |  8 ++--
 .../tiled_multi_diffusion_denoise_latents.py | 37 ++++---------------
 .../tiled_stable_diffusion_refine.py         | 17 ++++-----
 .../stable_diffusion/diffusers_pipeline.py   |  1 -
 4 files changed, 17 insertions(+), 46 deletions(-)

diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py
index c275243b96..31b2e9f379 100644
--- a/invokeai/app/invocations/denoise_latents.py
+++ b/invokeai/app/invocations/denoise_latents.py
@@ -601,7 +601,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
         denoising_start: float,
         denoising_end: float,
         seed: int,
-    ) -> Tuple[int, List[int], int, Dict[str, Any]]:
+    ) -> Tuple[List[int], int, Dict[str, Any]]:
         assert isinstance(scheduler, ConfigMixin)
         if scheduler.config.get("cpu_only", False):
             scheduler.set_timesteps(steps, device="cpu")
@@ -627,7 +627,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
 
         init_timestep = timesteps[t_start_idx : t_start_idx + 1]
         timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx]
-        num_inference_steps = len(timesteps) // scheduler.order
 
         scheduler_step_kwargs: Dict[str, Any] = {}
         scheduler_step_signature = inspect.signature(scheduler.step)
@@ -649,7 +648,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
         if isinstance(scheduler, TCDScheduler):
             scheduler_step_kwargs.update({"eta": 1.0})
 
-        return num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs
+        return timesteps, init_timestep, scheduler_step_kwargs
 
     def prep_inpaint_mask(
         self, context: InvocationContext, latents: torch.Tensor
@@ -803,7 +802,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
                 dtype=unet.dtype,
             )
 
-            num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs = self.init_scheduler(
+            timesteps, init_timestep, scheduler_step_kwargs = self.init_scheduler(
                 scheduler,
                 device=unet.device,
                 steps=self.steps,
@@ -821,7 +820,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
                 mask=mask,
                 masked_latents=masked_latents,
                 gradient_mask=gradient_mask,
-                num_inference_steps=num_inference_steps,
                 scheduler_step_kwargs=scheduler_step_kwargs,
                 conditioning_data=conditioning_data,
                 control_data=controlnet_data,
diff --git a/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py b/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
index ae86c164e1..4810435be2 100644
--- a/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
+++ b/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
@@ -228,7 +228,6 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
             ]
             controlnet_data_tiles.append(tile_controlnet_data)
 
-        # TODO(ryand): Logic from here down needs updating --------------------
         # Denoise (i.e. "refine") each tile independently.
         for image_tile_np, latent_tile, noise_tile in zip(image_tiles_np, latent_tiles, noise_tiles, strict=True):
             assert latent_tile.shape == noise_tile.shape
@@ -238,34 +237,13 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
             # the tiles. Ideally, the ControlNet code should be able to work with Tensors.
             image_tile_pil = Image.fromarray(image_tile_np)
 
-            # Run the ControlNet on the image tile.
-            height, width, _ = image_tile_np.shape
-            # The height and width must be evenly divisible by LATENT_SCALE_FACTOR. This is enforced earlier, but we
-            # validate this assumption here.
-            assert height % LATENT_SCALE_FACTOR == 0
-            assert width % LATENT_SCALE_FACTOR == 0
-            controlnet_data = self.run_controlnet(
-                image=image_tile_pil,
-                controlnet_model=controlnet_model,
-                weight=self.control_weight,
-                do_classifier_free_guidance=True,
-                width=width,
-                height=height,
-                device=controlnet_model.device,
-                dtype=controlnet_model.dtype,
-                control_mode="balanced",
-                resize_mode="just_resize_simple",
-            )
-
-            num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs = (
-                DenoiseLatentsInvocation.init_scheduler(
-                    scheduler,
-                    device=unet.device,
-                    steps=self.steps,
-                    denoising_start=self.denoising_start,
-                    denoising_end=self.denoising_end,
-                    seed=seed,
-                )
+            timesteps, init_timestep, scheduler_step_kwargs = DenoiseLatentsInvocation.init_scheduler(
+                scheduler,
+                device=unet.device,
+                steps=self.steps,
+                denoising_start=self.denoising_start,
+                denoising_end=self.denoising_end,
+                seed=seed,
             )
 
             # TODO(ryand): Think about when/if latents/noise should be moved off of the device to save VRAM.
@@ -280,7 +258,6 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
                 mask=None,
                 masked_latents=None,
                 gradient_mask=None,
-                num_inference_steps=num_inference_steps,
                 scheduler_step_kwargs=scheduler_step_kwargs,
                 conditioning_data=conditioning_data,
                 control_data=[controlnet_data],
diff --git a/invokeai/app/invocations/tiled_stable_diffusion_refine.py b/invokeai/app/invocations/tiled_stable_diffusion_refine.py
index 0c56b336b8..5f0512917d 100644
--- a/invokeai/app/invocations/tiled_stable_diffusion_refine.py
+++ b/invokeai/app/invocations/tiled_stable_diffusion_refine.py
@@ -320,15 +320,13 @@ class TiledStableDiffusionRefineInvocation(BaseInvocation):
                 resize_mode="just_resize_simple",
             )
 
-            num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs = (
-                DenoiseLatentsInvocation.init_scheduler(
-                    scheduler,
-                    device=unet.device,
-                    steps=self.steps,
-                    denoising_start=self.denoising_start,
-                    denoising_end=self.denoising_end,
-                    seed=seed,
-                )
+            timesteps, init_timestep, scheduler_step_kwargs = DenoiseLatentsInvocation.init_scheduler(
+                scheduler,
+                device=unet.device,
+                steps=self.steps,
+                denoising_start=self.denoising_start,
+                denoising_end=self.denoising_end,
+                seed=seed,
             )
 
             # TODO(ryand): Think about when/if latents/noise should be moved off of the device to save VRAM.
@@ -343,7 +341,6 @@ class TiledStableDiffusionRefineInvocation(BaseInvocation):
                 mask=None,
                 masked_latents=None,
                 gradient_mask=None,
-                num_inference_steps=num_inference_steps,
                 scheduler_step_kwargs=scheduler_step_kwargs,
                 conditioning_data=conditioning_data,
                 control_data=[controlnet_data],
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index ecab11c22d..fcdcffe10b 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -283,7 +283,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
     def latents_from_embeddings(
         self,
         latents: torch.Tensor,
-        num_inference_steps: int,
         scheduler_step_kwargs: dict[str, Any],
         conditioning_data: TextConditioningData,
         *,
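
A minimal sketch of how a caller adapts to the new init_scheduler() return signature, assuming scheduler, unet, and the step/denoising parameters are already in scope, as they are in the invocations above; the import path follows the file touched by this patch:

from invokeai.app.invocations.denoise_latents import DenoiseLatentsInvocation

# Before this change, init_scheduler() also returned a step count:
#   num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs = ...
# After this change, only three values are returned:
timesteps, init_timestep, scheduler_step_kwargs = DenoiseLatentsInvocation.init_scheduler(
    scheduler,
    device=unet.device,
    steps=steps,
    denoising_start=denoising_start,
    denoising_end=denoising_end,
    seed=seed,
)

# A caller that still wants a step count (e.g. for progress reporting) can
# recompute it locally from the returned timesteps, exactly as the removed
# line inside init_scheduler() did:
num_inference_steps = len(timesteps) // scheduler.order

Since no in-tree caller used the value, threading it through latents_from_embeddings() was pure overhead, which is what this patch removes.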