diff --git a/environments-and-requirements/requirements-lin-amd.txt b/environments-and-requirements/requirements-lin-amd.txt
index 0aa6092b30..0e707be483 100644
--- a/environments-and-requirements/requirements-lin-amd.txt
+++ b/environments-and-requirements/requirements-lin-amd.txt
@@ -1,6 +1,6 @@
 -r environments-and-requirements/requirements-base.txt
 # Get hardware-appropriate torch/torchvision
---extra-index-url https://download.pytorch.org/whl/rocm5.1.1 --trusted-host https://download.pytorch.org
+--extra-index-url https://download.pytorch.org/whl/rocm5.2 --trusted-host https://download.pytorch.org
 torch>=1.13.1
 torchvision>=0.14.1
 -e .
diff --git a/environments-and-requirements/requirements-lin-cuda.txt b/environments-and-requirements/requirements-lin-cuda.txt
index d7ea4b4542..1f1ebcf947 100644
--- a/environments-and-requirements/requirements-lin-cuda.txt
+++ b/environments-and-requirements/requirements-lin-cuda.txt
@@ -1,4 +1,4 @@
---extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
+--trusted-host https://download.pytorch.org
 -r environments-and-requirements/requirements-base.txt
 torch>=1.13.1
 torchvision>=0.14.1
diff --git a/environments-and-requirements/requirements-win-colab-cuda.txt b/environments-and-requirements/requirements-win-colab-cuda.txt
index 01b25144b6..7fc55c538a 100644
--- a/environments-and-requirements/requirements-win-colab-cuda.txt
+++ b/environments-and-requirements/requirements-win-colab-cuda.txt
@@ -1,6 +1,6 @@
 -r environments-and-requirements/requirements-base.txt
 # Get hardware-appropriate torch/torchvision
---extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
+--extra-index-url https://download.pytorch.org/whl/cu117 --trusted-host https://download.pytorch.org
 torch==1.13.1
 torchvision==0.14.1
 -e .
diff --git a/ldm/invoke/generator/diffusers_pipeline.py b/ldm/invoke/generator/diffusers_pipeline.py
index b4a0703f18..5e62abf9df 100644
--- a/ldm/invoke/generator/diffusers_pipeline.py
+++ b/ldm/invoke/generator/diffusers_pipeline.py
@@ -391,7 +391,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         for i, t in enumerate(self.progress_bar(timesteps)):
             batched_t.fill_(t)
             step_output = self.step(batched_t, latents, conditioning_data,
-                                    i, additional_guidance=additional_guidance)
+                                    step_index=i,
+                                    total_step_count=len(timesteps),
+                                    additional_guidance=additional_guidance)
             latents = step_output.prev_sample
             predicted_original = getattr(step_output, 'pred_original_sample', None)
 
@@ -410,7 +412,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
 
     @torch.inference_mode()
     def step(self, t: torch.Tensor, latents: torch.Tensor, conditioning_data: ConditioningData,
-             step_index:int | None = None, additional_guidance: List[Callable] = None):
+             step_index:int, total_step_count:int,
+             additional_guidance: List[Callable] = None):
         # invokeai_diffuser has batched timesteps, but diffusers schedulers expect a single value
         timestep = t[0]
 
@@ -427,6 +430,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             conditioning_data.unconditioned_embeddings, conditioning_data.text_embeddings,
             conditioning_data.guidance_scale,
             step_index=step_index,
+            total_step_count=total_step_count,
             threshold=conditioning_data.threshold
         )
 
diff --git a/ldm/models/diffusion/shared_invokeai_diffusion.py b/ldm/models/diffusion/shared_invokeai_diffusion.py
index 0630546394..3be6b10170 100644
--- a/ldm/models/diffusion/shared_invokeai_diffusion.py
+++ b/ldm/models/diffusion/shared_invokeai_diffusion.py
@@ -89,6 +89,7 @@ class InvokeAIDiffuserComponent:
                          conditioning: Union[torch.Tensor,dict],
                          unconditional_guidance_scale: float,
                          step_index: Optional[int]=None,
+                         total_step_count: Optional[int]=None,
                          threshold: Optional[ThresholdSettings]=None,
                          ):
         """
@@ -106,7 +107,15 @@ class InvokeAIDiffuserComponent:
         cross_attention_control_types_to_do = []
         context: Context = self.cross_attention_control_context
         if self.cross_attention_control_context is not None:
-            percent_through = self.estimate_percent_through(step_index, sigma)
+            if step_index is not None and total_step_count is not None:
+                # 🧨diffusers codepath
+                percent_through = step_index / total_step_count  # will never reach 1.0 - this is deliberate
+            else:
+                # legacy compvis codepath
+                # TODO remove when compvis codepath support is dropped
+                if step_index is None and sigma is None:
+                    raise ValueError(f"Either step_index or sigma is required when doing cross attention control, but both are None.")
+                percent_through = self.estimate_percent_through(step_index, sigma)
             cross_attention_control_types_to_do = context.get_active_cross_attention_control_types_for_step(percent_through)
 
         wants_cross_attention_control = (len(cross_attention_control_types_to_do) > 0)
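
Note on the behavioral change: when running under diffusers, `percent_through` becomes a plain ratio of the current step index to the total step count instead of being estimated from sigma, which is what lets cross-attention control know how far through sampling it is. A minimal standalone sketch of that branching logic is below; the function name `compute_percent_through` and the `estimate_from_sigma` callback are illustrative stand-ins, not part of this patch.

from typing import Callable, Optional

def compute_percent_through(step_index: Optional[int],
                            total_step_count: Optional[int],
                            sigma: Optional[float],
                            estimate_from_sigma: Callable[[Optional[int], Optional[float]], float]) -> float:
    # Illustrative sketch of the branch added to InvokeAIDiffuserComponent above.
    if step_index is not None and total_step_count is not None:
        # diffusers codepath: step_index runs 0..total_step_count-1,
        # so the ratio never reaches 1.0 (deliberate, per the patch comment).
        return step_index / total_step_count
    # legacy compvis codepath
    if step_index is None and sigma is None:
        raise ValueError("Either step_index or sigma is required when doing "
                         "cross attention control, but both are None.")
    return estimate_from_sigma(step_index, sigma)

# e.g. compute_percent_through(14, 50, None, lambda *_: 0.0) == 0.28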