import math
from dataclasses import dataclass
from typing import List, Optional, Union

import torch

from invokeai.backend.ip_adapter.ip_adapter import IPAdapter


@dataclass
class BasicConditioningInfo:
    """SD 1/2 text conditioning information produced by Compel."""

    embeds: torch.Tensor

    def to(self, device, dtype=None):
        self.embeds = self.embeds.to(device=device, dtype=dtype)
        return self
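

# Illustrative sketch (not part of the original module): unlike torch.Tensor.to(), the
# .to() methods on these conditioning dataclasses mutate the instance in place and
# return `self`. The 1 x 77 x 768 shape below is a hypothetical SD 1.x text embedding.
def _example_move_conditioning() -> BasicConditioningInfo:
    cond = BasicConditioningInfo(embeds=torch.zeros(1, 77, 768))
    # Mutates cond.embeds and returns the same object.
    return cond.to(device=torch.device("cpu"), dtype=torch.float16)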


@dataclass
class ConditioningFieldData:
    conditionings: List[BasicConditioningInfo]


@dataclass
class SDXLConditioningInfo(BasicConditioningInfo):
    """SDXL text conditioning information produced by Compel."""

    pooled_embeds: torch.Tensor
    add_time_ids: torch.Tensor

    def to(self, device, dtype=None):
        self.pooled_embeds = self.pooled_embeds.to(device=device, dtype=dtype)
        self.add_time_ids = self.add_time_ids.to(device=device, dtype=dtype)
        return super().to(device=device, dtype=dtype)
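

# Illustrative sketch (not part of the original module): constructing SDXL conditioning.
# The shapes are assumptions for a single SDXL prompt -- 77 tokens x 2048 dims for the
# concatenated text-encoder embeddings, 1280 dims for the pooled embedding, and 6 time
# ids (original size, crop coords, target size) -- and may differ in practice.
def _example_sdxl_conditioning() -> SDXLConditioningInfo:
    return SDXLConditioningInfo(
        embeds=torch.zeros(1, 77, 2048),
        pooled_embeds=torch.zeros(1, 1280),
        add_time_ids=torch.tensor([[1024, 1024, 0, 0, 1024, 1024]]),
    )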


@dataclass
class IPAdapterConditioningInfo:
    cond_image_prompt_embeds: torch.Tensor
    """IP-Adapter image encoder conditioning embeddings.
    Shape: (num_images, num_tokens, encoding_dim).
    """

    uncond_image_prompt_embeds: torch.Tensor
    """IP-Adapter image encoder embeddings to use for unconditional generation.
    Shape: (num_images, num_tokens, encoding_dim).
    """


@dataclass
class IPAdapterData:
    ip_adapter_model: IPAdapter
    ip_adapter_conditioning: IPAdapterConditioningInfo
    mask: torch.Tensor
    target_blocks: List[str]

    # Either a single weight applied to all steps, or a list of weights for each step.
    weight: Union[float, List[float]] = 1.0
    begin_step_percent: float = 0.0
    end_step_percent: float = 1.0

    def scale_for_step(self, step_index: int, total_steps: int) -> float:
        first_adapter_step = math.floor(self.begin_step_percent * total_steps)
        last_adapter_step = math.ceil(self.end_step_percent * total_steps)
        weight = self.weight[step_index] if isinstance(self.weight, list) else self.weight
        if first_adapter_step <= step_index <= last_adapter_step:
            # Only apply this IP-Adapter if the current step is within the IP-Adapter's begin/end step range.
            return weight
        # Otherwise, set the IP-Adapter's scale to 0, so it has no effect.
        return 0.0
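

# Illustrative sketch (not part of the original module): how scale_for_step() gates an
# IP-Adapter to a fraction of the denoising run. The None placeholders stand in for a
# loaded IPAdapter model and its conditioning, which this sketch does not need, and the
# mask shape is hypothetical.
def _example_ip_adapter_schedule(total_steps: int = 10) -> List[float]:
    ip_adapter_data = IPAdapterData(
        ip_adapter_model=None,  # type: ignore[arg-type]  # placeholder for a loaded IPAdapter
        ip_adapter_conditioning=None,  # type: ignore[arg-type]  # placeholder conditioning
        mask=torch.ones(1, 1, 64, 64),
        target_blocks=[],
        weight=0.8,
        begin_step_percent=0.2,
        end_step_percent=0.7,
    )
    # With 10 steps, the adapter is active (scale 0.8) for steps 2..7 and 0.0 elsewhere.
    return [ip_adapter_data.scale_for_step(i, total_steps) for i in range(total_steps)]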


@dataclass
class Range:
    start: int
    end: int


class TextConditioningRegions:
    def __init__(
        self,
        masks: torch.Tensor,
        ranges: list[Range],
    ):
        # A binary mask indicating the regions of the image that the prompt should be applied to.
        # Shape: (1, num_prompts, height, width)
        # Dtype: torch.bool
        self.masks = masks

        # A list of ranges indicating the start and end indices of the embeddings that the corresponding mask
        # applies to. ranges[i] contains the embedding range for the i'th prompt / mask.
        self.ranges = ranges

        assert self.masks.shape[1] == len(self.ranges)
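

# Illustrative sketch (not part of the original module): two regional prompts over a
# 64x64 latent-space mask. The embedding index ranges are hypothetical; in practice they
# come from the concatenated prompt embeddings.
def _example_text_conditioning_regions() -> TextConditioningRegions:
    masks = torch.zeros(1, 2, 64, 64, dtype=torch.bool)
    masks[:, 0, :, :32] = True  # first prompt applies to the left half
    masks[:, 1, :, 32:] = True  # second prompt applies to the right half
    return TextConditioningRegions(
        masks=masks,
        ranges=[Range(start=0, end=77), Range(start=77, end=154)],
    )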


class TextConditioningData:
    def __init__(
        self,
        uncond_text: Union[BasicConditioningInfo, SDXLConditioningInfo],
        cond_text: Union[BasicConditioningInfo, SDXLConditioningInfo],
        uncond_regions: Optional[TextConditioningRegions],
        cond_regions: Optional[TextConditioningRegions],
        guidance_scale: Union[float, List[float]],
        guidance_rescale_multiplier: float = 0.0,
    ):
        self.uncond_text = uncond_text
        self.cond_text = cond_text
        self.uncond_regions = uncond_regions
        self.cond_regions = cond_regions
        # Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
        # `guidance_scale` is defined as `w` of equation 2 of the [Imagen paper](https://arxiv.org/pdf/2205.11487.pdf).
        # Guidance is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to
        # generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
        self.guidance_scale = guidance_scale
        # For models trained using zero-terminal SNR ("ztsnr"), a guidance_rescale_multiplier of 0.7 is suggested.
        # See [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
        self.guidance_rescale_multiplier = guidance_rescale_multiplier

    def is_sdxl(self) -> bool:
        assert isinstance(self.uncond_text, SDXLConditioningInfo) == isinstance(self.cond_text, SDXLConditioningInfo)
        return isinstance(self.cond_text, SDXLConditioningInfo)
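

# Illustrative sketch (not part of the original module): how `guidance_scale` and
# `guidance_rescale_multiplier` are typically combined in a classifier-free-guidance
# step. This mirrors the rescaling described in https://arxiv.org/pdf/2305.08891.pdf;
# the actual denoising code lives elsewhere in the pipeline.
def _example_apply_guidance(
    uncond_noise: torch.Tensor,
    cond_noise: torch.Tensor,
    guidance_scale: float,
    guidance_rescale_multiplier: float = 0.0,
) -> torch.Tensor:
    # Classifier-free guidance: extrapolate from the unconditional toward the conditional prediction.
    noise_pred = uncond_noise + guidance_scale * (cond_noise - uncond_noise)
    if guidance_rescale_multiplier > 0:
        # Rescale so the guided prediction's std matches the conditional prediction's std,
        # then blend with the unrescaled prediction by the multiplier.
        dims = list(range(1, noise_pred.ndim))
        std_cond = cond_noise.std(dim=dims, keepdim=True)
        std_cfg = noise_pred.std(dim=dims, keepdim=True)
        rescaled = noise_pred * (std_cond / std_cfg)
        noise_pred = guidance_rescale_multiplier * rescaled + (1 - guidance_rescale_multiplier) * noise_pred
    return noise_pred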