Mirror of https://github.com/invoke-ai/InvokeAI
Modifying code from https://github.com/tencent-ailab/IP-Adapter. Also adding license notice at top.
@@ -1,3 +1,6 @@
+# copied from https://github.com/tencent-ailab/IP-Adapter (Apache License 2.0)
+# and modified as needed
+
 import os
 from typing import List
 
@@ -6,11 +9,14 @@ from diffusers import StableDiffusionPipeline
 from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
 from PIL import Image
 
-from .utils import is_torch2_available
-if is_torch2_available:
-    from .attention_processor import IPAttnProcessor2_0 as IPAttnProcessor, AttnProcessor2_0 as AttnProcessor
-else:
-    from .attention_processor import IPAttnProcessor, AttnProcessor
+# FIXME: Getting errors when trying to use PyTorch 2.0 versions of IPAttnProcessor and AttnProcessor
+# so for now falling back to the default versions
+# from .utils import is_torch2_available
+# if is_torch2_available:
+#     from .attention_processor import IPAttnProcessor2_0 as IPAttnProcessor, AttnProcessor2_0 as AttnProcessor
+# else:
+#     from .attention_processor import IPAttnProcessor, AttnProcessor
+from .attention_processor import IPAttnProcessor, AttnProcessor
 from .resampler import Resampler
 
 
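For context: is_torch2_available comes from the upstream IP-Adapter utils module, where it is, approximately, a feature check for PyTorch 2.0's fused attention kernel. A sketch of the upstream helper (not part of this commit):

import torch.nn.functional as F

def is_torch2_available():
    # PyTorch 2.0 introduced F.scaled_dot_product_attention; its presence
    # signals that the 2.0 attention processors can be used.
    return hasattr(F, "scaled_dot_product_attention")

Note that the commented-out gate reads "if is_torch2_available:" rather than "if is_torch2_available():". If the helper is a function, the bare name is always truthy, so the gate would always have taken the PyTorch 2.0 branch, which may explain the errors mentioned in the FIXME.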
@@ -18,12 +24,12 @@ class ImageProjModel(torch.nn.Module):
     """Projection Model"""
     def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4):
         super().__init__()
 
         self.cross_attention_dim = cross_attention_dim
         self.clip_extra_context_tokens = clip_extra_context_tokens
         self.proj = torch.nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
         self.norm = torch.nn.LayerNorm(cross_attention_dim)
 
     def forward(self, image_embeds):
         embeds = image_embeds
         clip_extra_context_tokens = self.proj(embeds).reshape(-1, self.clip_extra_context_tokens, self.cross_attention_dim)
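ImageProjModel maps one pooled CLIP image embedding to clip_extra_context_tokens pseudo-tokens in the UNet's cross-attention space; the hunk is cut off before the LayerNorm and return that end forward() in the full file. A minimal shape check with hypothetical SD 1.5-style dimensions, assuming the remainder of forward() applies self.norm and returns the tokens:

import torch

proj = ImageProjModel(cross_attention_dim=768, clip_embeddings_dim=1024, clip_extra_context_tokens=4)
clip_image_embeds = torch.randn(2, 1024)  # (batch, clip_embeddings_dim)
tokens = proj(clip_image_embeds)
print(tokens.shape)  # torch.Size([2, 4, 768])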
@@ -32,25 +38,29 @@ class ImageProjModel(torch.nn.Module):
 
 
 class IPAdapter:
 
     def __init__(self, sd_pipe, image_encoder_path, ip_ckpt, device, num_tokens=4):
 
         self.device = device
         self.image_encoder_path = image_encoder_path
         self.ip_ckpt = ip_ckpt
         self.num_tokens = num_tokens
 
-        self.pipe = sd_pipe.to(self.device)
+        # FIXME:
+        # InvokeAI StableDiffusionPipeline has a to() method that isn't meant to be used
+        # so for now assuming that pipeline is already on the correct device
+        # self.pipe = sd_pipe.to(self.device)
+        self.pipe = sd_pipe
         self.set_ip_adapter()
 
         # load image encoder
         self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(self.image_encoder_path).to(self.device, dtype=torch.float16)
         self.clip_image_processor = CLIPImageProcessor()
         # image proj model
         self.image_proj_model = self.init_proj()
 
         self.load_ip_adapter()
 
     def init_proj(self):
         image_proj_model = ImageProjModel(
             cross_attention_dim=self.pipe.unet.config.cross_attention_dim,
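Per the FIXME above, the constructor now assumes the caller has already moved the pipeline to the target device. A hypothetical construction, with placeholder paths and model IDs that are not part of this commit:

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")  # the caller, not IPAdapter, places the pipeline on the device
ip_adapter = IPAdapter(
    pipe,
    image_encoder_path="models/image_encoder",  # placeholder path
    ip_ckpt="models/ip-adapter_sd15.bin",       # placeholder path
    device="cuda",
)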
@@ -58,10 +68,12 @@ class IPAdapter:
             clip_extra_context_tokens=self.num_tokens,
         ).to(self.device, dtype=torch.float16)
         return image_proj_model
 
     def set_ip_adapter(self):
         unet = self.pipe.unet
         attn_procs = {}
+        print("Original UNet Attn Processors count:", len(unet.attn_processors))
+        print(unet.attn_processors.keys())
         for name in unet.attn_processors.keys():
             cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
             if name.startswith("mid_block"):
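The hunk above is cut inside the loop. In the upstream IP-Adapter source, the elided branches derive hidden_size for each attention block from the UNet's block_out_channels, roughly as follows (reproduced from upstream for context, not part of this diff):

            if name.startswith("mid_block"):
                hidden_size = unet.config.block_out_channels[-1]
            elif name.startswith("up_blocks"):
                block_id = int(name[len("up_blocks.")])
                hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
            elif name.startswith("down_blocks"):
                block_id = int(name[len("down_blocks.")])
                hidden_size = unet.config.block_out_channels[block_id]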
@@ -75,16 +87,19 @@ class IPAdapter:
             if cross_attention_dim is None:
                 attn_procs[name] = AttnProcessor()
             else:
+                print("swapping in IPAttnProcessor for", name)
                 attn_procs[name] = IPAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim,
                                                    scale=1.0).to(self.device, dtype=torch.float16)
         unet.set_attn_processor(attn_procs)
+        print("Modified UNet Attn Processors count:", len(unet.attn_processors))
+        print(unet.attn_processors.keys())
 
     def load_ip_adapter(self):
         state_dict = torch.load(self.ip_ckpt, map_location="cpu")
         self.image_proj_model.load_state_dict(state_dict["image_proj"])
         ip_layers = torch.nn.ModuleList(self.pipe.unet.attn_processors.values())
         ip_layers.load_state_dict(state_dict["ip_adapter"])
 
     @torch.inference_mode()
     def get_image_embeds(self, pil_image):
         if isinstance(pil_image, Image.Image):
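load_ip_adapter() expects a checkpoint holding two nested state dicts: "image_proj" for the projection model and "ip_adapter" for the attention-processor weights. Because the processors are wrapped in a torch.nn.ModuleList, the "ip_adapter" keys are integer-indexed, so the iteration order of unet.attn_processors must match the order used when the checkpoint was saved. A quick structural check (the path is a placeholder, and the parameter names follow the upstream IPAttnProcessor):

import torch

state_dict = torch.load("ip-adapter_sd15.bin", map_location="cpu")
print(state_dict.keys())                   # dict_keys(['image_proj', 'ip_adapter'])
print(list(state_dict["ip_adapter"])[:2])  # e.g. ['0.to_k_ip.weight', '0.to_v_ip.weight']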
@@ -94,12 +109,14 @@ class IPAdapter:
         image_prompt_embeds = self.image_proj_model(clip_image_embeds)
         uncond_image_prompt_embeds = self.image_proj_model(torch.zeros_like(clip_image_embeds))
         return image_prompt_embeds, uncond_image_prompt_embeds
 
     def set_scale(self, scale):
         for attn_processor in self.pipe.unet.attn_processors.values():
             if isinstance(attn_processor, IPAttnProcessor):
                 attn_processor.scale = scale
 
+    # IPAdapter.generate() method is not used for InvokeAI
+    # left here for reference
     def generate(
         self,
         pil_image,
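set_scale() controls how strongly the image prompt conditions generation: in the upstream IPAttnProcessor, the image-token attention output is added to the text-attention output weighted by scale, so 0.0 disables the image condition and 1.0 applies it at full strength. A usage sketch, with ip_adapter as constructed earlier:

ip_adapter.set_scale(0.6)  # weight the image prompt at 60% strength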
@@ -113,22 +130,22 @@ class IPAdapter:
         **kwargs,
     ):
         self.set_scale(scale)
 
         if isinstance(pil_image, Image.Image):
             num_prompts = 1
         else:
             num_prompts = len(pil_image)
 
         if prompt is None:
             prompt = "best quality, high quality"
         if negative_prompt is None:
             negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
 
         if not isinstance(prompt, List):
             prompt = [prompt] * num_prompts
         if not isinstance(negative_prompt, List):
             negative_prompt = [negative_prompt] * num_prompts
 
         image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(pil_image)
         bs_embed, seq_len, _ = image_prompt_embeds.shape
         image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
@@ -142,7 +159,7 @@ class IPAdapter:
             negative_prompt_embeds_, prompt_embeds_ = prompt_embeds.chunk(2)
             prompt_embeds = torch.cat([prompt_embeds_, image_prompt_embeds], dim=1)
             negative_prompt_embeds = torch.cat([negative_prompt_embeds_, uncond_image_prompt_embeds], dim=1)
 
         generator = torch.Generator(self.device).manual_seed(seed) if seed is not None else None
         images = self.pipe(
             prompt_embeds=prompt_embeds,
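These torch.cat calls are the core IP-Adapter mechanism: the image pseudo-tokens are appended to the text-encoder sequence along the token axis, separately for the conditional and unconditional halves of the classifier-free-guidance batch. Illustrative SD 1.5 shapes (hypothetical, assuming 77 text tokens and num_tokens=4):

# prompt_embeds_:           (batch, 77, 768)  text tokens from the text encoder
# image_prompt_embeds:      (batch,  4, 768)  image pseudo-tokens from ImageProjModel
# torch.cat(..., dim=1) ->  (batch, 81, 768)  sequence seen by UNet cross-attention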
@@ -152,13 +169,13 @@ class IPAdapter:
             generator=generator,
             **kwargs,
         ).images
 
         return images
 
 
 class IPAdapterXL(IPAdapter):
     """SDXL"""
 
     def generate(
         self,
         pil_image,
@@ -171,22 +188,22 @@ class IPAdapterXL(IPAdapter):
         **kwargs,
     ):
         self.set_scale(scale)
 
         if isinstance(pil_image, Image.Image):
             num_prompts = 1
         else:
             num_prompts = len(pil_image)
 
         if prompt is None:
             prompt = "best quality, high quality"
         if negative_prompt is None:
             negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
 
         if not isinstance(prompt, List):
             prompt = [prompt] * num_prompts
         if not isinstance(negative_prompt, List):
             negative_prompt = [negative_prompt] * num_prompts
 
         image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(pil_image)
         bs_embed, seq_len, _ = image_prompt_embeds.shape
         image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
@@ -199,7 +216,7 @@ class IPAdapterXL(IPAdapter):
                 prompt, num_images_per_prompt=num_samples, do_classifier_free_guidance=True, negative_prompt=negative_prompt)
             prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1)
             negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1)
 
         generator = torch.Generator(self.device).manual_seed(seed) if seed is not None else None
         images = self.pipe(
             prompt_embeds=prompt_embeds,
@@ -210,10 +227,10 @@ class IPAdapterXL(IPAdapter):
             generator=generator,
             **kwargs,
         ).images
 
         return images
 
 
 class IPAdapterPlus(IPAdapter):
     """IP-Adapter with fine-grained features"""
 
@@ -229,7 +246,7 @@ class IPAdapterPlus(IPAdapter):
             ff_mult=4
         ).to(self.device, dtype=torch.float16)
         return image_proj_model
 
     @torch.inference_mode()
     def get_image_embeds(self, pil_image):
         if isinstance(pil_image, Image.Image):
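The Resampler construction is cut off above ff_mult=4. In the upstream source, IPAdapterPlus.init_proj() builds it roughly as follows (reproduced for context; the exact arguments are upstream's, not this diff's):

        image_proj_model = Resampler(
            dim=self.pipe.unet.config.cross_attention_dim,
            depth=4,
            dim_head=64,
            heads=12,
            num_queries=self.num_tokens,
            embedding_dim=self.image_encoder.config.hidden_size,
            output_dim=self.pipe.unet.config.cross_attention_dim,
            ff_mult=4
        ).to(self.device, dtype=torch.float16)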
@@ -240,4 +257,4 @@ class IPAdapterPlus(IPAdapter):
         image_prompt_embeds = self.image_proj_model(clip_image_embeds)
         uncond_clip_image_embeds = self.image_encoder(torch.zeros_like(clip_image), output_hidden_states=True).hidden_states[-2]
         uncond_image_prompt_embeds = self.image_proj_model(uncond_clip_image_embeds)
         return image_prompt_embeds, uncond_image_prompt_embeds
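Unlike the base IPAdapter, which projects the pooled CLIP image embedding, IPAdapterPlus feeds hidden_states[-2], the per-patch features from the penultimate layer of the CLIP vision encoder, into the Resampler, which compresses them into num_tokens learned query tokens. A shape sketch, assuming a hypothetical ViT-H/14 encoder at 224x224:

# clip_image:         (batch, 3, 224, 224)  preprocessed pixels
# hidden_states[-2]:  (batch, 257, 1280)    256 patches + CLS token, penultimate layer
# Resampler output:   (batch, num_tokens, cross_attention_dim)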