From e579be01183cbf07e7586b8a650b5ff15c6d28fd Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Thu, 25 Jul 2024 22:53:17 +0530 Subject: [PATCH 1/3] wip: grounding segment anything --- .../groundingdino/__init__.py | 0 .../config/GroundingDINO_SwinB.py | 43 + .../config/GroundingDINO_SwinT_OGC.py | 43 + .../groundingdino/datasets/__init__.py | 0 .../groundingdino/datasets/transforms.py | 311 ++++++ .../models/GroundingDINO/__init__.py | 15 + .../models/GroundingDINO/backbone/__init__.py | 1 + .../models/GroundingDINO/backbone/backbone.py | 221 +++++ .../backbone/position_encoding.py | 186 ++++ .../backbone/swin_transformer.py | 802 +++++++++++++++ .../models/GroundingDINO/bertwarper.py | 250 +++++ .../models/GroundingDINO/fuse_modules.py | 295 ++++++ .../models/GroundingDINO/groundingdino.py | 358 +++++++ .../models/GroundingDINO/ms_deform_attn.py | 340 +++++++ .../models/GroundingDINO/transformer.py | 922 ++++++++++++++++++ .../GroundingDINO/transformer_vanilla.py | 115 +++ .../models/GroundingDINO/utils.py | 258 +++++ .../groundingdino/models/__init__.py | 18 + .../groundingdino/models/registry.py | 60 ++ .../groundingdino/util/__init__.py | 1 + .../groundingdino/util/box_ops.py | 140 +++ .../groundingdino/util/get_tokenlizer.py | 24 + .../groundingdino/util/inference.py | 222 +++++ .../groundingdino/util/logger.py | 91 ++ .../groundingdino/util/misc.py | 701 +++++++++++++ .../groundingdino/util/slconfig.py | 419 ++++++++ .../groundingdino/util/slio.py | 178 ++++ .../groundingdino/util/time_counter.py | 62 ++ .../groundingdino/util/utils.py | 598 ++++++++++++ .../groundingdino/util/visualizer.py | 309 ++++++ .../groundingdino/util/vl_utils.py | 100 ++ .../groundingdino/version.py | 1 + .../grounding_segment_anything/gsa.py | 112 +++ .../segment_anything/__init__.py | 16 + .../automatic_mask_generator.py | 368 +++++++ .../segment_anything/build_sam.py | 107 ++ .../segment_anything/build_sam_hq.py | 120 +++ .../segment_anything/modeling/__init__.py | 12 + .../segment_anything/modeling/common.py | 43 + .../modeling/image_encoder.py | 392 ++++++++ .../segment_anything/modeling/mask_decoder.py | 171 ++++ .../modeling/mask_decoder_hq.py | 233 +++++ .../modeling/prompt_encoder.py | 212 ++++ .../segment_anything/modeling/sam.py | 174 ++++ .../segment_anything/modeling/transformer.py | 232 +++++ .../segment_anything/predictor.py | 271 +++++ .../segment_anything/utils/__init__.py | 5 + .../segment_anything/utils/amg.py | 346 +++++++ .../segment_anything/utils/onnx.py | 144 +++ .../segment_anything/utils/transforms.py | 102 ++ 50 files changed, 10144 insertions(+) create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/__init__.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinB.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinT_OGC.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/__init__.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/transforms.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/__init__.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/__init__.py create mode 100644 
invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/backbone.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/position_encoding.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/swin_transformer.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/bertwarper.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/fuse_modules.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/groundingdino.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/ms_deform_attn.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer_vanilla.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/utils.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/__init__.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/registry.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/__init__.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/box_ops.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/get_tokenlizer.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/inference.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/logger.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/misc.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/slconfig.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/slio.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/time_counter.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/utils.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/visualizer.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/vl_utils.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/version.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/gsa.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/__init__.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/automatic_mask_generator.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam_hq.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/__init__.py create mode 100644 
invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/common.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/image_encoder.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder_hq.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/prompt_encoder.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/sam.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/transformer.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/predictor.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/__init__.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/amg.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/onnx.py create mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/transforms.py diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinB.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinB.py new file mode 100644 index 0000000000..f490c4bbd5 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinB.py @@ -0,0 +1,43 @@ +batch_size = 1 +modelname = "groundingdino" +backbone = "swin_B_384_22k" +position_embedding = "sine" +pe_temperatureH = 20 +pe_temperatureW = 20 +return_interm_indices = [1, 2, 3] +backbone_freeze_keywords = None +enc_layers = 6 +dec_layers = 6 +pre_norm = False +dim_feedforward = 2048 +hidden_dim = 256 +dropout = 0.0 +nheads = 8 +num_queries = 900 +query_dim = 4 +num_patterns = 0 +num_feature_levels = 4 +enc_n_points = 4 +dec_n_points = 4 +two_stage_type = "standard" +two_stage_bbox_embed_share = False +two_stage_class_embed_share = False +transformer_activation = "relu" +dec_pred_bbox_embed_share = True +dn_box_noise_scale = 1.0 +dn_label_noise_ratio = 0.5 +dn_label_coef = 1.0 +dn_bbox_coef = 1.0 +embed_init_tgt = True +dn_labelbook_size = 2000 +max_text_len = 256 +text_encoder_type = "bert-base-uncased" +use_text_enhancer = True +use_fusion_layer = True +use_checkpoint = True +use_transformer_ckpt = True +use_text_cross_attention = True +text_dropout = 0.0 +fusion_dropout = 0.0 +fusion_droppath = 0.1 +sub_sentence_present = True diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinT_OGC.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinT_OGC.py new file mode 100644 index 0000000000..9158d5f626 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinT_OGC.py @@ -0,0 +1,43 @@ +batch_size = 1 +modelname = "groundingdino" +backbone = "swin_T_224_1k" +position_embedding = "sine" +pe_temperatureH = 20 +pe_temperatureW = 20 
+return_interm_indices = [1, 2, 3]
+backbone_freeze_keywords = None
+enc_layers = 6
+dec_layers = 6
+pre_norm = False
+dim_feedforward = 2048
+hidden_dim = 256
+dropout = 0.0
+nheads = 8
+num_queries = 900
+query_dim = 4
+num_patterns = 0
+num_feature_levels = 4
+enc_n_points = 4
+dec_n_points = 4
+two_stage_type = "standard"
+two_stage_bbox_embed_share = False
+two_stage_class_embed_share = False
+transformer_activation = "relu"
+dec_pred_bbox_embed_share = True
+dn_box_noise_scale = 1.0
+dn_label_noise_ratio = 0.5
+dn_label_coef = 1.0
+dn_bbox_coef = 1.0
+embed_init_tgt = True
+dn_labelbook_size = 2000
+max_text_len = 256
+text_encoder_type = "bert-base-uncased"
+use_text_enhancer = True
+use_fusion_layer = True
+use_checkpoint = True
+use_transformer_ckpt = True
+use_text_cross_attention = True
+text_dropout = 0.0
+fusion_dropout = 0.0
+fusion_droppath = 0.1
+sub_sentence_present = True
diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/transforms.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/transforms.py
new file mode 100644
index 0000000000..91cf9269e4
--- /dev/null
+++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/transforms.py
@@ -0,0 +1,311 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+Transforms and data augmentation for both image + bbox.
+"""
+import os
+import random
+
+import PIL
+import torch
+import torchvision.transforms as T
+import torchvision.transforms.functional as F
+
+from groundingdino.util.box_ops import box_xyxy_to_cxcywh
+from groundingdino.util.misc import interpolate
+
+
+def crop(image, target, region):
+    cropped_image = F.crop(image, *region)
+
+    target = target.copy()
+    i, j, h, w = region
+
+    # should we do something wrt the original size?
+    target["size"] = torch.tensor([h, w])
+
+    fields = ["labels", "area", "iscrowd", "positive_map"]
+
+    if "boxes" in target:
+        boxes = target["boxes"]
+        max_size = torch.as_tensor([w, h], dtype=torch.float32)
+        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
+        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
+        cropped_boxes = cropped_boxes.clamp(min=0)
+        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
+        target["boxes"] = cropped_boxes.reshape(-1, 4)
+        target["area"] = area
+        fields.append("boxes")
+
+    if "masks" in target:
+        # FIXME should we update the area here if there are no boxes?
+        target["masks"] = target["masks"][:, i : i + h, j : j + w]
+        fields.append("masks")
+
+    # remove elements whose boxes or masks have zero area
+    if "boxes" in target or "masks" in target:
+        # favor boxes selection when defining which elements to keep
+        # this is compatible with previous implementation
+        if "boxes" in target:
+            cropped_boxes = target["boxes"].reshape(-1, 2, 2)
+            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
+        else:
+            keep = target["masks"].flatten(1).any(1)
+
+        for field in fields:
+            if field in target:
+                target[field] = target[field][keep]
+
+    if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO":
+        # for debug and visualization only.
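+        # keep only the per-box positive strings that survived the crop (filtered with the same keep mask)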
+ if "strings_positive" in target: + target["strings_positive"] = [ + _i for _i, _j in zip(target["strings_positive"], keep) if _j + ] + + return cropped_image, target + + +def hflip(image, target): + flipped_image = F.hflip(image) + + w, h = image.size + + target = target.copy() + if "boxes" in target: + boxes = target["boxes"] + boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor( + [w, 0, w, 0] + ) + target["boxes"] = boxes + + if "masks" in target: + target["masks"] = target["masks"].flip(-1) + + return flipped_image, target + + +def resize(image, target, size, max_size=None): + # size can be min_size (scalar) or (w, h) tuple + + def get_size_with_aspect_ratio(image_size, size, max_size=None): + w, h = image_size + if max_size is not None: + min_original_size = float(min((w, h))) + max_original_size = float(max((w, h))) + if max_original_size / min_original_size * size > max_size: + size = int(round(max_size * min_original_size / max_original_size)) + + if (w <= h and w == size) or (h <= w and h == size): + return (h, w) + + if w < h: + ow = size + oh = int(size * h / w) + else: + oh = size + ow = int(size * w / h) + + return (oh, ow) + + def get_size(image_size, size, max_size=None): + if isinstance(size, (list, tuple)): + return size[::-1] + else: + return get_size_with_aspect_ratio(image_size, size, max_size) + + size = get_size(image.size, size, max_size) + rescaled_image = F.resize(image, size) + + if target is None: + return rescaled_image, None + + ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) + ratio_width, ratio_height = ratios + + target = target.copy() + if "boxes" in target: + boxes = target["boxes"] + scaled_boxes = boxes * torch.as_tensor( + [ratio_width, ratio_height, ratio_width, ratio_height] + ) + target["boxes"] = scaled_boxes + + if "area" in target: + area = target["area"] + scaled_area = area * (ratio_width * ratio_height) + target["area"] = scaled_area + + h, w = size + target["size"] = torch.tensor([h, w]) + + if "masks" in target: + target["masks"] = ( + interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5 + ) + + return rescaled_image, target + + +def pad(image, target, padding): + # assumes that we only pad on the bottom right corners + padded_image = F.pad(image, (0, 0, padding[0], padding[1])) + if target is None: + return padded_image, None + target = target.copy() + # should we do something wrt the original size? 
+ target["size"] = torch.tensor(padded_image.size[::-1]) + if "masks" in target: + target["masks"] = torch.nn.functional.pad(target["masks"], (0, padding[0], 0, padding[1])) + return padded_image, target + + +class ResizeDebug(object): + def __init__(self, size): + self.size = size + + def __call__(self, img, target): + return resize(img, target, self.size) + + +class RandomCrop(object): + def __init__(self, size): + self.size = size + + def __call__(self, img, target): + region = T.RandomCrop.get_params(img, self.size) + return crop(img, target, region) + + +class RandomSizeCrop(object): + def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False): + # respect_boxes: True to keep all boxes + # False to tolerence box filter + self.min_size = min_size + self.max_size = max_size + self.respect_boxes = respect_boxes + + def __call__(self, img: PIL.Image.Image, target: dict): + init_boxes = len(target["boxes"]) + max_patience = 10 + for i in range(max_patience): + w = random.randint(self.min_size, min(img.width, self.max_size)) + h = random.randint(self.min_size, min(img.height, self.max_size)) + region = T.RandomCrop.get_params(img, [h, w]) + result_img, result_target = crop(img, target, region) + if ( + not self.respect_boxes + or len(result_target["boxes"]) == init_boxes + or i == max_patience - 1 + ): + return result_img, result_target + return result_img, result_target + + +class CenterCrop(object): + def __init__(self, size): + self.size = size + + def __call__(self, img, target): + image_width, image_height = img.size + crop_height, crop_width = self.size + crop_top = int(round((image_height - crop_height) / 2.0)) + crop_left = int(round((image_width - crop_width) / 2.0)) + return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) + + +class RandomHorizontalFlip(object): + def __init__(self, p=0.5): + self.p = p + + def __call__(self, img, target): + if random.random() < self.p: + return hflip(img, target) + return img, target + + +class RandomResize(object): + def __init__(self, sizes, max_size=None): + assert isinstance(sizes, (list, tuple)) + self.sizes = sizes + self.max_size = max_size + + def __call__(self, img, target=None): + size = random.choice(self.sizes) + return resize(img, target, size, self.max_size) + + +class RandomPad(object): + def __init__(self, max_pad): + self.max_pad = max_pad + + def __call__(self, img, target): + pad_x = random.randint(0, self.max_pad) + pad_y = random.randint(0, self.max_pad) + return pad(img, target, (pad_x, pad_y)) + + +class RandomSelect(object): + """ + Randomly selects between transforms1 and transforms2, + with probability p for transforms1 and (1 - p) for transforms2 + """ + + def __init__(self, transforms1, transforms2, p=0.5): + self.transforms1 = transforms1 + self.transforms2 = transforms2 + self.p = p + + def __call__(self, img, target): + if random.random() < self.p: + return self.transforms1(img, target) + return self.transforms2(img, target) + + +class ToTensor(object): + def __call__(self, img, target): + return F.to_tensor(img), target + + +class RandomErasing(object): + def __init__(self, *args, **kwargs): + self.eraser = T.RandomErasing(*args, **kwargs) + + def __call__(self, img, target): + return self.eraser(img), target + + +class Normalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, image, target=None): + image = F.normalize(image, mean=self.mean, std=self.std) + if target is None: + return image, None + target = target.copy() + h, 
w = image.shape[-2:] + if "boxes" in target: + boxes = target["boxes"] + boxes = box_xyxy_to_cxcywh(boxes) + boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) + target["boxes"] = boxes + return image, target + + +class Compose(object): + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, image, target): + for t in self.transforms: + image, target = t(image, target) + return image, target + + def __repr__(self): + format_string = self.__class__.__name__ + "(" + for t in self.transforms: + format_string += "\n" + format_string += " {0}".format(t) + format_string += "\n)" + return format_string diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/__init__.py new file mode 100644 index 0000000000..2af819d61d --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/__init__.py @@ -0,0 +1,15 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Conditional DETR +# Copyright (c) 2021 Microsoft. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Copied from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# ------------------------------------------------------------------------ + +from .groundingdino import build_groundingdino diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/__init__.py new file mode 100644 index 0000000000..76e4b272b4 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/__init__.py @@ -0,0 +1 @@ +from .backbone import build_backbone diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/backbone.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/backbone.py new file mode 100644 index 0000000000..c8340c723f --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/backbone.py @@ -0,0 +1,221 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Conditional DETR +# Copyright (c) 2021 Microsoft. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Copied from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+# ------------------------------------------------------------------------
+
+"""
+Backbone modules.
+"""
+
+from typing import Dict, List
+
+import torch
+import torch.nn.functional as F
+import torchvision
+from torch import nn
+from torchvision.models._utils import IntermediateLayerGetter
+
+from groundingdino.util.misc import NestedTensor, clean_state_dict, is_main_process
+
+from .position_encoding import build_position_encoding
+from .swin_transformer import build_swin_transformer
+
+
+class FrozenBatchNorm2d(torch.nn.Module):
+    """
+    BatchNorm2d where the batch statistics and the affine parameters are fixed.
+
+    Copy-paste from torchvision.misc.ops with added eps before rsqrt,
+    without which models other than torchvision.models.resnet[18,34,50,101]
+    produce nans.
+    """
+
+    def __init__(self, n):
+        super(FrozenBatchNorm2d, self).__init__()
+        self.register_buffer("weight", torch.ones(n))
+        self.register_buffer("bias", torch.zeros(n))
+        self.register_buffer("running_mean", torch.zeros(n))
+        self.register_buffer("running_var", torch.ones(n))
+
+    def _load_from_state_dict(
+        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+    ):
+        num_batches_tracked_key = prefix + "num_batches_tracked"
+        if num_batches_tracked_key in state_dict:
+            del state_dict[num_batches_tracked_key]
+
+        super(FrozenBatchNorm2d, self)._load_from_state_dict(
+            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+        )
+
+    def forward(self, x):
+        # move reshapes to the beginning
+        # to make it fuser-friendly
+        w = self.weight.reshape(1, -1, 1, 1)
+        b = self.bias.reshape(1, -1, 1, 1)
+        rv = self.running_var.reshape(1, -1, 1, 1)
+        rm = self.running_mean.reshape(1, -1, 1, 1)
+        eps = 1e-5
+        scale = w * (rv + eps).rsqrt()
+        bias = b - rm * scale
+        return x * scale + bias
+
+
+class BackboneBase(nn.Module):
+    def __init__(
+        self,
+        backbone: nn.Module,
+        train_backbone: bool,
+        num_channels: int,
+        return_interm_indices: list,
+    ):
+        super().__init__()
+        for name, parameter in backbone.named_parameters():
+            if (
+                not train_backbone
+                or "layer2" not in name
+                and "layer3" not in name
+                and "layer4" not in name
+            ):
+                parameter.requires_grad_(False)
+
+        return_layers = {}
+        for idx, layer_index in enumerate(return_interm_indices):
+            return_layers.update(
+                {"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)}
+            )
+
+        # if len:
+        #     if use_stage1_feature:
+        #         return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
+        #     else:
+        #         return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
+        # else:
+        #     return_layers = {'layer4': "0"}
+        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
+        self.num_channels = num_channels
+
+    def forward(self, tensor_list: NestedTensor):
+        xs = self.body(tensor_list.tensors)
+        out: Dict[str, NestedTensor] = {}
+        for name, x in xs.items():
+            m = tensor_list.mask
+            assert m is not None
+            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
+            out[name] = NestedTensor(x, mask)
+        # import ipdb; ipdb.set_trace()
+        return out
+
+
+class Backbone(BackboneBase):
+    """ResNet backbone with frozen BatchNorm."""
+
+    def __init__(
+        self,
+        name: str,
+        train_backbone: bool,
+        dilation: bool,
+        return_interm_indices: list,
+        batch_norm=FrozenBatchNorm2d,
+    ):
+        if name in ["resnet18", "resnet34", "resnet50", "resnet101"]:
+            backbone = getattr(torchvision.models, name)(
+                replace_stride_with_dilation=[False, False, dilation],
+                pretrained=is_main_process(),
+                norm_layer=batch_norm,
+            )
+        else:
+            raise NotImplementedError("Unsupported backbone name {}".format(name))
+        # num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
+        assert name not in ("resnet18", "resnet34"), "Only resnet50 and resnet101 are available."
+        assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
+        num_channels_all = [256, 512, 1024, 2048]
+        num_channels = num_channels_all[4 - len(return_interm_indices) :]
+        super().__init__(backbone, train_backbone, num_channels, return_interm_indices)
+
+
+class Joiner(nn.Sequential):
+    def __init__(self, backbone, position_embedding):
+        super().__init__(backbone, position_embedding)
+
+    def forward(self, tensor_list: NestedTensor):
+        xs = self[0](tensor_list)
+        out: List[NestedTensor] = []
+        pos = []
+        for name, x in xs.items():
+            out.append(x)
+            # position encoding
+            pos.append(self[1](x).to(x.tensors.dtype))
+
+        return out, pos
+
+
+def build_backbone(args):
+    """
+    Useful args:
+        - backbone: backbone name
+        - lr_backbone:
+        - dilation
+        - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]
+        - backbone_freeze_keywords:
+        - use_checkpoint: for swin only for now
+
+    """
+    position_embedding = build_position_encoding(args)
+    train_backbone = True
+    if not train_backbone:
+        raise ValueError("Please set lr_backbone > 0")
+    return_interm_indices = args.return_interm_indices
+    assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
+    args.backbone_freeze_keywords
+    use_checkpoint = getattr(args, "use_checkpoint", False)
+
+    if args.backbone in ["resnet50", "resnet101"]:
+        backbone = Backbone(
+            args.backbone,
+            train_backbone,
+            args.dilation,
+            return_interm_indices,
+            batch_norm=FrozenBatchNorm2d,
+        )
+        bb_num_channels = backbone.num_channels
+    elif args.backbone in [
+        "swin_T_224_1k",
+        "swin_B_224_22k",
+        "swin_B_384_22k",
+        "swin_L_224_22k",
+        "swin_L_384_22k",
+    ]:
+        pretrain_img_size = int(args.backbone.split("_")[-2])
+        backbone = build_swin_transformer(
+            args.backbone,
+            pretrain_img_size=pretrain_img_size,
+            out_indices=tuple(return_interm_indices),
+            dilation=False,
+            use_checkpoint=use_checkpoint,
+        )
+
+        bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
+    else:
+        raise NotImplementedError("Unknown backbone {}".format(args.backbone))
+
+    assert len(bb_num_channels) == len(
+        return_interm_indices
+    ), f"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}"
+
+    model = Joiner(backbone, position_embedding)
+    model.num_channels = bb_num_channels
+    assert isinstance(
+        bb_num_channels, List
+    ), "bb_num_channels is expected to be a List but got {}".format(type(bb_num_channels))
+    # import ipdb; ipdb.set_trace()
+    return model
diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/position_encoding.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/position_encoding.py
new file mode 100644
index 0000000000..eac7e896bb
--- /dev/null
+++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/position_encoding.py
@@ -0,0 +1,186 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# DINO +# Copyright (c) 2022 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Conditional DETR +# Copyright (c) 2021 Microsoft. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Copied from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# ------------------------------------------------------------------------ + +""" +Various positional encodings for the transformer. +""" +import math + +import torch +from torch import nn + +from groundingdino.util.misc import NestedTensor + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + + def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + mask = tensor_list.mask + assert mask is not None + not_mask = ~mask + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + # if os.environ.get("SHILONG_AMP", None) == '1': + # eps = 1e-4 + # else: + # eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +class PositionEmbeddingSineHW(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. 
+ """ + + def __init__( + self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None + ): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperatureH = temperatureH + self.temperatureW = temperatureW + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + mask = tensor_list.mask + assert mask is not None + not_mask = ~mask + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + + # import ipdb; ipdb.set_trace() + + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_tx = self.temperatureW ** (2 * (torch.div(dim_tx, 2, rounding_mode='floor')) / self.num_pos_feats) + pos_x = x_embed[:, :, :, None] / dim_tx + + dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_ty = self.temperatureH ** (2 * (torch.div(dim_ty, 2, rounding_mode='floor')) / self.num_pos_feats) + pos_y = y_embed[:, :, :, None] / dim_ty + + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + + # import ipdb; ipdb.set_trace() + + return pos + + +class PositionEmbeddingLearned(nn.Module): + """ + Absolute pos embedding, learned. + """ + + def __init__(self, num_pos_feats=256): + super().__init__() + self.row_embed = nn.Embedding(50, num_pos_feats) + self.col_embed = nn.Embedding(50, num_pos_feats) + self.reset_parameters() + + def reset_parameters(self): + nn.init.uniform_(self.row_embed.weight) + nn.init.uniform_(self.col_embed.weight) + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + h, w = x.shape[-2:] + i = torch.arange(w, device=x.device) + j = torch.arange(h, device=x.device) + x_emb = self.col_embed(i) + y_emb = self.row_embed(j) + pos = ( + torch.cat( + [ + x_emb.unsqueeze(0).repeat(h, 1, 1), + y_emb.unsqueeze(1).repeat(1, w, 1), + ], + dim=-1, + ) + .permute(2, 0, 1) + .unsqueeze(0) + .repeat(x.shape[0], 1, 1, 1) + ) + return pos + + +def build_position_encoding(args): + N_steps = args.hidden_dim // 2 + if args.position_embedding in ("v2", "sine"): + # TODO find a better way of exposing other arguments + position_embedding = PositionEmbeddingSineHW( + N_steps, + temperatureH=args.pe_temperatureH, + temperatureW=args.pe_temperatureW, + normalize=True, + ) + elif args.position_embedding in ("v3", "learned"): + position_embedding = PositionEmbeddingLearned(N_steps) + else: + raise ValueError(f"not supported {args.position_embedding}") + + return position_embedding diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/swin_transformer.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/swin_transformer.py new file mode 100644 index 0000000000..fa8837e400 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/swin_transformer.py @@ -0,0 +1,802 @@ +# 
------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# DINO +# Copyright (c) 2022 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# -------------------------------------------------------- +# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py +# -------------------------------------------------------- + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ + +from groundingdino.util.misc import NestedTensor + + +class Mlp(nn.Module): + """Multilayer perceptron.""" + + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0 + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + """Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__( + self, + dim, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) + ) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=0.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """Forward function. + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B_, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = q @ k.transpose(-2, -1) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1) + ].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 + ) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1 + ).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + """Swin Transformer Block. + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. 
Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__( + self, + dim, + num_heads, + window_size=7, + shift_size=0, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + window_size=to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + ) + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop + ) + + self.H = None + self.W = None + + def forward(self, x, mask_matrix): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + mask_matrix: Attention mask for cyclic shift. + """ + B, L, C = x.shape + H, W = self.H, self.W + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + attn_mask = mask_matrix + else: + shifted_x = x + attn_mask = None + + # partition windows + x_windows = window_partition( + shifted_x, self.window_size + ) # nW*B, window_size, window_size, C + x_windows = x_windows.view( + -1, self.window_size * self.window_size, C + ) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + """Patch Merging Layer + Args: + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x, H, W): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. 
+ """ + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + + # padding + pad_input = (H % 2 == 1) or (W % 2 == 1) + if pad_input: + x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + +class BasicLayer(nn.Module): + """A basic Swin Transformer layer for one stage. + Args: + dim (int): Number of feature channels + depth (int): Depths of this stage. + num_heads (int): Number of attention head. + window_size (int): Local window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__( + self, + dim, + depth, + num_heads, + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False, + ): + super().__init__() + self.window_size = window_size + self.shift_size = window_size // 2 + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList( + [ + SwinTransformerBlock( + dim=dim, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, H, W): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. 
+ """ + + # calculate attention mask for SW-MSA + Hp = int(np.ceil(H / self.window_size)) * self.window_size + Wp = int(np.ceil(W / self.window_size)) * self.window_size + img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device, dtype=x.dtype) # 1 Hp Wp 1 + h_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + w_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition( + img_mask, self.window_size + ) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( + attn_mask == 0, float(0.0) + ) + + for blk in self.blocks: + blk.H, blk.W = H, W + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, attn_mask) + else: + x = blk(x, attn_mask) + if self.downsample is not None: + x_down = self.downsample(x, H, W) + Wh, Ww = (H + 1) // 2, (W + 1) // 2 + return x, H, W, x_down, Wh, Ww + else: + return x, H, W, x, H, W + + +class PatchEmbed(nn.Module): + """Image to Patch Embedding + Args: + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + patch_size = to_2tuple(patch_size) + self.patch_size = patch_size + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + """Forward function.""" + # padding + _, _, H, W = x.size() + if W % self.patch_size[1] != 0: + x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) + if H % self.patch_size[0] != 0: + x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) + + x = self.proj(x) # B C Wh Ww + if self.norm is not None: + Wh, Ww = x.size(2), x.size(3) + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) + + return x + + +class SwinTransformer(nn.Module): + """Swin Transformer backbone. + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + Args: + pretrain_img_size (int): Input image size for training the pretrained model, + used in absolute postion embedding. Default 224. + patch_size (int | tuple(int)): Patch size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + depths (tuple[int]): Depths of each Swin Transformer stage. + num_heads (tuple[int]): Number of attention head of each stage. + window_size (int): Window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. + drop_rate (float): Dropout rate. 
+ attn_drop_rate (float): Attention dropout rate. Default: 0. + drop_path_rate (float): Stochastic depth rate. Default: 0.2. + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. + patch_norm (bool): If True, add normalization after patch embedding. Default: True. + out_indices (Sequence[int]): Output from which stages. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + dilation (bool): if True, the output size if 16x downsample, ow 32x downsample. + """ + + def __init__( + self, + pretrain_img_size=224, + patch_size=4, + in_chans=3, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.2, + norm_layer=nn.LayerNorm, + ape=False, + patch_norm=True, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + dilation=False, + use_checkpoint=False, + ): + super().__init__() + + self.pretrain_img_size = pretrain_img_size + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.dilation = dilation + + # if use_checkpoint: + # print("use_checkpoint!!!!!!!!!!!!!!!!!!!!!!!!") + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None, + ) + + # absolute position embedding + if self.ape: + pretrain_img_size = to_2tuple(pretrain_img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [ + pretrain_img_size[0] // patch_size[0], + pretrain_img_size[1] // patch_size[1], + ] + + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]) + ) + trunc_normal_(self.absolute_pos_embed, std=0.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + # prepare downsample list + downsamplelist = [PatchMerging for i in range(self.num_layers)] + downsamplelist[-1] = None + num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)] + if self.dilation: + downsamplelist[-2] = None + num_features[-1] = int(embed_dim * 2 ** (self.num_layers - 1)) // 2 + for i_layer in range(self.num_layers): + layer = BasicLayer( + # dim=int(embed_dim * 2 ** i_layer), + dim=num_features[i_layer], + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], + norm_layer=norm_layer, + # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + downsample=downsamplelist[i_layer], + use_checkpoint=use_checkpoint, + ) + self.layers.append(layer) + + # num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] + self.num_features = num_features + + # add a norm layer for each output + for i_layer in out_indices: + layer = norm_layer(num_features[i_layer]) + layer_name = f"norm{i_layer}" + 
self.add_module(layer_name, layer) + + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + if self.frozen_stages >= 1 and self.ape: + self.absolute_pos_embed.requires_grad = False + + if self.frozen_stages >= 2: + self.pos_drop.eval() + for i in range(0, self.frozen_stages - 1): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + # def init_weights(self, pretrained=None): + # """Initialize the weights in backbone. + # Args: + # pretrained (str, optional): Path to pre-trained weights. + # Defaults to None. + # """ + + # def _init_weights(m): + # if isinstance(m, nn.Linear): + # trunc_normal_(m.weight, std=.02) + # if isinstance(m, nn.Linear) and m.bias is not None: + # nn.init.constant_(m.bias, 0) + # elif isinstance(m, nn.LayerNorm): + # nn.init.constant_(m.bias, 0) + # nn.init.constant_(m.weight, 1.0) + + # if isinstance(pretrained, str): + # self.apply(_init_weights) + # logger = get_root_logger() + # load_checkpoint(self, pretrained, strict=False, logger=logger) + # elif pretrained is None: + # self.apply(_init_weights) + # else: + # raise TypeError('pretrained must be a str or None') + + def forward_raw(self, x): + """Forward function.""" + x = self.patch_embed(x) + + Wh, Ww = x.size(2), x.size(3) + if self.ape: + # interpolate the position embedding to the corresponding size + absolute_pos_embed = F.interpolate( + self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" + ) + x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C + else: + x = x.flatten(2).transpose(1, 2) + x = self.pos_drop(x) + + outs = [] + for i in range(self.num_layers): + layer = self.layers[i] + x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) + # import ipdb; ipdb.set_trace() + + if i in self.out_indices: + norm_layer = getattr(self, f"norm{i}") + x_out = norm_layer(x_out) + + out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() + outs.append(out) + # in: + # torch.Size([2, 3, 1024, 1024]) + # outs: + # [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \ + # torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])] + return tuple(outs) + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + + """Forward function.""" + x = self.patch_embed(x) + + Wh, Ww = x.size(2), x.size(3) + if self.ape: + # interpolate the position embedding to the corresponding size + absolute_pos_embed = F.interpolate( + self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" + ) + x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C + else: + x = x.flatten(2).transpose(1, 2) + x = self.pos_drop(x) + + outs = [] + for i in range(self.num_layers): + layer = self.layers[i] + x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) + + if i in self.out_indices: + norm_layer = getattr(self, f"norm{i}") + x_out = norm_layer(x_out) + + out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() + outs.append(out) + # in: + # torch.Size([2, 3, 1024, 1024]) + # out: + # [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \ + # torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])] + + # collect for nesttensors + outs_dict = {} + for idx, out_i in enumerate(outs): + m = tensor_list.mask + assert m is not None + mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0] + outs_dict[idx] = NestedTensor(out_i, mask) + + return outs_dict + + 
+    def train(self, mode=True):
+        """Convert the model into training mode while keeping the frozen stages frozen."""
+        super(SwinTransformer, self).train(mode)
+        self._freeze_stages()
+
+
+def build_swin_transformer(modelname, pretrain_img_size, **kw):
+    assert modelname in [
+        "swin_T_224_1k",
+        "swin_B_224_22k",
+        "swin_B_384_22k",
+        "swin_L_224_22k",
+        "swin_L_384_22k",
+    ]
+
+    model_para_dict = {
+        "swin_T_224_1k": dict(
+            embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7
+        ),
+        "swin_B_224_22k": dict(
+            embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7
+        ),
+        "swin_B_384_22k": dict(
+            embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12
+        ),
+        "swin_L_224_22k": dict(
+            embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=7
+        ),
+        "swin_L_384_22k": dict(
+            embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12
+        ),
+    }
+    kw_cfg = model_para_dict[modelname]
+    kw_cfg.update(kw)
+    model = SwinTransformer(pretrain_img_size=pretrain_img_size, **kw_cfg)
+    return model
+
+
+if __name__ == "__main__":
+    model = build_swin_transformer("swin_L_384_22k", 384, dilation=True)
+    x = torch.rand(2, 3, 1024, 1024)
+    y = model.forward_raw(x)
+    import ipdb
+
+    ipdb.set_trace()
+    x = torch.rand(2, 3, 384, 384)
+    y = model.forward_raw(x)
diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/bertwarper.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/bertwarper.py
new file mode 100644
index 0000000000..86211c63f8
--- /dev/null
+++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/bertwarper.py
@@ -0,0 +1,250 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+
+import torch
+from torch import nn
+from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
+
+
+class BertModelWarper(nn.Module):
+    def __init__(self, bert_model):
+        super().__init__()
+        # expose only the submodules we need rather than holding the full model
+
+        self.config = bert_model.config
+        self.embeddings = bert_model.embeddings
+        self.encoder = bert_model.encoder
+        self.pooler = bert_model.pooler
+
+        self.get_extended_attention_mask = bert_model.get_extended_attention_mask
+        self.invert_attention_mask = bert_model.invert_attention_mask
+        self.get_head_mask = bert_model.get_head_mask
+
+    def forward(
+        self,
+        input_ids=None,
+        attention_mask=None,
+        token_type_ids=None,
+        position_ids=None,
+        head_mask=None,
+        inputs_embeds=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_values=None,
+        use_cache=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+    ):
+        r"""
+        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token indices of the encoder input.
This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
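+        # Editor's note (illustrative): `get_extended_attention_mask` turns a
+        # (bs, seq_len) padding mask into a (bs, 1, 1, seq_len) float tensor
+        # holding 0.0 for visible tokens and a large negative value for masked
+        # ones, so it can be added directly to the raw attention scores
+        # before the softmax.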
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': + # import ipdb; ipdb.set_trace() + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class TextEncoderShell(nn.Module): + def __init__(self, text_encoder): + super().__init__() + self.text_encoder = text_encoder + self.config = self.text_encoder.config + + def forward(self, **kw): + # feed into text encoder + return self.text_encoder(**kw) + + +def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer): + """Generate attention mask between each pair of special tokens + Args: + input_ids (torch.Tensor): input ids. Shape: [bs, num_token] + special_tokens_mask (list): special tokens mask. + Returns: + torch.Tensor: attention mask between each special tokens. + """ + input_ids = tokenized["input_ids"] + bs, num_token = input_ids.shape + # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens + special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool() + for special_token in special_tokens_list: + special_tokens_mask |= input_ids == special_token + + # idxs: each row is a list of indices of special tokens + idxs = torch.nonzero(special_tokens_mask) + + # generate attention mask and positional ids + attention_mask = torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1) + position_ids = torch.zeros((bs, num_token), device=input_ids.device) + previous_col = 0 + for i in range(idxs.shape[0]): + row, col = idxs[i] + if (col == 0) or (col == num_token - 1): + attention_mask[row, col, col] = True + position_ids[row, col] = 0 + else: + attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True + position_ids[row, previous_col + 1 : col + 1] = torch.arange(0, col - previous_col, device=input_ids.device) + + previous_col = col + + # # padding mask + # padding_mask = tokenized['attention_mask'] + # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool() + + return attention_mask, position_ids.to(torch.long) + + +def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer): + """Generate attention mask between each pair of special tokens + Args: + input_ids (torch.Tensor): input ids. Shape: [bs, num_token] + special_tokens_mask (list): special tokens mask. + Returns: + torch.Tensor: attention mask between each special tokens. + """ + input_ids = tokenized["input_ids"] + bs, num_token = input_ids.shape + # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens + special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool() + for special_token in special_tokens_list: + special_tokens_mask |= input_ids == special_token + + # idxs: each row is a list of indices of special tokens + idxs = torch.nonzero(special_tokens_mask) + + # generate attention mask and positional ids + attention_mask = torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1) + position_ids = torch.zeros((bs, num_token), device=input_ids.device) + cate_to_token_mask_list = [[] for _ in range(bs)] + previous_col = 0 + for i in range(idxs.shape[0]): + row, col = idxs[i] + if (col == 0) or (col == num_token - 1): + attention_mask[row, col, col] = True + position_ids[row, col] = 0 + else: + attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True + position_ids[row, previous_col + 1 : col + 1] = torch.arange(0, col - previous_col, device=input_ids.device) + c2t_maski = torch.zeros((num_token), device=input_ids.device).bool() + c2t_maski[previous_col + 1 : col] = True + cate_to_token_mask_list[row].append(c2t_maski) + previous_col = col + + cate_to_token_mask_list = [ + torch.stack(cate_to_token_mask_listi, dim=0) for cate_to_token_mask_listi in cate_to_token_mask_list + ] + + # # padding mask + # padding_mask = tokenized['attention_mask'] + # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool() + + return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/fuse_modules.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/fuse_modules.py new file mode 100644 index 0000000000..e222dadb14 --- /dev/null +++ 
b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/fuse_modules.py @@ -0,0 +1,295 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ + +import torch +import torch.nn as nn +import torch.nn.functional as F +from timm.models.layers import DropPath + + +class FeatureResizer(nn.Module): + """ + This class takes as input a set of embeddings of dimension C1 and outputs a set of + embedding of dimension C2, after a linear transformation, dropout and normalization (LN). + """ + + def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): + super().__init__() + self.do_ln = do_ln + # Object feature encoding + self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) + self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) + self.dropout = nn.Dropout(dropout) + + def forward(self, encoder_features): + x = self.fc(encoder_features) + if self.do_ln: + x = self.layer_norm(x) + output = self.dropout(x) + return output + + +def l1norm(X, dim, eps=1e-8): + """L1-normalize columns of X""" + norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps + X = torch.div(X, norm) + return X + + +def l2norm(X, dim, eps=1e-8): + """L2-normalize columns of X""" + norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps + X = torch.div(X, norm) + return X + + +def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8): + """ + query: (n_context, queryL, d) + context: (n_context, sourceL, d) + """ + _, queryL = query.size(0), query.size(1) + batch_size, sourceL = context.size(0), context.size(1) + + # Get attention + # --> (batch, d, queryL) + queryT = torch.transpose(query, 1, 2) + + # (batch, sourceL, d)(batch, d, queryL) + # --> (batch, sourceL, queryL) + attn = torch.bmm(context, queryT) + if raw_feature_norm == "softmax": + # --> (batch*sourceL, queryL) + attn = attn.view(batch_size * sourceL, queryL) + attn = nn.Softmax()(attn) + # --> (batch, sourceL, queryL) + attn = attn.view(batch_size, sourceL, queryL) + elif raw_feature_norm == "l2norm": + attn = l2norm(attn, 2) + elif raw_feature_norm == "clipped_l2norm": + attn = nn.LeakyReLU(0.1)(attn) + attn = l2norm(attn, 2) + else: + raise ValueError("unknown first norm type:", raw_feature_norm) + # --> (batch, queryL, sourceL) + attn = torch.transpose(attn, 1, 2).contiguous() + # --> (batch*queryL, sourceL) + attn = attn.view(batch_size * queryL, sourceL) + attn = nn.Softmax()(attn * smooth) + # --> (batch, queryL, sourceL) + attn = attn.view(batch_size, queryL, sourceL) + # --> (batch, sourceL, queryL) + attnT = torch.transpose(attn, 1, 2).contiguous() + + # --> (batch, d, sourceL) + contextT = torch.transpose(context, 1, 2) + # (batch x d x sourceL)(batch x sourceL x queryL) + # --> (batch, d, queryL) + weightedContext = torch.bmm(contextT, attnT) + # --> (batch, queryL, d) + weightedContext = torch.transpose(weightedContext, 1, 2) + + return weightedContext, attnT + + +class BiMultiHeadAttention(nn.Module): + def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None): + super(BiMultiHeadAttention, self).__init__() + + self.embed_dim = embed_dim + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + self.v_dim = v_dim + self.l_dim = l_dim 
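+
+        # Editor's note (illustrative): `v` holds vision tokens of shape
+        # (bs, n_img, v_dim) and `l` holds language tokens of shape
+        # (bs, n_text, l_dim); both are projected below into a shared
+        # `embed_dim` space that is split across `num_heads` attention heads
+        # for the bidirectional (image<->text) attention.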
+ + assert ( + self.head_dim * self.num_heads == self.embed_dim + ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and \ + `num_heads`: {self.num_heads})." + self.scale = self.head_dim ** (-0.5) + self.dropout = dropout + + self.v_proj = nn.Linear(self.v_dim, self.embed_dim) + self.l_proj = nn.Linear(self.l_dim, self.embed_dim) + self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim) + self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim) + + self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim) + self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim) + + self.stable_softmax_2d = True + self.clamp_min_for_underflow = True + self.clamp_max_for_overflow = True + + self._reset_parameters() + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def _reset_parameters(self): + nn.init.xavier_uniform_(self.v_proj.weight) + self.v_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(self.l_proj.weight) + self.l_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(self.values_v_proj.weight) + self.values_v_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(self.values_l_proj.weight) + self.values_l_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(self.out_v_proj.weight) + self.out_v_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(self.out_l_proj.weight) + self.out_l_proj.bias.data.fill_(0) + + def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): + """_summary_ + + Args: + v (_type_): bs, n_img, dim + l (_type_): bs, n_text, dim + attention_mask_v (_type_, optional): _description_. bs, n_img + attention_mask_l (_type_, optional): _description_. bs, n_text + + Returns: + _type_: _description_ + """ + # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': + # import ipdb; ipdb.set_trace() + bsz, tgt_len, _ = v.size() + + query_states = self.v_proj(v) * self.scale + key_states = self._shape(self.l_proj(l), -1, bsz) + value_v_states = self._shape(self.values_v_proj(v), -1, bsz) + value_l_states = self._shape(self.values_l_proj(l), -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_v_states = value_v_states.view(*proj_shape) + value_l_states = value_l_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, \ + but is {attn_weights.size()}" + ) + + if self.stable_softmax_2d: + attn_weights = attn_weights - attn_weights.max() + + if self.clamp_min_for_underflow: + attn_weights = torch.clamp( + attn_weights, min=-50000 + ) # Do not increase -50000, data type half has quite limited range + if self.clamp_max_for_overflow: + attn_weights = torch.clamp( + attn_weights, max=50000 + ) # Do not increase 50000, data type half has quite limited range + + attn_weights_T = attn_weights.transpose(1, 2) + attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0] + if self.clamp_min_for_underflow: + attn_weights_l = torch.clamp( + attn_weights_l, min=-50000 + ) # Do not increase -50000, data type half has quite limited range + if self.clamp_max_for_overflow: + attn_weights_l = torch.clamp( + attn_weights_l, max=50000 
+            )  # Do not increase 50000, data type half has quite limited range
+
+        # mask vision for language
+        if attention_mask_v is not None:
+            attention_mask_v = attention_mask_v[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
+            attn_weights_l.masked_fill_(attention_mask_v, float("-inf"))
+
+        attn_weights_l = attn_weights_l.softmax(dim=-1)
+
+        # mask language for vision
+        if attention_mask_l is not None:
+            attention_mask_l = attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
+            attn_weights.masked_fill_(attention_mask_l, float("-inf"))
+        attn_weights_v = attn_weights.softmax(dim=-1)
+
+        attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training)
+        attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training)
+
+        attn_output_v = torch.bmm(attn_probs_v, value_l_states)
+        attn_output_l = torch.bmm(attn_probs_l, value_v_states)
+
+        if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+            raise ValueError(
+                f"`attn_output_v` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, \
+                but is {attn_output_v.size()}"
+            )
+
+        if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim):
+            raise ValueError(
+                f"`attn_output_l` should be of size {(bsz, self.num_heads, src_len, self.head_dim)}, \
+                but is {attn_output_l.size()}"
+            )
+
+        attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim)
+        attn_output_v = attn_output_v.transpose(1, 2)
+        attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim)
+
+        attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim)
+        attn_output_l = attn_output_l.transpose(1, 2)
+        attn_output_l = attn_output_l.reshape(bsz, src_len, self.embed_dim)
+
+        attn_output_v = self.out_v_proj(attn_output_v)
+        attn_output_l = self.out_l_proj(attn_output_l)
+
+        return attn_output_v, attn_output_l
+
+
+# Bi-Direction MHA (text->image, image->text)
+class BiAttentionBlock(nn.Module):
+    def __init__(
+        self,
+        v_dim,
+        l_dim,
+        embed_dim,
+        num_heads,
+        dropout=0.1,
+        drop_path=0.0,
+        init_values=1e-4,
+        cfg=None,
+    ):
+        """
+        Inputs:
+            embed_dim - Dimensionality of input and attention feature vectors
+            hidden_dim - Dimensionality of hidden layer in feed-forward network
+                         (usually 2-4x larger than embed_dim)
+            num_heads - Number of heads to use in the Multi-Head Attention block
+            dropout - Amount of dropout to apply in the feed-forward network
+        """
+        super(BiAttentionBlock, self).__init__()
+
+        # pre layer norm
+        self.layer_norm_v = nn.LayerNorm(v_dim)
+        self.layer_norm_l = nn.LayerNorm(l_dim)
+        self.attn = BiMultiHeadAttention(
+            v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout
+        )
+
+        # add layer scale for training stability
+        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+        self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True)
+        self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True)
+
+    def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
+        v = self.layer_norm_v(v)
+        l = self.layer_norm_l(l)
+        delta_v, delta_l = self.attn(v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l)
+        # v, l = v + delta_v, l + delta_l
+        v = v + self.drop_path(self.gamma_v * delta_v)
+        l = l + self.drop_path(self.gamma_l * delta_l)
+        return v, l
diff --git
a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/groundingdino.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/groundingdino.py new file mode 100644 index 0000000000..b76df92e13 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/groundingdino.py @@ -0,0 +1,358 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Conditional DETR model and criterion classes. +# Copyright (c) 2021 Microsoft. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# ------------------------------------------------------------------------ +import copy +from typing import List + +import torch +import torch.nn.functional as F +from torch import nn + +from groundingdino.util import get_tokenlizer +from groundingdino.util.misc import NestedTensor, inverse_sigmoid, nested_tensor_from_tensor_list + +from ..registry import MODULE_BUILD_FUNCS +from .backbone import build_backbone +from .bertwarper import BertModelWarper, generate_masks_with_special_tokens_and_transfer_map +from .transformer import build_transformer +from .utils import MLP, ContrastiveEmbed + + +class GroundingDINO(nn.Module): + """This is the Cross-Attention Detector module that performs object detection""" + + def __init__( + self, + backbone, + transformer, + num_queries, + aux_loss=False, + iter_update=False, + query_dim=2, + num_feature_levels=1, + nheads=8, + # two stage + two_stage_type="no", # ['no', 'standard'] + dec_pred_bbox_embed_share=True, + two_stage_class_embed_share=True, + two_stage_bbox_embed_share=True, + num_patterns=0, + dn_number=100, + dn_box_noise_scale=0.4, + dn_label_noise_ratio=0.5, + dn_labelbook_size=100, + text_encoder_type="bert-base-uncased", + sub_sentence_present=True, + max_text_len=256, + ): + """Initializes the model. + Parameters: + backbone: torch module of the backbone to be used. See backbone.py + transformer: torch module of the transformer architecture. See transformer.py + num_queries: number of object queries, ie detection slot. This is the maximal number of objects + Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. + aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
+ """ + super().__init__() + self.num_queries = num_queries + self.transformer = transformer + self.hidden_dim = hidden_dim = transformer.d_model + self.num_feature_levels = num_feature_levels + self.nheads = nheads + self.max_text_len = 256 + self.sub_sentence_present = sub_sentence_present + + # setting query dim + self.query_dim = query_dim + assert query_dim == 4 + + # for dn training + self.num_patterns = num_patterns + self.dn_number = dn_number + self.dn_box_noise_scale = dn_box_noise_scale + self.dn_label_noise_ratio = dn_label_noise_ratio + self.dn_labelbook_size = dn_labelbook_size + + # bert + self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) + self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) + self.bert.pooler.dense.weight.requires_grad_(False) + self.bert.pooler.dense.bias.requires_grad_(False) + self.bert = BertModelWarper(bert_model=self.bert) + + self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) + nn.init.constant_(self.feat_map.bias.data, 0) + nn.init.xavier_uniform_(self.feat_map.weight.data) + # freeze + + # special tokens + self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) + + # prepare input projection layers + if num_feature_levels > 1: + num_backbone_outs = len(backbone.num_channels) + input_proj_list = [] + for _ in range(num_backbone_outs): + in_channels = backbone.num_channels[_] + input_proj_list.append( + nn.Sequential( + nn.Conv2d(in_channels, hidden_dim, kernel_size=1), + nn.GroupNorm(32, hidden_dim), + ) + ) + for _ in range(num_feature_levels - num_backbone_outs): + input_proj_list.append( + nn.Sequential( + nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), + nn.GroupNorm(32, hidden_dim), + ) + ) + in_channels = hidden_dim + self.input_proj = nn.ModuleList(input_proj_list) + else: + assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" + self.input_proj = nn.ModuleList( + [ + nn.Sequential( + nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), + nn.GroupNorm(32, hidden_dim), + ) + ] + ) + + self.backbone = backbone + self.aux_loss = aux_loss + self.box_pred_damping = None + + self.iter_update = iter_update + assert iter_update, "Why not iter_update?" 
+
+        # prepare pred layers
+        self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share
+        # prepare class & box embed
+        _class_embed = ContrastiveEmbed()
+
+        _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
+        nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)
+        nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)
+
+        if dec_pred_bbox_embed_share:
+            box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)]
+        else:
+            box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)]
+        class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)]
+        self.bbox_embed = nn.ModuleList(box_embed_layerlist)
+        self.class_embed = nn.ModuleList(class_embed_layerlist)
+        self.transformer.decoder.bbox_embed = self.bbox_embed
+        self.transformer.decoder.class_embed = self.class_embed
+
+        # two stage
+        self.two_stage_type = two_stage_type
+        assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format(two_stage_type)
+        if two_stage_type != "no":
+            if two_stage_bbox_embed_share:
+                assert dec_pred_bbox_embed_share
+                self.transformer.enc_out_bbox_embed = _bbox_embed
+            else:
+                self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed)
+
+            if two_stage_class_embed_share:
+                assert dec_pred_bbox_embed_share
+                self.transformer.enc_out_class_embed = _class_embed
+            else:
+                self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed)
+
+        self.refpoint_embed = None
+
+        self._reset_parameters()
+
+    def _reset_parameters(self):
+        # init input_proj
+        for proj in self.input_proj:
+            nn.init.xavier_uniform_(proj[0].weight, gain=1)
+            nn.init.constant_(proj[0].bias, 0)
+
+    def init_ref_points(self, use_num_queries):
+        self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim)
+
+    def forward(self, samples: NestedTensor, targets: List = None, **kw):
+        """The forward expects a NestedTensor, which consists of:
+           - samples.tensor: batched images, of shape [batch_size x 3 x H x W]
+           - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
+
+        It returns a dict with the following elements:
+           - "pred_logits": the classification logits (including no-object) for all queries.
+                            Shape: [batch_size x num_queries x num_classes]
+           - "pred_boxes": the normalized box coordinates for all queries, represented as
+                           (center_x, center_y, width, height). These values are normalized in [0, 1],
+                           relative to the size of each individual image (disregarding possible padding).
+                           See PostProcess for information on how to retrieve the unnormalized bounding box.
+           - "aux_outputs": optional, only returned when auxiliary losses are activated. It is a list of
+                            dictionaries containing the two above keys for each decoder layer.
+ """ + if targets is None: + captions = kw["captions"] + else: + captions = [t["caption"] for t in targets] + len(captions) + + # encoder texts + tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to(samples.device) + ( + text_self_attention_masks, + position_ids, + cate_to_token_mask_list, + ) = generate_masks_with_special_tokens_and_transfer_map(tokenized, self.specical_tokens, self.tokenizer) + + if text_self_attention_masks.shape[1] > self.max_text_len: + text_self_attention_masks = text_self_attention_masks[:, : self.max_text_len, : self.max_text_len] + position_ids = position_ids[:, : self.max_text_len] + tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len] + tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len] + tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len] + + # extract text embeddings + if self.sub_sentence_present: + tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"} + tokenized_for_encoder["attention_mask"] = text_self_attention_masks + tokenized_for_encoder["position_ids"] = position_ids + else: + # import ipdb; ipdb.set_trace() + tokenized_for_encoder = tokenized + + bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768 + + encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model + text_token_mask = tokenized.attention_mask.bool() # bs, 195 + # text_token_mask: True for nomask, False for mask + # text_self_attention_masks: True for nomask, False for mask + + if encoded_text.shape[1] > self.max_text_len: + encoded_text = encoded_text[:, : self.max_text_len, :] + text_token_mask = text_token_mask[:, : self.max_text_len] + position_ids = position_ids[:, : self.max_text_len] + text_self_attention_masks = text_self_attention_masks[:, : self.max_text_len, : self.max_text_len] + + text_dict = { + "encoded_text": encoded_text, # bs, 195, d_model + "text_token_mask": text_token_mask, # bs, 195 + "position_ids": position_ids, # bs, 195 + "text_self_attention_masks": text_self_attention_masks, # bs, 195,195 + } + + # import ipdb; ipdb.set_trace() + + if isinstance(samples, (list, torch.Tensor)): + samples = nested_tensor_from_tensor_list(samples) + features, poss = self.backbone(samples) + + srcs = [] + masks = [] + for l, feat in enumerate(features): + src, mask = feat.decompose() + srcs.append(self.input_proj[l](src)) + masks.append(mask) + assert mask is not None + if self.num_feature_levels > len(srcs): + _len_srcs = len(srcs) + for l in range(_len_srcs, self.num_feature_levels): + if l == _len_srcs: + src = self.input_proj[l](features[-1].tensors) + else: + src = self.input_proj[l](srcs[-1]) + m = samples.mask + mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] + pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) + srcs.append(src) + masks.append(mask) + poss.append(pos_l) + + input_query_bbox = input_query_label = attn_mask = None + hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer( + srcs, masks, input_query_bbox, poss, input_query_label, attn_mask, text_dict + ) + + # deformable-detr-like anchor update + outputs_coord_list = [] + for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): + layer_delta_unsig = layer_bbox_embed(layer_hs) + layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) + layer_outputs_unsig = layer_outputs_unsig.sigmoid() + 
outputs_coord_list.append(layer_outputs_unsig) + outputs_coord_list = torch.stack(outputs_coord_list) + + # output + outputs_class = torch.stack( + [layer_cls_embed(layer_hs, text_dict) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)] + ) + out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord_list[-1]} + + # # for intermediate outputs + # if self.aux_loss: + # out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) + + # # for encoder output + # if hs_enc is not None: + # # prepare intermediate outputs + # interm_coord = ref_enc[-1] + # interm_class = self.transformer.enc_out_class_embed(hs_enc[-1], text_dict) + # out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} + # out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} + + return out + + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_coord): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. + return [{"pred_logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] + + +@MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino") +def build_groundingdino(args): + + backbone = build_backbone(args) + transformer = build_transformer(args) + + dn_labelbook_size = args.dn_labelbook_size + dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share + sub_sentence_present = args.sub_sentence_present + + model = GroundingDINO( + backbone, + transformer, + num_queries=args.num_queries, + aux_loss=True, + iter_update=True, + query_dim=4, + num_feature_levels=args.num_feature_levels, + nheads=args.nheads, + dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, + two_stage_type=args.two_stage_type, + two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, + two_stage_class_embed_share=args.two_stage_class_embed_share, + num_patterns=args.num_patterns, + dn_number=0, + dn_box_noise_scale=args.dn_box_noise_scale, + dn_label_noise_ratio=args.dn_label_noise_ratio, + dn_labelbook_size=dn_labelbook_size, + text_encoder_type=args.text_encoder_type, + sub_sentence_present=sub_sentence_present, + max_text_len=args.max_text_len, + ) + + return model diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/ms_deform_attn.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/ms_deform_attn.py new file mode 100644 index 0000000000..9f5c39c4fb --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/ms_deform_attn.py @@ -0,0 +1,340 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------------------------------
+# Modified from:
+# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/functions/ms_deform_attn_func.py
+# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
+# https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/multi_scale_deform_attn.py
+# ------------------------------------------------------------------------------------------------
+
+import math
+import warnings
+from typing import Optional
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.init import constant_, xavier_uniform_
+
+
+# helpers
+def _is_power_of_2(n):
+    if (not isinstance(n, int)) or (n < 0):
+        raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
+    return (n & (n - 1) == 0) and n != 0
+
+
+def multi_scale_deformable_attn_pytorch(
+    value: torch.Tensor,
+    value_spatial_shapes: torch.Tensor,
+    sampling_locations: torch.Tensor,
+    attention_weights: torch.Tensor,
+) -> torch.Tensor:
+
+    bs, _, num_heads, embed_dims = value.shape
+    _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
+    value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
+    sampling_grids = 2 * sampling_locations - 1
+    sampling_value_list = []
+    for level, (H_, W_) in enumerate(value_spatial_shapes):
+        # bs, H_*W_, num_heads, embed_dims ->
+        # bs, H_*W_, num_heads*embed_dims ->
+        # bs, num_heads*embed_dims, H_*W_ ->
+        # bs*num_heads, embed_dims, H_, W_
+        value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_)
+        # bs, num_queries, num_heads, num_points, 2 ->
+        # bs, num_heads, num_queries, num_points, 2 ->
+        # bs*num_heads, num_queries, num_points, 2
+        sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
+        # bs*num_heads, embed_dims, num_queries, num_points
+        sampling_value_l_ = F.grid_sample(
+            value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
+        )
+        sampling_value_list.append(sampling_value_l_)
+    # (bs, num_queries, num_heads, num_levels, num_points) ->
+    # (bs, num_heads, num_queries, num_levels, num_points) ->
+    # (bs*num_heads, 1, num_queries, num_levels*num_points)
+    attention_weights = attention_weights.transpose(1, 2).reshape(
+        bs * num_heads, 1, num_queries, num_levels * num_points
+    )
+    output = (
+        (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
+        .sum(-1)
+        .view(bs, num_heads * embed_dims, num_queries)
+    )
+    return output.transpose(1, 2).contiguous()
+
+
+class MultiScaleDeformableAttention(nn.Module):
+    """Multi-Scale Deformable Attention Module used in Deformable-DETR.
+
+    `Deformable DETR: Deformable Transformers for End-to-End Object Detection
+    <https://arxiv.org/abs/2010.04159>`_.
+
+    Args:
+        embed_dim (int): The embedding dimension of Attention. Default: 256.
+        num_heads (int): The number of attention heads. Default: 8.
+        num_levels (int): The number of feature maps used in Attention. Default: 4.
+        num_points (int): The number of sampling points for each query
+            in each head. Default: 4.
+        img2col_step (int): The step used in image-to-column rearrangement. Default: 64.
+        batch_first (bool): If ``True``, the input and output tensors are
+            provided as `(bs, n, embed_dim)`; otherwise as
`(n, bs, embed_dim)` + """ + + def __init__( + self, + embed_dim: int = 256, + num_heads: int = 8, + num_levels: int = 4, + num_points: int = 4, + img2col_step: int = 64, + batch_first: bool = False, + ): + super().__init__() + if embed_dim % num_heads != 0: + raise ValueError("embed_dim must be divisible by num_heads, but got {} and {}".format(embed_dim, num_heads)) + head_dim = embed_dim // num_heads + + self.batch_first = batch_first + + if not _is_power_of_2(head_dim): + warnings.warn( + """ + You'd better set d_model in MSDeformAttn to make sure that + each dim of the attention head a power of 2, which is more efficient. + """ + ) + + self.im2col_step = img2col_step + self.embed_dim = embed_dim + self.num_heads = num_heads + self.num_levels = num_levels + self.num_points = num_points + self.sampling_offsets = nn.Linear(embed_dim, num_heads * num_levels * num_points * 2) + self.attention_weights = nn.Linear(embed_dim, num_heads * num_levels * num_points) + self.value_proj = nn.Linear(embed_dim, embed_dim) + self.output_proj = nn.Linear(embed_dim, embed_dim) + + self.init_weights() + + def _reset_parameters(self): + return self.init_weights() + + def init_weights(self): + """ + Default initialization for Parameters of Module. + """ + constant_(self.sampling_offsets.weight.data, 0.0) + thetas = torch.arange(self.num_heads, dtype=torch.float32) * (2.0 * math.pi / self.num_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = ( + (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) + .view(self.num_heads, 1, 1, 2) + .repeat(1, self.num_levels, self.num_points, 1) + ) + for i in range(self.num_points): + grid_init[:, :, i, :] *= i + 1 + with torch.no_grad(): + self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) + constant_(self.attention_weights.weight.data, 0.0) + constant_(self.attention_weights.bias.data, 0.0) + xavier_uniform_(self.value_proj.weight.data) + constant_(self.value_proj.bias.data, 0.0) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.0) + + def freeze_sampling_offsets(self): + print("Freeze sampling offsets") + self.sampling_offsets.weight.requires_grad = False + self.sampling_offsets.bias.requires_grad = False + + def freeze_attention_weights(self): + print("Freeze attention weights") + self.attention_weights.weight.requires_grad = False + self.attention_weights.bias.requires_grad = False + + def forward( + self, + query: torch.Tensor, + key: Optional[torch.Tensor] = None, + value: Optional[torch.Tensor] = None, + query_pos: Optional[torch.Tensor] = None, + key_padding_mask: Optional[torch.Tensor] = None, + reference_points: Optional[torch.Tensor] = None, + spatial_shapes: Optional[torch.Tensor] = None, + level_start_index: Optional[torch.Tensor] = None, + **kwargs + ) -> torch.Tensor: + """Forward Function of MultiScaleDeformableAttention + + Args: + query (torch.Tensor): Query embeddings with shape + `(num_query, bs, embed_dim)` + key (torch.Tensor): Key embeddings with shape + `(num_key, bs, embed_dim)` + value (torch.Tensor): Value embeddings with shape + `(num_key, bs, embed_dim)` + query_pos (torch.Tensor): The position embedding for `query`. Default: None. + key_padding_mask (torch.Tensor): ByteTensor for `query`, with shape `(bs, num_key)`, + indicating which elements within `key` to be ignored in attention. 
+            reference_points (torch.Tensor): The normalized reference points
+                with shape `(bs, num_query, num_levels, 2)`,
+                all elements are in range [0, 1], top-left (0, 0),
+                bottom-right (1, 1), including the padding area;
+                or `(N, Length_{query}, num_levels, 4)`, with two additional
+                dimensions `(h, w)` to form reference boxes.
+            spatial_shapes (torch.Tensor): Spatial shape of features in different levels.
+                With shape `(num_levels, 2)`, last dimension represents `(h, w)`.
+            level_start_index (torch.Tensor): The start index of each level. A tensor with
+                shape `(num_levels, )` which can be represented as
+                `[0, h_0 * w_0, h_0 * w_0 + h_1 * w_1, ...]`.
+
+        Returns:
+            torch.Tensor: forward results with shape `(num_query, bs, embed_dim)`
+        """
+
+        if value is None:
+            value = query
+
+        if query_pos is not None:
+            query = query + query_pos
+
+        if not self.batch_first:
+            # change to (bs, num_query, embed_dims)
+            query = query.permute(1, 0, 2)
+            value = value.permute(1, 0, 2)
+
+        bs, num_query, _ = query.shape
+        bs, num_value, _ = value.shape
+
+        assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
+
+        value = self.value_proj(value)
+        if key_padding_mask is not None:
+            value = value.masked_fill(key_padding_mask[..., None], float(0))
+        value = value.view(bs, num_value, self.num_heads, -1)
+        sampling_offsets = self.sampling_offsets(query).view(
+            bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
+        )
+        attention_weights = self.attention_weights(query).view(
+            bs, num_query, self.num_heads, self.num_levels * self.num_points
+        )
+        attention_weights = attention_weights.softmax(-1)
+        attention_weights = attention_weights.view(
+            bs,
+            num_query,
+            self.num_heads,
+            self.num_levels,
+            self.num_points,
+        )
+
+        # bs, num_query, num_heads, num_levels, num_points, 2
+        if reference_points.shape[-1] == 2:
+            offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
+            sampling_locations = (
+                reference_points[:, :, None, :, None, :]
+                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
+            )
+        elif reference_points.shape[-1] == 4:
+            sampling_locations = (
+                reference_points[:, :, None, :, None, :2]
+                + sampling_offsets / self.num_points * reference_points[:, :, None, :, None, 2:] * 0.5
+            )
+        else:
+            raise ValueError(
+                "Last dim of reference_points must be 2 or 4, but got {} instead.".format(reference_points.shape[-1])
+            )
+
+        # The custom CUDA kernel (MultiScaleDeformableAttnFunction) is not shipped
+        # with this vendored copy, so the pure-PyTorch fallback is always used:
+        output = multi_scale_deformable_attn_pytorch(value, spatial_shapes, sampling_locations, attention_weights)
+
+        output = self.output_proj(output)
+
+        if not self.batch_first:
+            output = output.permute(1, 0, 2)
+
+        return output
+
+
+def create_dummy_class(klass, dependency, message=""):
+    """
+    When a dependency of a class is not available, create a dummy class which throws ImportError
+    when used.
+
+    Args:
+        klass (str): name of the class.
+        dependency (str): name of the dependency.
+ message: extra message to print + Returns: + class: a class object + """ + err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass) + if message: + err = err + " " + message + + class _DummyMetaClass(type): + # throw error on class attribute access + def __getattr__(_, __): # noqa: B902 + raise ImportError(err) + + class _Dummy(object, metaclass=_DummyMetaClass): + # throw error on constructor + def __init__(self, *args, **kwargs): + raise ImportError(err) + + return _Dummy + + +def create_dummy_func(func, dependency, message=""): + """ + When a dependency of a function is not available, create a dummy function which throws + ImportError when used. + + Args: + func (str): name of the function. + dependency (str or list[str]): name(s) of the dependency. + message: extra message to print + Returns: + function: a function object + """ + err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func) + if message: + err = err + " " + message + + if isinstance(dependency, (list, tuple)): + dependency = ",".join(dependency) + + def _dummy(*args, **kwargs): + raise ImportError(err) + + return _dummy diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer.py new file mode 100644 index 0000000000..1a25469604 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer.py @@ -0,0 +1,922 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# DINO +# Copyright (c) 2022 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Conditional DETR Transformer class. +# Copyright (c) 2021 Microsoft. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+# ------------------------------------------------------------------------ + +from typing import Optional + +import torch +import torch.utils.checkpoint as checkpoint +from torch import Tensor, nn + +from groundingdino.util.misc import inverse_sigmoid + +from .fuse_modules import BiAttentionBlock +from .ms_deform_attn import MultiScaleDeformableAttention as MSDeformAttn +from .transformer_vanilla import TransformerEncoderLayer +from .utils import (MLP, _get_activation_fn, _get_clones, + gen_encoder_output_proposals, gen_sineembed_for_position, + get_sine_pos_embed) + + +class Transformer(nn.Module): + def __init__( + self, + d_model=256, + nhead=8, + num_queries=300, + num_encoder_layers=6, + num_unicoder_layers=0, + num_decoder_layers=6, + dim_feedforward=2048, + dropout=0.0, + activation="relu", + normalize_before=False, + return_intermediate_dec=False, + query_dim=4, + num_patterns=0, + # for deformable encoder + num_feature_levels=1, + enc_n_points=4, + dec_n_points=4, + # init query + learnable_tgt_init=False, + # two stage + two_stage_type="no", # ['no', 'standard', 'early', 'combine', 'enceachlayer', 'enclayer1'] + embed_init_tgt=False, + # for text + use_text_enhancer=False, + use_fusion_layer=False, + use_checkpoint=False, + use_transformer_ckpt=False, + use_text_cross_attention=False, + text_dropout=0.1, + fusion_dropout=0.1, + fusion_droppath=0.0, + ): + super().__init__() + self.num_feature_levels = num_feature_levels + self.num_encoder_layers = num_encoder_layers + self.num_unicoder_layers = num_unicoder_layers + self.num_decoder_layers = num_decoder_layers + self.num_queries = num_queries + assert query_dim == 4 + + # choose encoder layer type + encoder_layer = DeformableTransformerEncoderLayer( + d_model, dim_feedforward, dropout, activation, num_feature_levels, nhead, enc_n_points + ) + + if use_text_enhancer: + text_enhance_layer = TransformerEncoderLayer( + d_model=d_model, + nhead=nhead // 2, + dim_feedforward=dim_feedforward // 2, + dropout=text_dropout, + ) + else: + text_enhance_layer = None + + if use_fusion_layer: + feature_fusion_layer = BiAttentionBlock( + v_dim=d_model, + l_dim=d_model, + embed_dim=dim_feedforward // 2, + num_heads=nhead // 2, + dropout=fusion_dropout, + drop_path=fusion_droppath, + ) + else: + feature_fusion_layer = None + + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + assert encoder_norm is None + self.encoder = TransformerEncoder( + encoder_layer, + num_encoder_layers, + d_model=d_model, + num_queries=num_queries, + text_enhance_layer=text_enhance_layer, + feature_fusion_layer=feature_fusion_layer, + use_checkpoint=use_checkpoint, + use_transformer_ckpt=use_transformer_ckpt, + ) + + # choose decoder layer type + decoder_layer = DeformableTransformerDecoderLayer( + d_model, + dim_feedforward, + dropout, + activation, + num_feature_levels, + nhead, + dec_n_points, + use_text_cross_attention=use_text_cross_attention, + ) + + decoder_norm = nn.LayerNorm(d_model) + self.decoder = TransformerDecoder( + decoder_layer, + num_decoder_layers, + decoder_norm, + return_intermediate=return_intermediate_dec, + d_model=d_model, + query_dim=query_dim, + num_feature_levels=num_feature_levels, + ) + + self.d_model = d_model + self.nhead = nhead + self.dec_layers = num_decoder_layers + self.num_queries = num_queries # useful for single stage model only + self.num_patterns = num_patterns + if not isinstance(num_patterns, int): + Warning("num_patterns should be int but {}".format(type(num_patterns))) + self.num_patterns = 0 + + if 
num_feature_levels > 1: + if self.num_encoder_layers > 0: + self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) + else: + self.level_embed = None + + self.learnable_tgt_init = learnable_tgt_init + assert learnable_tgt_init, "why not learnable_tgt_init" + self.embed_init_tgt = embed_init_tgt + if (two_stage_type != "no" and embed_init_tgt) or (two_stage_type == "no"): + self.tgt_embed = nn.Embedding(self.num_queries, d_model) + nn.init.normal_(self.tgt_embed.weight.data) + else: + self.tgt_embed = None + + # for two stage + self.two_stage_type = two_stage_type + assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format(two_stage_type) + if two_stage_type == "standard": + # anchor selection at the output of encoder + self.enc_output = nn.Linear(d_model, d_model) + self.enc_output_norm = nn.LayerNorm(d_model) + self.two_stage_wh_embedding = None + + if two_stage_type == "no": + self.init_ref_points(num_queries) # init self.refpoint_embed + + self.enc_out_class_embed = None + self.enc_out_bbox_embed = None + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformAttn): + m._reset_parameters() + if self.num_feature_levels > 1 and self.level_embed is not None: + nn.init.normal_(self.level_embed) + + def get_valid_ratio(self, mask): + _, H, W = mask.shape + valid_H = torch.sum(~mask[:, :, 0], 1) + valid_W = torch.sum(~mask[:, 0, :], 1) + valid_ratio_h = valid_H.float() / H + valid_ratio_w = valid_W.float() / W + valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) + return valid_ratio + + def init_ref_points(self, use_num_queries): + self.refpoint_embed = nn.Embedding(use_num_queries, 4) + + def forward(self, srcs, masks, refpoint_embed, pos_embeds, tgt, attn_mask=None, text_dict=None): + """ + Input: + - srcs: List of multi features [bs, ci, hi, wi] + - masks: List of multi masks [bs, hi, wi] + - refpoint_embed: [bs, num_dn, 4]. None in infer + - pos_embeds: List of multi pos embeds [bs, ci, hi, wi] + - tgt: [bs, num_dn, d_model]. 
None in infer + + """ + # prepare input for encoder + src_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)): + bs, c, h, w = src.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + + src = src.flatten(2).transpose(1, 2) # bs, hw, c + mask = mask.flatten(1) # bs, hw + pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c + if self.num_feature_levels > 1 and self.level_embed is not None: + lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1) + else: + lvl_pos_embed = pos_embed + lvl_pos_embed_flatten.append(lvl_pos_embed) + src_flatten.append(src) + mask_flatten.append(mask) + src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c + mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw} + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) # bs, \sum{hxw}, c + spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1).to(src.dtype) + + # two stage + # enc_topk_proposals = enc_refpoint_embed = None + + ######################################################### + # Begin Encoder + ######################################################### + memory, memory_text = self.encoder( + src_flatten, + pos=lvl_pos_embed_flatten, + level_start_index=level_start_index, + spatial_shapes=spatial_shapes, + valid_ratios=valid_ratios, + key_padding_mask=mask_flatten, + memory_text=text_dict["encoded_text"], + text_attention_mask=~text_dict["text_token_mask"], + # we ~ the mask . False means use the token; True means pad the token + position_ids=text_dict["position_ids"], + text_self_attention_masks=text_dict["text_self_attention_masks"], + ) + ######################################################### + # End Encoder + # - memory: bs, \sum{hw}, c + # - mask_flatten: bs, \sum{hw} + # - lvl_pos_embed_flatten: bs, \sum{hw}, c + # - enc_intermediate_output: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c) + # - enc_intermediate_refpoints: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c) + ######################################################### + text_dict["encoded_text"] = memory_text + # if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1': + # if memory.isnan().any() | memory.isinf().any(): + # import ipdb; ipdb.set_trace() + + if self.two_stage_type == "standard": + output_memory, output_proposals = gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes) + output_memory = self.enc_output_norm(self.enc_output(output_memory)) + + if text_dict is not None: + enc_outputs_class_unselected = self.enc_out_class_embed(output_memory, text_dict) + else: + enc_outputs_class_unselected = self.enc_out_class_embed(output_memory) + + topk_logits = enc_outputs_class_unselected.max(-1)[0] + enc_outputs_coord_unselected = ( + self.enc_out_bbox_embed(output_memory) + output_proposals + ) # (bs, \sum{hw}, 4) unsigmoid + topk = self.num_queries + + topk_proposals = torch.topk(topk_logits, topk, dim=1)[1] # bs, nq + + # gather boxes + refpoint_embed_undetach = torch.gather( + enc_outputs_coord_unselected, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) + ) # unsigmoid + refpoint_embed_ = refpoint_embed_undetach.detach() + init_box_proposal = torch.gather( + output_proposals, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) + ).sigmoid() # sigmoid + + # 
gather tgt + tgt_undetach = torch.gather(output_memory, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)) + if self.embed_init_tgt: + tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model + else: + tgt_ = tgt_undetach.detach() + + if refpoint_embed is not None: + refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1) + tgt = torch.cat([tgt, tgt_], dim=1) + else: + refpoint_embed, tgt = refpoint_embed_, tgt_ + + elif self.two_stage_type == "no": + tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model + refpoint_embed_ = self.refpoint_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, 4 + + if refpoint_embed is not None: + refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1) + tgt = torch.cat([tgt, tgt_], dim=1) + else: + refpoint_embed, tgt = refpoint_embed_, tgt_ + + if self.num_patterns > 0: + tgt_embed = tgt.repeat(1, self.num_patterns, 1) + refpoint_embed = refpoint_embed.repeat(1, self.num_patterns, 1) + tgt_pat = self.patterns.weight[None, :, :].repeat_interleave( + self.num_queries, 1 + ) # 1, n_q*n_pat, d_model + tgt = tgt_embed + tgt_pat + + init_box_proposal = refpoint_embed_.sigmoid() + + else: + raise NotImplementedError("unknown two_stage_type {}".format(self.two_stage_type)) + ######################################################### + # End preparing tgt + # - tgt: bs, NQ, d_model + # - refpoint_embed(unsigmoid): bs, NQ, d_model + ######################################################### + + ######################################################### + # Begin Decoder + ######################################################### + hs, references = self.decoder( + tgt=tgt.transpose(0, 1), + memory=memory.transpose(0, 1), + memory_key_padding_mask=mask_flatten, + pos=lvl_pos_embed_flatten.transpose(0, 1), + refpoints_unsigmoid=refpoint_embed.transpose(0, 1), + level_start_index=level_start_index, + spatial_shapes=spatial_shapes, + valid_ratios=valid_ratios, + tgt_mask=attn_mask, + memory_text=text_dict["encoded_text"], + text_attention_mask=~text_dict["text_token_mask"], + # we ~ the mask . False means use the token; True means pad the token + ) + ######################################################### + # End Decoder + # hs: n_dec, bs, nq, d_model + # references: n_dec+1, bs, nq, query_dim + ######################################################### + + ######################################################### + # Begin postprocess + ######################################################### + if self.two_stage_type == "standard": + hs_enc = tgt_undetach.unsqueeze(0) + ref_enc = refpoint_embed_undetach.sigmoid().unsqueeze(0) + else: + hs_enc = ref_enc = None + ######################################################### + # End postprocess + # hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or (n_enc, bs, nq, d_model) or None + # ref_enc: (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or (n_enc, bs, nq, d_model) or None + ######################################################### + + return hs, references, hs_enc, ref_enc, init_box_proposal + # hs: (n_dec, bs, nq, d_model) + # references: sigmoid coordinates. (n_dec+1, bs, bq, 4) + # hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or None + # ref_enc: sigmoid coordinates. 
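A note on the "standard" two-stage branch above: the top num_queries encoder proposals are picked with torch.topk on the per-proposal scores, and the matching rows of the box and feature tensors are pulled out with torch.gather, whose index has to be expanded to the trailing feature dimension. A small sketch of that select-then-gather idiom with toy sizes (not the model's real dimensions):

    import torch

    bs, hw, d_model, topk = 2, 100, 256, 10
    logits = torch.randn(bs, hw)            # one score per proposal
    boxes = torch.randn(bs, hw, 4)          # unsigmoided cxcywh per proposal
    memory = torch.randn(bs, hw, d_model)   # encoder features per proposal

    topk_idx = torch.topk(logits, topk, dim=1)[1]  # (bs, topk)

    # gather needs the index repeated across the trailing feature dimension
    topk_boxes = torch.gather(boxes, 1, topk_idx.unsqueeze(-1).repeat(1, 1, 4))
    topk_feats = torch.gather(memory, 1, topk_idx.unsqueeze(-1).repeat(1, 1, d_model))
    print(topk_boxes.shape, topk_feats.shape)  # (2, 10, 4) (2, 10, 256)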
\ + # (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or None + + +class TransformerEncoder(nn.Module): + def __init__( + self, + encoder_layer, + num_layers, + d_model=256, + num_queries=300, + enc_layer_share=False, + text_enhance_layer=None, + feature_fusion_layer=None, + use_checkpoint=False, + use_transformer_ckpt=False, + ): + """_summary_ + + Args: + encoder_layer (_type_): _description_ + num_layers (_type_): _description_ + norm (_type_, optional): _description_. Defaults to None. + d_model (int, optional): _description_. Defaults to 256. + num_queries (int, optional): _description_. Defaults to 300. + enc_layer_share (bool, optional): _description_. Defaults to False. + + """ + super().__init__() + # prepare layers + self.layers = [] + self.text_layers = [] + self.fusion_layers = [] + if num_layers > 0: + self.layers = _get_clones(encoder_layer, num_layers, layer_share=enc_layer_share) + + if text_enhance_layer is not None: + self.text_layers = _get_clones(text_enhance_layer, num_layers, layer_share=enc_layer_share) + if feature_fusion_layer is not None: + self.fusion_layers = _get_clones(feature_fusion_layer, num_layers, layer_share=enc_layer_share) + else: + self.layers = [] + del encoder_layer + + if text_enhance_layer is not None: + self.text_layers = [] + del text_enhance_layer + if feature_fusion_layer is not None: + self.fusion_layers = [] + del feature_fusion_layer + + self.query_scale = None + self.num_queries = num_queries + self.num_layers = num_layers + self.d_model = d_model + + self.use_checkpoint = use_checkpoint + self.use_transformer_ckpt = use_transformer_ckpt + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + reference_points_list = [] + for lvl, (H_, W_) in enumerate(spatial_shapes): + + ref_y, ref_x = torch.meshgrid( + torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device), + torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device), + ) + ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_) + ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def forward( + self, + # for images + src: Tensor, + pos: Tensor, + spatial_shapes: Tensor, + level_start_index: Tensor, + valid_ratios: Tensor, + key_padding_mask: Tensor, + # for texts + memory_text: Tensor = None, + text_attention_mask: Tensor = None, + pos_text: Tensor = None, + text_self_attention_masks: Tensor = None, + position_ids: Tensor = None, + ): + """ + Input: + - src: [bs, sum(hi*wi), 256] + - pos: pos embed for src. [bs, sum(hi*wi), 256] + - spatial_shapes: h,w of each level [num_level, 2] + - level_start_index: [num_level] start point of level in sum(hi*wi). 
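For reference, get_reference_points above lays a grid of pixel-center coordinates over every level and normalizes it by the valid (unpadded) extent of each image. A reduced sketch for a single level with no padding (valid ratio fixed at 1):

    import torch

    H, W, bs = 4, 4, 1
    valid_ratios = torch.ones(bs, 1, 2)  # (bs, num_level, 2); 1.0 means no padding

    ref_y, ref_x = torch.meshgrid(
        torch.linspace(0.5, H - 0.5, H),  # pixel centers: 0.5, 1.5, ...
        torch.linspace(0.5, W - 0.5, W),
    )
    ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, 0, 1] * H)
    ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, 0, 0] * W)
    ref = torch.stack((ref_x, ref_y), -1)  # (bs, H*W, 2), coordinates in [0, 1]
    print(ref.min().item(), ref.max().item())  # 0.125 0.875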
+ - valid_ratios: [bs, num_level, 2] + - key_padding_mask: [bs, sum(hi*wi)] + + - memory_text: bs, n_text, 256 + - text_attention_mask: bs, n_text + False for no padding; True for padding + - pos_text: bs, n_text, 256 + + - position_ids: bs, n_text + Intermedia: + - reference_points: [bs, sum(hi*wi), num_level, 2] + Outpus: + - output: [bs, sum(hi*wi), 256] + """ + + output = src + + # preparation and reshape + if self.num_layers > 0: + reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device) + + if self.text_layers: + # generate pos_text + bs, n_text, text_dim = memory_text.shape + if pos_text is None and position_ids is None: + pos_text = ( + torch.arange(n_text, device=memory_text.device).float().unsqueeze(0).unsqueeze(-1).repeat(bs, 1, 1) + ) + pos_text = get_sine_pos_embed(pos_text, num_pos_feats=256, exchange_xy=False) + if position_ids is not None: + pos_text = get_sine_pos_embed(position_ids[..., None], num_pos_feats=256, exchange_xy=False) + pos_text = pos_text.to(src.dtype) + + # main process + for layer_id, layer in enumerate(self.layers): + # if output.isnan().any() or memory_text.isnan().any(): + # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': + # import ipdb; ipdb.set_trace() + if self.fusion_layers: + if self.use_checkpoint: + output, memory_text = checkpoint.checkpoint( + self.fusion_layers[layer_id], + output, + memory_text, + key_padding_mask, + text_attention_mask, + ) + else: + output, memory_text = self.fusion_layers[layer_id]( + v=output, + l=memory_text, + attention_mask_v=key_padding_mask, + attention_mask_l=text_attention_mask, + ) + + if self.text_layers: + memory_text = self.text_layers[layer_id]( + src=memory_text.transpose(0, 1), + src_mask=~text_self_attention_masks, # note we use ~ for mask here + src_key_padding_mask=text_attention_mask, + pos=(pos_text.transpose(0, 1) if pos_text is not None else None), + ).transpose(0, 1) + + # main process + if self.use_transformer_ckpt: + output = checkpoint.checkpoint( + layer, + output, + pos, + reference_points, + spatial_shapes, + level_start_index, + key_padding_mask, + ) + else: + output = layer( + src=output, + pos=pos, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + key_padding_mask=key_padding_mask, + ) + + return output, memory_text + + +class TransformerDecoder(nn.Module): + def __init__( + self, + decoder_layer, + num_layers, + norm=None, + return_intermediate=False, + d_model=256, + query_dim=4, + num_feature_levels=1, + ): + super().__init__() + if num_layers > 0: + self.layers = _get_clones(decoder_layer, num_layers) + else: + self.layers = [] + self.num_layers = num_layers + self.norm = norm + self.return_intermediate = return_intermediate + assert return_intermediate, "support return_intermediate only" + self.query_dim = query_dim + assert query_dim in [2, 4], "query_dim should be 2/4 but {}".format(query_dim) + self.num_feature_levels = num_feature_levels + + self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2) + self.query_pos_sine_scale = None + + self.query_scale = None + self.bbox_embed = None + self.class_embed = None + + self.d_model = d_model + + self.ref_anchor_head = None + + def forward( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + refpoints_unsigmoid: Optional[Tensor] = None, 
# num_queries, bs, 2 + # for memory + level_start_index: Optional[Tensor] = None, # num_levels + spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2 + valid_ratios: Optional[Tensor] = None, + # for text + memory_text: Optional[Tensor] = None, + text_attention_mask: Optional[Tensor] = None, + ): + """ + Input: + - tgt: nq, bs, d_model + - memory: hw, bs, d_model + - pos: hw, bs, d_model + - refpoints_unsigmoid: nq, bs, 2/4 + - valid_ratios/spatial_shapes: bs, nlevel, 2 + """ + output = tgt + + intermediate = [] + reference_points = refpoints_unsigmoid.sigmoid() + ref_points = [reference_points] + + for layer_id, layer in enumerate(self.layers): + + if reference_points.shape[-1] == 4: + reference_points_input = ( + reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[None, :] + ) # nq, bs, nlevel, 4 + else: + assert reference_points.shape[-1] == 2 + reference_points_input = reference_points[:, :, None] * valid_ratios[None, :] + query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2 + + # conditional query + raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256 + pos_scale = self.query_scale(output) if self.query_scale is not None else 1 + query_pos = pos_scale * raw_query_pos + # if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1': + # if query_pos.isnan().any() | query_pos.isinf().any(): + # import ipdb; ipdb.set_trace() + + # main process + output = layer( + tgt=output, + tgt_query_pos=query_pos, + tgt_query_sine_embed=query_sine_embed, + tgt_key_padding_mask=tgt_key_padding_mask, + tgt_reference_points=reference_points_input, + memory_text=memory_text, + text_attention_mask=text_attention_mask, + memory=memory, + memory_key_padding_mask=memory_key_padding_mask, + memory_level_start_index=level_start_index, + memory_spatial_shapes=spatial_shapes, + memory_pos=pos, + self_attn_mask=tgt_mask, + cross_attn_mask=memory_mask, + ) + if output.isnan().any() | output.isinf().any(): + print(f"output layer_id {layer_id} is nan") + try: + num_nan = output.isnan().sum().item() + num_inf = output.isinf().sum().item() + print(f"num_nan {num_nan}, num_inf {num_inf}") + except Exception as e: + print(e) + # if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1': + # import ipdb; ipdb.set_trace() + + # iter update + if self.bbox_embed is not None: + # box_holder = self.bbox_embed(output) + # box_holder[..., :self.query_dim] += inverse_sigmoid(reference_points) + # new_reference_points = box_holder[..., :self.query_dim].sigmoid() + + reference_before_sigmoid = inverse_sigmoid(reference_points) + delta_unsig = self.bbox_embed[layer_id](output) + outputs_unsig = delta_unsig + reference_before_sigmoid + new_reference_points = outputs_unsig.sigmoid() + + reference_points = new_reference_points.detach() + # if layer_id != self.num_layers - 1: + ref_points.append(new_reference_points) + + intermediate.append(self.norm(output)) + + return [ + [itm_out.transpose(0, 1) for itm_out in intermediate], + [itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points], + ] + + +class DeformableTransformerEncoderLayer(nn.Module): + def __init__( + self, + d_model=256, + d_ffn=1024, + dropout=0.1, + activation="relu", + n_levels=4, + n_heads=8, + n_points=4, + ): + super().__init__() + + # self attention + self.self_attn = MSDeformAttn( + embed_dim=d_model, + num_levels=n_levels, + num_heads=n_heads, + num_points=n_points, + batch_first=True, + ) + self.dropout1 = nn.Dropout(dropout) + self.norm1 = nn.LayerNorm(d_model) + + # ffn + 
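The "iter update" block above is the layer-by-layer box refinement: the current reference box is mapped back through the inverse sigmoid, the predicted delta is added in that unbounded space, and the sum is squashed back into [0, 1]; detach() keeps gradients from flowing across layers through the reference points. A sketch of one refinement step, with inverse_sigmoid written out inline (clamped, as is conventional):

    import torch

    def inverse_sigmoid(x, eps=1e-5):
        # logit with clamping so boxes at exactly 0 or 1 stay finite
        x = x.clamp(min=eps, max=1 - eps)
        return torch.log(x / (1 - x))

    reference_points = torch.rand(300, 2, 4)     # nq, bs, 4 in sigmoid space
    delta_unsig = 0.01 * torch.randn(300, 2, 4)  # stand-in for bbox_embed(output)

    new_reference_points = (delta_unsig + inverse_sigmoid(reference_points)).sigmoid()
    reference_points = new_reference_points.detach()  # no gradient across layers
    print(new_reference_points.shape)  # torch.Size([300, 2, 4])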
self.linear1 = nn.Linear(d_model, d_ffn) + self.activation = _get_activation_fn(activation, d_model=d_ffn) + self.dropout2 = nn.Dropout(dropout) + self.linear2 = nn.Linear(d_ffn, d_model) + self.dropout3 = nn.Dropout(dropout) + self.norm2 = nn.LayerNorm(d_model) + + @staticmethod + def with_pos_embed(tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_ffn(self, src): + src2 = self.linear2(self.dropout2(self.activation(self.linear1(src)))) + src = src + self.dropout3(src2) + src = self.norm2(src) + return src + + def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, key_padding_mask=None): + # self attention + # import ipdb; ipdb.set_trace() + src2 = self.self_attn( + query=self.with_pos_embed(src, pos), + reference_points=reference_points, + value=src, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + key_padding_mask=key_padding_mask, + ) + src = src + self.dropout1(src2) + src = self.norm1(src) + + # ffn + src = self.forward_ffn(src) + + return src + + +class DeformableTransformerDecoderLayer(nn.Module): + def __init__( + self, + d_model=256, + d_ffn=1024, + dropout=0.1, + activation="relu", + n_levels=4, + n_heads=8, + n_points=4, + use_text_feat_guide=False, + use_text_cross_attention=False, + ): + super().__init__() + + # cross attention + self.cross_attn = MSDeformAttn( + embed_dim=d_model, + num_levels=n_levels, + num_heads=n_heads, + num_points=n_points, + batch_first=True, + ) + self.dropout1 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.norm1 = nn.LayerNorm(d_model) + + # cross attention text + if use_text_cross_attention: + self.ca_text = nn.MultiheadAttention(d_model, n_heads, dropout=dropout) + self.catext_dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.catext_norm = nn.LayerNorm(d_model) + + # self attention + self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout) + self.dropout2 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.norm2 = nn.LayerNorm(d_model) + + # ffn + self.linear1 = nn.Linear(d_model, d_ffn) + self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1) + self.dropout3 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.linear2 = nn.Linear(d_ffn, d_model) + self.dropout4 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.norm3 = nn.LayerNorm(d_model) + + self.key_aware_proj = None + self.use_text_feat_guide = use_text_feat_guide + assert not use_text_feat_guide + self.use_text_cross_attention = use_text_cross_attention + + def rm_self_attn_modules(self): + self.self_attn = None + self.dropout2 = None + self.norm2 = None + + @staticmethod + def with_pos_embed(tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_ffn(self, tgt): + with torch.cuda.amp.autocast(enabled=False): + tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout4(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward( + self, + # for tgt + tgt: Optional[Tensor], # nq, bs, d_model + tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos)) + tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos) + tgt_key_padding_mask: Optional[Tensor] = None, + tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4 + memory_text: Optional[Tensor] = None, # bs, num_token, d_model + text_attention_mask: Optional[Tensor] = None, # bs, num_token + # for memory + memory: Optional[Tensor] = None, # hw, bs, d_model + memory_key_padding_mask: Optional[Tensor] = None, + memory_level_start_index: Optional[Tensor] = None, # num_levels + memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2 + memory_pos: Optional[Tensor] = None, # pos for memory + # sa + self_attn_mask: Optional[Tensor] = None, # mask used for self-attention + cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention + ): + """ + Input: + - tgt/tgt_query_pos: nq, bs, d_model + - + """ + assert cross_attn_mask is None + + # self attention + if self.self_attn is not None: + # import ipdb; ipdb.set_trace() + q = k = self.with_pos_embed(tgt, tgt_query_pos) + tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + + if self.use_text_cross_attention: + tgt2 = self.ca_text( + self.with_pos_embed(tgt, tgt_query_pos), + memory_text.transpose(0, 1), + memory_text.transpose(0, 1), + key_padding_mask=text_attention_mask, + )[0] + tgt = tgt + self.catext_dropout(tgt2) + tgt = self.catext_norm(tgt) + + tgt2 = self.cross_attn( + query=self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1), + reference_points=tgt_reference_points.transpose(0, 1).contiguous(), + value=memory.transpose(0, 1), + spatial_shapes=memory_spatial_shapes, + level_start_index=memory_level_start_index, + key_padding_mask=memory_key_padding_mask, + ).transpose(0, 1) + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + + # ffn + tgt = self.forward_ffn(tgt) + + return tgt + + +def build_transformer(args): + return Transformer( + d_model=args.hidden_dim, + dropout=args.dropout, + nhead=args.nheads, + num_queries=args.num_queries, + dim_feedforward=args.dim_feedforward, + num_encoder_layers=args.enc_layers, + num_decoder_layers=args.dec_layers, + normalize_before=args.pre_norm, + return_intermediate_dec=True, + query_dim=args.query_dim, + activation=args.transformer_activation, + num_patterns=args.num_patterns, + num_feature_levels=args.num_feature_levels, + enc_n_points=args.enc_n_points, + dec_n_points=args.dec_n_points, + learnable_tgt_init=True, + # two stage + two_stage_type=args.two_stage_type, # ['no', 'standard', 'early'] + embed_init_tgt=args.embed_init_tgt, + use_text_enhancer=args.use_text_enhancer, + use_fusion_layer=args.use_fusion_layer, + use_checkpoint=args.use_checkpoint, + use_transformer_ckpt=args.use_transformer_ckpt, + use_text_cross_attention=args.use_text_cross_attention, + text_dropout=args.text_dropout, + fusion_dropout=args.fusion_dropout, + fusion_droppath=args.fusion_droppath, + ) diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer_vanilla.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer_vanilla.py new file mode 100644 index 0000000000..f6ddceb006 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer_vanilla.py @@ -0,0 +1,115 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. 
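build_transformer above is a thin adapter from a flat config object to the Transformer constructor, so anything attribute-like works as args. The sketch below uses a SimpleNamespace with illustrative values only; they are not the shipped SwinT/SwinB configs:

    from types import SimpleNamespace

    # Hypothetical minimal config; field names mirror the attributes read above.
    args = SimpleNamespace(
        hidden_dim=256, dropout=0.0, nheads=8, num_queries=900,
        dim_feedforward=2048, enc_layers=6, dec_layers=6, pre_norm=False,
        query_dim=4, transformer_activation="relu", num_patterns=0,
        num_feature_levels=4, enc_n_points=4, dec_n_points=4,
        two_stage_type="standard", embed_init_tgt=True,
        use_text_enhancer=True, use_fusion_layer=True,
        use_checkpoint=False, use_transformer_ckpt=False,
        use_text_cross_attention=True, text_dropout=0.0,
        fusion_dropout=0.0, fusion_droppath=0.1,
    )
    # transformer = build_transformer(args)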
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +DETR Transformer class. + +Copy-paste from torch.nn.Transformer with modifications: + * positional encodings are passed in MHattention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers +""" +from typing import Optional + +import torch +from torch import Tensor, nn + +from .utils import _get_activation_fn, _get_clones + + +class TextTransformer(nn.Module): + def __init__(self, num_layers, d_model=256, nheads=8, dim_feedforward=2048, dropout=0.1): + super().__init__() + self.num_layers = num_layers + self.d_model = d_model + self.nheads = nheads + self.dim_feedforward = dim_feedforward + self.norm = None + + single_encoder_layer = TransformerEncoderLayer( + d_model=d_model, nhead=nheads, dim_feedforward=dim_feedforward, dropout=dropout + ) + self.layers = _get_clones(single_encoder_layer, num_layers) + + def forward(self, memory_text: torch.Tensor, text_attention_mask: torch.Tensor): + """ + + Args: + text_attention_mask: bs, num_token + memory_text: bs, num_token, d_model + + Raises: + RuntimeError: _description_ + + Returns: + output: bs, num_token, d_model + """ + + output = memory_text.transpose(0, 1) + + for layer in self.layers: + output = layer(output, src_key_padding_mask=text_attention_mask) + + if self.norm is not None: + output = self.norm(output) + + return output.transpose(0, 1) + + +class TransformerEncoderLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + self.nhead = nhead + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward( + self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + ): + # repeat attn mask + if src_mask.dim() == 3 and src_mask.shape[0] == src.shape[1]: + # bs, num_q, num_k + src_mask = src_mask.repeat(self.nhead, 1, 1) + + q = k = self.with_pos_embed(src, pos) + + src2 = self.self_attn(q, k, value=src, attn_mask=src_mask)[0] + + # src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/utils.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/utils.py new file mode 100644 index 0000000000..459d90ab35 --- 
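One detail worth flagging in TransformerEncoderLayer.forward above: nn.MultiheadAttention accepts a 3-D attn_mask of shape (bs * num_heads, num_q, num_k), so the per-sample (bs, num_q, num_k) mask is tiled across heads first. A standalone sketch, with a layout caveat in the comments:

    import torch

    bs, nhead, num_q, embed = 2, 8, 5, 16
    src_mask = torch.zeros(bs, num_q, num_q, dtype=torch.bool)  # per-sample mask; False = attend

    if src_mask.dim() == 3 and src_mask.shape[0] == bs:
        # Mirrors the code above. Caveat: .repeat stacks masks head-major, while
        # PyTorch's internal ordering is batch-major; the two agree only when all
        # samples share one mask (as here). For genuinely per-sample masks,
        # src_mask.repeat_interleave(nhead, dim=0) matches the internal layout.
        src_mask = src_mask.repeat(nhead, 1, 1)  # (bs * nhead, num_q, num_q)

    attn = torch.nn.MultiheadAttention(embed, nhead)
    x = torch.randn(num_q, bs, embed)  # (seq, batch, embed) layout
    out, _ = attn(x, x, x, attn_mask=src_mask)
    print(out.shape)  # torch.Size([5, 2, 16])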
/dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/utils.py @@ -0,0 +1,258 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ + +import copy +import math + +import torch +import torch.nn.functional as F +from torch import Tensor, nn + + +def _get_clones(module, N, layer_share=False): + # import ipdb; ipdb.set_trace() + if layer_share: + return nn.ModuleList([module for i in range(N)]) + else: + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def get_sine_pos_embed( + pos_tensor: torch.Tensor, + num_pos_feats: int = 128, + temperature: int = 10000, + exchange_xy: bool = True, +): + """generate sine position embedding from a position tensor + Args: + pos_tensor (torch.Tensor): shape: [..., n]. + num_pos_feats (int): projected shape for each float in the tensor. + temperature (int): temperature in the sine/cosine function. + exchange_xy (bool, optional): exchange pos x and pos y. \ + For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Defaults to True. + Returns: + pos_embed (torch.Tensor): shape: [..., n*num_pos_feats]. + """ + scale = 2 * math.pi + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device) + dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) + + def sine_func(x: torch.Tensor): + sin_x = x * scale / dim_t + sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2) + return sin_x + + pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)] + if exchange_xy: + pos_res[0], pos_res[1] = pos_res[1], pos_res[0] + pos_res = torch.cat(pos_res, dim=-1) + return pos_res + + +def gen_encoder_output_proposals(memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None): + """ + Input: + - memory: bs, \sum{hw}, d_model + - memory_padding_mask: bs, \sum{hw} + - spatial_shapes: nlevel, 2 + - learnedwh: 2 + Output: + - output_memory: bs, \sum{hw}, d_model + - output_proposals: bs, \sum{hw}, 4 + """ + N_, S_, C_ = memory.shape + proposals = [] + _cur = 0 + for lvl, (H_, W_) in enumerate(spatial_shapes): + mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1) + valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) + valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) + + # import ipdb; ipdb.set_trace() + + grid_y, grid_x = torch.meshgrid( + torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device), + torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device), + ) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2 + + scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale + + if learnedwh is not None: + # import ipdb; ipdb.set_trace() + wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl) + else: + wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) + + # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1) + # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale + # wh = torch.ones_like(grid) / scale + 
        proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
        proposals.append(proposal)
        _cur += H_ * W_
    # import ipdb; ipdb.set_trace()
    output_proposals = torch.cat(proposals, 1)
    output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
    output_proposals = torch.log(output_proposals / (1 - output_proposals))  # unsigmoid
    output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf"))
    output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))

    output_memory = memory
    output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
    output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))

    # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
    # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))

    output_proposals = output_proposals.to(output_memory.dtype)
    return output_memory, output_proposals


class RandomBoxPerturber:
    def __init__(self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2) -> None:
        self.noise_scale = torch.Tensor([x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale])

    def __call__(self, refanchors: Tensor) -> Tensor:
        nq, bs, query_dim = refanchors.shape
        device = refanchors.device

        noise_raw = torch.rand_like(refanchors)
        noise_scale = self.noise_scale.to(device)[:query_dim]

        new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale)
        return new_refanchors.clamp_(0, 1)


def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0, 1) to balance
               positive vs negative examples. Defaults to 0.25; a negative
               value disables the weighting.
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        Loss tensor
    """
    prob = inputs.sigmoid()
    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = ce_loss * ((1 - p_t) ** gamma)

    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss

    if no_reduction:
        return loss

    return loss.mean(1).sum() / num_boxes


class MLP(nn.Module):
    """Very simple multi-layer perceptron (also called FFN)"""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x


def _get_activation_fn(activation, d_model=256, batch_dim=0):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    if activation == "prelu":
        return nn.PReLU()
    if activation == "selu":
        return F.selu

    raise RuntimeError(f"activation should be relu/gelu/glu/prelu/selu, not {activation}.")


def gen_sineembed_for_position(pos_tensor):
    # n_query, bs, _ = pos_tensor.size()
    # sineembed_tensor = torch.zeros(n_query, bs, 256)
    scale = 2 * math.pi
    dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
    dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode="floor")) / 128)
    x_embed = pos_tensor[:, :, 0] * scale
    y_embed = pos_tensor[:, :, 1] * scale
    pos_x = x_embed[:, :, None] / dim_t
    pos_y = y_embed[:, :, None] / dim_t
    pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
    pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
    if pos_tensor.size(-1) == 2:
        pos = torch.cat((pos_y, pos_x), dim=2)
    elif pos_tensor.size(-1) == 4:
        w_embed = pos_tensor[:, :, 2] * scale
        pos_w = w_embed[:, :, None] / dim_t
        pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)

        h_embed = pos_tensor[:, :, 3] * scale
        pos_h = h_embed[:, :, None] / dim_t
        pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)

        pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
    else:
        raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
    pos = pos.to(pos_tensor.dtype)
    return pos


class ContrastiveEmbed(nn.Module):
    def __init__(self, max_text_len=256):
        """
        Args:
            max_text_len: max length of text.
        """
        super().__init__()
        self.max_text_len = max_text_len

    def forward(self, x, text_dict):
        """Compute query-to-text similarity logits.

        Args:
            x: query features, (bs, num_queries, d_model)
            text_dict: a dict of the form
            {
                'encoded_text': encoded_text, # bs, 195, d_model
                'text_token_mask': text_token_mask, # bs, 195
                # True for used tokens.
False for padding tokens + } + Returns: + _type_: _description_ + """ + assert isinstance(text_dict, dict) + + y = text_dict["encoded_text"] + text_token_mask = text_dict["text_token_mask"] + + res = x @ y.transpose(-1, -2) + res.masked_fill_(~text_token_mask[:, None, :], float("-inf")) + + # padding to max_text_len + new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device, dtype=res.dtype) + new_res[..., : res.shape[-1]] = res + + return new_res diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/__init__.py new file mode 100644 index 0000000000..e922c219f6 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/__init__.py @@ -0,0 +1,18 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .GroundingDINO import build_groundingdino # noqa + + +def build_model(args): + # we use register to maintain models from catdet6 on. + from .registry import MODULE_BUILD_FUNCS + + assert args.modelname in MODULE_BUILD_FUNCS._module_dict + build_func = MODULE_BUILD_FUNCS.get(args.modelname) + model = build_func(args) + return model diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/registry.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/registry.py new file mode 100644 index 0000000000..fb4715fa9b --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/registry.py @@ -0,0 +1,60 @@ +# ------------------------------------------------------------------------ +# Grounding DINO +# url: https://github.com/IDEA-Research/GroundingDINO +# Copyright (c) 2023 IDEA. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# -*- coding: utf-8 -*- +# @Author: Yihao Chen +# @Date: 2021-08-16 16:03:17 +# @Last Modified by: Shilong Liu +# @Last Modified time: 2022-01-23 15:26 +# modified from mmcv + +import inspect +from functools import partial + + +class Registry(object): + def __init__(self, name): + self._name = name + self._module_dict = dict() + + def __repr__(self): + format_str = self.__class__.__name__ + "(name={}, items={})".format(self._name, list(self._module_dict.keys())) + return format_str + + def __len__(self): + return len(self._module_dict) + + @property + def name(self): + return self._name + + @property + def module_dict(self): + return self._module_dict + + def get(self, key): + return self._module_dict.get(key, None) + + def registe_with_name(self, module_name=None, force=False): + return partial(self.register, module_name=module_name, force=force) + + def register(self, module_build_function, module_name=None, force=False): + """Register a module build function. + Args: + module (:obj:`nn.Module`): Module to be registered. 
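ContrastiveEmbed above scores every query against every text token with a plain dot product, masks padded tokens to -inf, and right-pads the class dimension out to max_text_len. A toy re-enactment of the forward pass with shrunken shapes:

    import torch

    bs, nq, n_text, d_model, max_text_len = 1, 3, 4, 8, 6
    x = torch.randn(bs, nq, d_model)                 # decoder query features
    encoded_text = torch.randn(bs, n_text, d_model)  # text token features
    text_token_mask = torch.tensor([[True, True, True, False]])  # last token is padding

    res = x @ encoded_text.transpose(-1, -2)          # (bs, nq, n_text)
    res.masked_fill_(~text_token_mask[:, None, :], float("-inf"))

    new_res = torch.full((bs, nq, max_text_len), float("-inf"))
    new_res[..., :n_text] = res
    print(new_res[0, 0])  # 3 real scores, then -inf for padding and unused slots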
+ """ + if not inspect.isfunction(module_build_function): + raise TypeError("module_build_function must be a function, but got {}".format(type(module_build_function))) + if module_name is None: + module_name = module_build_function.__name__ + if not force and module_name in self._module_dict: + raise KeyError("{} is already registered in {}".format(module_name, self.name)) + self._module_dict[module_name] = module_build_function + + return module_build_function + + +MODULE_BUILD_FUNCS = Registry("model build functions") diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/__init__.py new file mode 100644 index 0000000000..168f9979a4 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/box_ops.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/box_ops.py new file mode 100644 index 0000000000..781068d294 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/box_ops.py @@ -0,0 +1,140 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Utilities for bounding box manipulation and GIoU. +""" +import torch +from torchvision.ops.boxes import box_area + + +def box_cxcywh_to_xyxy(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +def box_xyxy_to_cxcywh(x): + x0, y0, x1, y1 = x.unbind(-1) + b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] + return torch.stack(b, dim=-1) + + +# modified from torchvision to also return the union +def box_iou(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + # import ipdb; ipdb.set_trace() + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = (rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / (union + 1e-6) + return iou, union + + +def generalized_box_iou(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/ + + The boxes should be in [x0, y0, x1, y1] format + + Returns a [N, M] pairwise matrix, where N = len(boxes1) + and M = len(boxes2) + """ + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + # except: + # import ipdb; ipdb.set_trace() + iou, union = box_iou(boxes1, boxes2) + + lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + wh = (rb - lt).clamp(min=0) # [N,M,2] + area = wh[:, :, 0] * wh[:, :, 1] + + return iou - (area - union) / (area + 1e-6) + + +# modified from torchvision to also return the union +def box_iou_pairwise(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + lt = torch.max(boxes1[:, :2], boxes2[:, :2]) # [N,2] + rb = torch.min(boxes1[:, 2:], boxes2[:, 2:]) # [N,2] + + wh = (rb - lt).clamp(min=0) # [N,2] + inter = wh[:, 0] * wh[:, 1] # [N] + + union = area1 + area2 - inter + + iou = inter / union + return iou, union + + +def generalized_box_iou_pairwise(boxes1, boxes2): + """ + Generalized IoU from 
https://giou.stanford.edu/ + + Input: + - boxes1, boxes2: N,4 + Output: + - giou: N, 4 + """ + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + assert boxes1.shape == boxes2.shape + iou, union = box_iou_pairwise(boxes1, boxes2) # N, 4 + + lt = torch.min(boxes1[:, :2], boxes2[:, :2]) + rb = torch.max(boxes1[:, 2:], boxes2[:, 2:]) + + wh = (rb - lt).clamp(min=0) # [N,2] + area = wh[:, 0] * wh[:, 1] + + return iou - (area - union) / area + + +def masks_to_boxes(masks): + """Compute the bounding boxes around the provided masks + + The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. + + Returns a [N, 4] tensors, with the boxes in xyxy format + """ + if masks.numel() == 0: + return torch.zeros((0, 4), device=masks.device) + + h, w = masks.shape[-2:] + + y = torch.arange(0, h, dtype=torch.float) + x = torch.arange(0, w, dtype=torch.float) + y, x = torch.meshgrid(y, x) + + x_mask = masks * x.unsqueeze(0) + x_max = x_mask.flatten(1).max(-1)[0] + x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + y_mask = masks * y.unsqueeze(0) + y_max = y_mask.flatten(1).max(-1)[0] + y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + return torch.stack([x_min, y_min, x_max, y_max], 1) + + +if __name__ == "__main__": + x = torch.rand(5, 4) + y = torch.rand(3, 4) + iou, union = box_iou(x, y) + import ipdb + + ipdb.set_trace() diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/get_tokenlizer.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/get_tokenlizer.py new file mode 100644 index 0000000000..0adf91bc03 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/get_tokenlizer.py @@ -0,0 +1,24 @@ +from transformers import AutoTokenizer, BertModel, RobertaModel + + +def get_tokenlizer(text_encoder_type): + if not isinstance(text_encoder_type, str): + # print("text_encoder_type is not a str") + if hasattr(text_encoder_type, "text_encoder_type"): + text_encoder_type = text_encoder_type.text_encoder_type + elif text_encoder_type.get("text_encoder_type", False): + text_encoder_type = text_encoder_type.get("text_encoder_type") + else: + raise ValueError("Unknown type of text_encoder_type: {}".format(type(text_encoder_type))) + print("final text_encoder_type: {}".format(text_encoder_type)) + + tokenizer = AutoTokenizer.from_pretrained(text_encoder_type) + return tokenizer + + +def get_pretrained_language_model(text_encoder_type): + if text_encoder_type == "bert-base-uncased": + return BertModel.from_pretrained(text_encoder_type) + if text_encoder_type == "roberta-base": + return RobertaModel.from_pretrained(text_encoder_type) + raise ValueError("Unknown text_encoder_type {}".format(text_encoder_type)) diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/inference.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/inference.py new file mode 100644 index 0000000000..9bfb2cbca9 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/inference.py @@ -0,0 +1,222 @@ +from typing import List, Tuple + +import cv2 +import numpy as np +import supervision as sv +import torch +from PIL import Image +from torchvision.ops import box_convert + +import groundingdino.datasets.transforms as T +from groundingdino.models import 
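The conversions at the top of box_ops.py are exact inverses of each other, which the IoU/GIoU helpers depend on. A quick round-trip check (the two converters are restated so the snippet runs standalone):

    import torch
    from torchvision.ops.boxes import box_area

    def box_cxcywh_to_xyxy(x):
        x_c, y_c, w, h = x.unbind(-1)
        return torch.stack([x_c - 0.5 * w, y_c - 0.5 * h, x_c + 0.5 * w, y_c + 0.5 * h], dim=-1)

    def box_xyxy_to_cxcywh(x):
        x0, y0, x1, y1 = x.unbind(-1)
        return torch.stack([(x0 + x1) / 2, (y0 + y1) / 2, x1 - x0, y1 - y0], dim=-1)

    boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])  # one box in cxcywh
    assert torch.allclose(box_xyxy_to_cxcywh(box_cxcywh_to_xyxy(boxes)), boxes)
    print(box_area(box_cxcywh_to_xyxy(boxes)))  # tensor([0.0800])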
build_model +from groundingdino.util.misc import clean_state_dict +from groundingdino.util.slconfig import SLConfig +from groundingdino.util.utils import get_phrases_from_posmap + +# ---------------------------------------------------------------------------------------------------------------------- +# OLD API +# ---------------------------------------------------------------------------------------------------------------------- + + +def preprocess_caption(caption: str) -> str: + result = caption.lower().strip() + if result.endswith("."): + return result + return result + "." + + +def load_model(model_config_path: str, model_checkpoint_path: str, device: str = "cuda"): + args = SLConfig.fromfile(model_config_path) + args.device = device + model = build_model(args) + checkpoint = torch.load(model_checkpoint_path, map_location="cpu") + model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False) + model.eval() + return model + + +def load_image(image_path: str) -> Tuple[np.array, torch.Tensor]: + transform = T.Compose( + [ + T.RandomResize([800], max_size=1333), + T.ToTensor(), + T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] + ) + image_source = Image.open(image_path).convert("RGB") + image = np.asarray(image_source) + image_transformed, _ = transform(image_source, None) + return image, image_transformed + + +def predict( + model, image: torch.Tensor, caption: str, box_threshold: float, text_threshold: float, device: str = "cuda" +) -> Tuple[torch.Tensor, torch.Tensor, List[str]]: + caption = preprocess_caption(caption=caption) + + model = model.to(device) + image = image.to(device) + + with torch.no_grad(): + outputs = model(image[None], captions=[caption]) + + prediction_logits = outputs["pred_logits"].cpu().sigmoid()[0] # prediction_logits.shape = (nq, 256) + prediction_boxes = outputs["pred_boxes"].cpu()[0] # prediction_boxes.shape = (nq, 4) + + mask = prediction_logits.max(dim=1)[0] > box_threshold + logits = prediction_logits[mask] # logits.shape = (n, 256) + boxes = prediction_boxes[mask] # boxes.shape = (n, 4) + + tokenizer = model.tokenizer + tokenized = tokenizer(caption) + + phrases = [ + get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer).replace(".", "") for logit in logits + ] + + return boxes, logits.max(dim=1)[0], phrases + + +def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str]) -> np.ndarray: + h, w, _ = image_source.shape + boxes = boxes * torch.Tensor([w, h, w, h]) + xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy() + detections = sv.Detections(xyxy=xyxy) + + labels = [f"{phrase} {logit:.2f}" for phrase, logit in zip(phrases, logits)] + + box_annotator = sv.BoxAnnotator() + annotated_frame = cv2.cvtColor(image_source, cv2.COLOR_RGB2BGR) + annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels) + return annotated_frame + + +# ---------------------------------------------------------------------------------------------------------------------- +# NEW API +# ---------------------------------------------------------------------------------------------------------------------- + + +class Model: + + def __init__(self, model_config_path: str, model_checkpoint_path: str, device: str = "cuda"): + self.model = load_model( + model_config_path=model_config_path, model_checkpoint_path=model_checkpoint_path, device=device + ).to(device) + self.device = device + + def predict_with_caption( + self, image: np.ndarray, 
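The "OLD API" functions above compose into a short end-to-end pipeline: load config and weights, preprocess an image, run caption-grounded detection, then draw the boxes. A usage sketch in which the paths, prompt, and thresholds are placeholders:

    # Assumes the functions above are importable; paths and prompt are placeholders.
    CONFIG_PATH = "GroundingDINO_SwinT_OGC.py"
    WEIGHTS_PATH = "groundingdino_swint_ogc.pth"

    model = load_model(CONFIG_PATH, WEIGHTS_PATH, device="cuda")
    image_source, image = load_image("dog.jpg")

    boxes, logits, phrases = predict(
        model=model,
        image=image,
        caption="a dog.",
        box_threshold=0.35,
        text_threshold=0.25,
    )
    annotated = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)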
caption: str, box_threshold: float = 0.35, text_threshold: float = 0.25 + ) -> Tuple[sv.Detections, List[str]]: + """ + import cv2 + + image = cv2.imread(IMAGE_PATH) + + model = Model(model_config_path=CONFIG_PATH, model_checkpoint_path=WEIGHTS_PATH) + detections, labels = model.predict_with_caption( + image=image, + caption=caption, + box_threshold=BOX_THRESHOLD, + text_threshold=TEXT_THRESHOLD + ) + + import supervision as sv + + box_annotator = sv.BoxAnnotator() + annotated_image = box_annotator.annotate(scene=image, detections=detections, labels=labels) + """ + processed_image = Model.preprocess_image(image_bgr=image).to(self.device) + boxes, logits, phrases = predict( + model=self.model, + image=processed_image, + caption=caption, + box_threshold=box_threshold, + text_threshold=text_threshold, + device=self.device, + ) + source_h, source_w, _ = image.shape + detections = Model.post_process_result(source_h=source_h, source_w=source_w, boxes=boxes, logits=logits) + return detections, phrases + + def predict_with_classes( + self, image: np.ndarray, classes: List[str], box_threshold: float, text_threshold: float + ) -> sv.Detections: + """ + import cv2 + + image = cv2.imread(IMAGE_PATH) + + model = Model(model_config_path=CONFIG_PATH, model_checkpoint_path=WEIGHTS_PATH) + detections = model.predict_with_classes( + image=image, + classes=CLASSES, + box_threshold=BOX_THRESHOLD, + text_threshold=TEXT_THRESHOLD + ) + + + import supervision as sv + + box_annotator = sv.BoxAnnotator() + annotated_image = box_annotator.annotate(scene=image, detections=detections) + """ + caption = ". ".join(classes) + processed_image = Model.preprocess_image(image_bgr=image).to(self.device) + boxes, logits, phrases = predict( + model=self.model, + image=processed_image, + caption=caption, + box_threshold=box_threshold, + text_threshold=text_threshold, + device=self.device, + ) + source_h, source_w, _ = image.shape + detections = Model.post_process_result(source_h=source_h, source_w=source_w, boxes=boxes, logits=logits) + class_id = Model.phrases2classes(phrases=phrases, classes=classes) + detections.class_id = class_id + return detections + + @staticmethod + def preprocess_image(image_bgr: np.ndarray) -> torch.Tensor: + transform = T.Compose( + [ + T.RandomResize([800], max_size=1333), + T.ToTensor(), + T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] + ) + image_pillow = Image.fromarray(cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)) + image_transformed, _ = transform(image_pillow, None) + return image_transformed + + @staticmethod + def post_process_result(source_h: int, source_w: int, boxes: torch.Tensor, logits: torch.Tensor) -> sv.Detections: + boxes = boxes * torch.Tensor([source_w, source_h, source_w, source_h]) + xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy() + confidence = logits.numpy() + return sv.Detections(xyxy=xyxy, confidence=confidence) + + @staticmethod + def phrases2classes(phrases: List[str], classes: List[str]) -> np.ndarray: + class_ids = [] + for phrase in phrases: + try: + # class_ids.append(classes.index(phrase)) + class_ids.append(Model.find_index(phrase, classes)) + except ValueError: + class_ids.append(None) + return np.array(class_ids) + + @staticmethod + def find_index(string, lst): + # if meet string like "lake river" will only keep "lake" + # this is an hack implementation for visualization which will be updated in the future + string = string.lower().split()[0] + for i, s in enumerate(lst): + if string in s.lower(): + return i + print( + 
"There's a wrong phrase happen, this is because of our post-process merged wrong tokens, which will be \ + modified in the future. We will assign it with a random label at this time." + ) + return 0 diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/logger.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/logger.py new file mode 100644 index 0000000000..679e0f5926 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/logger.py @@ -0,0 +1,91 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import functools +import logging +import os +import sys + +from termcolor import colored + + +class _ColorfulFormatter(logging.Formatter): + def __init__(self, *args, **kwargs): + self._root_name = kwargs.pop("root_name") + "." + self._abbrev_name = kwargs.pop("abbrev_name", "") + if len(self._abbrev_name): + self._abbrev_name = self._abbrev_name + "." + super(_ColorfulFormatter, self).__init__(*args, **kwargs) + + def formatMessage(self, record): + record.name = record.name.replace(self._root_name, self._abbrev_name) + log = super(_ColorfulFormatter, self).formatMessage(record) + if record.levelno == logging.WARNING: + prefix = colored("WARNING", "red", attrs=["blink"]) + elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: + prefix = colored("ERROR", "red", attrs=["blink", "underline"]) + else: + return log + return prefix + " " + log + + +# so that calling setup_logger multiple times won't add many handlers +@functools.lru_cache() +def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None): + """ + Initialize the detectron2 logger and set its verbosity level to "INFO". + + Args: + output (str): a file name or a directory to save log. If None, will not save log file. + If ends with ".txt" or ".log", assumed to be a file name. + Otherwise, logs will be saved to `output/log.txt`. + name (str): the root module name of this logger + + Returns: + logging.Logger: a logger + """ + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + logger.propagate = False + + if abbrev_name is None: + abbrev_name = name + + plain_formatter = logging.Formatter("[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S") + # stdout logging: master only + if distributed_rank == 0: + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + if color: + formatter = _ColorfulFormatter( + colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s", + datefmt="%m/%d %H:%M:%S", + root_name=name, + abbrev_name=str(abbrev_name), + ) + else: + formatter = plain_formatter + ch.setFormatter(formatter) + logger.addHandler(ch) + + # file logging: all workers + if output is not None: + if output.endswith(".txt") or output.endswith(".log"): + filename = output + else: + filename = os.path.join(output, "log.txt") + if distributed_rank > 0: + filename = filename + f".rank{distributed_rank}" + os.makedirs(os.path.dirname(filename), exist_ok=True) + + fh = logging.StreamHandler(_cached_log_stream(filename)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(plain_formatter) + logger.addHandler(fh) + + return logger + + +# cache the opened file object, so that different calls to `setup_logger` +# with the same file name can safely write to the same file. 
+@functools.lru_cache(maxsize=None) +def _cached_log_stream(filename): + return open(filename, "a") diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/misc.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/misc.py new file mode 100644 index 0000000000..89e58d144b --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/misc.py @@ -0,0 +1,701 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Misc functions, including distributed helpers. + +Mostly copy-paste from torchvision references. +""" +import colorsys +import datetime +import functools +import io +import json +import os +import pickle +import subprocess +import time +from collections import OrderedDict, defaultdict, deque +from typing import List, Optional + +import numpy as np +import torch +import torch.distributed as dist + +# needed due to empty tensor bug in pytorch and torchvision 0.5 +import torchvision +from torch import Tensor + +__torchvision_need_compat_flag = float(torchvision.__version__.split(".")[1]) < 7 +if __torchvision_need_compat_flag: + from torchvision.ops import _new_empty_tensor + from torchvision.ops.misc import _output_size + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! + """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + if d.shape[0] == 0: + return 0 + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + if os.environ.get("SHILONG_AMP", None) == "1": + eps = 1e-4 + else: + eps = 1e-6 + return self.total / (self.count + eps) + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value, + ) + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. 
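SmoothedValue above pairs a fixed-length deque (windowed median/average) with a global count and total, which is what MetricLogger formats during training. A small demo, assuming SmoothedValue is imported from this module:

    v = SmoothedValue(window_size=3, fmt="{median:.2f} ({global_avg:.2f})")
    for x in [1.0, 2.0, 3.0, 10.0]:
        v.update(x)

    print(v.median)      # 3.0: median of the last 3 values (2, 3, 10)
    print(v.global_avg)  # ~4.0: mean over all 4 updates
    print(str(v))        # "3.00 (4.00)"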
+ """ + + if dist.get_backend() == "nccl": + return dist.new_group(backend="gloo") + + return dist.group.WORLD + + +def all_gather_cpu(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + + world_size = get_world_size() + if world_size == 1: + return [data] + + cpu_group = _get_global_gloo_group() + + buffer = io.BytesIO() + torch.save(data, buffer) + data_view = buffer.getbuffer() + device = "cuda" if cpu_group is None else "cpu" + tensor = torch.ByteTensor(data_view).to(device) + + # obtain Tensor size of each rank + local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long) + size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)] + if cpu_group is None: + dist.all_gather(size_list, local_size) + else: + print("gathering on cpu") + dist.all_gather(size_list, local_size, group=cpu_group) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + assert isinstance(local_size.item(), int) + local_size = int(local_size.item()) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device)) + if local_size != max_size: + padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=device) + tensor = torch.cat((tensor, padding), dim=0) + if cpu_group is None: + dist.all_gather(tensor_list, tensor) + else: + dist.all_gather(tensor_list, tensor, group=cpu_group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + tensor = torch.split(tensor, [size, max_size - size], dim=0)[0] + buffer = io.BytesIO(tensor.cpu().numpy()) + obj = torch.load(buffer) + data_list.append(obj) + + return data_list + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + + if os.getenv("CPU_REDUCE") == "1": + return all_gather_cpu(data) + + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + + # obtain Tensor size of each rank + local_size = torch.tensor([tensor.numel()], device="cuda") + size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) + if local_size != max_size: + padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average 
(bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that all processes + have the averaged results. Returns a dict with the same fields as + input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.all_reduce(values) + if average: + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + # print(name, str(meter)) + # import ipdb;ipdb.set_trace() + if meter.count > 0: + loss_str.append("{}: {}".format(name, str(meter))) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None, logger=None): + if logger is None: + print_func = print + else: + print_func = logger.info + + i = 0 + if not header: + header = "" + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt="{avg:.4f}") + data_time = SmoothedValue(fmt="{avg:.4f}") + space_fmt = ":" + str(len(str(len(iterable)))) + "d" + if torch.cuda.is_available(): + log_msg = self.delimiter.join( + [ + header, + "[{0" + space_fmt + "}/{1}]", + "eta: {eta}", + "{meters}", + "time: {time}", + "data: {data}", + "max mem: {memory:.0f}", + ] + ) + else: + log_msg = self.delimiter.join( + [ + header, + "[{0" + space_fmt + "}/{1}]", + "eta: {eta}", + "{meters}", + "time: {time}", + "data: {data}", + ] + ) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + # import ipdb; ipdb.set_trace() + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print_func( + log_msg.format( + i, + len(iterable), + eta=eta_string, + meters=str(self), + time=str(iter_time), + data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB, + ) + ) + else: + print_func( + log_msg.format( + i, + len(iterable), + eta=eta_string, + meters=str(self), + time=str(iter_time), + data=str(data_time), + ) + ) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print_func("{} Total time: {} ({:.4f} s / it)".format(header, total_time_str, total_time / len(iterable))) + + +def get_sha(): + cwd = os.path.dirname(os.path.abspath(__file__)) + + def _run(command): + return subprocess.check_output(command, 
cwd=cwd).decode("ascii").strip() + + sha = "N/A" + diff = "clean" + branch = "N/A" + try: + sha = _run(["git", "rev-parse", "HEAD"]) + subprocess.check_output(["git", "diff"], cwd=cwd) + diff = _run(["git", "diff-index", "HEAD"]) + diff = "has uncommited changes" if diff else "clean" + branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"]) + except Exception: + pass + message = f"sha: {sha}, status: {diff}, branch: {branch}" + return message + + +def collate_fn(batch): + # import ipdb; ipdb.set_trace() + batch = list(zip(*batch)) + batch[0] = nested_tensor_from_tensor_list(batch[0]) + return tuple(batch) + + +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + if mask == "auto": + self.mask = torch.zeros_like(tensors).to(tensors.device) + if self.mask.dim() == 3: + self.mask = self.mask.sum(0).to(bool) + elif self.mask.dim() == 4: + self.mask = self.mask.sum(1).to(bool) + else: + raise ValueError("tensors dim must be 3 or 4 but {}({})".format(self.tensors.dim(), self.tensors.shape)) + + def imgsize(self): + res = [] + for i in range(self.tensors.shape[0]): + mask = self.mask[i] + maxH = (~mask).sum(0).max() + maxW = (~mask).sum(1).max() + res.append(torch.Tensor([maxH, maxW])) + return res + + def to(self, device): + # type: (Device) -> NestedTensor # noqa + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + assert mask is not None + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def to_img_list_single(self, tensor, mask): + assert tensor.dim() == 3, "dim of tensor should be 3 but {}".format(tensor.dim()) + maxH = (~mask).sum(0).max() + maxW = (~mask).sum(1).max() + img = tensor[:, :maxH, :maxW] + return img + + def to_img_list(self): + """remove the padding and convert to img list + + Returns: + [type]: [description] + """ + if self.tensors.dim() == 3: + return self.to_img_list_single(self.tensors, self.mask) + else: + res = [] + for i in range(self.tensors.shape[0]): + tensor_i = self.tensors[i] + mask_i = self.mask[i] + res.append(self.to_img_list_single(tensor_i, mask_i)) + return res + + @property + def device(self): + return self.tensors.device + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + @property + def shape(self): + return {"tensors.shape": self.tensors.shape, "mask.shape": self.mask.shape} + + +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + # TODO make this more general + if tensor_list[0].ndim == 3: + if torchvision._is_tracing(): + # nested_tensor_from_tensor_list() does not export well to ONNX + # call _onnx_nested_tensor_from_tensor_list() instead + return _onnx_nested_tensor_from_tensor_list(tensor_list) + + # TODO make it support different-sized images + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) + batch_shape = [len(tensor_list)] + max_size + b, c, h, w = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, 
tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], : img.shape[2]] = False + else: + raise ValueError("not supported") + return NestedTensor(tensor, mask) + + +# _onnx_nested_tensor_from_tensor_list() is an implementation of +# nested_tensor_from_tensor_list() that is supported by ONNX tracing. +@torch.jit.unused +def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: + max_size = [] + for i in range(tensor_list[0].dim()): + max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64) + max_size.append(max_size_i) + max_size = tuple(max_size) + + # work around for + # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + # m[: img.shape[1], :img.shape[2]] = False + # which is not yet supported in onnx + padded_imgs = [] + padded_masks = [] + for img in tensor_list: + padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] + padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) + padded_imgs.append(padded_img) + + m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) + padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) + padded_masks.append(padded_mask.to(torch.bool)) + + tensor = torch.stack(padded_imgs) + mask = torch.stack(padded_masks) + + return NestedTensor(tensor, mask=mask) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop("force", False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if "WORLD_SIZE" in os.environ and os.environ["WORLD_SIZE"] != "": # 'RANK' in os.environ and + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ["WORLD_SIZE"]) + args.gpu = args.local_rank = int(os.environ["LOCAL_RANK"]) + + # launch by torch.distributed.launch + # Single node + # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ... + # Multi nodes + # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ... + # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ... 
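+ # Editor's note (illustrative, not upstream code): any launcher that exports
+ # WORLD_SIZE / RANK / LOCAL_RANK lands in this branch; the newer
+ #   torchrun --nproc_per_node=8 main.py ...
+ # sets the same variables as the torch.distributed.launch commands above.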
+ # args.rank = int(os.environ.get('OMPI_COMM_WORLD_RANK')) + # local_world_size = int(os.environ['GPU_PER_NODE_COUNT']) + # args.world_size = args.world_size * local_world_size + # args.gpu = args.local_rank = int(os.environ['LOCAL_RANK']) + # args.rank = args.rank * local_world_size + args.local_rank + print("world size: {}, rank: {}, local rank: {}".format(args.world_size, args.rank, args.local_rank)) + print(json.dumps(dict(os.environ), indent=2)) + elif "SLURM_PROCID" in os.environ: + args.rank = int(os.environ["SLURM_PROCID"]) + args.gpu = args.local_rank = int(os.environ["SLURM_LOCALID"]) + args.world_size = int(os.environ["SLURM_NPROCS"]) + + print( + "world size: {}, world rank: {}, local rank: {}, device_count: {}".format( + args.world_size, args.rank, args.local_rank, torch.cuda.device_count() + ) + ) + else: + print("Not using distributed mode") + args.distributed = False + args.world_size = 1 + args.rank = 0 + args.local_rank = 0 + return + + print("world_size:{} rank:{} local_rank:{}".format(args.world_size, args.rank, args.local_rank)) + args.distributed = True + torch.cuda.set_device(args.local_rank) + args.dist_backend = "nccl" + print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True) + + torch.distributed.init_process_group( + backend=args.dist_backend, + world_size=args.world_size, + rank=args.rank, + init_method=args.dist_url, + ) + + print("Before torch.distributed.barrier()") + torch.distributed.barrier() + print("End torch.distributed.barrier()") + setup_for_distributed(args.rank == 0) + + +@torch.no_grad() +def accuracy(output, target, topk=(1,)): + """Computes the precision@k for the specified values of k""" + if target.numel() == 0: + return [torch.zeros([], device=output.device)] + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +@torch.no_grad() +def accuracy_onehot(pred, gt): + """_summary_ + + Args: + pred (_type_): n, c + gt (_type_): n, c + """ + tp = ((pred - gt).abs().sum(-1) < 1e-4).float().sum() + acc = tp / gt.shape[0] * 100 + return acc + + +def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): + # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor + """ + Equivalent to nn.functional.interpolate, but with support for empty batch sizes. + This will eventually be supported natively by PyTorch, and this + class can go away. 
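+
+    Example (editor's sketch, not upstream; assumes a torchvision build that
+    still exposes ``torchvision.ops.misc.interpolate`` for the modern branch):
+        >>> x = torch.zeros(0, 3, 8, 8)  # the empty batch this shim exists for
+        >>> interpolate(x, scale_factor=2.0).shape
+        torch.Size([0, 3, 16, 16])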
+ """ + if __torchvision_need_compat_flag < 0.7: + if input.numel() > 0: + return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners) + + output_shape = _output_size(2, input, size, scale_factor) + output_shape = list(input.shape[:-2]) + list(output_shape) + return _new_empty_tensor(input, output_shape) + else: + return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) + + +class color_sys: + def __init__(self, num_colors) -> None: + self.num_colors = num_colors + colors = [] + for i in np.arange(0.0, 360.0, 360.0 / num_colors): + hue = i / 360.0 + lightness = (50 + np.random.rand() * 10) / 100.0 + saturation = (90 + np.random.rand() * 10) / 100.0 + colors.append(tuple([int(j * 255) for j in colorsys.hls_to_rgb(hue, lightness, saturation)])) + self.colors = colors + + def __call__(self, idx): + return self.colors[idx] + + +def inverse_sigmoid(x, eps=1e-3): + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +def clean_state_dict(state_dict): + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + if k[:7] == "module.": + k = k[7:] # remove `module.` + new_state_dict[k] = v + return new_state_dict diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/slconfig.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/slconfig.py new file mode 100644 index 0000000000..e3eba6a4ea --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/slconfig.py @@ -0,0 +1,419 @@ +# ========================================================== +# Modified from mmcv +# ========================================================== +import ast +import os.path as osp +import platform +import shutil +import sys +import tempfile +from argparse import Action +from importlib import import_module + +from addict import Dict +from yapf.yapflib.yapf_api import FormatCode + +BASE_KEY = "_base_" +DELETE_KEY = "_delete_" +RESERVED_KEYS = ["filename", "text", "pretty_text", "get", "dump", "merge_from_dict"] + + +def check_file_exist(filename, msg_tmpl='file "{}" does not exist'): + if not osp.isfile(filename): + raise FileNotFoundError(msg_tmpl.format(filename)) + + +class ConfigDict(Dict): + def __missing__(self, name): + raise KeyError(name) + + def __getattr__(self, name): + try: + value = super(ConfigDict, self).__getattr__(name) + except KeyError: + ex = AttributeError(f"'{self.__class__.__name__}' object has no " f"attribute '{name}'") + except Exception as e: + ex = e + else: + return value + raise ex + + +class SLConfig(object): + """ + config files. + only support .py file as config now. 
+ + ref: mmcv.utils.config + + Example: + >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1]))) + >>> cfg.a + 1 + >>> cfg.b + {'b1': [0, 1]} + >>> cfg.b.b1 + [0, 1] + >>> cfg = Config.fromfile('tests/data/config/a.py') + >>> cfg.filename + "/home/kchen/projects/mmcv/tests/data/config/a.py" + >>> cfg.item4 + 'test' + >>> cfg + "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: " + "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}" + """ + + @staticmethod + def _validate_py_syntax(filename): + with open(filename) as f: + content = f.read() + try: + ast.parse(content) + except SyntaxError: + raise SyntaxError("There are syntax errors in config " f"file {filename}") + + @staticmethod + def _file2dict(filename): + filename = osp.abspath(osp.expanduser(filename)) + check_file_exist(filename) + if filename.lower().endswith(".py"): + with tempfile.TemporaryDirectory() as temp_config_dir: + temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=".py") + temp_config_name = osp.basename(temp_config_file.name) + if platform.system() == "Windows": + temp_config_file.close() + shutil.copyfile(filename, osp.join(temp_config_dir, temp_config_name)) + temp_module_name = osp.splitext(temp_config_name)[0] + sys.path.insert(0, temp_config_dir) + SLConfig._validate_py_syntax(filename) + mod = import_module(temp_module_name) + sys.path.pop(0) + cfg_dict = {name: value for name, value in mod.__dict__.items() if not name.startswith("__")} + # delete imported module + del sys.modules[temp_module_name] + # close temp file + temp_config_file.close() + elif filename.lower().endswith((".yml", ".yaml", ".json")): + from .slio import slload + + cfg_dict = slload(filename) + else: + raise IOError("Only py/yml/yaml/json type are supported now!") + + cfg_text = filename + "\n" + with open(filename, "r") as f: + cfg_text += f.read() + + # parse the base file + if BASE_KEY in cfg_dict: + cfg_dir = osp.dirname(filename) + base_filename = cfg_dict.pop(BASE_KEY) + base_filename = base_filename if isinstance(base_filename, list) else [base_filename] + + cfg_dict_list = list() + cfg_text_list = list() + for f in base_filename: + _cfg_dict, _cfg_text = SLConfig._file2dict(osp.join(cfg_dir, f)) + cfg_dict_list.append(_cfg_dict) + cfg_text_list.append(_cfg_text) + + base_cfg_dict = dict() + for c in cfg_dict_list: + if len(base_cfg_dict.keys() & c.keys()) > 0: + raise KeyError("Duplicate key is not allowed among bases") + # TODO Allow the duplicate key while warnning user + base_cfg_dict.update(c) + + base_cfg_dict = SLConfig._merge_a_into_b(cfg_dict, base_cfg_dict) + cfg_dict = base_cfg_dict + + # merge cfg_text + cfg_text_list.append(cfg_text) + cfg_text = "\n".join(cfg_text_list) + + return cfg_dict, cfg_text + + @staticmethod + def _merge_a_into_b(a, b): + """merge dict `a` into dict `b` (non-inplace). + values in `a` will overwrite `b`. + copy first to avoid inplace modification + + Args: + a ([type]): [description] + b ([type]): [description] + + Returns: + [dict]: [description] + """ + # import ipdb; ipdb.set_trace() + if not isinstance(a, dict): + return a + + b = b.copy() + for k, v in a.items(): + if isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False): + + if not isinstance(b[k], dict) and not isinstance(b[k], list): + # if : + # import ipdb; ipdb.set_trace() + raise TypeError( + f"{k}={v} in child config cannot inherit from base " + f"because {k} is a dict in the child config but is of " + f"type {type(b[k])} in base config. 
You may set " + f"`{DELETE_KEY}=True` to ignore the base config" + ) + b[k] = SLConfig._merge_a_into_b(v, b[k]) + elif isinstance(b, list): + try: + _ = int(k) + except: + raise TypeError(f"b is a list, " f"index {k} should be an int when input but {type(k)}") + b[int(k)] = SLConfig._merge_a_into_b(v, b[int(k)]) + else: + b[k] = v + + return b + + @staticmethod + def fromfile(filename): + cfg_dict, cfg_text = SLConfig._file2dict(filename) + return SLConfig(cfg_dict, cfg_text=cfg_text, filename=filename) + + def __init__(self, cfg_dict=None, cfg_text=None, filename=None): + if cfg_dict is None: + cfg_dict = dict() + elif not isinstance(cfg_dict, dict): + raise TypeError("cfg_dict must be a dict, but " f"got {type(cfg_dict)}") + for key in cfg_dict: + if key in RESERVED_KEYS: + raise KeyError(f"{key} is reserved for config file") + + super(SLConfig, self).__setattr__("_cfg_dict", ConfigDict(cfg_dict)) + super(SLConfig, self).__setattr__("_filename", filename) + if cfg_text: + text = cfg_text + elif filename: + with open(filename, "r") as f: + text = f.read() + else: + text = "" + super(SLConfig, self).__setattr__("_text", text) + + @property + def filename(self): + return self._filename + + @property + def text(self): + return self._text + + @property + def pretty_text(self): + + indent = 4 + + def _indent(s_, num_spaces): + s = s_.split("\n") + if len(s) == 1: + return s_ + first = s.pop(0) + s = [(num_spaces * " ") + line for line in s] + s = "\n".join(s) + s = first + "\n" + s + return s + + def _format_basic_types(k, v, use_mapping=False): + if isinstance(v, str): + v_str = f"'{v}'" + else: + v_str = str(v) + + if use_mapping: + k_str = f"'{k}'" if isinstance(k, str) else str(k) + attr_str = f"{k_str}: {v_str}" + else: + attr_str = f"{str(k)}={v_str}" + attr_str = _indent(attr_str, indent) + + return attr_str + + def _format_list(k, v, use_mapping=False): + # check if all items in the list are dict + if all(isinstance(_, dict) for _ in v): + v_str = "[\n" + v_str += "\n".join(f"dict({_indent(_format_dict(v_), indent)})," for v_ in v).rstrip(",") + if use_mapping: + k_str = f"'{k}'" if isinstance(k, str) else str(k) + attr_str = f"{k_str}: {v_str}" + else: + attr_str = f"{str(k)}={v_str}" + attr_str = _indent(attr_str, indent) + "]" + else: + attr_str = _format_basic_types(k, v, use_mapping) + return attr_str + + def _contain_invalid_identifier(dict_str): + contain_invalid_identifier = False + for key_name in dict_str: + contain_invalid_identifier |= not str(key_name).isidentifier() + return contain_invalid_identifier + + def _format_dict(input_dict, outest_level=False): + r = "" + s = [] + + use_mapping = _contain_invalid_identifier(input_dict) + if use_mapping: + r += "{" + for idx, (k, v) in enumerate(input_dict.items()): + is_last = idx >= len(input_dict) - 1 + end = "" if outest_level or is_last else "," + if isinstance(v, dict): + v_str = "\n" + _format_dict(v) + if use_mapping: + k_str = f"'{k}'" if isinstance(k, str) else str(k) + attr_str = f"{k_str}: dict({v_str}" + else: + attr_str = f"{str(k)}=dict({v_str}" + attr_str = _indent(attr_str, indent) + ")" + end + elif isinstance(v, list): + attr_str = _format_list(k, v, use_mapping) + end + else: + attr_str = _format_basic_types(k, v, use_mapping) + end + + s.append(attr_str) + r += "\n".join(s) + if use_mapping: + r += "}" + return r + + cfg_dict = self._cfg_dict.to_dict() + text = _format_dict(cfg_dict, outest_level=True) + # copied from setup.cfg + yapf_style = dict( + based_on_style="pep8", + 
blank_line_before_nested_class_or_def=True, + split_before_expression_after_opening_paren=True, + ) + text, _ = FormatCode(text, style_config=yapf_style, verify=True) + + return text + + def __repr__(self): + return f"Config (path: {self.filename}): {self._cfg_dict.__repr__()}" + + def __len__(self): + return len(self._cfg_dict) + + def __getattr__(self, name): + # # debug + # print('+'*15) + # print('name=%s' % name) + # print("addr:", id(self)) + # # print('type(self):', type(self)) + # print(self.__dict__) + # print('+'*15) + # if self.__dict__ == {}: + # raise ValueError + + return getattr(self._cfg_dict, name) + + def __getitem__(self, name): + return self._cfg_dict.__getitem__(name) + + def __setattr__(self, name, value): + if isinstance(value, dict): + value = ConfigDict(value) + self._cfg_dict.__setattr__(name, value) + + def __setitem__(self, name, value): + if isinstance(value, dict): + value = ConfigDict(value) + self._cfg_dict.__setitem__(name, value) + + def __iter__(self): + return iter(self._cfg_dict) + + def dump(self, file=None): + # import ipdb; ipdb.set_trace() + if file is None: + return self.pretty_text + else: + with open(file, "w") as f: + f.write(self.pretty_text) + + def merge_from_dict(self, options): + """Merge list into cfg_dict + + Merge the dict parsed by MultipleKVAction into this cfg. + + Examples: + >>> options = {'model.backbone.depth': 50, + ... 'model.backbone.with_cp':True} + >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet')))) + >>> cfg.merge_from_dict(options) + >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict') + >>> assert cfg_dict == dict( + ... model=dict(backbone=dict(depth=50, with_cp=True))) + + Args: + options (dict): dict of configs to merge from. + """ + option_cfg_dict = {} + for full_key, v in options.items(): + d = option_cfg_dict + key_list = full_key.split(".") + for subkey in key_list[:-1]: + d.setdefault(subkey, ConfigDict()) + d = d[subkey] + subkey = key_list[-1] + d[subkey] = v + + cfg_dict = super(SLConfig, self).__getattribute__("_cfg_dict") + super(SLConfig, self).__setattr__("_cfg_dict", SLConfig._merge_a_into_b(option_cfg_dict, cfg_dict)) + + # for multiprocess + def __setstate__(self, state): + self.__init__(state) + + def copy(self): + return SLConfig(self._cfg_dict.copy()) + + def deepcopy(self): + return SLConfig(self._cfg_dict.deepcopy()) + + +class DictAction(Action): + """ + argparse action to split an argument into KEY=VALUE form + on the first = and append to a dictionary. 
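+
+    Example (editor's illustration, assuming ``argparse`` is imported):
+        >>> parser = argparse.ArgumentParser()
+        >>> _ = parser.add_argument("--options", nargs="+", action=DictAction)
+        >>> parser.parse_args(["--options", "lr=0.1", "gpus=0,1"]).options
+        {'lr': 0.1, 'gpus': [0, 1]}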
List options should + be passed as comma separated values, i.e KEY=V1,V2,V3 + """ + + @staticmethod + def _parse_int_float_bool(val): + try: + return int(val) + except ValueError: + pass + try: + return float(val) + except ValueError: + pass + if val.lower() in ["true", "false"]: + return True if val.lower() == "true" else False + if val.lower() in ["none", "null"]: + return None + return val + + def __call__(self, parser, namespace, values, option_string=None): + options = {} + for kv in values: + key, val = kv.split("=", maxsplit=1) + val = [self._parse_int_float_bool(v) for v in val.split(",")] + if len(val) == 1: + val = val[0] + options[key] = val + setattr(namespace, self.dest, options) diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/slio.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/slio.py new file mode 100644 index 0000000000..f4923dd17e --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/slio.py @@ -0,0 +1,178 @@ +# ========================================================== +# Modified from mmcv +# ========================================================== + +import json +import pickle +from abc import ABCMeta, abstractmethod +from pathlib import Path + +import yaml + +try: + from yaml import CDumper as Dumper + from yaml import CLoader as Loader +except ImportError: + from yaml import Dumper, Loader + + +# =========================== +# Rigister handler +# =========================== + + +class BaseFileHandler(metaclass=ABCMeta): + @abstractmethod + def load_from_fileobj(self, file, **kwargs): + pass + + @abstractmethod + def dump_to_fileobj(self, obj, file, **kwargs): + pass + + @abstractmethod + def dump_to_str(self, obj, **kwargs): + pass + + def load_from_path(self, filepath, mode="r", **kwargs): + with open(filepath, mode) as f: + return self.load_from_fileobj(f, **kwargs) + + def dump_to_path(self, obj, filepath, mode="w", **kwargs): + with open(filepath, mode) as f: + self.dump_to_fileobj(obj, f, **kwargs) + + +class JsonHandler(BaseFileHandler): + def load_from_fileobj(self, file): + return json.load(file) + + def dump_to_fileobj(self, obj, file, **kwargs): + json.dump(obj, file, **kwargs) + + def dump_to_str(self, obj, **kwargs): + return json.dumps(obj, **kwargs) + + +class PickleHandler(BaseFileHandler): + def load_from_fileobj(self, file, **kwargs): + return pickle.load(file, **kwargs) + + def load_from_path(self, filepath, **kwargs): + return super(PickleHandler, self).load_from_path(filepath, mode="rb", **kwargs) + + def dump_to_str(self, obj, **kwargs): + kwargs.setdefault("protocol", 2) + return pickle.dumps(obj, **kwargs) + + def dump_to_fileobj(self, obj, file, **kwargs): + kwargs.setdefault("protocol", 2) + pickle.dump(obj, file, **kwargs) + + def dump_to_path(self, obj, filepath, **kwargs): + super(PickleHandler, self).dump_to_path(obj, filepath, mode="wb", **kwargs) + + +class YamlHandler(BaseFileHandler): + def load_from_fileobj(self, file, **kwargs): + kwargs.setdefault("Loader", Loader) + return yaml.load(file, **kwargs) + + def dump_to_fileobj(self, obj, file, **kwargs): + kwargs.setdefault("Dumper", Dumper) + yaml.dump(obj, file, **kwargs) + + def dump_to_str(self, obj, **kwargs): + kwargs.setdefault("Dumper", Dumper) + return yaml.dump(obj, **kwargs) + + +file_handlers = { + "json": JsonHandler(), + "yaml": YamlHandler(), + "yml": YamlHandler(), + "pickle": PickleHandler(), + "pkl": PickleHandler(), +} + +# =========================== +# 
load and dump +# =========================== + + +def is_str(x): + """Whether the input is an string instance. + + Note: This method is deprecated since python 2 is no longer supported. + """ + return isinstance(x, str) + + +def slload(file, file_format=None, **kwargs): + """Load data from json/yaml/pickle files. + + This method provides a unified api for loading data from serialized files. + + Args: + file (str or :obj:`Path` or file-like object): Filename or a file-like + object. + file_format (str, optional): If not specified, the file format will be + inferred from the file extension, otherwise use the specified one. + Currently supported formats include "json", "yaml/yml" and + "pickle/pkl". + + Returns: + The content from the file. + """ + if isinstance(file, Path): + file = str(file) + if file_format is None and is_str(file): + file_format = file.split(".")[-1] + if file_format not in file_handlers: + raise TypeError(f"Unsupported format: {file_format}") + + handler = file_handlers[file_format] + if is_str(file): + obj = handler.load_from_path(file, **kwargs) + elif hasattr(file, "read"): + obj = handler.load_from_fileobj(file, **kwargs) + else: + raise TypeError('"file" must be a filepath str or a file-object') + return obj + + +def sldump(obj, file=None, file_format=None, **kwargs): + """Dump data to json/yaml/pickle strings or files. + + This method provides a unified api for dumping data as strings or to files, + and also supports custom arguments for each file format. + + Args: + obj (any): The python object to be dumped. + file (str or :obj:`Path` or file-like object, optional): If not + specified, then the object is dump to a str, otherwise to a file + specified by the filename or file-like object. + file_format (str, optional): Same as :func:`load`. + + Returns: + bool: True for success, False otherwise. 
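+
+    Example (editor's sketch):
+        >>> sldump({"a": 1}, "cfg.json")  # format inferred from the extension
+        >>> slload("cfg.json")
+        {'a': 1}
+        >>> print(sldump({"a": 1}, file_format="yaml"), end="")
+        a: 1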
+ """ + if isinstance(file, Path): + file = str(file) + if file_format is None: + if is_str(file): + file_format = file.split(".")[-1] + elif file is None: + raise ValueError("file_format must be specified since file is None") + if file_format not in file_handlers: + raise TypeError(f"Unsupported format: {file_format}") + + handler = file_handlers[file_format] + if file is None: + return handler.dump_to_str(obj, **kwargs) + elif is_str(file): + handler.dump_to_path(obj, file, **kwargs) + elif hasattr(file, "write"): + handler.dump_to_fileobj(obj, file, **kwargs) + else: + raise TypeError('"file" must be a filename str or a file-object') diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/time_counter.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/time_counter.py new file mode 100644 index 0000000000..0aedb2e4d6 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/time_counter.py @@ -0,0 +1,62 @@ +import json +import time + + +class TimeCounter: + def __init__(self) -> None: + pass + + def clear(self): + self.timedict = {} + self.basetime = time.perf_counter() + + def timeit(self, name): + nowtime = time.perf_counter() - self.basetime + self.timedict[name] = nowtime + self.basetime = time.perf_counter() + + +class TimeHolder: + def __init__(self) -> None: + self.timedict = {} + + def update(self, _timedict: dict): + for k, v in _timedict.items(): + if k not in self.timedict: + self.timedict[k] = AverageMeter(name=k, val_only=True) + self.timedict[k].update(val=v) + + def final_res(self): + return {k: v.avg for k, v in self.timedict.items()} + + def __str__(self): + return json.dumps(self.final_res(), indent=2) + + +class AverageMeter(object): + """Computes and stores the average and current value""" + + def __init__(self, name, fmt=":f", val_only=False): + self.name = name + self.fmt = fmt + self.val_only = val_only + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + if self.val_only: + fmtstr = "{name} {val" + self.fmt + "}" + else: + fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})" + return fmtstr.format(**self.__dict__) diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/utils.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/utils.py new file mode 100644 index 0000000000..c69ed7d4f1 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/utils.py @@ -0,0 +1,598 @@ +import argparse +import json +import warnings +from collections import OrderedDict +from copy import deepcopy +from typing import Any, Dict, List + +import numpy as np +import torch +from transformers import AutoTokenizer + +from groundingdino.util.slconfig import SLConfig + + +def slprint(x, name="x"): + if isinstance(x, (torch.Tensor, np.ndarray)): + print(f"{name}.shape:", x.shape) + elif isinstance(x, (tuple, list)): + print("type x:", type(x)) + for i in range(min(10, len(x))): + slprint(x[i], f"{name}[{i}]") + elif isinstance(x, dict): + for k, v in x.items(): + slprint(v, f"{name}[{k}]") + else: + print(f"{name}.type:", type(x)) + + +def clean_state_dict(state_dict): + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + if k[:7] == "module.": + k = k[7:] # remove `module.` + new_state_dict[k] 
= v + return new_state_dict + + +def renorm(img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) -> torch.FloatTensor: + # img: tensor(3,H,W) or tensor(B,3,H,W) + # return: same as img + assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() + if img.dim() == 3: + assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( + img.size(0), + str(img.size()), + ) + img_perm = img.permute(1, 2, 0) + mean = torch.Tensor(mean) + std = torch.Tensor(std) + img_res = img_perm * std + mean + return img_res.permute(2, 0, 1) + else: # img.dim() == 4 + assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % ( + img.size(1), + str(img.size()), + ) + img_perm = img.permute(0, 2, 3, 1) + mean = torch.Tensor(mean) + std = torch.Tensor(std) + img_res = img_perm * std + mean + return img_res.permute(0, 3, 1, 2) + + +class CocoClassMapper: + def __init__(self) -> None: + self.category_map_str = { + "1": 1, + "2": 2, + "3": 3, + "4": 4, + "5": 5, + "6": 6, + "7": 7, + "8": 8, + "9": 9, + "10": 10, + "11": 11, + "13": 12, + "14": 13, + "15": 14, + "16": 15, + "17": 16, + "18": 17, + "19": 18, + "20": 19, + "21": 20, + "22": 21, + "23": 22, + "24": 23, + "25": 24, + "27": 25, + "28": 26, + "31": 27, + "32": 28, + "33": 29, + "34": 30, + "35": 31, + "36": 32, + "37": 33, + "38": 34, + "39": 35, + "40": 36, + "41": 37, + "42": 38, + "43": 39, + "44": 40, + "46": 41, + "47": 42, + "48": 43, + "49": 44, + "50": 45, + "51": 46, + "52": 47, + "53": 48, + "54": 49, + "55": 50, + "56": 51, + "57": 52, + "58": 53, + "59": 54, + "60": 55, + "61": 56, + "62": 57, + "63": 58, + "64": 59, + "65": 60, + "67": 61, + "70": 62, + "72": 63, + "73": 64, + "74": 65, + "75": 66, + "76": 67, + "77": 68, + "78": 69, + "79": 70, + "80": 71, + "81": 72, + "82": 73, + "84": 74, + "85": 75, + "86": 76, + "87": 77, + "88": 78, + "89": 79, + "90": 80, + } + self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()} + self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()} + + def origin2compact(self, idx): + return self.origin2compact_mapper[int(idx)] + + def compact2origin(self, idx): + return self.compact2origin_mapper[int(idx)] + + +def to_device(item, device): + if isinstance(item, torch.Tensor): + return item.to(device) + elif isinstance(item, list): + return [to_device(i, device) for i in item] + elif isinstance(item, dict): + return {k: to_device(v, device) for k, v in item.items()} + else: + raise NotImplementedError("Call Shilong if you use other containers! 
type: {}".format(type(item))) + + +# +def get_gaussian_mean(x, axis, other_axis, softmax=True): + """ + + Args: + x (float): Input images(BxCxHxW) + axis (int): The index for weighted mean + other_axis (int): The other index + + Returns: weighted index for axis, BxC + + """ + mat2line = torch.sum(x, axis=other_axis) + # mat2line = mat2line / mat2line.mean() * 10 + if softmax: + u = torch.softmax(mat2line, axis=2) + else: + u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6) + size = x.shape[axis] + ind = torch.linspace(0, 1, size).to(x.device) + batch = x.shape[0] + channel = x.shape[1] + index = ind.repeat([batch, channel, 1]) + mean_position = torch.sum(index * u, dim=2) + return mean_position + + +def get_expected_points_from_map(hm, softmax=True): + """get_gaussian_map_from_points + B,C,H,W -> B,N,2 float(0, 1) float(0, 1) + softargmax function + + Args: + hm (float): Input images(BxCxHxW) + + Returns: + weighted index for axis, BxCx2. float between 0 and 1. + + """ + # hm = 10*hm + B, C, H, W = hm.shape + y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax) # B,C + x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax) # B,C + # return torch.cat((x_mean.unsqueeze(-1), y_mean.unsqueeze(-1)), 2) + return torch.stack([x_mean, y_mean], dim=2) + + +# Positional encoding (section 5.1) +# borrow from nerf +class Embedder: + def __init__(self, **kwargs): + self.kwargs = kwargs + self.create_embedding_fn() + + def create_embedding_fn(self): + embed_fns = [] + d = self.kwargs["input_dims"] + out_dim = 0 + if self.kwargs["include_input"]: + embed_fns.append(lambda x: x) + out_dim += d + + max_freq = self.kwargs["max_freq_log2"] + N_freqs = self.kwargs["num_freqs"] + + if self.kwargs["log_sampling"]: + freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs) + else: + freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs) + + for freq in freq_bands: + for p_fn in self.kwargs["periodic_fns"]: + embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) + out_dim += d + + self.embed_fns = embed_fns + self.out_dim = out_dim + + def embed(self, inputs): + return torch.cat([fn(inputs) for fn in self.embed_fns], -1) + + +def get_embedder(multires, i=0): + import torch.nn as nn + + if i == -1: + return nn.Identity(), 3 + + embed_kwargs = { + "include_input": True, + "input_dims": 3, + "max_freq_log2": multires - 1, + "num_freqs": multires, + "log_sampling": True, + "periodic_fns": [torch.sin, torch.cos], + } + + embedder_obj = Embedder(**embed_kwargs) + embed = lambda x, eo=embedder_obj: eo.embed(x) + return embed, embedder_obj.out_dim + + +class APOPMeter: + def __init__(self) -> None: + self.tp = 0 + self.fp = 0 + self.tn = 0 + self.fn = 0 + + def update(self, pred, gt): + """ + Input: + pred, gt: Tensor() + """ + assert pred.shape == gt.shape + self.tp += torch.logical_and(pred == 1, gt == 1).sum().item() + self.fp += torch.logical_and(pred == 1, gt == 0).sum().item() + self.tn += torch.logical_and(pred == 0, gt == 0).sum().item() + self.tn += torch.logical_and(pred == 1, gt == 0).sum().item() + + def update_cm(self, tp, fp, tn, fn): + self.tp += tp + self.fp += fp + self.tn += tn + self.tn += fn + + +def inverse_sigmoid(x, eps=1e-5): + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +def get_raw_dict(args): + """ + return the dicf contained in args. 
+ + e.g: + >>> with open(path, 'w') as f: + json.dump(get_raw_dict(args), f, indent=2) + """ + if isinstance(args, argparse.Namespace): + return vars(args) + elif isinstance(args, dict): + return args + elif isinstance(args, SLConfig): + return args._cfg_dict + else: + raise NotImplementedError("Unknown type {}".format(type(args))) + + +def stat_tensors(tensor): + assert tensor.dim() == 1 + tensor_sm = tensor.softmax(0) + entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum() + + return { + "max": tensor.max(), + "min": tensor.min(), + "mean": tensor.mean(), + "var": tensor.var(), + "std": tensor.var() ** 0.5, + "entropy": entropy, + } + + +class NiceRepr: + """Inherit from this class and define ``__nice__`` to "nicely" print your + objects. + + Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function + Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``. + If the inheriting class has a ``__len__``, method then the default + ``__nice__`` method will return its length. + + Example: + >>> class Foo(NiceRepr): + ... def __nice__(self): + ... return 'info' + >>> foo = Foo() + >>> assert str(foo) == '' + >>> assert repr(foo).startswith('>> class Bar(NiceRepr): + ... pass + >>> bar = Bar() + >>> import pytest + >>> with pytest.warns(None) as record: + >>> assert 'object at' in str(bar) + >>> assert 'object at' in repr(bar) + + Example: + >>> class Baz(NiceRepr): + ... def __len__(self): + ... return 5 + >>> baz = Baz() + >>> assert str(baz) == '' + """ + + def __nice__(self): + """str: a "nice" summary string describing this module""" + if hasattr(self, "__len__"): + # It is a common pattern for objects to use __len__ in __nice__ + # As a convenience we define a default __nice__ for these objects + return str(len(self)) + else: + # In all other cases force the subclass to overload __nice__ + raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}") + + def __repr__(self): + """str: the string of the module""" + try: + nice = self.__nice__() + classname = self.__class__.__name__ + return f"<{classname}({nice}) at {hex(id(self))}>" + except NotImplementedError as ex: + warnings.warn(str(ex), category=RuntimeWarning) + return object.__repr__(self) + + def __str__(self): + """str: the string of the module""" + try: + classname = self.__class__.__name__ + nice = self.__nice__() + return f"<{classname}({nice})>" + except NotImplementedError as ex: + warnings.warn(str(ex), category=RuntimeWarning) + return object.__repr__(self) + + +def ensure_rng(rng=None): + """Coerces input into a random number generator. + + If the input is None, then a global random state is returned. + + If the input is a numeric value, then that is used as a seed to construct a + random state. Otherwise the input is returned as-is. + + Adapted from [1]_. + + Args: + rng (int | numpy.random.RandomState | None): + if None, then defaults to the global rng. Otherwise this can be an + integer or a RandomState class + Returns: + (numpy.random.RandomState) : rng - + a numpy random number generator + + References: + .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501 + """ + + if rng is None: + rng = np.random.mtrand._rand + elif isinstance(rng, int): + rng = np.random.RandomState(rng) + else: + rng = rng + return rng + + +def random_boxes(num=1, scale=1, rng=None): + """Simple version of ``kwimage.Boxes.random`` + + Returns: + Tensor: shape (n, 4) in x1, y1, x2, y2 format. 
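For reference, NiceRepr above is used by subclassing and defining `__nice__`; `__str__` and `__repr__` are then derived from it. A minimal sketch (editor's illustration, not upstream code):

```python
class Foo(NiceRepr):  # NiceRepr as defined earlier in this file
    def __nice__(self):
        return "info"

foo = Foo()
print(str(foo))   # <Foo(info)>
print(repr(foo))  # <Foo(info) at 0x...>  (address varies per run)
```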
+ + References: + https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 + + Example: + >>> num = 3 + >>> scale = 512 + >>> rng = 0 + >>> boxes = random_boxes(num, scale, rng) + >>> print(boxes) + tensor([[280.9925, 278.9802, 308.6148, 366.1769], + [216.9113, 330.6978, 224.0446, 456.5878], + [405.3632, 196.3221, 493.3953, 270.7942]]) + """ + rng = ensure_rng(rng) + + tlbr = rng.rand(num, 4).astype(np.float32) + + tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) + tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) + br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) + br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) + + tlbr[:, 0] = tl_x * scale + tlbr[:, 1] = tl_y * scale + tlbr[:, 2] = br_x * scale + tlbr[:, 3] = br_y * scale + + boxes = torch.from_numpy(tlbr) + return boxes + + +class ModelEma(torch.nn.Module): + def __init__(self, model, decay=0.9997, device=None): + super(ModelEma, self).__init__() + # make a copy of the model for accumulating moving average of weights + self.module = deepcopy(model) + self.module.eval() + + # import ipdb; ipdb.set_trace() + + self.decay = decay + self.device = device # perform ema on different device from model if set + if self.device is not None: + self.module.to(device=device) + + def _update(self, model, update_fn): + with torch.no_grad(): + for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): + if self.device is not None: + model_v = model_v.to(device=self.device) + ema_v.copy_(update_fn(ema_v, model_v)) + + def update(self, model): + self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m) + + def set(self, model): + self._update(model, update_fn=lambda e, m: m) + + +class BestMetricSingle: + def __init__(self, init_res=0.0, better="large") -> None: + self.init_res = init_res + self.best_res = init_res + self.best_ep = -1 + + self.better = better + assert better in ["large", "small"] + + def isbetter(self, new_res, old_res): + if self.better == "large": + return new_res > old_res + if self.better == "small": + return new_res < old_res + + def update(self, new_res, ep): + if self.isbetter(new_res, self.best_res): + self.best_res = new_res + self.best_ep = ep + return True + return False + + def __str__(self) -> str: + return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep) + + def __repr__(self) -> str: + return self.__str__() + + def summary(self) -> dict: + return { + "best_res": self.best_res, + "best_ep": self.best_ep, + } + + +class BestMetricHolder: + def __init__(self, init_res=0.0, better="large", use_ema=False) -> None: + self.best_all = BestMetricSingle(init_res, better) + self.use_ema = use_ema + if use_ema: + self.best_ema = BestMetricSingle(init_res, better) + self.best_regular = BestMetricSingle(init_res, better) + + def update(self, new_res, epoch, is_ema=False): + """ + return if the results is the best. 
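+
+        Example (editor's illustration, with ``use_ema=True``):
+            >>> holder = BestMetricHolder(better="large", use_ema=True)
+            >>> holder.update(0.50, epoch=0)               # regular eval
+            True
+            >>> holder.update(0.48, epoch=0, is_ema=True)  # EMA eval, not a new best
+            False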
+ """ + if not self.use_ema: + return self.best_all.update(new_res, epoch) + else: + if is_ema: + self.best_ema.update(new_res, epoch) + return self.best_all.update(new_res, epoch) + else: + self.best_regular.update(new_res, epoch) + return self.best_all.update(new_res, epoch) + + def summary(self): + if not self.use_ema: + return self.best_all.summary() + + res = {} + res.update({f"all_{k}": v for k, v in self.best_all.summary().items()}) + res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()}) + res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()}) + return res + + def __repr__(self) -> str: + return json.dumps(self.summary(), indent=2) + + def __str__(self) -> str: + return self.__repr__() + + +def targets_to(targets: List[Dict[str, Any]], device): + """Moves the target dicts to the given device.""" + excluded_keys = [ + "questionId", + "tokens_positive", + "strings_positive", + "tokens", + "dataset_name", + "sentence_id", + "original_img_id", + "nb_eval", + "task_id", + "original_id", + "token_span", + "caption", + "dataset_type", + ] + return [{k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets] + + +def get_phrases_from_posmap(posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer): + assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor" + if posmap.dim() == 1: + non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist() + token_ids = [tokenized["input_ids"][i] for i in non_zero_idx] + return tokenizer.decode(token_ids) + else: + raise NotImplementedError("posmap must be 1-dim") diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/visualizer.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/visualizer.py new file mode 100644 index 0000000000..09e004ce1b --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/visualizer.py @@ -0,0 +1,309 @@ +# -*- coding: utf-8 -*- +""" +@File : visualizer.py +@Time : 2022/04/05 11:39:33 +@Author : Shilong Liu +@Contact : slongliu86@gmail.com +""" + +import datetime +import os + +import matplotlib.pyplot as plt +import numpy as np +import torch +from matplotlib import transforms +from matplotlib.collections import PatchCollection +from matplotlib.patches import Polygon +from pycocotools import mask as maskUtils + + +def renorm(img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) -> torch.FloatTensor: + # img: tensor(3,H,W) or tensor(B,3,H,W) + # return: same as img + assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() + if img.dim() == 3: + assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( + img.size(0), + str(img.size()), + ) + img_perm = img.permute(1, 2, 0) + mean = torch.Tensor(mean) + std = torch.Tensor(std) + img_res = img_perm * std + mean + return img_res.permute(2, 0, 1) + else: # img.dim() == 4 + assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % ( + img.size(1), + str(img.size()), + ) + img_perm = img.permute(0, 2, 3, 1) + mean = torch.Tensor(mean) + std = torch.Tensor(std) + img_res = img_perm * std + mean + return img_res.permute(0, 3, 1, 2) + + +class ColorMap: + def __init__(self, basergb=[255, 255, 0]): + self.basergb = np.array(basergb) + + def __call__(self, attnmap): + # attnmap: h, w. np.uint8. + # return: h, w, 4. np.uint8. 
+ assert attnmap.dtype == np.uint8 + h, w = attnmap.shape + res = self.basergb.copy() + res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3 + attn1 = attnmap.copy()[..., None] # h, w, 1 + res = np.concatenate((res, attn1), axis=-1).astype(np.uint8) + return res + + +def rainbow_text(x, y, ls, lc, **kw): + """ + Take a list of strings ``ls`` and colors ``lc`` and place them next to each + other, with text ls[i] being shown in color lc[i]. + + This example shows how to do both vertical and horizontal text, and will + pass all keyword arguments to plt.text, so you can set the font size, + family, etc. + """ + t = plt.gca().transData + fig = plt.gcf() + plt.show() + + # horizontal version + for s, c in zip(ls, lc): + text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw) + text.draw(fig.canvas.get_renderer()) + ex = text.get_window_extent() + t = transforms.offset_copy(text._transform, x=ex.width, units="dots") + + # #vertical version + # for s,c in zip(ls,lc): + # text = plt.text(x,y," "+s+" ",color=c, transform=t, + # rotation=90,va='bottom',ha='center',**kw) + # text.draw(fig.canvas.get_renderer()) + # ex = text.get_window_extent() + # t = transforms.offset_copy(text._transform, y=ex.height, units='dots') + + +class COCOVisualizer: + def __init__(self, coco=None, tokenlizer=None) -> None: + self.coco = coco + + def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"): + """ + img: tensor(3, H, W) + tgt: make sure they are all on cpu. + must have items: 'image_id', 'boxes', 'size' + """ + plt.figure(dpi=dpi) + plt.rcParams["font.size"] = "5" + ax = plt.gca() + img = renorm(img).permute(1, 2, 0) + # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': + # import ipdb; ipdb.set_trace() + ax.imshow(img) + + self.addtgt(tgt) + + if tgt is None: + image_id = 0 + elif "image_id" not in tgt: + image_id = 0 + else: + image_id = tgt["image_id"] + + if caption is None: + savename = "{}/{}-{}.png".format(savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-")) + else: + savename = "{}/{}-{}-{}.png".format( + savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-") + ) + print("savename: {}".format(savename)) + os.makedirs(os.path.dirname(savename), exist_ok=True) + plt.savefig(savename) + plt.close() + + def addtgt(self, tgt): + """ """ + if tgt is None or not "boxes" in tgt: + ax = plt.gca() + + if "caption" in tgt: + ax.set_title(tgt["caption"], wrap=True) + + ax.set_axis_off() + return + + ax = plt.gca() + H, W = tgt["size"] + numbox = tgt["boxes"].shape[0] + + color = [] + polygons = [] + boxes = [] + for box in tgt["boxes"].cpu(): + unnormbbox = box * torch.Tensor([W, H, W, H]) + unnormbbox[:2] -= unnormbbox[2:] / 2 + [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist() + boxes.append([bbox_x, bbox_y, bbox_w, bbox_h]) + poly = [ + [bbox_x, bbox_y], + [bbox_x, bbox_y + bbox_h], + [bbox_x + bbox_w, bbox_y + bbox_h], + [bbox_x + bbox_w, bbox_y], + ] + np_poly = np.array(poly).reshape((4, 2)) + polygons.append(Polygon(np_poly)) + c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] + color.append(c) + + p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1) + ax.add_collection(p) + p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) + ax.add_collection(p) + + if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0: + assert len(tgt["strings_positive"]) == numbox, f"{len(tgt['strings_positive'])} = {numbox}, " + for idx, strlist in enumerate(tgt["strings_positive"]): + cate_id 
= int(tgt["labels"][idx]) + _string = str(cate_id) + ":" + " ".join(strlist) + bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] + # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) + ax.text( + bbox_x, + bbox_y, + _string, + color="black", + bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, + ) + + if "box_label" in tgt: + assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, " + for idx, bl in enumerate(tgt["box_label"]): + _string = str(bl) + bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] + # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) + ax.text( + bbox_x, + bbox_y, + _string, + color="black", + bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, + ) + + if "caption" in tgt: + ax.set_title(tgt["caption"], wrap=True) + # plt.figure() + # rainbow_text(0.0,0.0,"all unicorns poop rainbows ! ! !".split(), + # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black']) + + if "attn" in tgt: + # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': + # import ipdb; ipdb.set_trace() + if isinstance(tgt["attn"], tuple): + tgt["attn"] = [tgt["attn"]] + for item in tgt["attn"]: + attn_map, basergb = item + attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3) + attn_map = (attn_map * 255).astype(np.uint8) + cm = ColorMap(basergb) + heatmap = cm(attn_map) + ax.imshow(heatmap) + ax.set_axis_off() + + def showAnns(self, anns, draw_bbox=False): + """ + Display the specified annotations. + :param anns (array of object): annotations to display + :return: None + """ + if len(anns) == 0: + return 0 + if "segmentation" in anns[0] or "keypoints" in anns[0]: + datasetType = "instances" + elif "caption" in anns[0]: + datasetType = "captions" + else: + raise Exception("datasetType not supported") + if datasetType == "instances": + ax = plt.gca() + ax.set_autoscale_on(False) + polygons = [] + color = [] + for ann in anns: + c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] + if "segmentation" in ann: + if type(ann["segmentation"]) == list: + # polygon + for seg in ann["segmentation"]: + poly = np.array(seg).reshape((int(len(seg) / 2), 2)) + polygons.append(Polygon(poly)) + color.append(c) + else: + # mask + t = self.imgs[ann["image_id"]] + if type(ann["segmentation"]["counts"]) == list: + rle = maskUtils.frPyObjects([ann["segmentation"]], t["height"], t["width"]) + else: + rle = [ann["segmentation"]] + m = maskUtils.decode(rle) + img = np.ones((m.shape[0], m.shape[1], 3)) + if ann["iscrowd"] == 1: + color_mask = np.array([2.0, 166.0, 101.0]) / 255 + if ann["iscrowd"] == 0: + color_mask = np.random.random((1, 3)).tolist()[0] + for i in range(3): + img[:, :, i] = color_mask[i] + ax.imshow(np.dstack((img, m * 0.5))) + if "keypoints" in ann and type(ann["keypoints"]) == list: + # turn skeleton into zero-based index + sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1 + kp = np.array(ann["keypoints"]) + x = kp[0::3] + y = kp[1::3] + v = kp[2::3] + for sk in sks: + if np.all(v[sk] > 0): + plt.plot(x[sk], y[sk], linewidth=3, color=c) + plt.plot( + x[v > 0], + y[v > 0], + "o", + markersize=8, + markerfacecolor=c, + markeredgecolor="k", + markeredgewidth=2, + ) + plt.plot( + x[v > 1], + y[v > 1], + "o", + markersize=8, + markerfacecolor=c, + markeredgecolor=c, + markeredgewidth=2, + ) + + if draw_bbox: + [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"] + poly = [ + [bbox_x, bbox_y], + [bbox_x, bbox_y + bbox_h], + [bbox_x + bbox_w, 
+                        [bbox_x + bbox_w, bbox_y],
+                    ]
+                    np_poly = np.array(poly).reshape((4, 2))
+                    polygons.append(Polygon(np_poly))
+                    color.append(c)
+
+            p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
+            ax.add_collection(p)
+        elif datasetType == "captions":
+            for ann in anns:
+                print(ann["caption"]) diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/vl_utils.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/vl_utils.py new file mode 100644 index 0000000000..89c653ed78 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/vl_utils.py @@ -0,0 +1,100 @@
+import os
+import random
+from typing import List
+
+import torch
+
+
+def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
+    """construct a map such that positive_map[i,j] = True iff box i is associated to token j
+    Input:
+        - tokenized:
+            - input_ids: Tensor[1, ntokens]
+            - attention_mask: Tensor[1, ntokens]
+        - token_span: list with length num_boxes.
+            - each item: [start_idx, end_idx]
+    """
+    positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)
+    for j, tok_list in enumerate(token_span):
+        for beg, end in tok_list:
+            beg_pos = tokenized.char_to_token(beg)
+            end_pos = tokenized.char_to_token(end - 1)
+            if beg_pos is None:
+                try:
+                    beg_pos = tokenized.char_to_token(beg + 1)
+                    if beg_pos is None:
+                        beg_pos = tokenized.char_to_token(beg + 2)
+                except Exception:
+                    beg_pos = None
+            if end_pos is None:
+                try:
+                    end_pos = tokenized.char_to_token(end - 2)
+                    if end_pos is None:
+                        end_pos = tokenized.char_to_token(end - 3)
+                except Exception:
+                    end_pos = None
+            if beg_pos is None or end_pos is None:
+                continue
+
+            assert beg_pos is not None and end_pos is not None
+            if os.environ.get("SHILONG_DEBUG_ONLY_ONE_POS", None) == "TRUE":
+                positive_map[j, beg_pos] = 1
+                break
+            else:
+                positive_map[j, beg_pos : end_pos + 1].fill_(1)
+
+    return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)
+
+
+def build_captions_and_token_span(cat_list, force_lowercase):
+    """
+    Return:
+        captions: str
+        cat2tokenspan: dict
+            {
+                'dog': [[0, 3]],
+                ...
+            }
+    """
+
+    cat2tokenspan = {}
+    captions = ""
+    for catname in cat_list:
+        class_name = catname
+        if force_lowercase:
+            class_name = class_name.lower()
+        if "/" in class_name:
+            class_name_list: List = class_name.strip().split("/")
+            class_name_list.append(class_name)
+            class_name: str = random.choice(class_name_list)
+
+        tokens_positive_i = []
+        subnamelist = [i.strip() for i in class_name.strip().split(" ")]
+        for subname in subnamelist:
+            if len(subname) == 0:
+                continue
+            if len(captions) > 0:
+                captions = captions + " "
+            start_idx = len(captions)
+            end_idx = start_idx + len(subname)
+            tokens_positive_i.append([start_idx, end_idx])
+            captions = captions + subname
+
+        if len(tokens_positive_i) > 0:
+            captions = captions + " ."
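+            # Illustrative trace (added note): for cat_list=["dog", "cat"] this builds
+            # captions="dog . cat ." with spans {"dog": [[0, 3]], "cat": [[6, 9]]};
+            # each span is a [start, end) character range into `captions`.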
+ cat2tokenspan[class_name] = tokens_positive_i + + return captions, cat2tokenspan + + +def build_id2posspan_and_caption(category_dict: dict): + """Build id2pos_span and caption from category_dict + + Args: + category_dict (dict): category_dict + """ + cat_list = [item["name"].lower() for item in category_dict] + id2catname = {item["id"]: item["name"].lower() for item in category_dict} + caption, cat2posspan = build_captions_and_token_span(cat_list, force_lowercase=True) + id2posspan = {catid: cat2posspan[catname] for catid, catname in id2catname.items()} + return id2posspan, caption diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/version.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/version.py new file mode 100644 index 0000000000..3dc1f76bc6 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/version.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/invokeai/backend/image_util/grounding_segment_anything/gsa.py b/invokeai/backend/image_util/grounding_segment_anything/gsa.py new file mode 100644 index 0000000000..c076e9e86f --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/gsa.py @@ -0,0 +1,112 @@ +from typing import Dict, List, Literal, Optional + +import cv2 +import numpy as np +import supervision as sv +import torch +import torchvision + +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.inference import Model +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.build_sam import sam_model_registry +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.predictor import SamPredictor + +GROUNDING_SEGMENT_ANYTHING_MODELS = { + "groundingdino_swint_ogc": "https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth", + "segment_anything_vit_h": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth", +} + + +class GroundingSegmentAnythingDetector: + def __init__(self) -> None: + self.grounding_dino_model: Optional[Model] = None + self.segment_anything_model: Optional[SamPredictor] = None + self.grounding_dino_config: str = "./groundingdino/config/GroundingDINO_SwinT_OGC.py" + self.sam_encoder: Literal["vit_h"] = "vit_h" + self.device: torch.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + + def build_grounding_dino(self): + return Model( + model_config_path=self.grounding_dino_config, + model_checkpoint_path="./checkpoints/groundingdino_swint_ogc.pth", + ) + + def build_segment_anything(self): + sam = sam_model_registry[self.sam_encoder](checkpoint="./checkpoints/sam_vit_h_4b8939.pth") + sam.to(device=self.device) + return SamPredictor(sam) + + def build_grounding_sam(self): + self.grounding_dino_model = self.build_grounding_dino() + self.segment_anything_model = self.build_segment_anything() + + def detect_objects( + self, + image: np.ndarray, + prompts: List[str], + box_threshold: float = 0.5, + text_threshold: float = 0.5, + nms_threshold: float = 0.8, + ): + detections = self.grounding_dino_model.predict_with_classes( + image=image, classes=prompts, box_threshold=box_threshold, text_threshold=text_threshold + ) + + nms_idx = ( + torchvision.ops.nms( + torch.from_numpy(detections.xyxy), torch.from_numpy(detections.confidence), nms_threshold + ) + .numpy() + .tolist() + ) + + detections.xyxy = detections.xyxy[nms_idx] + detections.confidence = detections.confidence[nms_idx] + 
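# (note) torchvision's NMS keeps only the highest-confidence box in each overlapping
+        # cluster; the surviving indices are then used to reindex every detection field.
+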
detections.class_id = detections.class_id[nms_idx] + return detections + + def segment_detections( + self, image: np.ndarray, detections: sv.Detections, prompts: List[str] + ) -> Dict[str, np.ndarray]: + self.segment_anything_model.set_image(image) + result_masks = {} + for box, class_id in zip(detections.xyxy, detections.class_id): + masks, scores, logits = self.segment_anything_model.predict(box=box, multimask_output=True) + index = np.argmax(scores) + result_masks.update({prompts[class_id]: masks[index]}) + return result_masks + + def predict( + self, + image: str, + prompt: str, + box_threshold: float = 0.5, + text_threshold: float = 0.5, + nms_threshold: float = 0.8, + ): + if not self.grounding_dino_model or not self.segment_anything_model: + self.build_grounding_sam() + + image = cv2.imread(image) + prompts = prompt.split(",") + + detections = self.detect_objects(image, prompts, box_threshold, text_threshold, nms_threshold) + segments = self.segment_detections(image, detections, prompts) + + if len(segments) > 0: + combined_mask = np.zeros_like(list(segments.values())[0]) + for mask in list(segments.values()): + combined_mask = np.logical_or(combined_mask, mask) + mask_preview = (combined_mask * 255).astype(np.uint8) + else: + mask_preview = np.zeros(image.shape, np.uint8) + + cv2.imwrite("mask.png", mask_preview) + + +if __name__ == "__main__": + gsa = GroundingSegmentAnythingDetector() + image = "./assets/image.webp" + + while True: + prompt = input("Segment: ") + gsa.predict(image, prompt, 0.5, 0.5, 0.8) diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/__init__.py new file mode 100644 index 0000000000..9915b49c41 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from .automatic_mask_generator import SamAutomaticMaskGenerator +from .build_sam import build_sam, build_sam_vit_b, build_sam_vit_h, build_sam_vit_l, sam_model_registry +from .build_sam_hq import ( + build_sam_hq, + build_sam_hq_vit_b, + build_sam_hq_vit_h, + build_sam_hq_vit_l, + sam_hq_model_registry, +) +from .predictor import SamPredictor diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/automatic_mask_generator.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/automatic_mask_generator.py new file mode 100644 index 0000000000..2cd252d770 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/automatic_mask_generator.py @@ -0,0 +1,368 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
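+
+# Typical usage (illustrative sketch; import path abbreviated and the checkpoint
+# path is an assumption):
+#
+#   from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
+#   sam = sam_model_registry["vit_h"](checkpoint="./checkpoints/sam_vit_h_4b8939.pth")
+#   masks = SamAutomaticMaskGenerator(sam).generate(image)  # image: HWC uint8 RGB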
+
+from typing import Any, Dict, List, Optional, Tuple
+
+import numpy as np
+import torch
+from torchvision.ops.boxes import batched_nms, box_area  # type: ignore
+
+from .modeling import Sam
+from .predictor import SamPredictor
+from .utils.amg import (
+    MaskData,
+    area_from_rle,
+    batch_iterator,
+    batched_mask_to_box,
+    box_xyxy_to_xywh,
+    build_all_layer_point_grids,
+    calculate_stability_score,
+    coco_encode_rle,
+    generate_crop_boxes,
+    is_box_near_crop_edge,
+    mask_to_rle_pytorch,
+    remove_small_regions,
+    rle_to_mask,
+    uncrop_boxes_xyxy,
+    uncrop_masks,
+    uncrop_points,
+)
+
+
+class SamAutomaticMaskGenerator:
+    def __init__(
+        self,
+        model: Sam,
+        points_per_side: Optional[int] = 32,
+        points_per_batch: int = 64,
+        pred_iou_thresh: float = 0.88,
+        stability_score_thresh: float = 0.95,
+        stability_score_offset: float = 1.0,
+        box_nms_thresh: float = 0.7,
+        crop_n_layers: int = 0,
+        crop_nms_thresh: float = 0.7,
+        crop_overlap_ratio: float = 512 / 1500,
+        crop_n_points_downscale_factor: int = 1,
+        point_grids: Optional[List[np.ndarray]] = None,
+        min_mask_region_area: int = 0,
+        output_mode: str = "binary_mask",
+    ) -> None:
+        """
+        Using a SAM model, generates masks for the entire image.
+        Generates a grid of point prompts over the image, then filters
+        low quality and duplicate masks. The default settings are chosen
+        for SAM with a ViT-H backbone.
+
+        Arguments:
+          model (Sam): The SAM model to use for mask prediction.
+          points_per_side (int or None): The number of points to be sampled
+            along one side of the image. The total number of points is
+            points_per_side**2. If None, 'point_grids' must provide explicit
+            point sampling.
+          points_per_batch (int): Sets the number of points run simultaneously
+            by the model. Higher numbers may be faster but use more GPU memory.
+          pred_iou_thresh (float): A filtering threshold in [0,1], using the
+            model's predicted mask quality.
+          stability_score_thresh (float): A filtering threshold in [0,1], using
+            the stability of the mask under changes to the cutoff used to binarize
+            the model's mask predictions.
+          stability_score_offset (float): The amount to shift the cutoff when
+            calculating the stability score.
+          box_nms_thresh (float): The box IoU cutoff used by non-maximal
+            suppression to filter duplicate masks.
+          crop_n_layers (int): If >0, mask prediction will be run again on
+            crops of the image. Sets the number of layers to run, where each
+            layer has 2**i_layer number of image crops.
+          crop_nms_thresh (float): The box IoU cutoff used by non-maximal
+            suppression to filter duplicate masks between different crops.
+          crop_overlap_ratio (float): Sets the degree to which crops overlap.
+            In the first crop layer, crops will overlap by this fraction of
+            the image length. Later layers with more crops scale down this overlap.
+          crop_n_points_downscale_factor (int): The number of points-per-side
+            sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
+          point_grids (list(np.ndarray) or None): A list over explicit grids
+            of points used for sampling, normalized to [0,1]. The nth grid in the
+            list is used in the nth crop layer. Exclusive with points_per_side.
+          min_mask_region_area (int): If >0, postprocessing will be applied
+            to remove disconnected regions and holes in masks with area smaller
+            than min_mask_region_area. Requires opencv.
+          output_mode (str): The form masks are returned in. Can be 'binary_mask',
+            'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
+ For large resolutions, 'binary_mask' may consume large amounts of + memory. + """ + + assert (points_per_side is None) != ( + point_grids is None + ), "Exactly one of points_per_side or point_grid must be provided." + if points_per_side is not None: + self.point_grids = build_all_layer_point_grids( + points_per_side, + crop_n_layers, + crop_n_points_downscale_factor, + ) + elif point_grids is not None: + self.point_grids = point_grids + else: + raise ValueError("Can't have both points_per_side and point_grid be None.") + + assert output_mode in [ + "binary_mask", + "uncompressed_rle", + "coco_rle", + ], f"Unknown output_mode {output_mode}." + if output_mode == "coco_rle": + from pycocotools import mask as mask_utils # type: ignore # noqa: F401 + + if min_mask_region_area > 0: + import cv2 # type: ignore # noqa: F401 + + self.predictor = SamPredictor(model) + self.points_per_batch = points_per_batch + self.pred_iou_thresh = pred_iou_thresh + self.stability_score_thresh = stability_score_thresh + self.stability_score_offset = stability_score_offset + self.box_nms_thresh = box_nms_thresh + self.crop_n_layers = crop_n_layers + self.crop_nms_thresh = crop_nms_thresh + self.crop_overlap_ratio = crop_overlap_ratio + self.crop_n_points_downscale_factor = crop_n_points_downscale_factor + self.min_mask_region_area = min_mask_region_area + self.output_mode = output_mode + + @torch.no_grad() + def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: + """ + Generates masks for the given image. + + Arguments: + image (np.ndarray): The image to generate masks for, in HWC uint8 format. + + Returns: + list(dict(str, any)): A list over records for masks. Each record is + a dict containing the following keys: + segmentation (dict(str, any) or np.ndarray): The mask. If + output_mode='binary_mask', is an array of shape HW. Otherwise, + is a dictionary containing the RLE. + bbox (list(float)): The box around the mask, in XYWH format. + area (int): The area in pixels of the mask. + predicted_iou (float): The model's own prediction of the mask's + quality. This is filtered by the pred_iou_thresh parameter. + point_coords (list(list(float))): The point coordinates input + to the model to generate this mask. + stability_score (float): A measure of the mask's quality. This + is filtered on using the stability_score_thresh parameter. + crop_box (list(float)): The crop of the image used to generate + the mask, given in XYWH format. 
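+
+        Example (illustrative):
+            >>> generator = SamAutomaticMaskGenerator(sam)
+            >>> records = generator.generate(image)  # image: HWC uint8 RGB
+            >>> sorted(records[0].keys())
+            ['area', 'bbox', 'crop_box', 'point_coords', 'predicted_iou',
+             'segmentation', 'stability_score']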
+ """ + + # Generate masks + mask_data = self._generate_masks(image) + + # Filter small disconnected regions and holes in masks + if self.min_mask_region_area > 0: + mask_data = self.postprocess_small_regions( + mask_data, + self.min_mask_region_area, + max(self.box_nms_thresh, self.crop_nms_thresh), + ) + + # Encode masks + if self.output_mode == "coco_rle": + mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] + elif self.output_mode == "binary_mask": + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + else: + mask_data["segmentations"] = mask_data["rles"] + + # Write mask records + curr_anns = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + curr_anns.append(ann) + + return curr_anns + + def _generate_masks(self, image: np.ndarray) -> MaskData: + orig_size = image.shape[:2] + crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) + + # Iterate over image crops + data = MaskData() + for crop_box, layer_idx in zip(crop_boxes, layer_idxs): + crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) + data.cat(crop_data) + + # Remove duplicate masks between crops + if len(crop_boxes) > 1: + # Prefer masks from smaller crops + scores = 1 / box_area(data["crop_boxes"]) + scores = scores.to(data["boxes"].device) + keep_by_nms = batched_nms( + data["boxes"].float(), + scores, + torch.zeros(len(data["boxes"])), # categories + iou_threshold=self.crop_nms_thresh, + ) + data.filter(keep_by_nms) + + data.to_numpy() + return data + + def _process_crop( + self, + image: np.ndarray, + crop_box: List[int], + crop_layer_idx: int, + orig_size: Tuple[int, ...], + ) -> MaskData: + # Crop the image and calculate embeddings + x0, y0, x1, y1 = crop_box + cropped_im = image[y0:y1, x0:x1, :] + cropped_im_size = cropped_im.shape[:2] + self.predictor.set_image(cropped_im) + + # Get points for this crop + points_scale = np.array(cropped_im_size)[None, ::-1] + points_for_image = self.point_grids[crop_layer_idx] * points_scale + + # Generate masks for this crop in batches + data = MaskData() + for (points,) in batch_iterator(self.points_per_batch, points_for_image): + batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) + data.cat(batch_data) + del batch_data + self.predictor.reset_image() + + # Remove duplicates within this crop. 
+ keep_by_nms = batched_nms( + data["boxes"].float(), + data["iou_preds"], + torch.zeros(len(data["boxes"])), # categories + iou_threshold=self.box_nms_thresh, + ) + data.filter(keep_by_nms) + + # Return to the original image frame + data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) + data["points"] = uncrop_points(data["points"], crop_box) + data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) + + return data + + def _process_batch( + self, + points: np.ndarray, + im_size: Tuple[int, ...], + crop_box: List[int], + orig_size: Tuple[int, ...], + ) -> MaskData: + orig_h, orig_w = orig_size + + # Run model on this batch + transformed_points = self.predictor.transform.apply_coords(points, im_size) + in_points = torch.as_tensor(transformed_points, device=self.predictor.device) + in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) + masks, iou_preds, _ = self.predictor.predict_torch( + in_points[:, None, :], + in_labels[:, None], + multimask_output=True, + return_logits=True, + ) + + # Serialize predictions and store in MaskData + data = MaskData( + masks=masks.flatten(0, 1), + iou_preds=iou_preds.flatten(0, 1), + points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), + ) + del masks + + # Filter by predicted IoU + if self.pred_iou_thresh > 0.0: + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + # Calculate stability score + data["stability_score"] = calculate_stability_score( + data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset + ) + if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + + # Threshold masks and calculate boxes + data["masks"] = data["masks"] > self.predictor.model.mask_threshold + data["boxes"] = batched_mask_to_box(data["masks"]) + + # Filter boxes that touch crop boundaries + keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) + if not torch.all(keep_mask): + data.filter(keep_mask) + + # Compress to RLE + data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) + data["rles"] = mask_to_rle_pytorch(data["masks"]) + del data["masks"] + + return data + + @staticmethod + def postprocess_small_regions(mask_data: MaskData, min_area: int, nms_thresh: float) -> MaskData: + """ + Removes small disconnected regions and holes in masks, then reruns + box NMS to remove any new duplicates. + + Edits mask_data in place. + + Requires open-cv as a dependency. 
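+
+        Returns:
+            MaskData: the filtered data; boxes and RLEs are recomputed for any
+            mask whose small holes or islands were removed.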
+ """ + if len(mask_data["rles"]) == 0: + return mask_data + + # Filter small disconnected regions and holes + new_masks = [] + scores = [] + for rle in mask_data["rles"]: + mask = rle_to_mask(rle) + + mask, changed = remove_small_regions(mask, min_area, mode="holes") + unchanged = not changed + mask, changed = remove_small_regions(mask, min_area, mode="islands") + unchanged = unchanged and not changed + + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) + # Give score=0 to changed masks and score=1 to unchanged masks + # so NMS will prefer ones that didn't need postprocessing + scores.append(float(unchanged)) + + # Recalculate boxes and remove any new duplicates + masks = torch.cat(new_masks, dim=0) + boxes = batched_mask_to_box(masks) + keep_by_nms = batched_nms( + boxes.float(), + torch.as_tensor(scores), + torch.zeros(len(boxes)), # categories + iou_threshold=nms_thresh, + ) + + # Only recalculate RLEs for masks that have changed + for i_mask in keep_by_nms: + if scores[i_mask] == 0.0: + mask_torch = masks[i_mask].unsqueeze(0) + mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0] + mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly + mask_data.filter(keep_by_nms) + + return mask_data diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam.py new file mode 100644 index 0000000000..012cfdf9af --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam.py @@ -0,0 +1,107 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +from functools import partial + +import torch + +from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer + + +def build_sam_vit_h(checkpoint=None): + return _build_sam( + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_global_attn_indexes=[7, 15, 23, 31], + checkpoint=checkpoint, + ) + + +build_sam = build_sam_vit_h + + +def build_sam_vit_l(checkpoint=None): + return _build_sam( + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_global_attn_indexes=[5, 11, 17, 23], + checkpoint=checkpoint, + ) + + +def build_sam_vit_b(checkpoint=None): + return _build_sam( + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_global_attn_indexes=[2, 5, 8, 11], + checkpoint=checkpoint, + ) + + +sam_model_registry = { + "default": build_sam, + "vit_h": build_sam, + "vit_l": build_sam_vit_l, + "vit_b": build_sam_vit_b, +} + + +def _build_sam( + encoder_embed_dim, + encoder_depth, + encoder_num_heads, + encoder_global_attn_indexes, + checkpoint=None, +): + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + sam = Sam( + image_encoder=ImageEncoderViT( + depth=encoder_depth, + embed_dim=encoder_embed_dim, + img_size=image_size, + mlp_ratio=4, + norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), + num_heads=encoder_num_heads, + patch_size=vit_patch_size, + qkv_bias=True, + use_rel_pos=True, + global_attn_indexes=encoder_global_attn_indexes, + window_size=14, + out_chans=prompt_embed_dim, + ), + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + mask_decoder=MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + sam.eval() + if checkpoint is not None: + with open(checkpoint, "rb") as f: + state_dict = torch.load(f) + sam.load_state_dict(state_dict) + return sam diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam_hq.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam_hq.py new file mode 100644 index 0000000000..416c6aabb9 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam_hq.py @@ -0,0 +1,120 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
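+
+# Same builder pattern as build_sam.py, extended with the HQ mask decoder. Illustrative
+# usage (the HQ checkpoint filename is an assumption; see the HQ-SAM project for weights):
+#
+#   sam_hq = sam_hq_model_registry["vit_h"](checkpoint="./checkpoints/sam_hq_vit_h.pth")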
+ +from functools import partial + +import torch + +from .modeling import ImageEncoderViT, MaskDecoderHQ, PromptEncoder, Sam, TwoWayTransformer + + +def build_sam_hq_vit_h(checkpoint=None): + return _build_sam( + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_global_attn_indexes=[7, 15, 23, 31], + checkpoint=checkpoint, + ) + + +build_sam_hq = build_sam_hq_vit_h + + +def build_sam_hq_vit_l(checkpoint=None): + return _build_sam( + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_global_attn_indexes=[5, 11, 17, 23], + checkpoint=checkpoint, + ) + + +def build_sam_hq_vit_b(checkpoint=None): + return _build_sam( + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_global_attn_indexes=[2, 5, 8, 11], + checkpoint=checkpoint, + ) + + +sam_hq_model_registry = { + "default": build_sam_hq_vit_h, + "vit_h": build_sam_hq_vit_h, + "vit_l": build_sam_hq_vit_l, + "vit_b": build_sam_hq_vit_b, +} + + +def _build_sam( + encoder_embed_dim, + encoder_depth, + encoder_num_heads, + encoder_global_attn_indexes, + checkpoint=None, +): + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + sam = Sam( + image_encoder=ImageEncoderViT( + depth=encoder_depth, + embed_dim=encoder_embed_dim, + img_size=image_size, + mlp_ratio=4, + norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), + num_heads=encoder_num_heads, + patch_size=vit_patch_size, + qkv_bias=True, + use_rel_pos=True, + global_attn_indexes=encoder_global_attn_indexes, + window_size=14, + out_chans=prompt_embed_dim, + ), + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + mask_decoder=MaskDecoderHQ( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + vit_dim=encoder_embed_dim, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + # sam.eval() + if checkpoint is not None: + with open(checkpoint, "rb") as f: + device = "cuda" if torch.cuda.is_available() else "cpu" + state_dict = torch.load(f, map_location=device) + info = sam.load_state_dict(state_dict, strict=False) + print(info) + for n, p in sam.named_parameters(): + if ( + "hf_token" not in n + and "hf_mlp" not in n + and "compress_vit_feat" not in n + and "embedding_encoder" not in n + and "embedding_maskfeature" not in n + ): + p.requires_grad = False + + return sam diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/__init__.py new file mode 100644 index 0000000000..2168e3e83b --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
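+
+# (note) Bare re-export module: these are the components that build_sam.py and
+# build_sam_hq.py assemble into a complete Sam model.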
+ +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .mask_decoder_hq import MaskDecoderHQ +from .prompt_encoder import PromptEncoder +from .sam import Sam +from .transformer import TwoWayTransformer diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/common.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/common.py new file mode 100644 index 0000000000..e8727816d4 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/common.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Type + +import torch +import torch.nn as nn + + +class MLPBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + mlp_dim: int, + act: Type[nn.Module] = nn.GELU, + ) -> None: + super().__init__() + self.lin1 = nn.Linear(embedding_dim, mlp_dim) + self.lin2 = nn.Linear(mlp_dim, embedding_dim) + self.act = act() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.lin2(self.act(self.lin1(x))) + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/image_encoder.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/image_encoder.py new file mode 100644 index 0000000000..6a5c18f43c --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/image_encoder.py @@ -0,0 +1,392 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional, Tuple, Type + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .common import LayerNorm2d, MLPBlock + + +# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa +class ImageEncoderViT(nn.Module): + def __init__( + self, + img_size: int = 1024, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4.0, + out_chans: int = 256, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_abs_pos: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + global_attn_indexes: Tuple[int, ...] = (), + ) -> None: + """ + Args: + img_size (int): Input image size. 
+ patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + global_attn_indexes (list): Indexes for blocks using global attention. + """ + super().__init__() + self.img_size = img_size + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + ) + + self.pos_embed: Optional[nn.Parameter] = None + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. + self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)) + + self.blocks = nn.ModuleList() + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i not in global_attn_indexes else 0, + input_size=(img_size // patch_size, img_size // patch_size), + ) + self.blocks.append(block) + + self.neck = nn.Sequential( + nn.Conv2d( + embed_dim, + out_chans, + kernel_size=1, + bias=False, + ), + LayerNorm2d(out_chans), + nn.Conv2d( + out_chans, + out_chans, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(out_chans), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) + if self.pos_embed is not None: + x = x + self.pos_embed + + interm_embeddings = [] + for blk in self.blocks: + x = blk(x) + if blk.window_size == 0: + interm_embeddings.append(x) + + x = self.neck(x.permute(0, 3, 1, 2)) + + return x, interm_embeddings + + +class Block(nn.Module): + """Transformer blocks with support of window attention and residual propagation blocks""" + + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then + use global attention. + input_size (tuple(int, int) or None): Input resolution for calculating the relative + positional parameter size. 
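+
+        Note (added): when window_size > 0 the forward pass pads H and W up to
+        multiples of window_size, attends within each non-overlapping window, and
+        strips the padding afterwards; window_size == 0 gives global attention.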
+ """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size if window_size == 0 else (window_size, window_size), + ) + + self.norm2 = norm_layer(dim) + self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer) + + self.window_size = window_size + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + x = self.norm1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x = self.attn(x) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + + x = shortcut + x + x = x + self.mlp(self.norm2(x)) + + return x + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (tuple(int, int) or None): Input resolution for calculating the relative + positional parameter size. + """ + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + assert input_size is not None, "Input size must be provided if using relative positional encoding." + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, _ = x.shape + # qkv with shape (3, B, nHead, H * W, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + # q, k, v with shape (B * nHead, H * W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + if self.use_rel_pos: + attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) + + attn = attn.softmax(dim=-1) + x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) + x = self.proj(x) + + return x + + +def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. 
+ (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition( + windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int] +) -> torch.Tensor: + """ + Window unpartition into original sequences and removing padding. + Args: + windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos( + attn: torch.Tensor, + q: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], +) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Args: + attn (Tensor): attention map. + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). + + Returns: + attn (Tensor): attention map with added relative positional embeddings. 
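+
+    Note (added): the einsums below add <q[b,h,w,:], Rh[h,kh,:]> along the height
+    axis and <q[b,h,w,:], Rw[w,kw,:]> along the width axis, which avoids building
+    the full (q_h*q_w) x (k_h*k_w) relative-position table explicitly.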
+ """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) + + attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]).view( + B, q_h * q_w, k_h * k_w + ) + + return attn + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, int] = (16, 16), + stride: Tuple[int, int] = (16, 16), + padding: Tuple[int, int] = (0, 0), + in_chans: int = 3, + embed_dim: int = 768, + ) -> None: + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + """ + super().__init__() + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder.py new file mode 100644 index 0000000000..90915101d9 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder.py @@ -0,0 +1,171 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import List, Tuple, Type + +import torch +from torch import nn +from torch.nn import functional as F + +from .common import LayerNorm2d + + +class MaskDecoder(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + transformer architecture. 
+ + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), + activation(), + ) + self.output_hypernetworks_mlps = nn.ModuleList( + [MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for i in range(self.num_mask_tokens)] + ) + + self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth) + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + hq_token_only: bool, + interm_embeddings: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. + + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + """ + masks, iou_pred = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + ) + + # Select the correct mask or masks for output + if multimask_output: + mask_slice = slice(1, None) + else: + mask_slice = slice(0, 1) + masks = masks[:, mask_slice, :, :] + iou_pred = iou_pred[:, mask_slice] + + # Prepare output + return masks, iou_pred + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. 
See 'forward' for more details.""" + # Concatenate output tokens + output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) + output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1) + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) + + # Expand per-image data in batch direction to be per-mask + src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) + src = src + dense_prompt_embeddings + pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + b, c, h, w = src.shape + + # Run the transformer + hs, src = self.transformer(src, pos_src, tokens) + iou_token_out = hs[:, 0, :] + mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :] + + # Upscale mask embeddings and predict masks using the mask tokens + src = src.transpose(1, 2).view(b, c, h, w) + upscaled_embedding = self.output_upscaling(src) + hyper_in_list: List[torch.Tensor] = [] + for i in range(self.num_mask_tokens): + hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])) + hyper_in = torch.stack(hyper_in_list, dim=1) + b, c, h, w = upscaled_embedding.shape + masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) + + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + + return masks, iou_pred + + +# Lightly adapted from +# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa +class MLP(nn.Module): + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + sigmoid_output: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + self.sigmoid_output = sigmoid_output + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + if self.sigmoid_output: + x = F.sigmoid(x) + return x diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder_hq.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder_hq.py new file mode 100644 index 0000000000..5079423465 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder_hq.py @@ -0,0 +1,233 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# Modified by HQ-SAM team +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import List, Tuple, Type + +import torch +from torch import nn +from torch.nn import functional as F + +from .common import LayerNorm2d + + +class MaskDecoderHQ(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + vit_dim: int = 1024, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + transformer architecture. 
+
+        Arguments:
+          transformer_dim (int): the channel dimension of the transformer
+          transformer (nn.Module): the transformer used to predict masks
+          num_multimask_outputs (int): the number of masks to predict
+            when disambiguating masks
+          activation (nn.Module): the type of activation to use when
+            upscaling masks
+          iou_head_depth (int): the depth of the MLP used to predict
+            mask quality
+          iou_head_hidden_dim (int): the hidden dimension of the MLP
+            used to predict mask quality
+        """
+        super().__init__()
+        self.transformer_dim = transformer_dim
+        self.transformer = transformer
+
+        self.num_multimask_outputs = num_multimask_outputs
+
+        self.iou_token = nn.Embedding(1, transformer_dim)
+        self.num_mask_tokens = num_multimask_outputs + 1
+        self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
+
+        self.output_upscaling = nn.Sequential(
+            nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
+            LayerNorm2d(transformer_dim // 4),
+            activation(),
+            nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
+            activation(),
+        )
+        self.output_hypernetworks_mlps = nn.ModuleList(
+            [MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for i in range(self.num_mask_tokens)]
+        )
+
+        self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth)
+
+        # HQ-SAM parameters
+        self.hf_token = nn.Embedding(1, transformer_dim)  # HQ-Output-Token
+        self.hf_mlp = MLP(
+            transformer_dim, transformer_dim, transformer_dim // 8, 3
+        )  # corresponding new MLP layer for HQ-Output-Token
+        self.num_mask_tokens = self.num_mask_tokens + 1
+
+        # three conv fusion layers for obtaining HQ-Feature
+        self.compress_vit_feat = nn.Sequential(
+            nn.ConvTranspose2d(vit_dim, transformer_dim, kernel_size=2, stride=2),
+            LayerNorm2d(transformer_dim),
+            nn.GELU(),
+            nn.ConvTranspose2d(transformer_dim, transformer_dim // 8, kernel_size=2, stride=2),
+        )
+
+        self.embedding_encoder = nn.Sequential(
+            nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
+            LayerNorm2d(transformer_dim // 4),
+            nn.GELU(),
+            nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
+        )
+        self.embedding_maskfeature = nn.Sequential(
+            nn.Conv2d(transformer_dim // 8, transformer_dim // 4, 3, 1, 1),
+            LayerNorm2d(transformer_dim // 4),
+            nn.GELU(),
+            nn.Conv2d(transformer_dim // 4, transformer_dim // 8, 3, 1, 1),
+        )
+
+    def forward(
+        self,
+        image_embeddings: torch.Tensor,
+        image_pe: torch.Tensor,
+        sparse_prompt_embeddings: torch.Tensor,
+        dense_prompt_embeddings: torch.Tensor,
+        multimask_output: bool,
+        hq_token_only: bool,
+        interm_embeddings: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """
+        Predict masks given image and prompt embeddings.
+
+        Arguments:
+          image_embeddings (torch.Tensor): the embeddings from the ViT image encoder
+          image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
+          sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
+          dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
+          multimask_output (bool): Whether to return multiple masks or a single
+            mask.
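+          hq_token_only (bool): if True, return only the HQ-refined mask;
+            otherwise the HQ mask is added to the selected SAM mask
+          interm_embeddings (torch.Tensor): early-layer ViT features (after the
+            first global attention block), fused into the HQ feature map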
+
+        Returns:
+          torch.Tensor: batched predicted masks
+          torch.Tensor: batched predictions of mask quality
+        """
+        vit_features = interm_embeddings[0].permute(
+            0, 3, 1, 2
+        )  # early-layer ViT feature, after 1st global attention block in ViT
+        hq_features = self.embedding_encoder(image_embeddings) + self.compress_vit_feat(vit_features)
+
+        masks, iou_pred = self.predict_masks(
+            image_embeddings=image_embeddings,
+            image_pe=image_pe,
+            sparse_prompt_embeddings=sparse_prompt_embeddings,
+            dense_prompt_embeddings=dense_prompt_embeddings,
+            hq_features=hq_features,
+        )
+
+        # Select the correct mask or masks for output
+        if multimask_output:
+            # mask with highest score
+            mask_slice = slice(1, self.num_mask_tokens - 1)
+            iou_pred = iou_pred[:, mask_slice]
+            iou_pred, max_iou_idx = torch.max(iou_pred, dim=1)
+            iou_pred = iou_pred.unsqueeze(1)
+            masks_multi = masks[:, mask_slice, :, :]
+            masks_sam = masks_multi[torch.arange(masks_multi.size(0)), max_iou_idx].unsqueeze(1)
+        else:
+            # single mask output (default)
+            mask_slice = slice(0, 1)
+            iou_pred = iou_pred[:, mask_slice]
+            masks_sam = masks[:, mask_slice]
+
+        masks_hq = masks[:, slice(self.num_mask_tokens - 1, self.num_mask_tokens)]
+        if hq_token_only:
+            masks = masks_hq
+        else:
+            masks = masks_sam + masks_hq
+        # Prepare output
+        return masks, iou_pred
+
+    def predict_masks(
+        self,
+        image_embeddings: torch.Tensor,
+        image_pe: torch.Tensor,
+        sparse_prompt_embeddings: torch.Tensor,
+        dense_prompt_embeddings: torch.Tensor,
+        hq_features: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """Predicts masks. See 'forward' for more details."""
+        # Concatenate output tokens
+        output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight, self.hf_token.weight], dim=0)
+        output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
+        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
+
+        # Expand per-image data in batch direction to be per-mask
+        src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
+        src = src + dense_prompt_embeddings
+        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
+        b, c, h, w = src.shape
+
+        # Run the transformer
+        hs, src = self.transformer(src, pos_src, tokens)
+        iou_token_out = hs[:, 0, :]
+        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
+
+        # Upscale mask embeddings and predict masks using the mask tokens
+        src = src.transpose(1, 2).view(b, c, h, w)
+
+        upscaled_embedding_sam = self.output_upscaling(src)
+        upscaled_embedding_hq = self.embedding_maskfeature(upscaled_embedding_sam) + hq_features.repeat(b, 1, 1, 1)
+
+        hyper_in_list: List[torch.Tensor] = []
+        for i in range(self.num_mask_tokens):
+            if i < self.num_mask_tokens - 1:
+                hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
+            else:
+                hyper_in_list.append(self.hf_mlp(mask_tokens_out[:, i, :]))
+
+        hyper_in = torch.stack(hyper_in_list, dim=1)
+        b, c, h, w = upscaled_embedding_sam.shape
+
+        masks_sam = (hyper_in[:, : self.num_mask_tokens - 1] @ upscaled_embedding_sam.view(b, c, h * w)).view(
+            b, -1, h, w
+        )
+        masks_sam_hq = (hyper_in[:, self.num_mask_tokens - 1 :] @ upscaled_embedding_hq.view(b, c, h * w)).view(
+            b, -1, h, w
+        )
+        masks = torch.cat([masks_sam, masks_sam_hq], dim=1)
+        # Generate mask quality predictions
+        iou_pred = self.iou_prediction_head(iou_token_out)
+
+        return masks, iou_pred
+
+
+# Lightly adapted from
+# 
https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa +class MLP(nn.Module): + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + sigmoid_output: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + self.sigmoid_output = sigmoid_output + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + if self.sigmoid_output: + x = F.sigmoid(x) + return x diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/prompt_encoder.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/prompt_encoder.py new file mode 100644 index 0000000000..1887c8d83f --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/prompt_encoder.py @@ -0,0 +1,212 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Any, Optional, Tuple, Type + +import numpy as np +import torch +from torch import nn + +from .common import LayerNorm2d + + +class PromptEncoder(nn.Module): + def __init__( + self, + embed_dim: int, + image_embedding_size: Tuple[int, int], + input_image_size: Tuple[int, int], + mask_in_chans: int, + activation: Type[nn.Module] = nn.GELU, + ) -> None: + """ + Encodes prompts for input to SAM's mask decoder. + + Arguments: + embed_dim (int): The prompts' embedding dimension + image_embedding_size (tuple(int, int)): The spatial size of the + image embedding, as (H, W). + input_image_size (int): The padded size of the image as input + to the image encoder, as (H, W). + mask_in_chans (int): The number of hidden channels used for + encoding input masks. + activation (nn.Module): The activation to use when encoding + input masks. + """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1]) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. 
+ + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) + point_embedding[labels == -1] = 0.0 + point_embedding[labels == -1] += self.not_a_point_embed.weight + point_embedding[labels == 0] += self.point_embeddings[0].weight + point_embedding[labels == 1] += self.point_embeddings[1].weight + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. + """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. 
+ torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device()) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. + """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + "positional_encoding_gaussian_matrix", + scale * torch.randn((2, num_pos_feats)), + ) + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords(self, coords_input: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/sam.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/sam.py new file mode 100644 index 0000000000..c1ce4ecae2 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/sam.py @@ -0,0 +1,174 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
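+
+# A minimal usage sketch (hypothetical names; assumes prompts and image are
+# already transformed to the model's input frame, e.g. with ResizeLongestSide):
+#
+#   batched_input = [{"image": image_chw, "original_size": (h, w), "boxes": boxes_b4}]
+#   outputs = sam(batched_input, multimask_output=False)
+#   masks = outputs[0]["masks"]  # Bx1xHxW boolean masks at the original size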
+
+from typing import Any, Dict, List, Tuple
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from .image_encoder import ImageEncoderViT
+from .mask_decoder import MaskDecoder
+from .prompt_encoder import PromptEncoder
+
+
+class Sam(nn.Module):
+    mask_threshold: float = 0.0
+    image_format: str = "RGB"
+
+    def __init__(
+        self,
+        image_encoder: ImageEncoderViT,
+        prompt_encoder: PromptEncoder,
+        mask_decoder: MaskDecoder,
+        pixel_mean: List[float] = [123.675, 116.28, 103.53],
+        pixel_std: List[float] = [58.395, 57.12, 57.375],
+    ) -> None:
+        """
+        SAM predicts object masks from an image and input prompts.
+
+        Arguments:
+          image_encoder (ImageEncoderViT): The backbone used to encode the
+            image into image embeddings that allow for efficient mask prediction.
+          prompt_encoder (PromptEncoder): Encodes various types of input prompts.
+          mask_decoder (MaskDecoder): Predicts masks from the image embeddings
+            and encoded prompts.
+          pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
+          pixel_std (list(float)): Std values for normalizing pixels in the input image.
+        """
+        super().__init__()
+        self.image_encoder = image_encoder
+        self.prompt_encoder = prompt_encoder
+        self.mask_decoder = mask_decoder
+        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
+        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
+
+    @property
+    def device(self) -> Any:
+        return self.pixel_mean.device
+
+    @torch.no_grad()
+    def forward(
+        self,
+        batched_input: List[Dict[str, Any]],
+        multimask_output: bool,
+    ) -> List[Dict[str, torch.Tensor]]:
+        """
+        Predicts masks end-to-end from provided images and prompts.
+        If prompts are not known in advance, using SamPredictor is
+        recommended over calling the model directly.
+
+        Arguments:
+          batched_input (list(dict)): A list over input images, each a
+            dictionary with the following keys. A prompt key can be
+            excluded if it is not present.
+              'image': The image as a torch tensor in 3xHxW format,
+                already transformed for input to the model.
+              'original_size': (tuple(int, int)) The original size of
+                the image before transformation, as (H, W).
+              'point_coords': (torch.Tensor) Batched point prompts for
+                this image, with shape BxNx2. Already transformed to the
+                input frame of the model.
+              'point_labels': (torch.Tensor) Batched labels for point prompts,
+                with shape BxN.
+              'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
+                Already transformed to the input frame of the model.
+              'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
+                in the form Bx1xHxW.
+          multimask_output (bool): Whether the model should predict multiple
+            disambiguating masks, or return a single mask.
+
+        Returns:
+          (list(dict)): A list over input images, where each element is
+            a dictionary with the following keys.
+              'masks': (torch.Tensor) Batched binary mask predictions,
+                with shape BxCxHxW, where B is the number of input prompts,
+                C is determined by multimask_output, and (H, W) is the
+                original size of the image.
+              'iou_predictions': (torch.Tensor) The model's predictions
+                of mask quality, in shape BxC.
+              'low_res_logits': (torch.Tensor) Low resolution logits with
+                shape BxCxHxW, where H=W=256. Can be passed as mask input
+                to subsequent iterations of prediction.
+ """ + input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0) + image_embeddings = self.image_encoder(input_images) + + outputs = [] + for image_record, curr_embedding in zip(batched_input, image_embeddings): + if "point_coords" in image_record: + points = (image_record["point_coords"], image_record["point_labels"]) + else: + points = None + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=points, + boxes=image_record.get("boxes", None), + masks=image_record.get("mask_inputs", None), + ) + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=curr_embedding.unsqueeze(0), + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + masks = self.postprocess_masks( + low_res_masks, + input_size=image_record["image"].shape[-2:], + original_size=image_record["original_size"], + ) + masks = masks > self.mask_threshold + outputs.append( + { + "masks": masks, + "iou_predictions": iou_predictions, + "low_res_logits": low_res_masks, + } + ) + return outputs + + def postprocess_masks( + self, + masks: torch.Tensor, + input_size: Tuple[int, ...], + original_size: Tuple[int, ...], + ) -> torch.Tensor: + """ + Remove padding and upscale masks to the original image size. + + Arguments: + masks (torch.Tensor): Batched masks from the mask_decoder, + in BxCxHxW format. + input_size (tuple(int, int)): The size of the image input to the + model, in (H, W) format. Used to remove padding. + original_size (tuple(int, int)): The original size of the image + before resizing for input to the model, in (H, W) format. + + Returns: + (torch.Tensor): Batched masks in BxCxHxW format, where (H, W) + is given by original_size. + """ + masks = F.interpolate( + masks, + (self.image_encoder.img_size, self.image_encoder.img_size), + mode="bilinear", + align_corners=False, + ) + masks = masks[..., : input_size[0], : input_size[1]] + masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False) + return masks + + def preprocess(self, x: torch.Tensor) -> torch.Tensor: + """Normalize pixel values and pad to a square input.""" + # Normalize colors + x = (x - self.pixel_mean) / self.pixel_std + + # Pad + h, w = x.shape[-2:] + padh = self.image_encoder.img_size - h + padw = self.image_encoder.img_size - w + x = F.pad(x, (0, padw, 0, padh)) + return x diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/transformer.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/transformer.py new file mode 100644 index 0000000000..5bdd75377e --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/transformer.py @@ -0,0 +1,232 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import Tuple, Type + +import torch +from torch import Tensor, nn + +from .common import MLPBlock + + +class TwoWayTransformer(nn.Module): + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. 
+
+        Args:
+          depth (int): number of layers in the transformer
+          embedding_dim (int): the channel dimension for the input embeddings
+          num_heads (int): the number of heads for multihead attention. Must
+            divide embedding_dim
+          mlp_dim (int): the channel dimension internal to the MLP block
+          activation (nn.Module): the activation to use in the MLP block
+        """
+        super().__init__()
+        self.depth = depth
+        self.embedding_dim = embedding_dim
+        self.num_heads = num_heads
+        self.mlp_dim = mlp_dim
+        self.layers = nn.ModuleList()
+
+        for i in range(depth):
+            self.layers.append(
+                TwoWayAttentionBlock(
+                    embedding_dim=embedding_dim,
+                    num_heads=num_heads,
+                    mlp_dim=mlp_dim,
+                    activation=activation,
+                    attention_downsample_rate=attention_downsample_rate,
+                    skip_first_layer_pe=(i == 0),
+                )
+            )
+
+        self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
+        self.norm_final_attn = nn.LayerNorm(embedding_dim)
+
+    def forward(
+        self,
+        image_embedding: Tensor,
+        image_pe: Tensor,
+        point_embedding: Tensor,
+    ) -> Tuple[Tensor, Tensor]:
+        """
+        Args:
+          image_embedding (torch.Tensor): image to attend to. Should be shape
+            B x embedding_dim x h x w for any h and w.
+          image_pe (torch.Tensor): the positional encoding to add to the image. Must
+            have the same shape as image_embedding.
+          point_embedding (torch.Tensor): the embedding to add to the query points.
+            Must have shape B x N_points x embedding_dim for any N_points.
+
+        Returns:
+          torch.Tensor: the processed point_embedding
+          torch.Tensor: the processed image_embedding
+        """
+        # BxCxHxW -> BxHWxC == B x N_image_tokens x C
+        bs, c, h, w = image_embedding.shape
+        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
+        image_pe = image_pe.flatten(2).permute(0, 2, 1)
+
+        # Prepare queries
+        queries = point_embedding
+        keys = image_embedding
+
+        # Apply transformer blocks and final layernorm
+        for layer in self.layers:
+            queries, keys = layer(
+                queries=queries,
+                keys=keys,
+                query_pe=point_embedding,
+                key_pe=image_pe,
+            )
+
+        # Apply the final attention layer from the points to the image
+        q = queries + point_embedding
+        k = keys + image_pe
+        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
+        queries = queries + attn_out
+        queries = self.norm_final_attn(queries)
+
+        return queries, keys
+
+
+class TwoWayAttentionBlock(nn.Module):
+    def __init__(
+        self,
+        embedding_dim: int,
+        num_heads: int,
+        mlp_dim: int = 2048,
+        activation: Type[nn.Module] = nn.ReLU,
+        attention_downsample_rate: int = 2,
+        skip_first_layer_pe: bool = False,
+    ) -> None:
+        """
+        A transformer block with four layers: (1) self-attention of sparse
+        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
+        block on sparse inputs, and (4) cross attention of dense inputs to sparse
+        inputs.
+ + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) + self.norm2 = nn.LayerNorm(embedding_dim) + + self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]: + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + query_pe + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. + """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
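+        # Queries, keys, and values are projected from embedding_dim down to
+        # internal_dim (embedding_dim // downsample_rate) before attention,
+        # which reduces compute when downsample_rate > 1.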
+ + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(embedding_dim, self.internal_dim) + self.v_proj = nn.Linear(embedding_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Attention + _, _, _, c_per_head = q.shape + attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens + attn = attn / math.sqrt(c_per_head) + attn = torch.softmax(attn, dim=-1) + + # Get output + out = attn @ v + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/predictor.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/predictor.py new file mode 100644 index 0000000000..cd165f8958 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/predictor.py @@ -0,0 +1,271 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional, Tuple + +import numpy as np +import torch + +from .modeling import Sam +from .utils.transforms import ResizeLongestSide + + +class SamPredictor: + def __init__( + self, + sam_model: Sam, + ) -> None: + """ + Uses SAM to calculate the image embedding for an image, and then + allow repeated, efficient mask prediction given prompts. + + Arguments: + sam_model (Sam): The model to use for mask prediction. + """ + super().__init__() + self.model = sam_model + self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) + self.reset_image() + + def set_image( + self, + image: np.ndarray, + image_format: str = "RGB", + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray): The image for calculating masks. Expects an + image in HWC uint8 format, with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. + """ + assert image_format in [ + "RGB", + "BGR", + ], f"image_format must be in ['RGB', 'BGR'], is {image_format}." 
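+        # The channel order is flipped below so that the model always receives
+        # images in self.model.image_format ("RGB" by default).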
+        if image_format != self.model.image_format:
+            image = image[..., ::-1]
+
+        # Transform the image to the form expected by the model
+        input_image = self.transform.apply_image(image)
+        input_image_torch = torch.as_tensor(input_image, device=self.device)
+        input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
+
+        self.set_torch_image(input_image_torch, image.shape[:2])
+
+    @torch.no_grad()
+    def set_torch_image(
+        self,
+        transformed_image: torch.Tensor,
+        original_image_size: Tuple[int, ...],
+    ) -> None:
+        """
+        Calculates the image embeddings for the provided image, allowing
+        masks to be predicted with the 'predict' method. Expects the input
+        image to be already transformed to the format expected by the model.
+
+        Arguments:
+          transformed_image (torch.Tensor): The input image, with shape
+            1x3xHxW, which has been transformed with ResizeLongestSide.
+          original_image_size (tuple(int, int)): The size of the image
+            before transformation, in (H, W) format.
+        """
+        assert (
+            len(transformed_image.shape) == 4
+            and transformed_image.shape[1] == 3
+            and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
+        ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
+        self.reset_image()
+
+        self.original_size = original_image_size
+        self.input_size = tuple(transformed_image.shape[-2:])
+        input_image = self.model.preprocess(transformed_image)
+        self.features, self.interm_features = self.model.image_encoder(input_image)
+        self.is_image_set = True
+
+    def predict(
+        self,
+        point_coords: Optional[np.ndarray] = None,
+        point_labels: Optional[np.ndarray] = None,
+        box: Optional[np.ndarray] = None,
+        mask_input: Optional[np.ndarray] = None,
+        multimask_output: bool = True,
+        return_logits: bool = False,
+        hq_token_only: bool = False,
+    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+        """
+        Predict masks for the given input prompts, using the currently set image.
+
+        Arguments:
+          point_coords (np.ndarray or None): A Nx2 array of point prompts to the
+            model. Each point is in (X,Y) in pixels.
+          point_labels (np.ndarray or None): A length N array of labels for the
+            point prompts. 1 indicates a foreground point and 0 indicates a
+            background point.
+          box (np.ndarray or None): A length 4 array given a box prompt to the
+            model, in XYXY format.
+          mask_input (np.ndarray): A low resolution mask input to the model, typically
+            coming from a previous prediction iteration. Has form 1xHxW, where
+            for SAM, H=W=256.
+          multimask_output (bool): If true, the model will return three masks.
+            For ambiguous input prompts (such as a single click), this will often
+            produce better masks than a single prediction. If only a single
+            mask is needed, the model's predicted quality score can be used
+            to select the best mask. For non-ambiguous prompts, such as multiple
+            input prompts, multimask_output=False can give better results.
+          return_logits (bool): If true, returns un-thresholded mask logits
+            instead of a binary mask.
+          hq_token_only (bool): If true, return only the HQ output token's mask
+            rather than the fused SAM + HQ masks.
+
+        Returns:
+          (np.ndarray): The output masks in CxHxW format, where C is the
+            number of masks, and (H, W) is the original image size.
+          (np.ndarray): An array of length C containing the model's
+            predictions for the quality of each mask.
+          (np.ndarray): An array of shape CxHxW, where C is the number
+            of masks and H=W=256. These low resolution logits can be passed to
+            a subsequent iteration as mask input.
+ """ + if not self.is_image_set: + raise RuntimeError("An image must be set with .set_image(...) before mask prediction.") + + # Transform input prompts + coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None + if point_coords is not None: + assert point_labels is not None, "point_labels must be supplied if point_coords is supplied." + point_coords = self.transform.apply_coords(point_coords, self.original_size) + coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) + labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) + coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] + if box is not None: + box = self.transform.apply_boxes(box, self.original_size) + box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) + box_torch = box_torch[None, :] + if mask_input is not None: + mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device) + mask_input_torch = mask_input_torch[None, :, :, :] + + masks, iou_predictions, low_res_masks = self.predict_torch( + coords_torch, + labels_torch, + box_torch, + mask_input_torch, + multimask_output, + return_logits=return_logits, + hq_token_only=hq_token_only, + ) + + masks_np = masks[0].detach().cpu().numpy() + iou_predictions_np = iou_predictions[0].detach().cpu().numpy() + low_res_masks_np = low_res_masks[0].detach().cpu().numpy() + return masks_np, iou_predictions_np, low_res_masks_np + + @torch.no_grad() + def predict_torch( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + hq_token_only: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using ResizeLongestSide. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + boxes (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. 
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. + """ + if not self.is_image_set: + raise RuntimeError("An image must be set with .set_image(...) before mask prediction.") + + if point_coords is not None: + points = (point_coords, point_labels) + else: + points = None + + # Embed prompts + sparse_embeddings, dense_embeddings = self.model.prompt_encoder( + points=points, + boxes=boxes, + masks=mask_input, + ) + + # Predict masks + low_res_masks, iou_predictions = self.model.mask_decoder( + image_embeddings=self.features, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + hq_token_only=hq_token_only, + interm_embeddings=self.interm_features, + ) + + # Upscale the masks to the original image resolution + masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size) + + if not return_logits: + masks = masks > self.model.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self.is_image_set: + raise RuntimeError("An image must be set with .set_image(...) to generate an embedding.") + assert self.features is not None, "Features must exist if an image has been set." + return self.features + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_image(self) -> None: + """Resets the currently set image.""" + self.is_image_set = False + self.features = None + self.orig_h = None + self.orig_w = None + self.input_h = None + self.input_w = None diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/__init__.py new file mode 100644 index 0000000000..5277f46157 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/amg.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/amg.py new file mode 100644 index 0000000000..3a137778e4 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/amg.py @@ -0,0 +1,346 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +import math +from copy import deepcopy +from itertools import product +from typing import Any, Dict, Generator, ItemsView, List, Tuple + + +class MaskData: + """ + A structure for storing masks and their related data in batched format. + Implements basic filtering and concatenation. 
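+    Values are keyed by name and must be lists, numpy arrays, or torch
+    tensors; 'filter' and 'cat' are applied per key across all stored values.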
+ """ + + def __init__(self, **kwargs) -> None: + for v in kwargs.values(): + assert isinstance( + v, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." + self._stats = dict(**kwargs) + + def __setitem__(self, key: str, item: Any) -> None: + assert isinstance( + item, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." + self._stats[key] = item + + def __delitem__(self, key: str) -> None: + del self._stats[key] + + def __getitem__(self, key: str) -> Any: + return self._stats[key] + + def items(self) -> ItemsView[str, Any]: + return self._stats.items() + + def filter(self, keep: torch.Tensor) -> None: + for k, v in self._stats.items(): + if v is None: + self._stats[k] = None + elif isinstance(v, torch.Tensor): + self._stats[k] = v[torch.as_tensor(keep, device=v.device)] + elif isinstance(v, np.ndarray): + self._stats[k] = v[keep.detach().cpu().numpy()] + elif isinstance(v, list) and keep.dtype == torch.bool: + self._stats[k] = [a for i, a in enumerate(v) if keep[i]] + elif isinstance(v, list): + self._stats[k] = [v[i] for i in keep] + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def cat(self, new_stats: "MaskData") -> None: + for k, v in new_stats.items(): + if k not in self._stats or self._stats[k] is None: + self._stats[k] = deepcopy(v) + elif isinstance(v, torch.Tensor): + self._stats[k] = torch.cat([self._stats[k], v], dim=0) + elif isinstance(v, np.ndarray): + self._stats[k] = np.concatenate([self._stats[k], v], axis=0) + elif isinstance(v, list): + self._stats[k] = self._stats[k] + deepcopy(v) + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def to_numpy(self) -> None: + for k, v in self._stats.items(): + if isinstance(v, torch.Tensor): + self._stats[k] = v.detach().cpu().numpy() + + +def is_box_near_crop_edge( + boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 +) -> torch.Tensor: + """Filter masks at the edge of a crop, but not at the edge of the original image.""" + crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) + orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) + boxes = uncrop_boxes_xyxy(boxes, crop_box).float() + near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) + near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) + near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) + return torch.any(near_crop_edge, dim=1) + + +def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: + box_xywh = deepcopy(box_xyxy) + box_xywh[2] = box_xywh[2] - box_xywh[0] + box_xywh[3] = box_xywh[3] - box_xywh[1] + return box_xywh + + +def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: + assert len(args) > 0 and all( + len(a) == len(args[0]) for a in args + ), "Batched iteration must have inputs of all the same size." + n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) + for b in range(n_batches): + yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args] + + +def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: + """ + Encodes masks to an uncompressed RLE, in the format expected by + pycoco tools. 
+ """ + # Put in fortran order and flatten h,w + b, h, w = tensor.shape + tensor = tensor.permute(0, 2, 1).flatten(1) + + # Compute change indices + diff = tensor[:, 1:] ^ tensor[:, :-1] + change_indices = diff.nonzero() + + # Encode run length + out = [] + for i in range(b): + cur_idxs = change_indices[change_indices[:, 0] == i, 1] + cur_idxs = torch.cat( + [ + torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), + cur_idxs + 1, + torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), + ] + ) + btw_idxs = cur_idxs[1:] - cur_idxs[:-1] + counts = [] if tensor[i, 0] == 0 else [0] + counts.extend(btw_idxs.detach().cpu().tolist()) + out.append({"size": [h, w], "counts": counts}) + return out + + +def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: + """Compute a binary mask from an uncompressed RLE.""" + h, w = rle["size"] + mask = np.empty(h * w, dtype=bool) + idx = 0 + parity = False + for count in rle["counts"]: + mask[idx : idx + count] = parity + idx += count + parity ^= True + mask = mask.reshape(w, h) + return mask.transpose() # Put in C order + + +def area_from_rle(rle: Dict[str, Any]) -> int: + return sum(rle["counts"][1::2]) + + +def calculate_stability_score( + masks: torch.Tensor, mask_threshold: float, threshold_offset: float +) -> torch.Tensor: + """ + Computes the stability score for a batch of masks. The stability + score is the IoU between the binary masks obtained by thresholding + the predicted mask logits at high and low values. + """ + # One mask is always contained inside the other. + # Save memory by preventing unnecesary cast to torch.int64 + intersections = ( + (masks > (mask_threshold + threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + unions = ( + (masks > (mask_threshold - threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + return intersections / unions + + +def build_point_grid(n_per_side: int) -> np.ndarray: + """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" + offset = 1 / (2 * n_per_side) + points_one_side = np.linspace(offset, 1 - offset, n_per_side) + points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) + points_y = np.tile(points_one_side[:, None], (1, n_per_side)) + points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) + return points + + +def build_all_layer_point_grids( + n_per_side: int, n_layers: int, scale_per_layer: int +) -> List[np.ndarray]: + """Generates point grids for all crop layers.""" + points_by_layer = [] + for i in range(n_layers + 1): + n_points = int(n_per_side / (scale_per_layer**i)) + points_by_layer.append(build_point_grid(n_points)) + return points_by_layer + + +def generate_crop_boxes( + im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float +) -> Tuple[List[List[int]], List[int]]: + """ + Generates a list of crop boxes of different sizes. Each layer + has (2**i)**2 boxes for the ith layer. 
+ """ + crop_boxes, layer_idxs = [], [] + im_h, im_w = im_size + short_side = min(im_h, im_w) + + # Original image + crop_boxes.append([0, 0, im_w, im_h]) + layer_idxs.append(0) + + def crop_len(orig_len, n_crops, overlap): + return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) + + for i_layer in range(n_layers): + n_crops_per_side = 2 ** (i_layer + 1) + overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) + + crop_w = crop_len(im_w, n_crops_per_side, overlap) + crop_h = crop_len(im_h, n_crops_per_side, overlap) + + crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] + crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] + + # Crops in XYWH format + for x0, y0 in product(crop_box_x0, crop_box_y0): + box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] + crop_boxes.append(box) + layer_idxs.append(i_layer + 1) + + return crop_boxes, layer_idxs + + +def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) + # Check if boxes has a channel dimension + if len(boxes.shape) == 3: + offset = offset.unsqueeze(1) + return boxes + offset + + +def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0]], device=points.device) + # Check if points has a channel dimension + if len(points.shape) == 3: + offset = offset.unsqueeze(1) + return points + offset + + +def uncrop_masks( + masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int +) -> torch.Tensor: + x0, y0, x1, y1 = crop_box + if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: + return masks + # Coordinate transform masks + pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) + pad = (x0, pad_x - x0, y0, pad_y - y0) + return torch.nn.functional.pad(masks, pad, value=0) + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + + +def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: + from pycocotools import mask as mask_utils # type: ignore + + h, w = uncompressed_rle["size"] + rle = mask_utils.frPyObjects(uncompressed_rle, h, w) + rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json + return rle + + +def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: + """ + Calculates boxes in XYXY format around masks. Return [0,0,0,0] for + an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. 
+ """ + # torch.max below raises an error on empty inputs, just skip in this case + if torch.numel(masks) == 0: + return torch.zeros(*masks.shape[:-2], 4, device=masks.device) + + # Normalize shape to CxHxW + shape = masks.shape + h, w = shape[-2:] + if len(shape) > 2: + masks = masks.flatten(0, -3) + else: + masks = masks.unsqueeze(0) + + # Get top and bottom edges + in_height, _ = torch.max(masks, dim=-1) + in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] + bottom_edges, _ = torch.max(in_height_coords, dim=-1) + in_height_coords = in_height_coords + h * (~in_height) + top_edges, _ = torch.min(in_height_coords, dim=-1) + + # Get left and right edges + in_width, _ = torch.max(masks, dim=-2) + in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] + right_edges, _ = torch.max(in_width_coords, dim=-1) + in_width_coords = in_width_coords + w * (~in_width) + left_edges, _ = torch.min(in_width_coords, dim=-1) + + # If the mask is empty the right edge will be to the left of the left edge. + # Replace these boxes with [0, 0, 0, 0] + empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) + out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) + out = out * (~empty_filter).unsqueeze(-1) + + # Return to original shape + if len(shape) > 2: + out = out.reshape(*shape[:-2], 4) + else: + out = out[0] + + return out diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/onnx.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/onnx.py new file mode 100644 index 0000000000..4297b31291 --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/onnx.py @@ -0,0 +1,144 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from typing import Tuple + +from ..modeling import Sam +from .amg import calculate_stability_score + + +class SamOnnxModel(nn.Module): + """ + This model should not be called directly, but is used in ONNX export. + It combines the prompt encoder, mask decoder, and mask postprocessing of Sam, + with some functions modified to enable model tracing. Also supports extra + options controlling what information. See the ONNX export script for details. 
+ """ + + def __init__( + self, + model: Sam, + return_single_mask: bool, + use_stability_score: bool = False, + return_extra_metrics: bool = False, + ) -> None: + super().__init__() + self.mask_decoder = model.mask_decoder + self.model = model + self.img_size = model.image_encoder.img_size + self.return_single_mask = return_single_mask + self.use_stability_score = use_stability_score + self.stability_score_offset = 1.0 + self.return_extra_metrics = return_extra_metrics + + @staticmethod + def resize_longest_image_size( + input_image_size: torch.Tensor, longest_side: int + ) -> torch.Tensor: + input_image_size = input_image_size.to(torch.float32) + scale = longest_side / torch.max(input_image_size) + transformed_size = scale * input_image_size + transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) + return transformed_size + + def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor: + point_coords = point_coords + 0.5 + point_coords = point_coords / self.img_size + point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords) + point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding) + + point_embedding = point_embedding * (point_labels != -1) + point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * ( + point_labels == -1 + ) + + for i in range(self.model.prompt_encoder.num_point_embeddings): + point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[ + i + ].weight * (point_labels == i) + + return point_embedding + + def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor: + mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask) + mask_embedding = mask_embedding + ( + 1 - has_mask_input + ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1) + return mask_embedding + + def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor: + masks = F.interpolate( + masks, + size=(self.img_size, self.img_size), + mode="bilinear", + align_corners=False, + ) + + prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size) + masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])] + + orig_im_size = orig_im_size.to(torch.int64) + h, w = orig_im_size[0], orig_im_size[1] + masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) + return masks + + def select_masks( + self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Determine if we should return the multiclick mask or not from the number of points. + # The reweighting is used to avoid control flow. 
+ score_reweight = torch.tensor( + [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)] + ).to(iou_preds.device) + score = iou_preds + (num_points - 2.5) * score_reweight + best_idx = torch.argmax(score, dim=1) + masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1) + iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1) + + return masks, iou_preds + + @torch.no_grad() + def forward( + self, + image_embeddings: torch.Tensor, + point_coords: torch.Tensor, + point_labels: torch.Tensor, + mask_input: torch.Tensor, + has_mask_input: torch.Tensor, + orig_im_size: torch.Tensor, + ): + sparse_embedding = self._embed_points(point_coords, point_labels) + dense_embedding = self._embed_masks(mask_input, has_mask_input) + + masks, scores = self.model.mask_decoder.predict_masks( + image_embeddings=image_embeddings, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embedding, + dense_prompt_embeddings=dense_embedding, + ) + + if self.use_stability_score: + scores = calculate_stability_score( + masks, self.model.mask_threshold, self.stability_score_offset + ) + + if self.return_single_mask: + masks, scores = self.select_masks(masks, scores, point_coords.shape[1]) + + upscaled_masks = self.mask_postprocessing(masks, orig_im_size) + + if self.return_extra_metrics: + stability_scores = calculate_stability_score( + upscaled_masks, self.model.mask_threshold, self.stability_score_offset + ) + areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1) + return upscaled_masks, scores, stability_scores, areas, masks + + return upscaled_masks, scores, masks diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/transforms.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/transforms.py new file mode 100644 index 0000000000..3ad346661f --- /dev/null +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/transforms.py @@ -0,0 +1,102 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch +from torch.nn import functional as F +from torchvision.transforms.functional import resize, to_pil_image # type: ignore + +from copy import deepcopy +from typing import Tuple + + +class ResizeLongestSide: + """ + Resizes images to longest side 'target_length', as well as provides + methods for resizing coordinates and boxes. Provides methods for + transforming both numpy array and batched torch tensors. + """ + + def __init__(self, target_length: int) -> None: + self.target_length = target_length + + def apply_image(self, image: np.ndarray) -> np.ndarray: + """ + Expects a numpy array with shape HxWxC in uint8 format. + """ + target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) + return np.array(resize(to_pil_image(image), target_size)) + + def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: + """ + Expects a numpy array of length 2 in the final dimension. Requires the + original image size in (H, W) format. 
+ """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).astype(float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: + """ + Expects a numpy array shape Bx4. Requires the original image size + in (H, W) format. + """ + boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor: + """ + Expects batched images with shape BxCxHxW and float format. This + transformation may not exactly match apply_image. apply_image is + the transformation expected by the model. + """ + # Expects an image in BCHW format. May not exactly match apply_image. + target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) + return F.interpolate( + image, target_size, mode="bilinear", align_corners=False, antialias=True + ) + + def apply_coords_torch( + self, coords: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with length 2 in the last dimension. Requires the + original image size in (H, W) format. + """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).to(torch.float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes_torch( + self, boxes: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with shape Bx4. Requires the original image + size in (H, W) format. + """ + boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + @staticmethod + def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]: + """ + Compute the output size given input size and target long side length. 
+ """ + scale = long_side_length * 1.0 / max(oldh, oldw) + newh, neww = oldh * scale, oldw * scale + neww = int(neww + 0.5) + newh = int(newh + 0.5) + return (newh, neww) From b20c70c588af6d06ab29be8eacd896d4d4def4b5 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Fri, 26 Jul 2024 00:43:26 +0530 Subject: [PATCH 2/3] wip: segment anything node --- invokeai/app/invocations/segment_anything.py | 76 +++++++++ .../groundingdino/datasets/transforms.py | 28 +--- .../models/GroundingDINO/__init__.py | 4 +- .../models/GroundingDINO/backbone/backbone.py | 28 ++-- .../backbone/position_encoding.py | 26 +--- .../backbone/swin_transformer.py | 72 +++------ .../models/GroundingDINO/groundingdino.py | 8 +- .../models/GroundingDINO/transformer.py | 13 +- .../groundingdino/util/inference.py | 21 ++- .../groundingdino/util/logger.py | 91 ----------- .../groundingdino/util/misc.py | 12 +- .../groundingdino/util/utils.py | 2 +- .../grounding_segment_anything/gsa.py | 65 +++----- .../segment_anything/__init__.py | 17 ++- .../automatic_mask_generator.py | 6 +- .../segment_anything/build_sam.py | 12 +- .../segment_anything/build_sam_hq.py | 8 +- .../segment_anything/modeling/__init__.py | 20 ++- .../modeling/image_encoder.py | 5 +- .../segment_anything/modeling/mask_decoder.py | 2 +- .../modeling/mask_decoder_hq.py | 2 +- .../modeling/prompt_encoder.py | 2 +- .../segment_anything/modeling/sam.py | 12 +- .../segment_anything/modeling/transformer.py | 2 +- .../segment_anything/predictor.py | 4 +- .../segment_anything/utils/amg.py | 34 ++--- .../segment_anything/utils/onnx.py | 144 ------------------ .../segment_anything/utils/transforms.py | 26 +--- pyproject.toml | 4 +- 29 files changed, 264 insertions(+), 482 deletions(-) create mode 100644 invokeai/app/invocations/segment_anything.py delete mode 100644 invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/logger.py delete mode 100644 invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/onnx.py diff --git a/invokeai/app/invocations/segment_anything.py b/invokeai/app/invocations/segment_anything.py new file mode 100644 index 0000000000..3516f22687 --- /dev/null +++ b/invokeai/app/invocations/segment_anything.py @@ -0,0 +1,76 @@ +from typing import Dict, cast + +import torch + +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import ImageField, InputField +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.image_util.grounding_segment_anything.gsa import GroundingSegmentAnythingDetector +from invokeai.backend.util.devices import TorchDevice + +GROUNDING_SEGMENT_ANYTHING_MODELS = { + "groundingdino_swint_ogc": "https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth", + "segment_anything_vit_h": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth", +} + + +@invocation( + "segment_anything", + title="Segment Anything", + tags=["grounding_dino", "segment", "anything"], + category="image", + version="1.0.0", +) +class SegmentAnythingInvocation(BaseInvocation): + """Automatically generate masks from an image using GroundingDINO & Segment Anything""" + + image: ImageField = InputField(description="The image to process") + prompt: str = InputField(default="", description="Keywords to segment", title="Prompt") + box_threshold: 
float = InputField(
+        default=0.5, ge=0, le=1, description="Threshold of box detection", title="Box Threshold"
+    )
+    text_threshold: float = InputField(
+        default=0.5, ge=0, le=1, description="Threshold of text detection", title="Text Threshold"
+    )
+    nms_threshold: float = InputField(
+        default=0.8, ge=0, le=1, description="Threshold of NMS detection", title="NMS Threshold"
+    )
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        input_image = context.images.get_pil(self.image.image_name)
+
+        grounding_dino_model = context.models.load_remote_model(
+            GROUNDING_SEGMENT_ANYTHING_MODELS["groundingdino_swint_ogc"]
+        )
+        segment_anything_model = context.models.load_remote_model(
+            GROUNDING_SEGMENT_ANYTHING_MODELS["segment_anything_vit_h"]
+        )
+
+        with (
+            grounding_dino_model.model_on_device() as (_, grounding_dino_state_dict),
+            segment_anything_model.model_on_device() as (_, segment_anything_state_dict),
+        ):
+            if not grounding_dino_state_dict or not segment_anything_state_dict:
+                raise RuntimeError("Unable to load segmentation models")
+
+            grounding_dino = GroundingSegmentAnythingDetector.build_grounding_dino(
+                cast(Dict[str, torch.Tensor], grounding_dino_state_dict)
+            )
+            segment_anything = GroundingSegmentAnythingDetector.build_segment_anything(
+                cast(Dict[str, torch.Tensor], segment_anything_state_dict), TorchDevice.choose_torch_device()
+            )
+            detector = GroundingSegmentAnythingDetector(grounding_dino, segment_anything)
+
+            mask = detector.predict(
+                input_image, self.prompt, self.box_threshold, self.text_threshold, self.nms_threshold
+            )
+            image_dto = context.images.save(mask)
+
+            # Build an ImageOutput and its ImageField
+            processed_image_field = ImageField(image_name=image_dto.image_name)
+            return ImageOutput(
+                image=processed_image_field,
+                width=input_image.width,
+                height=input_image.height,
+            )
diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/transforms.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/transforms.py
index 91cf9269e4..1e7e8a51f2 100644
--- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/transforms.py
+++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/datasets/transforms.py
@@ -10,8 +10,8 @@ import torch
 import torchvision.transforms as T
 import torchvision.transforms.functional as F
 
-from groundingdino.util.box_ops import box_xyxy_to_cxcywh
-from groundingdino.util.misc import interpolate
+from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.box_ops import box_xyxy_to_cxcywh
+from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.misc import interpolate
 
 
 def crop(image, target, region):
@@ -58,9 +58,7 @@ def crop(image, target, region):
 
     if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO":
         # for debug and visualization only.
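
Stripped of the model-manager plumbing, the new SegmentAnythingInvocation boils down to a build-then-predict flow against GroundingSegmentAnythingDetector. A minimal sketch for orientation, using the patch-2 signatures; gd_state_dict, sam_state_dict, device, and pil_image are placeholder names, not part of the patch:

    # Build both models from already-loaded state dicts, then run one prediction.
    grounding_dino = GroundingSegmentAnythingDetector.build_grounding_dino(gd_state_dict)
    segment_anything = GroundingSegmentAnythingDetector.build_segment_anything(sam_state_dict, device)
    detector = GroundingSegmentAnythingDetector(grounding_dino, segment_anything)
    # The prompt is comma-separated; each keyword becomes a detection class.
    mask = detector.predict(pil_image, "dog,cat", box_threshold=0.5, text_threshold=0.5, nms_threshold=0.8)
    # `mask` is a PIL image: white where any prompt matched, black elsewhere.
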
if "strings_positive" in target: - target["strings_positive"] = [ - _i for _i, _j in zip(target["strings_positive"], keep) if _j - ] + target["strings_positive"] = [_i for _i, _j in zip(target["strings_positive"], keep, strict=False) if _j] return cropped_image, target @@ -73,9 +71,7 @@ def hflip(image, target): target = target.copy() if "boxes" in target: boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor( - [w, 0, w, 0] - ) + boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) target["boxes"] = boxes if "masks" in target: @@ -119,15 +115,13 @@ def resize(image, target, size, max_size=None): if target is None: return rescaled_image, None - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) + ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size, strict=False)) ratio_width, ratio_height = ratios target = target.copy() if "boxes" in target: boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor( - [ratio_width, ratio_height, ratio_width, ratio_height] - ) + scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) target["boxes"] = scaled_boxes if "area" in target: @@ -139,9 +133,7 @@ def resize(image, target, size, max_size=None): target["size"] = torch.tensor([h, w]) if "masks" in target: - target["masks"] = ( - interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - ) + target["masks"] = interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5 return rescaled_image, target @@ -192,11 +184,7 @@ class RandomSizeCrop(object): h = random.randint(self.min_size, min(img.height, self.max_size)) region = T.RandomCrop.get_params(img, [h, w]) result_img, result_target = crop(img, target, region) - if ( - not self.respect_boxes - or len(result_target["boxes"]) == init_boxes - or i == max_patience - 1 - ): + if not self.respect_boxes or len(result_target["boxes"]) == init_boxes or i == max_patience - 1: return result_img, result_target return result_img, result_target diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/__init__.py index 2af819d61d..cbcddaac07 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/__init__.py +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/__init__.py @@ -12,4 +12,6 @@ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
# ------------------------------------------------------------------------ -from .groundingdino import build_groundingdino +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.models.GroundingDINO.groundingdino import ( + build_groundingdino, +) diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/backbone.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/backbone.py index c8340c723f..ee25f20e35 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/backbone.py +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/backbone.py @@ -24,10 +24,13 @@ import torchvision from torch import nn from torchvision.models._utils import IntermediateLayerGetter -from groundingdino.util.misc import NestedTensor, clean_state_dict, is_main_process - -from .position_encoding import build_position_encoding -from .swin_transformer import build_swin_transformer +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.models.GroundingDINO.backbone.position_encoding import ( + build_position_encoding, +) +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.models.GroundingDINO.backbone.swin_transformer import ( + build_swin_transformer, +) +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.misc import NestedTensor, is_main_process class FrozenBatchNorm2d(torch.nn.Module): @@ -80,19 +83,12 @@ class BackboneBase(nn.Module): ): super().__init__() for name, parameter in backbone.named_parameters(): - if ( - not train_backbone - or "layer2" not in name - and "layer3" not in name - and "layer4" not in name - ): + if not train_backbone or "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) return_layers = {} for idx, layer_index in enumerate(return_interm_indices): - return_layers.update( - {"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)} - ) + return_layers.update({"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)}) # if len: # if use_stage1_feature: @@ -214,8 +210,8 @@ def build_backbone(args): model = Joiner(backbone, position_embedding) model.num_channels = bb_num_channels - assert isinstance( - bb_num_channels, List - ), "bb_num_channels is expected to be a List but {}".format(type(bb_num_channels)) + assert isinstance(bb_num_channels, List), "bb_num_channels is expected to be a List but {}".format( + type(bb_num_channels) + ) # import ipdb; ipdb.set_trace() return model diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/position_encoding.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/position_encoding.py index eac7e896bb..6828d7c3cb 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/position_encoding.py +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/position_encoding.py @@ -24,7 +24,7 @@ import math import torch from torch import nn -from groundingdino.util.misc import NestedTensor +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.misc import NestedTensor class PositionEmbeddingSine(nn.Module): @@ -65,12 +65,8 @@ class 
PositionEmbeddingSine(nn.Module): pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack( - (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos_y = torch.stack( - (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos @@ -81,9 +77,7 @@ class PositionEmbeddingSineHW(nn.Module): used by the Attention is all you need paper, generalized to work on images. """ - def __init__( - self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None - ): + def __init__(self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None): super().__init__() self.num_pos_feats = num_pos_feats self.temperatureH = temperatureH @@ -111,19 +105,15 @@ class PositionEmbeddingSineHW(nn.Module): x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_tx = self.temperatureW ** (2 * (torch.div(dim_tx, 2, rounding_mode='floor')) / self.num_pos_feats) + dim_tx = self.temperatureW ** (2 * (torch.div(dim_tx, 2, rounding_mode="floor")) / self.num_pos_feats) pos_x = x_embed[:, :, :, None] / dim_tx dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_ty = self.temperatureH ** (2 * (torch.div(dim_ty, 2, rounding_mode='floor')) / self.num_pos_feats) + dim_ty = self.temperatureH ** (2 * (torch.div(dim_ty, 2, rounding_mode="floor")) / self.num_pos_feats) pos_y = y_embed[:, :, :, None] / dim_ty - pos_x = torch.stack( - (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos_y = torch.stack( - (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) # import ipdb; ipdb.set_trace() diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/swin_transformer.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/swin_transformer.py index fa8837e400..141c6568b9 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/swin_transformer.py +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/backbone/swin_transformer.py @@ -18,15 +18,13 @@ import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import DropPath, to_2tuple, trunc_normal_ -from groundingdino.util.misc import NestedTensor +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.misc import NestedTensor class Mlp(nn.Module): """Multilayer perceptron.""" - def __init__( - self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0 - ): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features @@ 
-138,24 +136,16 @@ class WindowAttention(nn.Module): mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None """ B_, N, C = x.shape - qkv = ( - self.qkv(x) - .reshape(B_, N, 3, self.num_heads, C // self.num_heads) - .permute(2, 0, 3, 1, 4) - ) + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = q @ k.transpose(-2, -1) - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.view(-1) - ].view( + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute( - 2, 0, 1 - ).contiguous() # nH, Wh*Ww, Wh*Ww + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: @@ -228,9 +218,7 @@ class SwinTransformerBlock(nn.Module): self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop - ) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.H = None self.W = None @@ -266,12 +254,8 @@ class SwinTransformerBlock(nn.Module): attn_mask = None # partition windows - x_windows = window_partition( - shifted_x, self.window_size - ) # nW*B, window_size, window_size, C - x_windows = x_windows.view( - -1, self.window_size * self.window_size, C - ) # nW*B, window_size*window_size, C + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C # W-MSA/SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C @@ -433,14 +417,10 @@ class BasicLayer(nn.Module): img_mask[:, h, w, :] = cnt cnt += 1 - mask_windows = window_partition( - img_mask, self.window_size - ) # nW, window_size, window_size, 1 + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( - attn_mask == 0, float(0.0) - ) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) for blk in self.blocks: blk.H, blk.W = H, W @@ -589,9 +569,7 @@ class SwinTransformer(nn.Module): self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth - dpr = [ - x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) - ] # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule # build layers self.layers = nn.ModuleList() @@ -682,9 +660,7 @@ class SwinTransformer(nn.Module): Wh, Ww = x.size(2), x.size(3) if self.ape: # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" - ) + absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), 
mode="bicubic") x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C else: x = x.flatten(2).transpose(1, 2) @@ -718,9 +694,7 @@ class SwinTransformer(nn.Module): Wh, Ww = x.size(2), x.size(3) if self.ape: # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" - ) + absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic") x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C else: x = x.flatten(2).transpose(1, 2) @@ -769,21 +743,11 @@ def build_swin_transformer(modelname, pretrain_img_size, **kw): ] model_para_dict = { - "swin_T_224_1k": dict( - embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7 - ), - "swin_B_224_22k": dict( - embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7 - ), - "swin_B_384_22k": dict( - embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12 - ), - "swin_L_224_22k": dict( - embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=7 - ), - "swin_L_384_22k": dict( - embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12 - ), + "swin_T_224_1k": dict(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7), + "swin_B_224_22k": dict(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7), + "swin_B_384_22k": dict(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12), + "swin_L_224_22k": dict(embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=7), + "swin_L_384_22k": dict(embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12), } kw_cgf = model_para_dict[modelname] kw_cgf.update(kw) diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/groundingdino.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/groundingdino.py index b76df92e13..00ac21bea2 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/groundingdino.py +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/groundingdino.py @@ -21,8 +21,12 @@ import torch import torch.nn.functional as F from torch import nn -from groundingdino.util import get_tokenlizer -from groundingdino.util.misc import NestedTensor, inverse_sigmoid, nested_tensor_from_tensor_list +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util import get_tokenlizer +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.misc import ( + NestedTensor, + inverse_sigmoid, + nested_tensor_from_tensor_list, +) from ..registry import MODULE_BUILD_FUNCS from .backbone import build_backbone diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer.py index 1a25469604..8bd0a3cf67 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer.py +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/models/GroundingDINO/transformer.py @@ -22,14 +22,19 @@ import torch import torch.utils.checkpoint as checkpoint from torch import Tensor, nn -from groundingdino.util.misc import inverse_sigmoid +from 
invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.misc import inverse_sigmoid from .fuse_modules import BiAttentionBlock from .ms_deform_attn import MultiScaleDeformableAttention as MSDeformAttn from .transformer_vanilla import TransformerEncoderLayer -from .utils import (MLP, _get_activation_fn, _get_clones, - gen_encoder_output_proposals, gen_sineembed_for_position, - get_sine_pos_embed) +from .utils import ( + MLP, + _get_activation_fn, + _get_clones, + gen_encoder_output_proposals, + gen_sineembed_for_position, + get_sine_pos_embed, +) class Transformer(nn.Module): diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/inference.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/inference.py index 9bfb2cbca9..1ea1270cda 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/inference.py +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/inference.py @@ -1,4 +1,4 @@ -from typing import List, Tuple +from typing import Dict, List, Tuple import cv2 import numpy as np @@ -7,11 +7,11 @@ import torch from PIL import Image from torchvision.ops import box_convert -import groundingdino.datasets.transforms as T -from groundingdino.models import build_model -from groundingdino.util.misc import clean_state_dict -from groundingdino.util.slconfig import SLConfig -from groundingdino.util.utils import get_phrases_from_posmap +import invokeai.backend.image_util.grounding_segment_anything.groundingdino.datasets.transforms as T +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.models import build_model +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.misc import clean_state_dict +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.slconfig import SLConfig +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.utils import get_phrases_from_posmap # ---------------------------------------------------------------------------------------------------------------------- # OLD API @@ -25,12 +25,11 @@ def preprocess_caption(caption: str) -> str: return result + "." 
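
GroundingDINO works best with lowercase, period-terminated captions, which is what preprocess_caption normalizes toward; only its tail is visible in the hunk above. Assuming the helper as shipped upstream, it behaves like this:

    preprocess_caption("Two Dogs")   # -> "two dogs."
    preprocess_caption("two dogs.")  # -> "two dogs." (already terminated, returned unchanged)
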
-def load_model(model_config_path: str, model_checkpoint_path: str, device: str = "cuda"): +def load_model(model_config_path: str, model_state_dict: Dict[str, torch.Tensor], device: str = "cuda"): args = SLConfig.fromfile(model_config_path) args.device = device model = build_model(args) - checkpoint = torch.load(model_checkpoint_path, map_location="cpu") - model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False) + model.load_state_dict(clean_state_dict(model_state_dict["model"]), strict=False) model.eval() return model @@ -98,9 +97,9 @@ def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor class Model: - def __init__(self, model_config_path: str, model_checkpoint_path: str, device: str = "cuda"): + def __init__(self, model_config_path: str, model_state_dict: Dict[str, torch.Tensor], device: str = "cuda"): self.model = load_model( - model_config_path=model_config_path, model_checkpoint_path=model_checkpoint_path, device=device + model_config_path=model_config_path, model_state_dict=model_state_dict, device=device ).to(device) self.device = device diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/logger.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/logger.py deleted file mode 100644 index 679e0f5926..0000000000 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/logger.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import functools -import logging -import os -import sys - -from termcolor import colored - - -class _ColorfulFormatter(logging.Formatter): - def __init__(self, *args, **kwargs): - self._root_name = kwargs.pop("root_name") + "." - self._abbrev_name = kwargs.pop("abbrev_name", "") - if len(self._abbrev_name): - self._abbrev_name = self._abbrev_name + "." - super(_ColorfulFormatter, self).__init__(*args, **kwargs) - - def formatMessage(self, record): - record.name = record.name.replace(self._root_name, self._abbrev_name) - log = super(_ColorfulFormatter, self).formatMessage(record) - if record.levelno == logging.WARNING: - prefix = colored("WARNING", "red", attrs=["blink"]) - elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: - prefix = colored("ERROR", "red", attrs=["blink", "underline"]) - else: - return log - return prefix + " " + log - - -# so that calling setup_logger multiple times won't add many handlers -@functools.lru_cache() -def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None): - """ - Initialize the detectron2 logger and set its verbosity level to "INFO". - - Args: - output (str): a file name or a directory to save log. If None, will not save log file. - If ends with ".txt" or ".log", assumed to be a file name. - Otherwise, logs will be saved to `output/log.txt`. 
- name (str): the root module name of this logger - - Returns: - logging.Logger: a logger - """ - logger = logging.getLogger(name) - logger.setLevel(logging.DEBUG) - logger.propagate = False - - if abbrev_name is None: - abbrev_name = name - - plain_formatter = logging.Formatter("[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S") - # stdout logging: master only - if distributed_rank == 0: - ch = logging.StreamHandler(stream=sys.stdout) - ch.setLevel(logging.DEBUG) - if color: - formatter = _ColorfulFormatter( - colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s", - datefmt="%m/%d %H:%M:%S", - root_name=name, - abbrev_name=str(abbrev_name), - ) - else: - formatter = plain_formatter - ch.setFormatter(formatter) - logger.addHandler(ch) - - # file logging: all workers - if output is not None: - if output.endswith(".txt") or output.endswith(".log"): - filename = output - else: - filename = os.path.join(output, "log.txt") - if distributed_rank > 0: - filename = filename + f".rank{distributed_rank}" - os.makedirs(os.path.dirname(filename), exist_ok=True) - - fh = logging.StreamHandler(_cached_log_stream(filename)) - fh.setLevel(logging.DEBUG) - fh.setFormatter(plain_formatter) - logger.addHandler(fh) - - return logger - - -# cache the opened file object, so that different calls to `setup_logger` -# with the same file name can safely write to the same file. -@functools.lru_cache(maxsize=None) -def _cached_log_stream(filename): - return open(filename, "a") diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/misc.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/misc.py index 89e58d144b..f921d00274 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/misc.py +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/misc.py @@ -161,7 +161,7 @@ def all_gather_cpu(data): dist.all_gather(tensor_list, tensor, group=cpu_group) data_list = [] - for size, tensor in zip(size_list, tensor_list): + for size, tensor in zip(size_list, tensor_list, strict=False): tensor = torch.split(tensor, [size, max_size - size], dim=0)[0] buffer = io.BytesIO(tensor.cpu().numpy()) obj = torch.load(buffer) @@ -210,7 +210,7 @@ def all_gather(data): dist.all_gather(tensor_list, tensor) data_list = [] - for size, tensor in zip(size_list, tensor_list): + for size, tensor in zip(size_list, tensor_list, strict=False): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) @@ -240,7 +240,7 @@ def reduce_dict(input_dict, average=True): dist.all_reduce(values) if average: values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} + reduced_dict = {k: v for k, v in zip(names, values, strict=False)} return reduced_dict @@ -378,7 +378,7 @@ def get_sha(): def collate_fn(batch): # import ipdb; ipdb.set_trace() - batch = list(zip(*batch)) + batch = list(zip(*batch, strict=False)) batch[0] = nested_tensor_from_tensor_list(batch[0]) return tuple(batch) @@ -480,7 +480,7 @@ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((b, h, w), dtype=torch.bool, device=device) - for img, pad_img, m in zip(tensor_list, tensor, mask): + for img, pad_img, m in zip(tensor_list, tensor, mask, strict=False): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False else: @@ -505,7 +505,7 @@ def 
_onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTen padded_imgs = [] padded_masks = [] for img in tensor_list: - padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] + padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape), strict=False)] padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) padded_imgs.append(padded_img) diff --git a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/utils.py b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/utils.py index c69ed7d4f1..807c12124c 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/utils.py +++ b/invokeai/backend/image_util/grounding_segment_anything/groundingdino/util/utils.py @@ -9,7 +9,7 @@ import numpy as np import torch from transformers import AutoTokenizer -from groundingdino.util.slconfig import SLConfig +from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.slconfig import SLConfig def slprint(x, name="x"): diff --git a/invokeai/backend/image_util/grounding_segment_anything/gsa.py b/invokeai/backend/image_util/grounding_segment_anything/gsa.py index c076e9e86f..3102091bef 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/gsa.py +++ b/invokeai/backend/image_util/grounding_segment_anything/gsa.py @@ -1,44 +1,38 @@ -from typing import Dict, List, Literal, Optional +import pathlib +from typing import Dict, List, Optional -import cv2 import numpy as np import supervision as sv import torch import torchvision +from PIL import Image from invokeai.backend.image_util.grounding_segment_anything.groundingdino.util.inference import Model from invokeai.backend.image_util.grounding_segment_anything.segment_anything.build_sam import sam_model_registry from invokeai.backend.image_util.grounding_segment_anything.segment_anything.predictor import SamPredictor -GROUNDING_SEGMENT_ANYTHING_MODELS = { - "groundingdino_swint_ogc": "https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth", - "segment_anything_vit_h": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth", -} - class GroundingSegmentAnythingDetector: - def __init__(self) -> None: - self.grounding_dino_model: Optional[Model] = None - self.segment_anything_model: Optional[SamPredictor] = None - self.grounding_dino_config: str = "./groundingdino/config/GroundingDINO_SwinT_OGC.py" - self.sam_encoder: Literal["vit_h"] = "vit_h" - self.device: torch.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + def __init__(self, grounding_dino_model: Model, segment_anything_model: SamPredictor) -> None: + self.grounding_dino_model: Optional[Model] = grounding_dino_model + self.segment_anything_model: Optional[SamPredictor] = segment_anything_model - def build_grounding_dino(self): + @staticmethod + def build_grounding_dino(grounding_dino_state_dict: Dict[str, torch.Tensor]): + grounding_dino_config = pathlib.Path( + "./invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinT_OGC.py" + ) return Model( - model_config_path=self.grounding_dino_config, - model_checkpoint_path="./checkpoints/groundingdino_swint_ogc.pth", + model_state_dict=grounding_dino_state_dict, + model_config_path=grounding_dino_config.as_posix(), ) - def build_segment_anything(self): - sam = sam_model_registry[self.sam_encoder](checkpoint="./checkpoints/sam_vit_h_4b8939.pth") - 
sam.to(device=self.device) + @staticmethod + def build_segment_anything(segment_anything_state_dict: Dict[str, torch.Tensor], device: torch.device): + sam = sam_model_registry["vit_h"](checkpoint=segment_anything_state_dict) + sam.to(device=device) return SamPredictor(sam) - def build_grounding_sam(self): - self.grounding_dino_model = self.build_grounding_dino() - self.segment_anything_model = self.build_segment_anything() - def detect_objects( self, image: np.ndarray, @@ -77,20 +71,18 @@ class GroundingSegmentAnythingDetector: def predict( self, - image: str, + image: Image.Image, prompt: str, box_threshold: float = 0.5, text_threshold: float = 0.5, nms_threshold: float = 0.8, ): - if not self.grounding_dino_model or not self.segment_anything_model: - self.build_grounding_sam() - - image = cv2.imread(image) + open_cv_image = np.array(image) + open_cv_image = open_cv_image[:, :, ::-1].copy() prompts = prompt.split(",") - detections = self.detect_objects(image, prompts, box_threshold, text_threshold, nms_threshold) - segments = self.segment_detections(image, detections, prompts) + detections = self.detect_objects(open_cv_image, prompts, box_threshold, text_threshold, nms_threshold) + segments = self.segment_detections(open_cv_image, detections, prompts) if len(segments) > 0: combined_mask = np.zeros_like(list(segments.values())[0]) @@ -98,15 +90,6 @@ class GroundingSegmentAnythingDetector: combined_mask = np.logical_or(combined_mask, mask) mask_preview = (combined_mask * 255).astype(np.uint8) else: - mask_preview = np.zeros(image.shape, np.uint8) + mask_preview = np.zeros(open_cv_image.shape, np.uint8) - cv2.imwrite("mask.png", mask_preview) - - -if __name__ == "__main__": - gsa = GroundingSegmentAnythingDetector() - image = "./assets/image.webp" - - while True: - prompt = input("Segment: ") - gsa.predict(image, prompt, 0.5, 0.5, 0.8) + return Image.fromarray(mask_preview) diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/__init__.py index 9915b49c41..bf29bfd709 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/__init__.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/__init__.py @@ -4,13 +4,22 @@ # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. 
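
The per-prompt masks collected by predict() above are merged with a logical OR and scaled to a 0-255 preview. The same combination step in isolation, on toy 4x4 boolean masks standing in for SAM output:

    import numpy as np

    segments = {"dog": np.eye(4, dtype=bool), "cat": np.flipud(np.eye(4, dtype=bool))}
    combined_mask = np.zeros_like(list(segments.values())[0])
    for mask in segments.values():
        combined_mask = np.logical_or(combined_mask, mask)
    mask_preview = (combined_mask * 255).astype(np.uint8)  # a white "X" on black
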
-from .automatic_mask_generator import SamAutomaticMaskGenerator -from .build_sam import build_sam, build_sam_vit_b, build_sam_vit_h, build_sam_vit_l, sam_model_registry -from .build_sam_hq import ( + +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.automatic_mask_generator import ( + SamAutomaticMaskGenerator, +) # noqa +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.build_sam import ( # noqa + build_sam, + build_sam_vit_b, + build_sam_vit_h, + build_sam_vit_l, + sam_model_registry, +) +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.build_sam_hq import ( # noqa build_sam_hq, build_sam_hq_vit_b, build_sam_hq_vit_h, build_sam_hq_vit_l, sam_hq_model_registry, ) -from .predictor import SamPredictor +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.predictor import SamPredictor # noqa diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/automatic_mask_generator.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/automatic_mask_generator.py index 2cd252d770..440bfd5f02 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/automatic_mask_generator.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/automatic_mask_generator.py @@ -10,9 +10,9 @@ import numpy as np import torch from torchvision.ops.boxes import batched_nms, box_area # type: ignore -from .modeling import Sam -from .predictor import SamPredictor -from .utils.amg import ( +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling import Sam +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.predictor import SamPredictor +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.utils.amg import ( MaskData, area_from_rle, batch_iterator, diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam.py index 012cfdf9af..ee600dcaef 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam.py @@ -8,7 +8,13 @@ from functools import partial import torch -from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling import ( + ImageEncoderViT, + MaskDecoder, + PromptEncoder, + Sam, + TwoWayTransformer, +) def build_sam_vit_h(checkpoint=None): @@ -101,7 +107,5 @@ def _build_sam( ) sam.eval() if checkpoint is not None: - with open(checkpoint, "rb") as f: - state_dict = torch.load(f) - sam.load_state_dict(state_dict) + sam.load_state_dict(checkpoint) return sam diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam_hq.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam_hq.py index 416c6aabb9..670b3ee41b 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam_hq.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/build_sam_hq.py @@ -8,7 +8,13 @@ from functools import partial import torch -from .modeling import ImageEncoderViT, MaskDecoderHQ, PromptEncoder, Sam, TwoWayTransformer +from 
invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling import ( + ImageEncoderViT, + MaskDecoderHQ, + PromptEncoder, + Sam, + TwoWayTransformer, +) def build_sam_hq_vit_h(checkpoint=None): diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/__init__.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/__init__.py index 2168e3e83b..027b418940 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/__init__.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/__init__.py @@ -4,9 +4,17 @@ # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. -from .image_encoder import ImageEncoderViT -from .mask_decoder import MaskDecoder -from .mask_decoder_hq import MaskDecoderHQ -from .prompt_encoder import PromptEncoder -from .sam import Sam -from .transformer import TwoWayTransformer +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.image_encoder import ( + ImageEncoderViT, +) +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.mask_decoder import MaskDecoder +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.mask_decoder_hq import ( + MaskDecoderHQ, +) +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.prompt_encoder import ( + PromptEncoder, +) +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.sam import Sam +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.transformer import ( + TwoWayTransformer, +) diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/image_encoder.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/image_encoder.py index 6a5c18f43c..c6a4e22755 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/image_encoder.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/image_encoder.py @@ -10,7 +10,10 @@ import torch import torch.nn as nn import torch.nn.functional as F -from .common import LayerNorm2d, MLPBlock +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.common import ( + LayerNorm2d, + MLPBlock, +) # This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder.py index 90915101d9..e9e42993e4 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder.py @@ -10,7 +10,7 @@ import torch from torch import nn from torch.nn import functional as F -from .common import LayerNorm2d +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.common import LayerNorm2d class MaskDecoder(nn.Module): diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder_hq.py 
b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder_hq.py index 5079423465..5b830ef9ba 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder_hq.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/mask_decoder_hq.py @@ -11,7 +11,7 @@ import torch from torch import nn from torch.nn import functional as F -from .common import LayerNorm2d +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.common import LayerNorm2d class MaskDecoderHQ(nn.Module): diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/prompt_encoder.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/prompt_encoder.py index 1887c8d83f..1d00295ff6 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/prompt_encoder.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/prompt_encoder.py @@ -10,7 +10,7 @@ import numpy as np import torch from torch import nn -from .common import LayerNorm2d +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.common import LayerNorm2d class PromptEncoder(nn.Module): diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/sam.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/sam.py index c1ce4ecae2..5dfb03e124 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/sam.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/sam.py @@ -10,9 +10,13 @@ import torch from torch import nn from torch.nn import functional as F -from .image_encoder import ImageEncoderViT -from .mask_decoder import MaskDecoder -from .prompt_encoder import PromptEncoder +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.image_encoder import ( + ImageEncoderViT, +) +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.mask_decoder import MaskDecoder +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.prompt_encoder import ( + PromptEncoder, +) class Sam(nn.Module): @@ -98,7 +102,7 @@ class Sam(nn.Module): image_embeddings = self.image_encoder(input_images) outputs = [] - for image_record, curr_embedding in zip(batched_input, image_embeddings): + for image_record, curr_embedding in zip(batched_input, image_embeddings, strict=False): if "point_coords" in image_record: points = (image_record["point_coords"], image_record["point_labels"]) else: diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/transformer.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/transformer.py index 5bdd75377e..5d0054cee7 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/transformer.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/modeling/transformer.py @@ -10,7 +10,7 @@ from typing import Tuple, Type import torch from torch import Tensor, nn -from .common import MLPBlock +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling.common import MLPBlock class TwoWayTransformer(nn.Module): diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/predictor.py 
b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/predictor.py index cd165f8958..2f4f418524 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/predictor.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/predictor.py @@ -9,8 +9,8 @@ from typing import Optional, Tuple import numpy as np import torch -from .modeling import Sam -from .utils.transforms import ResizeLongestSide +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.modeling import Sam +from invokeai.backend.image_util.grounding_segment_anything.segment_anything.utils.transforms import ResizeLongestSide class SamPredictor: diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/amg.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/amg.py index 3a137778e4..1c9c491fa1 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/amg.py +++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/amg.py @@ -4,14 +4,14 @@ # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. -import numpy as np -import torch - import math from copy import deepcopy from itertools import product from typing import Any, Dict, Generator, ItemsView, List, Tuple +import numpy as np +import torch + class MaskData: """ @@ -153,9 +153,7 @@ def area_from_rle(rle: Dict[str, Any]) -> int: return sum(rle["counts"][1::2]) -def calculate_stability_score( - masks: torch.Tensor, mask_threshold: float, threshold_offset: float -) -> torch.Tensor: +def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor: """ Computes the stability score for a batch of masks. The stability score is the IoU between the binary masks obtained by thresholding @@ -163,16 +161,8 @@ def calculate_stability_score( """ # One mask is always contained inside the other. 
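
calculate_stability_score, reflowed in this hunk, measures how stable a mask is when the logit threshold shifts by plus or minus threshold_offset: because the high-threshold mask is contained in the low-threshold one, the IoU reduces to an area ratio. A toy worked example with made-up logits:

    import torch

    logits = torch.tensor([[[2.0, 0.4], [-0.6, -2.0]]])  # one 2x2 mask of logits
    mask_threshold, offset = 0.0, 0.5
    intersections = (logits > (mask_threshold + offset)).sum((-1, -2))  # 1 pixel above 0.5
    unions = (logits > (mask_threshold - offset)).sum((-1, -2))         # 2 pixels above -0.5
    stability = intersections / unions                                  # tensor([0.5000])
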
# Save memory by preventing unnecesary cast to torch.int64 - intersections = ( - (masks > (mask_threshold + threshold_offset)) - .sum(-1, dtype=torch.int16) - .sum(-1, dtype=torch.int32) - ) - unions = ( - (masks > (mask_threshold - threshold_offset)) - .sum(-1, dtype=torch.int16) - .sum(-1, dtype=torch.int32) - ) + intersections = (masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) + unions = (masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) return intersections / unions @@ -186,9 +176,7 @@ def build_point_grid(n_per_side: int) -> np.ndarray: return points -def build_all_layer_point_grids( - n_per_side: int, n_layers: int, scale_per_layer: int -) -> List[np.ndarray]: +def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]: """Generates point grids for all crop layers.""" points_by_layer = [] for i in range(n_layers + 1): @@ -252,9 +240,7 @@ def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: return points + offset -def uncrop_masks( - masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int -) -> torch.Tensor: +def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor: x0, y0, x1, y1 = crop_box if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: return masks @@ -264,9 +250,7 @@ def uncrop_masks( return torch.nn.functional.pad(masks, pad, value=0) -def remove_small_regions( - mask: np.ndarray, area_thresh: float, mode: str -) -> Tuple[np.ndarray, bool]: +def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]: """ Removes small disconnected regions and holes in a mask. Returns the mask and an indicator of if the mask has been modified. diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/onnx.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/onnx.py deleted file mode 100644 index 4297b31291..0000000000 --- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/onnx.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn -from torch.nn import functional as F - -from typing import Tuple - -from ..modeling import Sam -from .amg import calculate_stability_score - - -class SamOnnxModel(nn.Module): - """ - This model should not be called directly, but is used in ONNX export. - It combines the prompt encoder, mask decoder, and mask postprocessing of Sam, - with some functions modified to enable model tracing. Also supports extra - options controlling what information. See the ONNX export script for details. 
- """ - - def __init__( - self, - model: Sam, - return_single_mask: bool, - use_stability_score: bool = False, - return_extra_metrics: bool = False, - ) -> None: - super().__init__() - self.mask_decoder = model.mask_decoder - self.model = model - self.img_size = model.image_encoder.img_size - self.return_single_mask = return_single_mask - self.use_stability_score = use_stability_score - self.stability_score_offset = 1.0 - self.return_extra_metrics = return_extra_metrics - - @staticmethod - def resize_longest_image_size( - input_image_size: torch.Tensor, longest_side: int - ) -> torch.Tensor: - input_image_size = input_image_size.to(torch.float32) - scale = longest_side / torch.max(input_image_size) - transformed_size = scale * input_image_size - transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) - return transformed_size - - def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor: - point_coords = point_coords + 0.5 - point_coords = point_coords / self.img_size - point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords) - point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding) - - point_embedding = point_embedding * (point_labels != -1) - point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * ( - point_labels == -1 - ) - - for i in range(self.model.prompt_encoder.num_point_embeddings): - point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[ - i - ].weight * (point_labels == i) - - return point_embedding - - def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor: - mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask) - mask_embedding = mask_embedding + ( - 1 - has_mask_input - ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1) - return mask_embedding - - def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor: - masks = F.interpolate( - masks, - size=(self.img_size, self.img_size), - mode="bilinear", - align_corners=False, - ) - - prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size) - masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])] - - orig_im_size = orig_im_size.to(torch.int64) - h, w = orig_im_size[0], orig_im_size[1] - masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) - return masks - - def select_masks( - self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int - ) -> Tuple[torch.Tensor, torch.Tensor]: - # Determine if we should return the multiclick mask or not from the number of points. - # The reweighting is used to avoid control flow. 
-        score_reweight = torch.tensor(
-            [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
-        ).to(iou_preds.device)
-        score = iou_preds + (num_points - 2.5) * score_reweight
-        best_idx = torch.argmax(score, dim=1)
-        masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
-        iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
-
-        return masks, iou_preds
-
-    @torch.no_grad()
-    def forward(
-        self,
-        image_embeddings: torch.Tensor,
-        point_coords: torch.Tensor,
-        point_labels: torch.Tensor,
-        mask_input: torch.Tensor,
-        has_mask_input: torch.Tensor,
-        orig_im_size: torch.Tensor,
-    ):
-        sparse_embedding = self._embed_points(point_coords, point_labels)
-        dense_embedding = self._embed_masks(mask_input, has_mask_input)
-
-        masks, scores = self.model.mask_decoder.predict_masks(
-            image_embeddings=image_embeddings,
-            image_pe=self.model.prompt_encoder.get_dense_pe(),
-            sparse_prompt_embeddings=sparse_embedding,
-            dense_prompt_embeddings=dense_embedding,
-        )
-
-        if self.use_stability_score:
-            scores = calculate_stability_score(
-                masks, self.model.mask_threshold, self.stability_score_offset
-            )
-
-        if self.return_single_mask:
-            masks, scores = self.select_masks(masks, scores, point_coords.shape[1])
-
-        upscaled_masks = self.mask_postprocessing(masks, orig_im_size)
-
-        if self.return_extra_metrics:
-            stability_scores = calculate_stability_score(
-                upscaled_masks, self.model.mask_threshold, self.stability_score_offset
-            )
-            areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)
-            return upscaled_masks, scores, stability_scores, areas, masks
-
-        return upscaled_masks, scores, masks
diff --git a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/transforms.py b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/transforms.py
index 3ad346661f..96a4ed6bc2 100644
--- a/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/transforms.py
+++ b/invokeai/backend/image_util/grounding_segment_anything/segment_anything/utils/transforms.py
@@ -4,14 +4,14 @@
 # This source code is licensed under the license found in the
 # LICENSE file in the root directory of this source tree.
 
+from copy import deepcopy
+from typing import Tuple
+
 import numpy as np
 import torch
 from torch.nn import functional as F
 from torchvision.transforms.functional import resize, to_pil_image  # type: ignore
 
-from copy import deepcopy
-from typing import Tuple
-
 
 class ResizeLongestSide:
     """
@@ -36,9 +36,7 @@ class ResizeLongestSide:
         original image size in (H, W) format.
         """
         old_h, old_w = original_size
-        new_h, new_w = self.get_preprocess_shape(
-            original_size[0], original_size[1], self.target_length
-        )
+        new_h, new_w = self.get_preprocess_shape(original_size[0], original_size[1], self.target_length)
         coords = deepcopy(coords).astype(float)
         coords[..., 0] = coords[..., 0] * (new_w / old_w)
         coords[..., 1] = coords[..., 1] * (new_h / old_h)
@@ -60,29 +58,21 @@ class ResizeLongestSide:
         """
         # Expects an image in BCHW format. May not exactly match apply_image.
         target_size = self.get_preprocess_shape(image.shape[2], image.shape[3], self.target_length)
-        return F.interpolate(
-            image, target_size, mode="bilinear", align_corners=False, antialias=True
-        )
+        return F.interpolate(image, target_size, mode="bilinear", align_corners=False, antialias=True)
 
-    def apply_coords_torch(
-        self, coords: torch.Tensor, original_size: Tuple[int, ...]
- ) -> torch.Tensor: + def apply_coords_torch(self, coords: torch.Tensor, original_size: Tuple[int, ...]) -> torch.Tensor: """ Expects a torch tensor with length 2 in the last dimension. Requires the original image size in (H, W) format. """ old_h, old_w = original_size - new_h, new_w = self.get_preprocess_shape( - original_size[0], original_size[1], self.target_length - ) + new_h, new_w = self.get_preprocess_shape(original_size[0], original_size[1], self.target_length) coords = deepcopy(coords).to(torch.float) coords[..., 0] = coords[..., 0] * (new_w / old_w) coords[..., 1] = coords[..., 1] * (new_h / old_h) return coords - def apply_boxes_torch( - self, boxes: torch.Tensor, original_size: Tuple[int, ...] - ) -> torch.Tensor: + def apply_boxes_torch(self, boxes: torch.Tensor, original_size: Tuple[int, ...]) -> torch.Tensor: """ Expects a torch tensor with shape Bx4. Requires the original image size in (H, W) format. diff --git a/pyproject.toml b/pyproject.toml index 9acaa17e44..74cbfbc172 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,12 +74,13 @@ dependencies = [ "easing-functions", "einops", "facexlib", - "matplotlib", # needed for plotting of Penner easing functions + "matplotlib", # needed for plotting of Penner easing functions "npyscreen", "omegaconf", "picklescan", "pillow", "prompt-toolkit", + "pycocotools", "pympler~=1.0.1", "pypatchmatch", 'pyperclip', @@ -90,6 +91,7 @@ dependencies = [ "scikit-image~=0.21.0", "semver~=3.0.1", "send2trash", + "supervision", "test-tube~=0.7.5", "windows-curses; sys_platform=='win32'", ] From 737356fd147dc8088c0c722498db65de3bbc8e4c Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Fri, 26 Jul 2024 00:55:22 +0530 Subject: [PATCH 3/3] fix: pass device to grounding dino initializer --- invokeai/app/invocations/segment_anything.py | 2 +- .../grounding_segment_anything/gsa.py | 19 +++++++++++++------ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/invokeai/app/invocations/segment_anything.py b/invokeai/app/invocations/segment_anything.py index 3516f22687..7b9adf584a 100644 --- a/invokeai/app/invocations/segment_anything.py +++ b/invokeai/app/invocations/segment_anything.py @@ -55,7 +55,7 @@ class SegmentAnythingInvocation(BaseInvocation): raise RuntimeError("Unable to load segmentation models") grounding_dino = GroundingSegmentAnythingDetector.build_grounding_dino( - cast(Dict[str, torch.Tensor], grounding_dino_state_dict) + cast(Dict[str, torch.Tensor], grounding_dino_state_dict), TorchDevice.choose_torch_device() ) segment_anything = GroundingSegmentAnythingDetector.build_segment_anything( cast(Dict[str, torch.Tensor], segment_anything_state_dict), TorchDevice.choose_torch_device() diff --git a/invokeai/backend/image_util/grounding_segment_anything/gsa.py b/invokeai/backend/image_util/grounding_segment_anything/gsa.py index 3102091bef..aa07e0218b 100644 --- a/invokeai/backend/image_util/grounding_segment_anything/gsa.py +++ b/invokeai/backend/image_util/grounding_segment_anything/gsa.py @@ -1,5 +1,5 @@ import pathlib -from typing import Dict, List, Optional +from typing import Any, Dict, List, Optional import numpy as np import supervision as sv @@ -18,13 +18,14 @@ class GroundingSegmentAnythingDetector: self.segment_anything_model: Optional[SamPredictor] = segment_anything_model @staticmethod - def build_grounding_dino(grounding_dino_state_dict: Dict[str, torch.Tensor]): + def build_grounding_dino(grounding_dino_state_dict: Dict[str, torch.Tensor], device: torch.device): 
         grounding_dino_config = pathlib.Path(
             "./invokeai/backend/image_util/grounding_segment_anything/groundingdino/config/GroundingDINO_SwinT_OGC.py"
         )
         return Model(
             model_state_dict=grounding_dino_state_dict,
             model_config_path=grounding_dino_config.as_posix(),
+            device=device.type,
         )
 
     @staticmethod
@@ -35,12 +36,15 @@
 
     def detect_objects(
         self,
-        image: np.ndarray,
+        image: np.ndarray[Any, Any],
         prompts: List[str],
         box_threshold: float = 0.5,
         text_threshold: float = 0.5,
         nms_threshold: float = 0.8,
     ):
+        if not self.grounding_dino_model:
+            raise RuntimeError("GroundingDINO model could not be loaded")
+
         detections = self.grounding_dino_model.predict_with_classes(
             image=image, classes=prompts, box_threshold=box_threshold, text_threshold=text_threshold
         )
@@ -52,15 +56,18 @@
             .numpy()
             .tolist()
         )
-
         detections.xyxy = detections.xyxy[nms_idx]
         detections.confidence = detections.confidence[nms_idx]
         detections.class_id = detections.class_id[nms_idx]
+
         return detections
 
     def segment_detections(
-        self, image: np.ndarray, detections: sv.Detections, prompts: List[str]
-    ) -> Dict[str, np.ndarray]:
+        self, image: np.ndarray[Any, Any], detections: sv.Detections, prompts: List[str]
+    ) -> Dict[str, np.ndarray[Any, Any]]:
+        if not self.segment_anything_model:
+            raise RuntimeError("Segment Anything model could not be loaded")
+
         self.segment_anything_model.set_image(image)
         result_masks = {}
         for box, class_id in zip(detections.xyxy, detections.class_id):
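
The detect_objects changes above add a load guard and keep the torchvision NMS filtering, whose surviving indices re-index the detection arrays. The NMS call in isolation, with made-up boxes (two heavily overlapping detections and one separate):

    import torch
    import torchvision

    boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0], [20.0, 20.0, 30.0, 30.0]])
    scores = torch.tensor([0.9, 0.8, 0.7])
    keep = torchvision.ops.nms(boxes, scores, iou_threshold=0.5)
    # keep == tensor([0, 2]); box 1 overlaps box 0 with IoU ~0.68 > 0.5 and is suppressed.

With the node's default nms_threshold of 0.8, only near-duplicate boxes are dropped.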