diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py index 6d27c013f1..1ab2cf9beb 100644 --- a/invokeai/app/services/config.py +++ b/invokeai/app/services/config.py @@ -361,7 +361,7 @@ setting environment variables INVOKEAI_. log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", category='Features') nsfw_checker : bool = Field(default=True, description="Enable/disable the NSFW checker", category='Features') patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", category='Features') - restore : bool = Field(default=True, description="Enable/disable face restoration code", category='Features') + restore : bool = Field(default=True, description="Enable/disable face restoration code (DEPRECATED)", category='DEPRECATED') always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance') free_gpu_mem : bool = Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance') diff --git a/invokeai/backend/restoration/__init__.py b/invokeai/backend/restoration/__init__.py deleted file mode 100644 index 165ef080b3..0000000000 --- a/invokeai/backend/restoration/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -Initialization file for the invokeai.backend.restoration package -""" -from .base import Restoration diff --git a/invokeai/backend/restoration/base.py b/invokeai/backend/restoration/base.py deleted file mode 100644 index 956f99cf16..0000000000 --- a/invokeai/backend/restoration/base.py +++ /dev/null @@ -1,45 +0,0 @@ -import invokeai.backend.util.logging as logger - -class Restoration: - def __init__(self) -> None: - pass - - def load_face_restore_models( - self, gfpgan_model_path="./models/core/face_restoration/gfpgan/GFPGANv1.4.pth" - ): - # Load GFPGAN - gfpgan = self.load_gfpgan(gfpgan_model_path) - if gfpgan.gfpgan_model_exists: - logger.info("GFPGAN Initialized") - else: - logger.info("GFPGAN Disabled") - gfpgan = None - - # Load CodeFormer - codeformer = self.load_codeformer() - if codeformer.codeformer_model_exists: - logger.info("CodeFormer Initialized") - else: - logger.info("CodeFormer Disabled") - codeformer = None - - return gfpgan, codeformer - - # Face Restore Models - def load_gfpgan(self, gfpgan_model_path): - from .gfpgan import GFPGAN - - return GFPGAN(gfpgan_model_path) - - def load_codeformer(self): - from .codeformer import CodeFormerRestoration - - return CodeFormerRestoration() - - # Upscale Models - def load_esrgan(self, esrgan_bg_tile=400): - from .realesrgan import ESRGAN - - esrgan = ESRGAN(esrgan_bg_tile) - logger.info("ESRGAN Initialized") - return esrgan diff --git a/invokeai/backend/restoration/codeformer.py b/invokeai/backend/restoration/codeformer.py deleted file mode 100644 index 92fcd06e0b..0000000000 --- a/invokeai/backend/restoration/codeformer.py +++ /dev/null @@ -1,120 +0,0 @@ -import os -import sys -import warnings - -import numpy as np -import torch - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig - -pretrained_model_url = ( - "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth" -) - - -class CodeFormerRestoration: - def __init__( - self, codeformer_dir="./models/core/face_restoration/codeformer", codeformer_model_path="codeformer.pth" - ) -> None: - - self.globals = InvokeAIAppConfig.get_config() - codeformer_dir = 
self.globals.root_dir / codeformer_dir - self.model_path = codeformer_dir / codeformer_model_path - self.codeformer_model_exists = self.model_path.exists() - - if not self.codeformer_model_exists: - logger.error(f"NOT FOUND: CodeFormer model not found at {self.model_path}") - sys.path.append(os.path.abspath(codeformer_dir)) - - def process(self, image, strength, device, seed=None, fidelity=0.75): - if seed is not None: - logger.info(f"CodeFormer - Restoring Faces for image seed:{seed}") - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - warnings.filterwarnings("ignore", category=UserWarning) - - from basicsr.utils import img2tensor, tensor2img - from basicsr.utils.download_util import load_file_from_url - from facexlib.utils.face_restoration_helper import FaceRestoreHelper - from PIL import Image - from torchvision.transforms.functional import normalize - - from .codeformer_arch import CodeFormer - - cf_class = CodeFormer - - cf = cf_class( - dim_embd=512, - codebook_size=1024, - n_head=8, - n_layers=9, - connect_list=["32", "64", "128", "256"], - ).to(device) - - # note that this file should already be downloaded and cached at - # this point - checkpoint_path = load_file_from_url( - url=pretrained_model_url, - model_dir=os.path.abspath(os.path.dirname(self.model_path)), - progress=True, - ) - checkpoint = torch.load(checkpoint_path)["params_ema"] - cf.load_state_dict(checkpoint) - cf.eval() - - image = image.convert("RGB") - # Codeformer expects a BGR np array; make array and flip channels - bgr_image_array = np.array(image, dtype=np.uint8)[..., ::-1] - - face_helper = FaceRestoreHelper( - upscale_factor=1, - use_parse=True, - device=device, - model_rootpath = self.globals.model_path / 'core/face_restoration/gfpgan/weights' - ) - face_helper.clean_all() - face_helper.read_image(bgr_image_array) - face_helper.get_face_landmarks_5(resize=640, eye_dist_threshold=5) - face_helper.align_warp_face() - - for idx, cropped_face in enumerate(face_helper.cropped_faces): - cropped_face_t = img2tensor( - cropped_face / 255.0, bgr2rgb=True, float32=True - ) - normalize( - cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True - ) - cropped_face_t = cropped_face_t.unsqueeze(0).to(device) - - try: - with torch.no_grad(): - output = cf(cropped_face_t, w=fidelity, adain=True)[0] - restored_face = tensor2img( - output.squeeze(0), rgb2bgr=True, min_max=(-1, 1) - ) - del output - torch.cuda.empty_cache() - except RuntimeError as error: - logger.error(f"Failed inference for CodeFormer: {error}.") - restored_face = cropped_face - - restored_face = restored_face.astype("uint8") - face_helper.add_restored_face(restored_face) - - face_helper.get_inverse_affine(None) - - restored_img = face_helper.paste_faces_to_input_image() - - # Flip the channels back to RGB - res = Image.fromarray(restored_img[..., ::-1]) - - if strength < 1.0: - # Resize the image to the new image if the sizes have changed - if restored_img.size != image.size: - image = image.resize(res.size) - res = Image.blend(image, res, strength) - - cf = None - - return res diff --git a/invokeai/backend/restoration/codeformer_arch.py b/invokeai/backend/restoration/codeformer_arch.py deleted file mode 100644 index 0f6b881020..0000000000 --- a/invokeai/backend/restoration/codeformer_arch.py +++ /dev/null @@ -1,325 +0,0 @@ -import math -from typing import List, Optional - -import numpy as np -import torch -import torch.nn.functional as F -from basicsr.utils import get_root_logger -from 
basicsr.utils.registry import ARCH_REGISTRY -from torch import Tensor, nn - -from .vqgan_arch import * - - -def calc_mean_std(feat, eps=1e-5): - """Calculate mean and std for adaptive_instance_normalization. - - Args: - feat (Tensor): 4D tensor. - eps (float): A small value added to the variance to avoid - divide-by-zero. Default: 1e-5. - """ - size = feat.size() - assert len(size) == 4, "The input feature should be 4D tensor." - b, c = size[:2] - feat_var = feat.view(b, c, -1).var(dim=2) + eps - feat_std = feat_var.sqrt().view(b, c, 1, 1) - feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1) - return feat_mean, feat_std - - -def adaptive_instance_normalization(content_feat, style_feat): - """Adaptive instance normalization. - - Adjust the reference features to have the similar color and illuminations - as those in the degradate features. - - Args: - content_feat (Tensor): The reference feature. - style_feat (Tensor): The degradate features. - """ - size = content_feat.size() - style_mean, style_std = calc_mean_std(style_feat) - content_mean, content_std = calc_mean_std(content_feat) - normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand( - size - ) - return normalized_feat * style_std.expand(size) + style_mean.expand(size) - - -class PositionEmbeddingSine(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. - """ - - def __init__( - self, num_pos_feats=64, temperature=10000, normalize=False, scale=None - ): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperature = temperature - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, x, mask=None): - if mask is None: - mask = torch.zeros( - (x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool - ) - not_mask = ~mask - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - if self.normalize: - eps = 1e-6 - y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) - - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack( - (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos_y = torch.stack( - (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos - - -def _get_activation_fn(activation): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - raise RuntimeError(f"activation should be relu/gelu, not {activation}.") - - -class TransformerSALayer(nn.Module): - def __init__( - self, embed_dim, nhead=8, dim_mlp=2048, dropout=0.0, activation="gelu" - ): - super().__init__() - self.self_attn = nn.MultiheadAttention(embed_dim, nhead, dropout=dropout) - # Implementation of Feedforward model - MLP - self.linear1 = nn.Linear(embed_dim, dim_mlp) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_mlp, embed_dim) - 
- self.norm1 = nn.LayerNorm(embed_dim) - self.norm2 = nn.LayerNorm(embed_dim) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward( - self, - tgt, - tgt_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None, - ): - # self attention - tgt2 = self.norm1(tgt) - q = k = self.with_pos_embed(tgt2, query_pos) - tgt2 = self.self_attn( - q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask - )[0] - tgt = tgt + self.dropout1(tgt2) - - # ffn - tgt2 = self.norm2(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) - tgt = tgt + self.dropout2(tgt2) - return tgt - - -class Fuse_sft_block(nn.Module): - def __init__(self, in_ch, out_ch): - super().__init__() - self.encode_enc = ResBlock(2 * in_ch, out_ch) - - self.scale = nn.Sequential( - nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), - nn.LeakyReLU(0.2, True), - nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1), - ) - - self.shift = nn.Sequential( - nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), - nn.LeakyReLU(0.2, True), - nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1), - ) - - def forward(self, enc_feat, dec_feat, w=1): - enc_feat = self.encode_enc(torch.cat([enc_feat, dec_feat], dim=1)) - scale = self.scale(enc_feat) - shift = self.shift(enc_feat) - residual = w * (dec_feat * scale + shift) - out = dec_feat + residual - return out - - -@ARCH_REGISTRY.register() -class CodeFormer(VQAutoEncoder): - def __init__( - self, - dim_embd=512, - n_head=8, - n_layers=9, - codebook_size=1024, - latent_size=256, - connect_list=["32", "64", "128", "256"], - fix_modules=["quantize", "generator"], - ): - super(CodeFormer, self).__init__( - 512, 64, [1, 2, 2, 4, 4, 8], "nearest", 2, [16], codebook_size - ) - - if fix_modules is not None: - for module in fix_modules: - for param in getattr(self, module).parameters(): - param.requires_grad = False - - self.connect_list = connect_list - self.n_layers = n_layers - self.dim_embd = dim_embd - self.dim_mlp = dim_embd * 2 - - self.position_emb = nn.Parameter(torch.zeros(latent_size, self.dim_embd)) - self.feat_emb = nn.Linear(256, self.dim_embd) - - # transformer - self.ft_layers = nn.Sequential( - *[ - TransformerSALayer( - embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0 - ) - for _ in range(self.n_layers) - ] - ) - - # logits_predict head - self.idx_pred_layer = nn.Sequential( - nn.LayerNorm(dim_embd), nn.Linear(dim_embd, codebook_size, bias=False) - ) - - self.channels = { - "16": 512, - "32": 256, - "64": 256, - "128": 128, - "256": 128, - "512": 64, - } - - # after second residual block for > 16, before attn layer for ==16 - self.fuse_encoder_block = { - "512": 2, - "256": 5, - "128": 8, - "64": 11, - "32": 14, - "16": 18, - } - # after first residual block for > 16, before attn layer for ==16 - self.fuse_generator_block = { - "16": 6, - "32": 9, - "64": 12, - "128": 15, - "256": 18, - "512": 21, - } - - # fuse_convs_dict - self.fuse_convs_dict = nn.ModuleDict() - for f_size in self.connect_list: - in_ch = self.channels[f_size] - self.fuse_convs_dict[f_size] = Fuse_sft_block(in_ch, in_ch) - - def _init_weights(self, module): - if isinstance(module, (nn.Linear, nn.Embedding)): - module.weight.data.normal_(mean=0.0, std=0.02) - if isinstance(module, nn.Linear) and module.bias is 
not None: - module.bias.data.zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def forward(self, x, w=0, detach_16=True, code_only=False, adain=False): - # ################### Encoder ##################### - enc_feat_dict = {} - out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list] - for i, block in enumerate(self.encoder.blocks): - x = block(x) - if i in out_list: - enc_feat_dict[str(x.shape[-1])] = x.clone() - - lq_feat = x - # ################# Transformer ################### - # quant_feat, codebook_loss, quant_stats = self.quantize(lq_feat) - pos_emb = self.position_emb.unsqueeze(1).repeat(1, x.shape[0], 1) - # BCHW -> BC(HW) -> (HW)BC - feat_emb = self.feat_emb(lq_feat.flatten(2).permute(2, 0, 1)) - query_emb = feat_emb - # Transformer encoder - for layer in self.ft_layers: - query_emb = layer(query_emb, query_pos=pos_emb) - - # output logits - logits = self.idx_pred_layer(query_emb) # (hw)bn - logits = logits.permute(1, 0, 2) # (hw)bn -> b(hw)n - - if code_only: # for training stage II - # logits doesn't need softmax before cross_entropy loss - return logits, lq_feat - - # ################# Quantization ################### - # if self.training: - # quant_feat = torch.einsum('btn,nc->btc', [soft_one_hot, self.quantize.embedding.weight]) - # # b(hw)c -> bc(hw) -> bchw - # quant_feat = quant_feat.permute(0,2,1).view(lq_feat.shape) - # ------------ - soft_one_hot = F.softmax(logits, dim=2) - _, top_idx = torch.topk(soft_one_hot, 1, dim=2) - quant_feat = self.quantize.get_codebook_feat( - top_idx, shape=[x.shape[0], 16, 16, 256] - ) - # preserve gradients - # quant_feat = lq_feat + (quant_feat - lq_feat).detach() - - if detach_16: - quant_feat = quant_feat.detach() # for training stage III - if adain: - quant_feat = adaptive_instance_normalization(quant_feat, lq_feat) - - # ################## Generator #################### - x = quant_feat - fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list] - - for i, block in enumerate(self.generator.blocks): - x = block(x) - if i in fuse_list: # fuse after i-th block - f_size = str(x.shape[-1]) - if w > 0: - x = self.fuse_convs_dict[f_size]( - enc_feat_dict[f_size].detach(), x, w - ) - out = x - # logits doesn't need softmax before cross_entropy loss - return out, logits, lq_feat diff --git a/invokeai/backend/restoration/gfpgan.py b/invokeai/backend/restoration/gfpgan.py deleted file mode 100644 index 927d026c0c..0000000000 --- a/invokeai/backend/restoration/gfpgan.py +++ /dev/null @@ -1,84 +0,0 @@ -import os -import sys -import warnings - -import numpy as np -import torch -from PIL import Image - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig - -class GFPGAN: - def __init__(self, gfpgan_model_path="models/gfpgan/GFPGANv1.4.pth") -> None: - self.globals = InvokeAIAppConfig.get_config() - if not os.path.isabs(gfpgan_model_path): - gfpgan_model_path = self.globals.root_dir / gfpgan_model_path - self.model_path = gfpgan_model_path - self.gfpgan_model_exists = os.path.isfile(self.model_path) - - if not self.gfpgan_model_exists: - logger.error(f"NOT FOUND: GFPGAN model not found at {self.model_path}") - return None - - def model_exists(self): - return os.path.isfile(self.model_path) - - def process(self, image, strength: float, seed: str = None): - if seed is not None: - logger.info(f"GFPGAN - Restoring Faces for image seed:{seed}") - - with warnings.catch_warnings(): - 
warnings.filterwarnings("ignore", category=DeprecationWarning) - warnings.filterwarnings("ignore", category=UserWarning) - cwd = os.getcwd() - os.chdir(self.globals.root_dir / 'models') - try: - from gfpgan import GFPGANer - - self.gfpgan = GFPGANer( - model_path=self.model_path, - upscale=1, - arch="clean", - channel_multiplier=2, - bg_upsampler=None, - ) - except Exception: - import traceback - - logger.error("Error loading GFPGAN:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - os.chdir(cwd) - - if self.gfpgan is None: - logger.warning("WARNING: GFPGAN not initialized.") - logger.warning( - f"Download https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth to {self.model_path}" - ) - - image = image.convert("RGB") - - # GFPGAN expects a BGR np array; make array and flip channels - bgr_image_array = np.array(image, dtype=np.uint8)[..., ::-1] - - _, _, restored_img = self.gfpgan.enhance( - bgr_image_array, - has_aligned=False, - only_center_face=False, - paste_back=True, - ) - - # Flip the channels back to RGB - res = Image.fromarray(restored_img[..., ::-1]) - - if strength < 1.0: - # Resize the image to the new image if the sizes have changed - if restored_img.size != image.size: - image = image.resize(res.size) - res = Image.blend(image, res, strength) - - if torch.cuda.is_available(): - torch.cuda.empty_cache() - self.gfpgan = None - - return res diff --git a/invokeai/backend/restoration/outcrop.py b/invokeai/backend/restoration/outcrop.py deleted file mode 100644 index 07f76d6bf9..0000000000 --- a/invokeai/backend/restoration/outcrop.py +++ /dev/null @@ -1,118 +0,0 @@ -import math - -from PIL import Image -import invokeai.backend.util.logging as logger - -class Outcrop(object): - def __init__( - self, - image, - generate, # current generate object - ): - self.image = image - self.generate = generate - - def process( - self, - extents: dict, - opt, # current options - orig_opt, # ones originally used to generate the image - image_callback=None, - prefix=None, - ): - # grow and mask the image - extended_image = self._extend_all(extents) - - # switch samplers temporarily - curr_sampler = self.generate.sampler - self.generate.sampler_name = opt.sampler_name - self.generate._set_scheduler() - - def wrapped_callback(img, seed, **kwargs): - preferred_seed = ( - orig_opt.seed - if orig_opt.seed is not None and orig_opt.seed >= 0 - else seed - ) - image_callback(img, preferred_seed, use_prefix=prefix, **kwargs) - - result = self.generate.prompt2image( - opt.prompt, - seed=opt.seed or orig_opt.seed, - sampler=self.generate.sampler, - steps=opt.steps, - cfg_scale=opt.cfg_scale, - ddim_eta=self.generate.ddim_eta, - width=extended_image.width, - height=extended_image.height, - init_img=extended_image, - strength=0.90, - image_callback=wrapped_callback if image_callback else None, - seam_size=opt.seam_size or 96, - seam_blur=opt.seam_blur or 16, - seam_strength=opt.seam_strength or 0.7, - seam_steps=20, - tile_size=32, - color_match=True, - force_outpaint=True, # this just stops the warning about erased regions - ) - - # swap sampler back - self.generate.sampler = curr_sampler - return result - - def _extend_all( - self, - extents: dict, - ) -> Image: - """ - Extend the image in direction ('top','bottom','left','right') by - the indicated value. The image canvas is extended, and the empty - rectangular section will be filled with a blurred copy of the - adjacent image. 
- """ - image = self.image - for direction in extents: - assert direction in [ - "top", - "left", - "bottom", - "right", - ], 'Direction must be one of "top", "left", "bottom", "right"' - pixels = extents[direction] - # round pixels up to the nearest 64 - pixels = math.ceil(pixels / 64) * 64 - logger.info(f"extending image {direction}ward by {pixels} pixels") - image = self._rotate(image, direction) - image = self._extend(image, pixels) - image = self._rotate(image, direction, reverse=True) - return image - - def _rotate(self, image: Image, direction: str, reverse=False) -> Image: - """ - Rotates image so that the area to extend is always at the top top. - Simplifies logic later. The reverse argument, if true, will undo the - previous transpose. - """ - transposes = { - "right": ["ROTATE_90", "ROTATE_270"], - "bottom": ["ROTATE_180", "ROTATE_180"], - "left": ["ROTATE_270", "ROTATE_90"], - } - if direction not in transposes: - return image - transpose = transposes[direction][1 if reverse else 0] - return image.transpose(Image.Transpose.__dict__[transpose]) - - def _extend(self, image: Image, pixels: int) -> Image: - extended_img = Image.new("RGBA", (image.width, image.height + pixels)) - - extended_img.paste((0, 0, 0), [0, 0, image.width, image.height + pixels]) - extended_img.paste(image, box=(0, pixels)) - - # now make the top part transparent to use as a mask - alpha = extended_img.getchannel("A") - alpha.paste(0, (0, 0, extended_img.width, pixels)) - extended_img.putalpha(alpha) - - return extended_img diff --git a/invokeai/backend/restoration/outpaint.py b/invokeai/backend/restoration/outpaint.py deleted file mode 100644 index 184db10fa3..0000000000 --- a/invokeai/backend/restoration/outpaint.py +++ /dev/null @@ -1,102 +0,0 @@ -import math -import warnings - -from PIL import Image, ImageFilter - - -class Outpaint(object): - def __init__(self, image, generate): - self.image = image - self.generate = generate - - def process(self, opt, old_opt, image_callback=None, prefix=None): - image = self._create_outpaint_image(self.image, opt.out_direction) - - seed = old_opt.seed - prompt = old_opt.prompt - - def wrapped_callback(img, seed, **kwargs): - image_callback(img, seed, use_prefix=prefix, **kwargs) - - return self.generate.prompt2image( - prompt, - seed=seed, - sampler=self.generate.sampler, - steps=opt.steps, - cfg_scale=opt.cfg_scale, - ddim_eta=self.generate.ddim_eta, - width=opt.width, - height=opt.height, - init_img=image, - strength=0.83, - image_callback=wrapped_callback, - prefix=prefix, - ) - - def _create_outpaint_image(self, image, direction_args): - assert len(direction_args) in [ - 1, - 2, - ], "Direction (-D) must have exactly one or two arguments." 
- - if len(direction_args) == 1: - direction = direction_args[0] - pixels = None - elif len(direction_args) == 2: - direction = direction_args[0] - pixels = int(direction_args[1]) - - assert direction in [ - "top", - "left", - "bottom", - "right", - ], 'Direction (-D) must be one of "top", "left", "bottom", "right"' - - image = image.convert("RGBA") - # we always extend top, but rotate to extend along the requested side - if direction == "left": - image = image.transpose(Image.Transpose.ROTATE_270) - elif direction == "bottom": - image = image.transpose(Image.Transpose.ROTATE_180) - elif direction == "right": - image = image.transpose(Image.Transpose.ROTATE_90) - - pixels = image.height // 2 if pixels is None else int(pixels) - assert ( - 0 < pixels < image.height - ), "Direction (-D) pixels length must be in the range 0 - image.size" - - # the top part of the image is taken from the source image mirrored - # coordinates (0,0) are the upper left corner of an image - top = image.transpose(Image.Transpose.FLIP_TOP_BOTTOM).convert("RGBA") - top = top.crop((0, top.height - pixels, top.width, top.height)) - - # setting all alpha of the top part to 0 - alpha = top.getchannel("A") - alpha.paste(0, (0, 0, top.width, top.height)) - top.putalpha(alpha) - - # taking the bottom from the original image - bottom = image.crop((0, 0, image.width, image.height - pixels)) - - new_img = image.copy() - new_img.paste(top, (0, 0)) - new_img.paste(bottom, (0, pixels)) - - # create a 10% dither in the middle - dither = min(image.height // 10, pixels) - for x in range(0, image.width, 2): - for y in range(pixels - dither, pixels + dither): - (r, g, b, a) = new_img.getpixel((x, y)) - new_img.putpixel((x, y), (r, g, b, 0)) - - # let's rotate back again - if direction == "left": - new_img = new_img.transpose(Image.Transpose.ROTATE_90) - elif direction == "bottom": - new_img = new_img.transpose(Image.Transpose.ROTATE_180) - elif direction == "right": - new_img = new_img.transpose(Image.Transpose.ROTATE_270) - - return new_img diff --git a/invokeai/backend/restoration/realesrgan.py b/invokeai/backend/restoration/realesrgan.py deleted file mode 100644 index 1f29ceadba..0000000000 --- a/invokeai/backend/restoration/realesrgan.py +++ /dev/null @@ -1,104 +0,0 @@ -import warnings - -import numpy as np -import torch -from PIL import Image -from PIL.Image import Image as ImageType - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig -config = InvokeAIAppConfig.get_config() - -class ESRGAN: - def __init__(self, bg_tile_size=400) -> None: - self.bg_tile_size = bg_tile_size - - def load_esrgan_bg_upsampler(self, denoise_str): - if not torch.cuda.is_available(): # CPU or MPS on M1 - use_half_precision = False - else: - use_half_precision = True - - from realesrgan import RealESRGANer - from realesrgan.archs.srvgg_arch import SRVGGNetCompact - - model = SRVGGNetCompact( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_conv=32, - upscale=4, - act_type="prelu", - ) - model_path = config.models_path / "core/upscaling/realesrgan/realesr-general-x4v3.pth" - wdn_model_path = config.models_path / "core/upscaling/realesrgan/realesr-general-wdn-x4v3.pth" - scale = 4 - - bg_upsampler = RealESRGANer( - scale=scale, - model_path=[model_path, wdn_model_path], - model=model, - tile=self.bg_tile_size, - dni_weight=[denoise_str, 1 - denoise_str], - tile_pad=10, - pre_pad=0, - half=use_half_precision, - ) - - return bg_upsampler - - def process( - self, - image: ImageType, - strength: 
float, - seed: str = None, - upsampler_scale: int = 2, - denoise_str: float = 0.75, - ): - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - warnings.filterwarnings("ignore", category=UserWarning) - - try: - upsampler = self.load_esrgan_bg_upsampler(denoise_str) - except Exception: - import sys - import traceback - - logger.error("Error loading Real-ESRGAN:") - print(traceback.format_exc(), file=sys.stderr) - - if upsampler_scale == 0: - logger.warning("Real-ESRGAN: Invalid scaling option. Image not upscaled.") - return image - - if seed is not None: - logger.info( - f"Real-ESRGAN Upscaling seed:{seed}, scale:{upsampler_scale}x, tile:{self.bg_tile_size}, denoise:{denoise_str}" - ) - # ESRGAN outputs images with partial transparency if given RGBA images; convert to RGB - image = image.convert("RGB") - - # REALSRGAN expects a BGR np array; make array and flip channels - bgr_image_array = np.array(image, dtype=np.uint8)[..., ::-1] - - output, _ = upsampler.enhance( - bgr_image_array, - outscale=upsampler_scale, - alpha_upsampler="realesrgan", - ) - - # Flip the channels back to RGB - res = Image.fromarray(output[..., ::-1]) - - if strength < 1.0: - # Resize the image to the new image if the sizes have changed - if output.size != image.size: - image = image.resize(res.size) - res = Image.blend(image, res, strength) - - if torch.cuda.is_available(): - torch.cuda.empty_cache() - upsampler = None - - return res diff --git a/invokeai/backend/restoration/vqgan_arch.py b/invokeai/backend/restoration/vqgan_arch.py deleted file mode 100644 index 96d5f04eee..0000000000 --- a/invokeai/backend/restoration/vqgan_arch.py +++ /dev/null @@ -1,514 +0,0 @@ -""" -VQGAN code, adapted from the original created by the Unleashing Transformers authors: -https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py - -""" -import copy - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from basicsr.utils import get_root_logger -from basicsr.utils.registry import ARCH_REGISTRY - - -def normalize(in_channels): - return torch.nn.GroupNorm( - num_groups=32, num_channels=in_channels, eps=1e-6, affine=True - ) - - -@torch.jit.script -def swish(x): - return x * torch.sigmoid(x) - - -# Define VQVAE classes -class VectorQuantizer(nn.Module): - def __init__(self, codebook_size, emb_dim, beta): - super(VectorQuantizer, self).__init__() - self.codebook_size = codebook_size # number of embeddings - self.emb_dim = emb_dim # dimension of embedding - self.beta = beta # commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2 - self.embedding = nn.Embedding(self.codebook_size, self.emb_dim) - self.embedding.weight.data.uniform_( - -1.0 / self.codebook_size, 1.0 / self.codebook_size - ) - - def forward(self, z): - # reshape z -> (batch, height, width, channel) and flatten - z = z.permute(0, 2, 3, 1).contiguous() - z_flattened = z.view(-1, self.emb_dim) - - # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z - d = ( - (z_flattened**2).sum(dim=1, keepdim=True) - + (self.embedding.weight**2).sum(1) - - 2 * torch.matmul(z_flattened, self.embedding.weight.t()) - ) - - mean_distance = torch.mean(d) - # find closest encodings - # min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1) - min_encoding_scores, min_encoding_indices = torch.topk( - d, 1, dim=1, largest=False - ) - # [0-1], higher score, higher confidence - min_encoding_scores = torch.exp(-min_encoding_scores / 10) - - min_encodings = torch.zeros( - 
min_encoding_indices.shape[0], self.codebook_size - ).to(z) - min_encodings.scatter_(1, min_encoding_indices, 1) - - # get quantized latent vectors - z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape) - # compute loss for embedding - loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean( - (z_q - z.detach()) ** 2 - ) - # preserve gradients - z_q = z + (z_q - z).detach() - - # perplexity - e_mean = torch.mean(min_encodings, dim=0) - perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10))) - # reshape back to match original input shape - z_q = z_q.permute(0, 3, 1, 2).contiguous() - - return ( - z_q, - loss, - { - "perplexity": perplexity, - "min_encodings": min_encodings, - "min_encoding_indices": min_encoding_indices, - "min_encoding_scores": min_encoding_scores, - "mean_distance": mean_distance, - }, - ) - - def get_codebook_feat(self, indices, shape): - # input indices: batch*token_num -> (batch*token_num)*1 - # shape: batch, height, width, channel - indices = indices.view(-1, 1) - min_encodings = torch.zeros(indices.shape[0], self.codebook_size).to(indices) - min_encodings.scatter_(1, indices, 1) - # get quantized latent vectors - z_q = torch.matmul(min_encodings.float(), self.embedding.weight) - - if shape is not None: # reshape back to match original input shape - z_q = z_q.view(shape).permute(0, 3, 1, 2).contiguous() - - return z_q - - -class GumbelQuantizer(nn.Module): - def __init__( - self, - codebook_size, - emb_dim, - num_hiddens, - straight_through=False, - kl_weight=5e-4, - temp_init=1.0, - ): - super().__init__() - self.codebook_size = codebook_size # number of embeddings - self.emb_dim = emb_dim # dimension of embedding - self.straight_through = straight_through - self.temperature = temp_init - self.kl_weight = kl_weight - self.proj = nn.Conv2d( - num_hiddens, codebook_size, 1 - ) # projects last encoder layer to quantized logits - self.embed = nn.Embedding(codebook_size, emb_dim) - - def forward(self, z): - hard = self.straight_through if self.training else True - - logits = self.proj(z) - - soft_one_hot = F.gumbel_softmax(logits, tau=self.temperature, dim=1, hard=hard) - - z_q = torch.einsum("b n h w, n d -> b d h w", soft_one_hot, self.embed.weight) - - # + kl divergence to the prior loss - qy = F.softmax(logits, dim=1) - diff = ( - self.kl_weight - * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean() - ) - min_encoding_indices = soft_one_hot.argmax(dim=1) - - return z_q, diff, {"min_encoding_indices": min_encoding_indices} - - -class Downsample(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.conv = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=3, stride=2, padding=0 - ) - - def forward(self, x): - pad = (0, 1, 0, 1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - return x - - -class Upsample(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.conv = nn.Conv2d( - in_channels, in_channels, kernel_size=3, stride=1, padding=1 - ) - - def forward(self, x): - x = F.interpolate(x, scale_factor=2.0, mode="nearest") - x = self.conv(x) - - return x - - -class ResBlock(nn.Module): - def __init__(self, in_channels, out_channels=None): - super(ResBlock, self).__init__() - self.in_channels = in_channels - self.out_channels = in_channels if out_channels is None else out_channels - self.norm1 = normalize(in_channels) - self.conv1 = nn.Conv2d( - in_channels, out_channels, kernel_size=3, stride=1, padding=1 - ) - self.norm2 
= normalize(out_channels) - self.conv2 = nn.Conv2d( - out_channels, out_channels, kernel_size=3, stride=1, padding=1 - ) - if self.in_channels != self.out_channels: - self.conv_out = nn.Conv2d( - in_channels, out_channels, kernel_size=1, stride=1, padding=0 - ) - - def forward(self, x_in): - x = x_in - x = self.norm1(x) - x = swish(x) - x = self.conv1(x) - x = self.norm2(x) - x = swish(x) - x = self.conv2(x) - if self.in_channels != self.out_channels: - x_in = self.conv_out(x_in) - - return x + x_in - - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = normalize(in_channels) - self.q = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.k = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.v = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.proj_out = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b, c, h, w = q.shape - q = q.reshape(b, c, h * w) - q = q.permute(0, 2, 1) - k = k.reshape(b, c, h * w) - w_ = torch.bmm(q, k) - w_ = w_ * (int(c) ** (-0.5)) - w_ = F.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b, c, h * w) - w_ = w_.permute(0, 2, 1) - h_ = torch.bmm(v, w_) - h_ = h_.reshape(b, c, h, w) - - h_ = self.proj_out(h_) - - return x + h_ - - -class Encoder(nn.Module): - def __init__( - self, - in_channels, - nf, - emb_dim, - ch_mult, - num_res_blocks, - resolution, - attn_resolutions, - ): - super().__init__() - self.nf = nf - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.attn_resolutions = attn_resolutions - - curr_res = self.resolution - in_ch_mult = (1,) + tuple(ch_mult) - - blocks = [] - # initial convultion - blocks.append(nn.Conv2d(in_channels, nf, kernel_size=3, stride=1, padding=1)) - - # residual and downsampling blocks, with attention on smaller res (16x16) - for i in range(self.num_resolutions): - block_in_ch = nf * in_ch_mult[i] - block_out_ch = nf * ch_mult[i] - for _ in range(self.num_res_blocks): - blocks.append(ResBlock(block_in_ch, block_out_ch)) - block_in_ch = block_out_ch - if curr_res in attn_resolutions: - blocks.append(AttnBlock(block_in_ch)) - - if i != self.num_resolutions - 1: - blocks.append(Downsample(block_in_ch)) - curr_res = curr_res // 2 - - # non-local attention block - blocks.append(ResBlock(block_in_ch, block_in_ch)) - blocks.append(AttnBlock(block_in_ch)) - blocks.append(ResBlock(block_in_ch, block_in_ch)) - - # normalise and convert to latent size - blocks.append(normalize(block_in_ch)) - blocks.append( - nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1) - ) - self.blocks = nn.ModuleList(blocks) - - def forward(self, x): - for block in self.blocks: - x = block(x) - - return x - - -class Generator(nn.Module): - def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions): - super().__init__() - self.nf = nf - self.ch_mult = ch_mult - self.num_resolutions = len(self.ch_mult) - self.num_res_blocks = res_blocks - self.resolution = img_size - self.attn_resolutions = attn_resolutions - self.in_channels = emb_dim - self.out_channels = 3 - block_in_ch = self.nf * self.ch_mult[-1] - curr_res = self.resolution // 2 ** (self.num_resolutions - 1) - - blocks = [] - # initial 
conv - blocks.append( - nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1) - ) - - # non-local attention block - blocks.append(ResBlock(block_in_ch, block_in_ch)) - blocks.append(AttnBlock(block_in_ch)) - blocks.append(ResBlock(block_in_ch, block_in_ch)) - - for i in reversed(range(self.num_resolutions)): - block_out_ch = self.nf * self.ch_mult[i] - - for _ in range(self.num_res_blocks): - blocks.append(ResBlock(block_in_ch, block_out_ch)) - block_in_ch = block_out_ch - - if curr_res in self.attn_resolutions: - blocks.append(AttnBlock(block_in_ch)) - - if i != 0: - blocks.append(Upsample(block_in_ch)) - curr_res = curr_res * 2 - - blocks.append(normalize(block_in_ch)) - blocks.append( - nn.Conv2d( - block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1 - ) - ) - - self.blocks = nn.ModuleList(blocks) - - def forward(self, x): - for block in self.blocks: - x = block(x) - - return x - - -@ARCH_REGISTRY.register() -class VQAutoEncoder(nn.Module): - def __init__( - self, - img_size, - nf, - ch_mult, - quantizer="nearest", - res_blocks=2, - attn_resolutions=[16], - codebook_size=1024, - emb_dim=256, - beta=0.25, - gumbel_straight_through=False, - gumbel_kl_weight=1e-8, - model_path=None, - ): - super().__init__() - logger = get_root_logger() - self.in_channels = 3 - self.nf = nf - self.n_blocks = res_blocks - self.codebook_size = codebook_size - self.embed_dim = emb_dim - self.ch_mult = ch_mult - self.resolution = img_size - self.attn_resolutions = attn_resolutions - self.quantizer_type = quantizer - self.encoder = Encoder( - self.in_channels, - self.nf, - self.embed_dim, - self.ch_mult, - self.n_blocks, - self.resolution, - self.attn_resolutions, - ) - if self.quantizer_type == "nearest": - self.beta = beta # 0.25 - self.quantize = VectorQuantizer( - self.codebook_size, self.embed_dim, self.beta - ) - elif self.quantizer_type == "gumbel": - self.gumbel_num_hiddens = emb_dim - self.straight_through = gumbel_straight_through - self.kl_weight = gumbel_kl_weight - self.quantize = GumbelQuantizer( - self.codebook_size, - self.embed_dim, - self.gumbel_num_hiddens, - self.straight_through, - self.kl_weight, - ) - self.generator = Generator( - self.nf, - self.embed_dim, - self.ch_mult, - self.n_blocks, - self.resolution, - self.attn_resolutions, - ) - - if model_path is not None: - chkpt = torch.load(model_path, map_location="cpu") - if "params_ema" in chkpt: - self.load_state_dict( - torch.load(model_path, map_location="cpu")["params_ema"] - ) - logger.info(f"vqgan is loaded from: {model_path} [params_ema]") - elif "params" in chkpt: - self.load_state_dict( - torch.load(model_path, map_location="cpu")["params"] - ) - logger.info(f"vqgan is loaded from: {model_path} [params]") - else: - raise ValueError(f"Wrong params!") - - def forward(self, x): - x = self.encoder(x) - quant, codebook_loss, quant_stats = self.quantize(x) - x = self.generator(quant) - return x, codebook_loss, quant_stats - - -# patch based discriminator -@ARCH_REGISTRY.register() -class VQGANDiscriminator(nn.Module): - def __init__(self, nc=3, ndf=64, n_layers=4, model_path=None): - super().__init__() - - layers = [ - nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), - nn.LeakyReLU(0.2, True), - ] - ndf_mult = 1 - ndf_mult_prev = 1 - for n in range(1, n_layers): # gradually increase the number of filters - ndf_mult_prev = ndf_mult - ndf_mult = min(2**n, 8) - layers += [ - nn.Conv2d( - ndf * ndf_mult_prev, - ndf * ndf_mult, - kernel_size=4, - stride=2, - padding=1, - bias=False, - ), - 
nn.BatchNorm2d(ndf * ndf_mult), - nn.LeakyReLU(0.2, True), - ] - - ndf_mult_prev = ndf_mult - ndf_mult = min(2**n_layers, 8) - - layers += [ - nn.Conv2d( - ndf * ndf_mult_prev, - ndf * ndf_mult, - kernel_size=4, - stride=1, - padding=1, - bias=False, - ), - nn.BatchNorm2d(ndf * ndf_mult), - nn.LeakyReLU(0.2, True), - ] - - layers += [ - nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1) - ] # output 1 channel prediction map - self.main = nn.Sequential(*layers) - - if model_path is not None: - chkpt = torch.load(model_path, map_location="cpu") - if "params_d" in chkpt: - self.load_state_dict( - torch.load(model_path, map_location="cpu")["params_d"] - ) - elif "params" in chkpt: - self.load_state_dict( - torch.load(model_path, map_location="cpu")["params"] - ) - else: - raise ValueError(f"Wrong params!") - - def forward(self, x): - return self.main(x)
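
With the bundled wrappers deleted, callers that still need face restoration have to use the upstream packages directly. Below is a minimal sketch of that, assuming the `gfpgan` PyPI package is installed and the GFPGANv1.4 weights have already been downloaded to the path shown; the path and the `restore_faces` helper are illustrative, not part of InvokeAI's API. It mirrors the logic of the removed `GFPGAN.process()`: convert to a BGR array, call `GFPGANer.enhance()`, convert back to RGB, then alpha-blend with the original image using `strength`.

```python
# Minimal sketch: face restoration via the upstream `gfpgan` package,
# following the behavior of the deleted invokeai.backend.restoration.gfpgan wrapper.
# Assumptions: `gfpgan` (and its torch/facexlib dependencies) are installed, and the
# GFPGANv1.4.pth weights exist at MODEL_PATH (an illustrative, user-managed location).
import numpy as np
from gfpgan import GFPGANer
from PIL import Image

MODEL_PATH = "models/core/face_restoration/gfpgan/GFPGANv1.4.pth"  # illustrative path


def restore_faces(image: Image.Image, strength: float = 0.75) -> Image.Image:
    restorer = GFPGANer(
        model_path=MODEL_PATH,
        upscale=1,              # keep the original resolution; only clean up faces
        arch="clean",
        channel_multiplier=2,
        bg_upsampler=None,      # no background upscaling, same as the old wrapper
    )

    # GFPGAN expects a BGR numpy array, so flip the channel order.
    image = image.convert("RGB")
    bgr = np.array(image, dtype=np.uint8)[..., ::-1]

    _, _, restored_bgr = restorer.enhance(
        bgr, has_aligned=False, only_center_face=False, paste_back=True
    )

    # Flip channels back to RGB.
    restored = Image.fromarray(restored_bgr[..., ::-1])

    # Blend with the original so `strength` behaves like the old wrapper's knob:
    # 0.0 returns the input unchanged, 1.0 returns the fully restored image.
    if strength < 1.0:
        return Image.blend(image, restored, strength)
    return restored


if __name__ == "__main__":
    result = restore_faces(Image.open("portrait.png"), strength=0.8)
    result.save("portrait_restored.png")
```

The deprecated `restore` field is presumably kept (with its category moved to 'DEPRECATED') so that existing `invokeai.yaml` files and `INVOKEAI_RESTORE` environment settings continue to parse; with the backend package removed, the flag no longer drives any code path.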