mirror of https://github.com/invoke-ai/InvokeAI

commit 494c2a9b05
parent 5f37176938

    Updates based on code review by @RyanJDick
@@ -66,7 +66,7 @@ class CalculateImageTilesInvocation(BaseInvocation):
 @invocation(
-    "calculate_image_tiles_Even_Split",
+    "calculate_image_tiles_even_split",
     title="Calculate Image Tiles Even Split",
     tags=["tiles"],
     category="tiles",
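The snake_case rename matters because the invocation type string is a lookup key, so the casing change is a breaking rename for anything that stored the old name. A minimal sketch of a name-keyed decorator registry (hypothetical names throughout; not InvokeAI's actual @invocation implementation):

# Hypothetical registry sketch: the type string is the key under which
# the class is registered, so it must be stable and consistently cased.
INVOCATION_REGISTRY: dict[str, type] = {}

def invocation_sketch(type_name: str, **_meta):
    def wrapper(cls: type) -> type:
        assert type_name == type_name.lower(), "invocation types are snake_case by convention"
        INVOCATION_REGISTRY[type_name] = cls
        return cls
    return wrapper

@invocation_sketch("calculate_image_tiles_even_split")
class CalculateImageTilesEvenSplitSketch:
    pass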
@@ -93,7 +93,7 @@ class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
         default=0.25,
         ge=0,
         lt=1,
-        description="Overlap amount of tile size (0-1)",
+        description="Overlap between adjacent tiles as a fraction of the tile's dimensions (0-1)",
     )

     def invoke(self, context: InvocationContext) -> CalculateImageTilesOutput:
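The reworded description pins down what the field means: a fraction of the tile dimension, not a pixel count, which is why the constraints are ge=0 and lt=1. A one-liner illustrating the conversion (tile size is an illustrative value):

tile_size_px = 512       # illustrative tile dimension
overlap_fraction = 0.25  # the field's default
overlap_px = int(tile_size_px * overlap_fraction)
assert overlap_px == 128  # adjacent 512 px tiles would share 128 px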
@@ -126,7 +126,8 @@ class CalculateImageTilesMinimumOverlapInvocation(BaseInvocation):
     min_overlap: int = InputField(
         default=128,
         ge=0,
-        description="minimum tile overlap size (must be a multiple of 8)",
+        multiple_of=8,
+        description="Minimum overlap between adjacent tiles, in pixels (must be a multiple of 8).",
     )
     round_to_8: bool = InputField(
         default=False,
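Adding multiple_of=8 turns the parenthetical in the old description into a validation-time guarantee. A stand-alone sketch of the constraint with plain pydantic (InputField is InvokeAI's wrapper around pydantic fields; this model is illustrative):

from pydantic import BaseModel, Field, ValidationError

class MinOverlapSketch(BaseModel):
    min_overlap: int = Field(default=128, ge=0, multiple_of=8)

MinOverlapSketch(min_overlap=64)       # ok: 64 is a multiple of 8
try:
    MinOverlapSketch(min_overlap=65)   # now rejected at validation time
except ValidationError as e:
    print(e)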
@@ -260,10 +261,12 @@ class MergeTilesToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
             merge_tiles_with_linear_blending(
                 dst_image=np_image, tiles=tiles, tile_images=tile_np_images, blend_amount=self.blend_amount
             )
-        else:
+        elif self.blend_mode == "Seam":
             merge_tiles_with_seam_blending(
                 dst_image=np_image, tiles=tiles, tile_images=tile_np_images, blend_amount=self.blend_amount
             )
+        else:
+            raise ValueError(f"Unsupported blend mode: '{self.blend_mode}'.")

         # Convert into a PIL image and save
         pil_image = Image.fromarray(np_image)
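The elif/else change makes the dispatch exhaustive: an unrecognized mode now fails loudly instead of silently falling through to seam blending. A stand-alone sketch of the pattern (the string returns stand in for the real merge_tiles_with_* calls):

def merge_sketch(blend_mode: str) -> str:
    # Every known mode gets an explicit branch; the final else raises
    # instead of silently picking a default behavior.
    if blend_mode == "Linear":
        return "linear blending"
    elif blend_mode == "Seam":
        return "seam blending"
    else:
        raise ValueError(f"Unsupported blend mode: '{blend_mode}'.")

assert merge_sketch("Seam") == "seam blending"
try:
    merge_sketch("Cubic")  # a typo'd or future mode is caught immediately
except ValueError as e:
    print(e)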
@@ -2,11 +2,12 @@ import math
 from typing import Union

 import numpy as np

+from invokeai.app.invocations.latent import LATENT_SCALE_FACTOR
 from invokeai.backend.tiles.utils import TBLR, Tile, paste, seam_blend


-def calc_overlap(tiles: list[Tile], num_tiles_x, num_tiles_y) -> list[Tile]:
+def calc_overlap(tiles: list[Tile], num_tiles_x: int, num_tiles_y: int) -> list[Tile]:
     """Calculate and update the overlap of a list of tiles.

     Args:
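The new import replaces the magic number 8 throughout this file with a named constant: Stable Diffusion's VAE downsamples each spatial dimension by that factor, so image-space sizes must map cleanly onto latent-space sizes. A sketch, assuming LATENT_SCALE_FACTOR == 8 as in InvokeAI:

LATENT_SCALE_FACTOR = 8  # inlined here for a self-contained example

image_width, image_height = 1024, 768
latent_width = image_width // LATENT_SCALE_FACTOR    # 128
latent_height = image_height // LATENT_SCALE_FACTOR  # 96
assert latent_width * LATENT_SCALE_FACTOR == image_width  # divides cleanly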
@@ -110,23 +111,27 @@ def calc_tiles_even_split(
         image_width (int): The image width in px.
         num_x_tiles (int): The number of tile to split the image into on the X-axis.
         num_y_tiles (int): The number of tile to split the image into on the Y-axis.
-        overlap (int, optional): The target overlap amount of the tiles size. Defaults to 0.
+        overlap (float, optional): The target overlap amount of the tiles size. Defaults to 0.

     Returns:
         list[Tile]: A list of tiles that cover the image shape. Ordered from left-to-right, top-to-bottom.
     """

     # Ensure tile size is divisible by 8
-    if image_width % 8 != 0 or image_height % 8 != 0:
+    if image_width % LATENT_SCALE_FACTOR != 0 or image_height % LATENT_SCALE_FACTOR != 0:
         raise ValueError(f"image size (({image_width}, {image_height})) must be divisible by 8")

     # Calculate the overlap size based on the percentage and adjust it to be divisible by 8 (rounding up)
-    overlap_x = 8 * math.ceil(int((image_width / num_tiles_x) * overlap) / 8)
-    overlap_y = 8 * math.ceil(int((image_height / num_tiles_y) * overlap) / 8)
+    overlap_x = LATENT_SCALE_FACTOR * math.ceil(int((image_width / num_tiles_x) * overlap) / LATENT_SCALE_FACTOR)
+    overlap_y = LATENT_SCALE_FACTOR * math.ceil(int((image_height / num_tiles_y) * overlap) / LATENT_SCALE_FACTOR)

     # Calculate the tile size based on the number of tiles and overlap, and ensure it's divisible by 8 (rounding down)
-    tile_size_x = 8 * math.floor(((image_width + overlap_x * (num_tiles_x - 1)) // num_tiles_x) / 8)
-    tile_size_y = 8 * math.floor(((image_height + overlap_y * (num_tiles_y - 1)) // num_tiles_y) / 8)
+    tile_size_x = LATENT_SCALE_FACTOR * math.floor(
+        ((image_width + overlap_x * (num_tiles_x - 1)) // num_tiles_x) / LATENT_SCALE_FACTOR
+    )
+    tile_size_y = LATENT_SCALE_FACTOR * math.floor(
+        ((image_height + overlap_y * (num_tiles_y - 1)) // num_tiles_y) / LATENT_SCALE_FACTOR
+    )

     # tiles[y * num_tiles_x + x] is the tile for the y'th row, x'th column.
     tiles: list[Tile] = []
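A worked instance of the rounding above (a sketch with LATENT_SCALE_FACTOR inlined as 8): a 1024 px wide image split into 2 tiles with the default overlap fraction of 0.25.

import math

image_width, num_tiles_x, overlap = 1024, 2, 0.25

# Overlap: 25% of a 512 px slot is 128 px, already a multiple of 8.
overlap_x = 8 * math.ceil(int((image_width / num_tiles_x) * overlap) / 8)
assert overlap_x == 128

# Tile size: (1024 + 128) // 2 = 576, floored to a multiple of 8.
tile_size_x = 8 * math.floor(((image_width + overlap_x * (num_tiles_x - 1)) // num_tiles_x) / 8)
assert tile_size_x == 576

# Two 576 px tiles sharing 128 px span exactly 1024 px.
assert num_tiles_x * tile_size_x - (num_tiles_x - 1) * overlap_x == image_width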
@@ -196,13 +201,13 @@ def calc_tiles_min_overlap(
     for tile_idx_y in range(num_tiles_y):
         top = (tile_idx_y * (image_height - tile_height)) // (num_tiles_y - 1) if num_tiles_y > 1 else 0
         if round_to_8:
-            top = 8 * (top // 8)
+            top = LATENT_SCALE_FACTOR * (top // LATENT_SCALE_FACTOR)
         bottom = top + tile_height

         for tile_idx_x in range(num_tiles_x):
             left = (tile_idx_x * (image_width - tile_width)) // (num_tiles_x - 1) if num_tiles_x > 1 else 0
             if round_to_8:
-                left = 8 * (left // 8)
+                left = LATENT_SCALE_FACTOR * (left // LATENT_SCALE_FACTOR)
             right = left + tile_width

             tile = Tile(
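A quick numeric illustration of the origin placement (values are illustrative): tile tops are spread evenly over (image_height - tile_height), then floored to the nearest multiple of 8 when round_to_8 is set.

image_height, tile_height, num_tiles_y = 1000, 512, 3

tops = []
for tile_idx_y in range(num_tiles_y):
    top = (tile_idx_y * (image_height - tile_height)) // (num_tiles_y - 1)
    top = 8 * (top // 8)  # round_to_8: floor to a multiple of 8
    tops.append(top)

assert tops == [0, 240, 488]  # 244 floors to 240; 0 and 488 are already aligned
# In this example the last tile still ends exactly at the image edge:
assert tops[-1] + tile_height == image_height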
@@ -33,10 +33,10 @@ def paste(dst_image: np.ndarray, src_image: np.ndarray, box: TBLR, mask: Optiona
     """Paste a source image into a destination image.

     Args:
-        dst_image (torch.Tensor): The destination image to paste into. Shape: (H, W, C).
-        src_image (torch.Tensor): The source image to paste. Shape: (H, W, C). H and W must be compatible with 'box'.
+        dst_image (np.array): The destination image to paste into. Shape: (H, W, C).
+        src_image (np.array): The source image to paste. Shape: (H, W, C). H and W must be compatible with 'box'.
         box (TBLR): Box defining the region in the 'dst_image' where 'src_image' will be pasted.
-        mask (Optional[torch.Tensor]): A mask that defines the blending between 'src_image' and 'dst_image'.
+        mask (Optional[np.array]): A mask that defines the blending between 'src_image' and 'dst_image'.
             Range: [0.0, 1.0], Shape: (H, W). The output is calculate per-pixel according to
             `src * mask + dst * (1 - mask)`.
     """
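The per-pixel formula from the corrected docstring, as runnable numpy (a sketch of the blend equation only, not the real paste() helper):

import numpy as np

dst = np.zeros((2, 2, 3), dtype=np.float64)  # black destination, (H, W, C)
src = np.full((2, 2, 3), 255.0)              # white source
mask = np.array([[0.0, 0.25], [0.75, 1.0]])  # (H, W), values in [0, 1]

# Broadcast the (H, W) mask over the channel axis: src * mask + dst * (1 - mask)
blended = src * mask[..., None] + dst * (1.0 - mask[..., None])
assert blended[0, 0, 0] == 0.0 and blended[1, 1, 0] == 255.0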
@@ -55,8 +55,8 @@ def seam_blend(ia1: np.ndarray, ia2: np.ndarray, blend_amount: int, x_seam: bool
     It is assumed that input images will be RGB np arrays and are the same size.

     Args:
-        ia1 (torch.Tensor): Image array 1 Shape: (H, W, C).
-        ia2 (torch.Tensor): Image array 2 Shape: (H, W, C).
+        ia1 (np.array): Image array 1 Shape: (H, W, C).
+        ia2 (np.array): Image array 2 Shape: (H, W, C).
         x_seam (bool): If the images should be blended on the x axis or not.
         blend_amount (int): The size of the blur to use on the seam. Half of this value will be used to avoid the edges of the image.
     """
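A rough illustration of what blend_amount controls: two same-size arrays blended across a band of that width around the seam. This is a simplified stand-in (a linear ramp instead of the blurred, seam-finding logic in the real function):

import numpy as np

blend_amount = 4
ia1 = np.zeros((8, 8, 3))  # left image
ia2 = np.ones((8, 8, 3))   # right image

# Hard transition outside the band, linear 0 -> 1 ramp across it.
mask = np.zeros((8, 8))
mask[:, 4:] = 1.0
mask[:, 2:6] = np.linspace(0.0, 1.0, blend_amount)  # broadcast over rows

result = ia2 * mask[..., None] + ia1 * (1.0 - mask[..., None])
assert result.shape == (8, 8, 3)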
@@ -74,7 +74,7 @@ def seam_blend(ia1: np.ndarray, ia2: np.ndarray, blend_amount: int, x_seam: bool
         return result

     # Assume RGB and convert to grey
-    iag1 = np.dot(ia1, [0.2989, 0.5870, 0.1140])
+    iag1 = np.dot(ia1, [0.2989, 0.5870, 0.1140])  # BT.601 perceived brightness
     iag2 = np.dot(ia2, [0.2989, 0.5870, 0.1140])

     # Calc Difference between the images
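The new comment names the weights: these are the BT.601 luma coefficients, which produce a grey value tracking perceived brightness rather than a plain channel average. A minimal demonstration:

import numpy as np

rgb = np.array([[[255.0, 0.0, 0.0]]])  # pure red, shape (1, 1, 3)
grey = np.dot(rgb, [0.2989, 0.5870, 0.1140])
assert grey.shape == (1, 1)
print(grey[0, 0])  # ~76.2: red contributes far less to luma than green would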