Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
Consolidate and generalize saturation/luminosity adjusters (#4425)
* Consolidated saturation/luminosity adjust. Now allows increasing and inverting. Accepts any color PIL format and channel designation.
* Updated docs/nodes/defaultNodes.md
* Shortened tags list to channel types only
* Fixed typo in mode list
* Split features into offset and multiply nodes
* Updated documentation
* Changed invert to a discrete boolean. Previous math was unclear and had issues with 0 values.
* chore: black
* chore(ui): typegen

Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Parent: 78cc5a7825
Commit: 10eec546ad
docs/nodes/defaultNodes.md
@@ -35,13 +35,13 @@ The table below contains a list of the default nodes shipped with InvokeAI and t
 |Inverse Lerp Image | Inverse linear interpolation of all pixels of an image|
 |Image Primitive | An image primitive value|
 |Lerp Image | Linear interpolation of all pixels of an image|
-|Image Luminosity Adjustment | Adjusts the Luminosity (Value) of an image.|
+|Offset Image Channel | Add to or subtract from an image color channel by a uniform value.|
+|Multiply Image Channel | Multiply or Invert an image color channel by a scalar value.|
 |Multiply Images | Multiplies two images together using `PIL.ImageChops.multiply()`.|
 |Blur NSFW Image | Add blur to NSFW-flagged images|
 |Paste Image | Pastes an image into another image.|
 |ImageProcessor | Base class for invocations that preprocess images for ControlNet|
 |Resize Image | Resizes an image to specific dimensions|
-|Image Saturation Adjustment | Adjusts the Saturation of an image.|
 |Scale Image | Scales an image by a factor|
 |Image to Latents | Encodes an image into latents.|
 |Add Invisible Watermark | Add an invisible watermark to an image|
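The two new table rows describe simple per-channel arithmetic. As a rough standalone sketch (plain NumPy, outside InvokeAI's invocation machinery; the helper names below are illustrative, not part of the codebase), the offset node adds a constant to one channel plane and clips to 0–255, while the multiply node scales the plane, clips, and optionally inverts it:

```python
import numpy as np

def offset_channel(plane: np.ndarray, offset: int) -> np.ndarray:
    """Add a uniform offset to a single 8-bit channel plane, clipping to 0..255."""
    # Widen to int first so the addition cannot wrap around in uint8.
    return np.clip(plane.astype(int) + offset, 0, 255).astype(np.uint8)

def multiply_channel(plane: np.ndarray, scale: float, invert: bool = False) -> np.ndarray:
    """Scale a single 8-bit channel plane, clip to 0..255, and optionally invert it."""
    scaled = np.clip(plane.astype(float) * scale, 0, 255)
    if invert:
        scaled = 255 - scaled
    return scaled.astype(np.uint8)

# Example: darken a channel slightly, then double and invert another.
plane = np.array([[0, 100, 250]], dtype=np.uint8)
print(offset_channel(plane, -20))          # [[  0  80 230]]
print(multiply_channel(plane, 2.0, True))  # [[255  55   0]]
```

The corresponding Python invocation changes follow.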
@@ -773,39 +773,95 @@ class ImageHueAdjustmentInvocation(BaseInvocation):
         )


+COLOR_CHANNELS = Literal[
+    "Red (RGBA)",
+    "Green (RGBA)",
+    "Blue (RGBA)",
+    "Alpha (RGBA)",
+    "Cyan (CMYK)",
+    "Magenta (CMYK)",
+    "Yellow (CMYK)",
+    "Black (CMYK)",
+    "Hue (HSV)",
+    "Saturation (HSV)",
+    "Value (HSV)",
+    "Luminosity (LAB)",
+    "A (LAB)",
+    "B (LAB)",
+    "Y (YCbCr)",
+    "Cb (YCbCr)",
+    "Cr (YCbCr)",
+]
+
+CHANNEL_FORMATS = {
+    "Red (RGBA)": ("RGBA", 0),
+    "Green (RGBA)": ("RGBA", 1),
+    "Blue (RGBA)": ("RGBA", 2),
+    "Alpha (RGBA)": ("RGBA", 3),
+    "Cyan (CMYK)": ("CMYK", 0),
+    "Magenta (CMYK)": ("CMYK", 1),
+    "Yellow (CMYK)": ("CMYK", 2),
+    "Black (CMYK)": ("CMYK", 3),
+    "Hue (HSV)": ("HSV", 0),
+    "Saturation (HSV)": ("HSV", 1),
+    "Value (HSV)": ("HSV", 2),
+    "Luminosity (LAB)": ("LAB", 0),
+    "A (LAB)": ("LAB", 1),
+    "B (LAB)": ("LAB", 2),
+    "Y (YCbCr)": ("YCbCr", 0),
+    "Cb (YCbCr)": ("YCbCr", 1),
+    "Cr (YCbCr)": ("YCbCr", 2),
+}
+
+
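`CHANNEL_FORMATS` maps each human-readable channel name to a (PIL mode, channel index) pair, which is what lets a single node handle RGBA, CMYK, HSV, LAB, and YCbCr channels uniformly. A minimal sketch of how that lookup drives the conversion and slicing, using a two-entry subset of the table (Pillow and NumPy assumed; the standalone `extract_channel` function below is hypothetical, not InvokeAI API):

```python
import numpy as np
from PIL import Image

# Subset of the CHANNEL_FORMATS table added in this commit: label -> (PIL mode, channel index).
CHANNEL_FORMATS = {
    "Red (RGBA)": ("RGBA", 0),
    "Saturation (HSV)": ("HSV", 1),
}

def extract_channel(image: Image.Image, label: str) -> np.ndarray:
    """Convert the image to the mode that owns the requested channel and return that plane."""
    mode, index = CHANNEL_FORMATS[label]
    converted = np.array(image.convert(mode))
    return converted[:, :, index]

img = Image.new("RGB", (4, 4), (200, 50, 50))
sat = extract_channel(img, "Saturation (HSV)")
print(sat.shape, int(sat[0, 0]))  # (4, 4) and the per-pixel saturation value
```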
 @invocation(
-    "img_luminosity_adjust",
-    title="Adjust Image Luminosity",
-    tags=["image", "luminosity", "hsl"],
+    "img_channel_offset",
+    title="Offset Image Channel",
+    tags=[
+        "image",
+        "offset",
+        "red",
+        "green",
+        "blue",
+        "alpha",
+        "cyan",
+        "magenta",
+        "yellow",
+        "black",
+        "hue",
+        "saturation",
+        "luminosity",
+        "value",
+    ],
     category="image",
     version="1.0.0",
 )
-class ImageLuminosityAdjustmentInvocation(BaseInvocation):
-    """Adjusts the Luminosity (Value) of an image."""
+class ImageChannelOffsetInvocation(BaseInvocation):
+    """Add or subtract a value from a specific color channel of an image."""

     image: ImageField = InputField(description="The image to adjust")
-    luminosity: float = InputField(
-        default=1.0, ge=0, le=1, description="The factor by which to adjust the luminosity (value)"
-    )
+    channel: COLOR_CHANNELS = InputField(description="Which channel to adjust")
+    offset: int = InputField(default=0, ge=-255, le=255, description="The amount to adjust the channel by")

     def invoke(self, context: InvocationContext) -> ImageOutput:
         pil_image = context.services.images.get_pil_image(self.image.image_name)

-        # Convert PIL image to OpenCV format (numpy array), note color channel
-        # ordering is changed from RGB to BGR
-        image = numpy.array(pil_image.convert("RGB"))[:, :, ::-1]
+        # extract the channel and mode from the input and reference tuple
+        mode = CHANNEL_FORMATS[self.channel][0]
+        channel_number = CHANNEL_FORMATS[self.channel][1]

-        # Convert image to HSV color space
-        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
+        # Convert PIL image to new format
+        converted_image = numpy.array(pil_image.convert(mode)).astype(int)
+        image_channel = converted_image[:, :, channel_number]

-        # Adjust the luminosity (value)
-        hsv_image[:, :, 2] = numpy.clip(hsv_image[:, :, 2] * self.luminosity, 0, 255)
+        # Adjust the value, clipping to 0..255
+        image_channel = numpy.clip(image_channel + self.offset, 0, 255)

-        # Convert image back to BGR color space
-        image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
+        # Put the channel back into the image
+        converted_image[:, :, channel_number] = image_channel

-        # Convert back to PIL format and to original color mode
-        pil_image = Image.fromarray(image[:, :, ::-1], "RGB").convert("RGBA")
+        # Convert back to RGBA format and output
+        pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA")

         image_dto = context.services.images.create(
             image=pil_image,
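One detail worth noting in the offset node above: the converted image is cast with `.astype(int)` before the offset is added. Pillow hands back `uint8` arrays, and `uint8` arithmetic wraps around on overflow, so widening first and clipping afterwards keeps results pinned to 0–255. A small illustration (NumPy only, not InvokeAI code):

```python
import numpy as np

plane = np.array([250, 251, 252], dtype=np.uint8)

# Adding directly in uint8 wraps around past 255.
wrapped = plane + np.uint8(10)
print(wrapped)  # [4 5 6]

# Widening to int first, as the node does, lets clipping do its job.
safe = np.clip(plane.astype(int) + 10, 0, 255).astype(np.uint8)
print(safe)     # [255 255 255]
```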
@@ -827,36 +883,60 @@ class ImageLuminosityAdjustmentInvocation(BaseInvocation):


 @invocation(
-    "img_saturation_adjust",
-    title="Adjust Image Saturation",
-    tags=["image", "saturation", "hsl"],
+    "img_channel_multiply",
+    title="Multiply Image Channel",
+    tags=[
+        "image",
+        "invert",
+        "scale",
+        "multiply",
+        "red",
+        "green",
+        "blue",
+        "alpha",
+        "cyan",
+        "magenta",
+        "yellow",
+        "black",
+        "hue",
+        "saturation",
+        "luminosity",
+        "value",
+    ],
     category="image",
     version="1.0.0",
 )
-class ImageSaturationAdjustmentInvocation(BaseInvocation):
-    """Adjusts the Saturation of an image."""
+class ImageChannelMultiplyInvocation(BaseInvocation):
+    """Scale a specific color channel of an image."""

     image: ImageField = InputField(description="The image to adjust")
-    saturation: float = InputField(default=1.0, ge=0, le=1, description="The factor by which to adjust the saturation")
+    channel: COLOR_CHANNELS = InputField(description="Which channel to adjust")
+    scale: float = InputField(default=1.0, ge=0.0, description="The amount to scale the channel by.")
+    invert_channel: bool = InputField(default=False, description="Invert the channel after scaling")

     def invoke(self, context: InvocationContext) -> ImageOutput:
         pil_image = context.services.images.get_pil_image(self.image.image_name)

-        # Convert PIL image to OpenCV format (numpy array), note color channel
-        # ordering is changed from RGB to BGR
-        image = numpy.array(pil_image.convert("RGB"))[:, :, ::-1]
+        # extract the channel and mode from the input and reference tuple
+        mode = CHANNEL_FORMATS[self.channel][0]
+        channel_number = CHANNEL_FORMATS[self.channel][1]

-        # Convert image to HSV color space
-        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
+        # Convert PIL image to new format
+        converted_image = numpy.array(pil_image.convert(mode)).astype(float)
+        image_channel = converted_image[:, :, channel_number]

-        # Adjust the saturation
-        hsv_image[:, :, 1] = numpy.clip(hsv_image[:, :, 1] * self.saturation, 0, 255)
+        # Adjust the value, clipping to 0..255
+        image_channel = numpy.clip(image_channel * self.scale, 0, 255)

-        # Convert image back to BGR color space
-        image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
+        # Invert the channel if requested
+        if self.invert_channel:
+            image_channel = 255 - image_channel

-        # Convert back to PIL format and to original color mode
-        pil_image = Image.fromarray(image[:, :, ::-1], "RGB").convert("RGBA")
+        # Put the channel back into the image
+        converted_image[:, :, channel_number] = image_channel
+
+        # Convert back to RGBA format and output
+        pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA")

         image_dto = context.services.images.create(
             image=pil_image,
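Per the commit message, the earlier approach to inversion had unclear math and problems with 0 values; in the multiply node above it is now a discrete boolean applied after scaling and clipping, so the flip is always the well-defined `255 - channel`. For example, a scale of 0 zeroes the channel and inverting then drives it to full intensity (a rough standalone check, not InvokeAI code):

```python
import numpy as np

channel = np.array([0, 64, 255], dtype=float)

# Scale first (clipped to 0..255), then invert as a separate, explicit step.
scaled = np.clip(channel * 0.0, 0, 255)
inverted = 255 - scaled
print(scaled)    # [0. 0. 0.]
print(inverted)  # [255. 255. 255.]
```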
invokeai/frontend/web/src/services/api/schema.d.ts (vendored) — 192
File diff suppressed because one or more lines are too long