# InvokeAI nodes for ControlNet image preprocessors
# initial implementation by Gregg Helt, 2023
# heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux
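#
# These nodes are chained together in a graph: an image processor node turns a
# source image into a control hint (edges, depth, pose, etc.), and
# ControlNetInvocation bundles that hint with a model name and weight into a
# ControlField for downstream generation nodes to consume.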

import numpy as np
from typing import Literal, Optional, Union, List
from PIL import Image, ImageFilter, ImageOps
from pydantic import BaseModel, Field

from ..models.image import ImageField, ImageType
from .baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    InvocationContext,
    InvocationConfig,
)

from controlnet_aux import (
    CannyDetector,
    HEDdetector,
    LineartDetector,
    LineartAnimeDetector,
    MidasDetector,
    MLSDdetector,
    NormalBaeDetector,
    OpenposeDetector,
    PidiNetDetector,
    ContentShuffleDetector,
    ZoeDetector,
)

from .image import ImageOutput, build_image_output, PILInvocationConfig
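
# All of the controlnet_aux detectors used below share the same callable
# pattern: detector(pil_image, **params) -> PIL image. CannyDetector and
# ContentShuffleDetector are constructed directly; the others load pretrained
# weights via from_pretrained().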


class ControlField(BaseModel):
    image: ImageField = Field(default=None, description="processed image")
    control_model: Optional[str] = Field(default=None, description="control model used")
    control_weight: Optional[float] = Field(default=None, description="weight given to controlnet")
    begin_step_percent: float = Field(default=0, ge=0, le=1,
                                      description="% of total steps at which controlnet is first applied")
    end_step_percent: float = Field(default=1, ge=0, le=1,
                                    description="% of total steps at which controlnet is last applied")

    class Config:
        schema_extra = {
            "required": ["image", "control_model", "control_weight", "begin_step_percent", "end_step_percent"]
        }


class ControlOutput(BaseInvocationOutput):
    """Node output for ControlNet info"""
    # fmt: off
    type: Literal["control_output"] = "control_output"
    control: Optional[ControlField] = Field(default=None, description="The control info")
    # fmt: on


class ControlNetInvocation(BaseInvocation):
    """Collects ControlNet info to pass to other nodes"""
    # fmt: off
    type: Literal["controlnet"] = "controlnet"
    # Inputs
    image: ImageField = Field(default=None, description="image to process")
    control_model: str = Field(default=None, description="control model to use")
    control_weight: float = Field(default=0.5, ge=0, le=1, description="weight given to controlnet")
    # TODO: add support in backend core for begin_step_percent, end_step_percent, guess_mode
    begin_step_percent: float = Field(default=0, ge=0, le=1,
                                      description="% of total steps at which controlnet is first applied")
    end_step_percent: float = Field(default=1, ge=0, le=1,
                                    description="% of total steps at which controlnet is last applied")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ControlOutput:
        return ControlOutput(
            control=ControlField(
                image=self.image,
                control_model=self.control_model,
                control_weight=self.control_weight,
                begin_step_percent=self.begin_step_percent,
                end_step_percent=self.end_step_percent,
            ),
        )


# TODO: move image processors to a separate file (image_analysis.py?)
class ImageProcessorInvocation(BaseInvocation, PILInvocationConfig):
    """Base class for invocations that preprocess images for ControlNet"""
    # fmt: off
    type: Literal["image_processor"] = "image_processor"
    # Inputs
    image: ImageField = Field(default=None, description="image to process")
    # fmt: on

    def run_processor(self, image):
        # superclass just passes through image without processing
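        # (each subclass overrides this with a specific controlnet_aux detector)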
        return image

    def invoke(self, context: InvocationContext) -> ImageOutput:
        raw_image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        # raw_image should be a PIL image (likely PIL.PngImagePlugin.PngImageFile)
        processed_image = self.run_processor(raw_image)

        # currently can't see processed image in node UI without a showImage node,
        # so for now setting image_type to RESULT instead of INTERMEDIATE so it
        # will get saved in gallery
        # image_type = ImageType.INTERMEDIATE
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )
        context.services.images.save(image_type, image_name, processed_image, metadata)

        # build an ImageOutput and its ImageField
        processed_image_field = ImageField(
            image_name=image_name,
            image_type=image_type,
        )
        return ImageOutput(
            image=processed_image_field,
            width=processed_image.width,
            height=processed_image.height,
            mode=processed_image.mode,
        )
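
# To add a new preprocessor, subclass ImageProcessorInvocation, give the
# subclass a unique `type` literal plus any input fields, and override
# run_processor(). A minimal hypothetical sketch (not a registered node):
#
#     class BlurImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
#         """Applies Gaussian blur to image"""
#         type: Literal["blur_image_processor"] = "blur_image_processor"
#         radius: float = Field(default=2.0, ge=0, description="blur radius in pixels")
#
#         def run_processor(self, image):
#             return image.filter(ImageFilter.GaussianBlur(radius=self.radius))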


class CannyImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Canny edge detection for ControlNet"""
    # fmt: off
    type: Literal["canny_image_processor"] = "canny_image_processor"
    # Inputs
    low_threshold: float = Field(default=100, ge=0, description="low threshold of Canny pixel gradient")
    high_threshold: float = Field(default=200, ge=0, description="high threshold of Canny pixel gradient")
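    # (standard Canny hysteresis: gradients above high_threshold become edges;
    #  gradients between the two thresholds are kept only if connected to one)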
    # fmt: on

    def run_processor(self, image):
        canny_processor = CannyDetector()
        processed_image = canny_processor(image, self.low_threshold, self.high_threshold)
        return processed_image


class HedImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies HED edge detection to image"""
    # fmt: off
    type: Literal["hed_image_processor"] = "hed_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
    safe: bool = Field(default=False, description="whether to use safe mode")
    scribble: bool = Field(default=False, description="whether to use scribble mode")
    # fmt: on

    def run_processor(self, image):
        hed_processor = HEDdetector.from_pretrained("lllyasviel/Annotators")
        processed_image = hed_processor(image,
                                        detect_resolution=self.detect_resolution,
                                        image_resolution=self.image_resolution,
                                        safe=self.safe,
                                        scribble=self.scribble,
                                        )
        return processed_image


class LineartImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies line art processing to image"""
    # fmt: off
    type: Literal["lineart_image_processor"] = "lineart_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
    coarse: bool = Field(default=False, description="whether to use coarse mode")
    # fmt: on

    def run_processor(self, image):
        lineart_processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = lineart_processor(image,
                                            detect_resolution=self.detect_resolution,
                                            image_resolution=self.image_resolution,
                                            coarse=self.coarse)
        return processed_image


class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies line art anime processing to image"""
    # fmt: off
    type: Literal["lineart_anime_image_processor"] = "lineart_anime_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
    # fmt: on

    def run_processor(self, image):
        processor = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = processor(image,
                                    detect_resolution=self.detect_resolution,
                                    image_resolution=self.image_resolution,
                                    )
        return processed_image


class OpenposeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies Openpose processing to image"""
    # fmt: off
    type: Literal["openpose_image_processor"] = "openpose_image_processor"
    # Inputs
    hand_and_face: bool = Field(default=False, description="whether to use hands and face mode")
    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
    # fmt: on

    def run_processor(self, image):
        openpose_processor = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = openpose_processor(image,
                                             detect_resolution=self.detect_resolution,
                                             image_resolution=self.image_resolution,
                                             hand_and_face=self.hand_and_face,
                                             )
        return processed_image


class MidasDepthImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies Midas depth processing to image"""
    # fmt: off
    type: Literal["midas_depth_image_processor"] = "midas_depth_image_processor"
    # Inputs
    a_mult: float = Field(default=2.0, ge=0, description="Midas parameter a = a_mult * PI")
    bg_th: float = Field(default=0.1, ge=0, description="Midas parameter bg_th")
    depth_and_normal: bool = Field(default=False, description="whether to use depth and normal mode")
    # fmt: on

    def run_processor(self, image):
        midas_processor = MidasDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = midas_processor(image,
                                          a=np.pi * self.a_mult,
                                          bg_th=self.bg_th,
                                          depth_and_normal=self.depth_and_normal)
        return processed_image


class NormalbaeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies NormalBae processing to image"""
    # fmt: off
    type: Literal["normalbae_image_processor"] = "normalbae_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
    # fmt: on

    def run_processor(self, image):
        normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = normalbae_processor(image,
                                              detect_resolution=self.detect_resolution,
                                              image_resolution=self.image_resolution)
        return processed_image


class MlsdImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies MLSD processing to image"""
    # fmt: off
    type: Literal["mlsd_image_processor"] = "mlsd_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
    thr_v: float = Field(default=0.1, ge=0, description="MLSD parameter thr_v (value threshold)")
    thr_d: float = Field(default=0.1, ge=0, description="MLSD parameter thr_d (distance threshold)")
    # fmt: on

    def run_processor(self, image):
        mlsd_processor = MLSDdetector.from_pretrained("lllyasviel/Annotators")
        processed_image = mlsd_processor(image,
                                         detect_resolution=self.detect_resolution,
                                         image_resolution=self.image_resolution,
                                         thr_v=self.thr_v,
                                         thr_d=self.thr_d)
        return processed_image


class PidiImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies PIDI processing to image"""
    # fmt: off
    type: Literal["pidi_image_processor"] = "pidi_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
    safe: bool = Field(default=False, description="whether to use safe mode")
    scribble: bool = Field(default=False, description="whether to use scribble mode")
    # fmt: on

    def run_processor(self, image):
        pidi_processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = pidi_processor(image,
                                         detect_resolution=self.detect_resolution,
                                         image_resolution=self.image_resolution,
                                         safe=self.safe,
                                         scribble=self.scribble)
        return processed_image


class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies content shuffle processing to image"""
    # fmt: off
    type: Literal["content_shuffle_image_processor"] = "content_shuffle_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
    h: Optional[int] = Field(default=None, ge=0, description="content shuffle h parameter")
    w: Optional[int] = Field(default=None, ge=0, description="content shuffle w parameter")
    f: Optional[int] = Field(default=None, ge=0, description="content shuffle f parameter")
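    # when h, w, or f is left as None, ContentShuffleDetector falls back to its
    # own defaults (in controlnet_aux, h and w default to the image dimensions)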
    # fmt: on

    def run_processor(self, image):
        content_shuffle_processor = ContentShuffleDetector()
        processed_image = content_shuffle_processor(image,
                                                    detect_resolution=self.detect_resolution,
                                                    image_resolution=self.image_resolution,
                                                    h=self.h,
                                                    w=self.w,
                                                    f=self.f
                                                    )
        return processed_image


class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies Zoe depth processing to image"""
    # fmt: off
    type: Literal["zoe_depth_image_processor"] = "zoe_depth_image_processor"
    # fmt: on

    def run_processor(self, image):
        zoe_depth_processor = ZoeDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = zoe_depth_processor(image)
        return processed_image