Added more preprocessor nodes for:
MidasDepth, ZoeDepth, MLSD, NormalBae, Pidi, LineartAnime, ContentShuffle. Removed pil_output options; ControlNet preprocessors should always output as PIL. Removed diagnostic prints and did other general cleanup.
This commit is contained in: commit f3666eda63 (parent 754017b59e)
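The pattern this commit settles on: every preprocessor is a thin node class that subclasses the shared base, declares its inputs as pydantic fields, and overrides run_processor(); the base class's invoke() handles fetching the input image and saving the PIL result. A minimal sketch of what adding yet another node would look like under this scheme (the node class name and type string below are made up for illustration; CannyDetector is the real controlnet_aux class used in the diff):

# Hypothetical extra node following the pattern in this commit.
# ExampleCannyControlInvocation and "example_canny_control" are illustrative
# names, not part of the commit; CannyDetector comes from controlnet_aux.
from typing import Literal
from pydantic import Field
from controlnet_aux import CannyDetector

class ExampleCannyControlInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
    """Illustrative node: declare inputs, override run_processor(), done."""
    type: Literal["example_canny_control"] = "example_canny_control"
    low_threshold: float = Field(default=100, ge=0, description="low threshold of Canny pixel gradient")
    high_threshold: float = Field(default=200, ge=0, description="high threshold of Canny pixel gradient")

    def run_processor(self, image):
        # invoke() in the base class passes a PIL image in and saves the PIL
        # image returned; no return_pil flag, since output is always PIL now.
        return CannyDetector()(image, self.low_threshold, self.high_threshold)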
@@ -1,3 +1,8 @@
+# InvokeAI nodes for ControlNet image preprocessors
+# initial implementation by Gregg Helt, 2023
+# heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux
+import numpy as np
+
 from typing import Literal, Optional, Union, List
 from pydantic import BaseModel, Field
 
@@ -20,7 +25,6 @@ from controlnet_aux import (
     OpenposeDetector,
     PidiNetDetector,
     ContentShuffleDetector,
-    # StyleShuffleDetector,
     ZoeDetector)
 
 from .image import ImageOutput, build_image_output, PILInvocationConfig
@@ -43,25 +47,26 @@ class ControlField(BaseModel):
 
 class ControlOutput(BaseInvocationOutput):
     """node output for ControlNet info"""
 
     # fmt: off
     type: Literal["control_output"] = "control_output"
     control: Optional[ControlField] = Field(default=None, description="The control info dict")
-    raw_processed_image: ImageField = Field(default=None, description="outputs just them image info (which is also included in control output)")
+    raw_processed_image: ImageField = Field(default=None,
+                                            description="outputs just the image info (also included in control output)")
     # fmt: on
 
 
-class PreprocessedControlInvocation(BaseInvocation, PILInvocationConfig):
+# This super class handles invoke() call, which in turn calls run_processor(image)
+# subclasses override run_processor() instead of implementing their own invoke()
+class PreprocessedControlNetInvocation(BaseInvocation, PILInvocationConfig):
     """Base class for invocations that preprocess images for ControlNet"""
 
     # fmt: off
     type: Literal["preprocessed_control"] = "preprocessed_control"
 
     # Inputs
     image: ImageField = Field(default=None, description="image to process")
     control_model: str = Field(default=None, description="control model to use")
     control_weight: float = Field(default=0.5, ge=0, le=1, description="control weight")
+    # TODO: support additional ControlNet parameters (mostly just passthroughs to other nodes with ControlField inputs)
     # begin_step_percent: float = Field(default=0, ge=0, le=1,
     #                                   description="% of total steps at which controlnet is first applied")
     # end_step_percent: float = Field(default=1, ge=0, le=1,
@@ -69,8 +74,7 @@ class PreprocessedControlInvocation(BaseInvocation, PILInvocationConfig):
     # guess_mode: bool = Field(default=False, description="use guess mode (controlnet ignores prompt)")
     # fmt: on
 
-    # This super class handles invoke() call, which in turn calls run_processor(image)
-    # subclasses override run_processor instead of implementing their own invoke()
     def run_processor(self, image):
         # superclass just passes through image without processing
         return image
@@ -81,6 +85,8 @@ class PreprocessedControlInvocation(BaseInvocation, PILInvocationConfig):
         )
         # image type should be PIL.PngImagePlugin.PngImageFile ?
         processed_image = self.run_processor(image)
+        # currently can't see processed image in node UI without a showImage node,
+        # so for now setting image_type to RESULT instead of INTERMEDIATE so will get saved in gallery
         # image_type = ImageType.INTERMEDIATE
         image_type = ImageType.RESULT
         image_name = context.services.images.create_name(
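For orientation, a condensed sketch of the invoke() flow these two hunks sit inside. Most of the method is outside the hunk context, so everything here other than run_processor(), ImageType.RESULT, and create_name(...) is reconstructed and should be read as an assumption about the surrounding file, not its literal contents:

# Hedged reconstruction of the base-class invoke(); the get/save calls and
# exact signatures are assumptions based on the visible fragments above.
def invoke(self, context) -> ImageOutput:
    image = context.services.images.get(
        self.image.image_type, self.image.image_name
    )
    processed_image = self.run_processor(image)   # subclass hook, returns PIL
    image_type = ImageType.RESULT                 # saved to gallery, not intermediate
    image_name = context.services.images.create_name(
        context.graph_execution_state_id, self.id
    )
    context.services.images.save(image_type, image_name, processed_image)
    return build_image_output(image_type=image_type,
                              image_name=image_name,
                              image=processed_image)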
@@ -106,97 +112,209 @@ class PreprocessedControlInvocation(BaseInvocation, PILInvocationConfig):
         )
 
 
-class CannyControlInvocation(PreprocessedControlInvocation, PILInvocationConfig):
+class CannyControlInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
     """Canny edge detection for ControlNet"""
 
     # fmt: off
-    type: Literal["canny_control"] = "canny_control"
-    # Inputs
+    type: Literal["cannycontrol"] = "cannycontrol"
+    # Input
     low_threshold: float = Field(default=100, ge=0, description="low threshold of Canny pixel gradient")
     high_threshold: float = Field(default=200, ge=0, description="high threshold of Canny pixel gradient")
 
     # fmt: on
 
     def run_processor(self, image):
-        print("**** running Canny processor ****")
-        print("image type: ", type(image))
         canny_processor = CannyDetector()
         processed_image = canny_processor(image, self.low_threshold, self.high_threshold)
-        print("processed image type: ", type(image))
         return processed_image
 
 
-class HedProcessorInvocation(PreprocessedControlInvocation, PILInvocationConfig):
+class HedControlNetInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
     """Applies HED edge detection to image"""
 
     # fmt: off
     type: Literal["hed_control"] = "hed_control"
 
     # Inputs
     detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
     image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
     safe: bool = Field(default=False, description="whether to use safe mode")
     scribble: bool = Field(default=False, description="whether to use scribble mode")
-    return_pil: bool = Field(default=True, description="whether to return PIL image")
     # fmt: on
 
     def run_processor(self, image):
-        print("**** running HED processor ****")
         hed_processor = HEDdetector.from_pretrained("lllyasviel/Annotators")
         processed_image = hed_processor(image,
                                         detect_resolution=self.detect_resolution,
                                         image_resolution=self.image_resolution,
                                         safe=self.safe,
-                                        return_pil=self.return_pil,
                                         scribble=self.scribble,
                                         )
         return processed_image
 
 
-class LineartProcessorInvocation(PreprocessedControlInvocation, PILInvocationConfig):
+class LineartControlInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
     """Applies line art processing to image"""
 
     # fmt: off
     type: Literal["lineart_control"] = "lineart_control"
     # Inputs
     detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
     image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
     coarse: bool = Field(default=False, description="whether to use coarse mode")
-    return_pil: bool = Field(default=True, description="whether to return PIL image")
 
     # fmt: on
 
     def run_processor(self, image):
-        print("**** running Lineart processor ****")
-        print("image type: ", type(image))
         lineart_processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
         processed_image = lineart_processor(image,
                                             detect_resolution=self.detect_resolution,
                                             image_resolution=self.image_resolution,
-                                            return_pil=self.return_pil,
                                             coarse=self.coarse)
         return processed_image
 
 
-class OpenposeProcessorInvocation(PreprocessedControlInvocation, PILInvocationConfig):
-    """Applies Openpose processing to image"""
+class LineartAnimeControlInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
+    """Applies line art anime processing to image"""
+    # fmt: off
+    type: Literal["lineart_anime_control"] = "lineart_anime_control"
+    # Inputs
+    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
+    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
+    # fmt: on
+
+    def run_processor(self, image):
+        processor = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
+        processed_image = processor(image,
+                                    detect_resolution=self.detect_resolution,
+                                    image_resolution=self.image_resolution,
+                                    )
+        return processed_image
+
+
+class OpenposeControlInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
+    """Applies Openpose processing to image"""
     # fmt: off
     type: Literal["openpose_control"] = "openpose_control"
     # Inputs
     hand_and_face: bool = Field(default=False, description="whether to use hands and face mode")
     detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
     image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
-    return_pil: bool = Field(default=True, description="whether to return PIL image")
     # fmt: on
 
     def run_processor(self, image):
-        print("**** running Openpose processor ****")
-        print("image type: ", type(image))
         openpose_processor = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
         processed_image = openpose_processor(image,
                                              detect_resolution=self.detect_resolution,
                                              image_resolution=self.image_resolution,
                                              hand_and_face=self.hand_and_face,
-                                             return_pil=self.return_pil)
+                                             )
+        return processed_image
+
+
+class MidasDepthControlInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
+    """Applies Midas depth processing to image"""
+    # fmt: off
+    type: Literal["midas_control"] = "midas_control"
+    # Inputs
+    a_mult: float = Field(default=2.0, ge=0, description="Midas parameter a = amult * PI")
+    bg_th: float = Field(default=0.1, ge=0, description="Midas parameter bg_th")
+    depth_and_normal: bool = Field(default=False, description="whether to use depth and normal mode")
+    # fmt: on
+
+    def run_processor(self, image):
+        midas_processor = MidasDetector.from_pretrained("lllyasviel/Annotators")
+        processed_image = midas_processor(image,
+                                          a=np.pi * self.a_mult,
+                                          bg_th=self.bg_th,
+                                          depth_and_normal=self.depth_and_normal)
+        return processed_image
+
+
+class NormalbaeControlNetInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
+    """Applies NormalBae processing to image"""
+    # fmt: off
+    type: Literal["normalbae_control"] = "normalbae_control"
+    # Inputs
+    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
+    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
+    # fmt: on
+
+    def run_processor(self, image):
+        normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
+        processed_image = normalbae_processor(image,
+                                              detect_resolution=self.detect_resolution,
+                                              image_resolution=self.image_resolution)
+        return processed_image
+
+
+class MLSDControlNetInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
+    """Applies MLSD processing to image"""
+    # fmt: off
+    type: Literal["mlsd_control"] = "mlsd_control"
+    # Inputs
+    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
+    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
+    thr_v: float = Field(default=0.1, ge=0, description="MLSD parameter thr_v")
+    thr_d: float = Field(default=0.1, ge=0, description="MLSD parameter thr_d")
+    # fmt: on
+
+    def run_processor(self, image):
+        mlsd_processor = MLSDdetector.from_pretrained("lllyasviel/Annotators")
+        processed_image = mlsd_processor(image,
+                                         detect_resolution=self.detect_resolution,
+                                         image_resolution=self.image_resolution,
+                                         thr_v=self.thr_v,
+                                         thr_d=self.thr_d)
+        return processed_image
+
+
+class PidiControlNetInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
+    """Applies PIDI processing to image"""
+    # fmt: off
+    type: Literal["pidi_control"] = "pidi_control"
+    # Inputs
+    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
+    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
+    safe: bool = Field(default=False, description="whether to use safe mode")
+    scribble: bool = Field(default=False, description="whether to use scribble mode")
+    # fmt: on
+
+    def run_processor(self, image):
+        pidi_processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
+        processed_image = pidi_processor(image,
+                                         detect_resolution=self.detect_resolution,
+                                         image_resolution=self.image_resolution,
+                                         safe=self.safe,
+                                         scribble=self.scribble)
+        return processed_image
+
+
+class ContentShuffleControlInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
+    """Applies content shuffle processing to image"""
+    # fmt: off
+    type: Literal["content_shuffle_control"] = "content_shuffle_control"
+    # Inputs
+    detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection")
+    image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image")
+    h: Union[int, None] = Field(default=None, ge=0, description="content shuffle h parameter")
+    w: Union[int, None] = Field(default=None, ge=0, description="content shuffle w parameter")
+    f: Union[int, None] = Field(default=None, ge=0, description="content shuffle f parameter")
+    # fmt: on
+
+    def run_processor(self, image):
+        content_shuffle_processor = ContentShuffleDetector()
+        processed_image = content_shuffle_processor(image,
+                                                    detect_resolution=self.detect_resolution,
+                                                    image_resolution=self.image_resolution,
+                                                    h=self.h,
+                                                    w=self.w,
+                                                    f=self.f
+                                                    )
+        return processed_image
+
+
+class ZoeDepthControlInvocation(PreprocessedControlNetInvocation, PILInvocationConfig):
+    """Applies Zoe depth processing to image"""
+    # fmt: off
+    type: Literal["zoe_depth_control"] = "zoe_depth_control"
+    # fmt: on
+
+    def run_processor(self, image):
+        zoe_depth_processor = ZoeDetector.from_pretrained("lllyasviel/Annotators")
+        processed_image = zoe_depth_processor(image)
         return processed_image
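Since run_processor() needs no invocation context, any of the new nodes can be smoke-tested standalone; a quick sketch, where the node id and file paths are placeholders:

# Standalone smoke test for one of the new nodes; bypasses invoke() and the
# graph/services machinery entirely. "input.png" and id="demo" are placeholders.
from PIL import Image

node = MidasDepthControlInvocation(id="demo", a_mult=2.0, bg_th=0.1)
depth_map = node.run_processor(Image.open("input.png").convert("RGB"))
depth_map.save("midas_depth.png")   # always a PIL image now, per this commit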