Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
changes to base class for controlnet nodes
This commit is contained in:
parent: f2f4c37f19
commit: dc12fa6cd6
@@ -9,12 +9,24 @@ from .baseinvocation import (
     InvocationConfig,
 )
 
-from controlnet_aux import CannyDetector
+from controlnet_aux import (
+    CannyDetector,
+    HEDdetector,
+    LineartDetector,
+    LineartAnimeDetector,
+    MidasDetector,
+    MLSDdetector,
+    NormalBaeDetector,
+    OpenposeDetector,
+    PidiNetDetector,
+    ContentShuffleDetector,
+    # StyleShuffleDetector,
+    ZoeDetector)
 
 from .image import ImageOutput, build_image_output, PILInvocationConfig
 
 
 class ControlField(BaseModel):
     image: ImageField = Field(default=None, description="processed image")
     # width: Optional[int] = Field(default=None, description="The width of the image in pixels")
     # height: Optional[int] = Field(default=None, description="The height of the image in pixels")
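For reference, the annotators added to this import block follow the usual controlnet_aux calling pattern: simple detectors such as CannyDetector are constructed directly, while the learned ones (HEDdetector, MidasDetector, OpenposeDetector, etc.) are loaded with from_pretrained and then called on a PIL image. A minimal sketch, not part of this commit (the checkpoint repo id is an assumption):

```python
from PIL import Image
from controlnet_aux import CannyDetector, HEDdetector

image = Image.open("input.png")  # any RGB PIL image

# Stateless detector: instantiate and call directly
canny = CannyDetector()
canny_map = canny(image, low_threshold=100, high_threshold=200)

# Learned detector: weights are fetched with from_pretrained
# (repo id below is an illustrative assumption, not taken from this diff)
hed = HEDdetector.from_pretrained("lllyasviel/Annotators")
hed_map = hed(image)
```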
@@ -38,6 +50,7 @@ class ControlOutput(BaseInvocationOutput):
     # image: ImageField = Field(default=None, description="outputs just them image info (which is also included in control output)")
     # fmt: on
 
 
 class PreprocessedControlInvocation(BaseInvocation, PILInvocationConfig):
     """Base class for invocations that preprocess images for ControlNet"""
 
@@ -48,6 +61,7 @@ class PreprocessedControlInvocation(BaseInvocation, PILInvocationConfig):
     image: ImageField = Field(default=None, description="image to process")
     control_model: str = Field(default=None, description="control model to use")
     control_weight: float = Field(default=0.5, ge=0, le=1, description="control weight")
 
     # begin_step_percent: float = Field(default=0, ge=0, le=1,
     #                                   description="% of total steps at which controlnet is first applied")
     # end_step_percent: float = Field(default=1, ge=0, le=1,
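The ge=0, le=1 bounds on control_weight mean pydantic validates the value when the invocation is constructed and rejects anything outside [0, 1]. A small self-contained sketch of that behaviour, using a hypothetical stand-in model rather than the invocation class itself:

```python
from pydantic import BaseModel, Field, ValidationError

class ControlWeightDemo(BaseModel):
    # same constraint shape as control_weight in the diff above
    control_weight: float = Field(default=0.5, ge=0, le=1)

print(ControlWeightDemo().control_weight)                    # 0.5 (default)
print(ControlWeightDemo(control_weight=0.8).control_weight)  # 0.8 (in range)

try:
    ControlWeightDemo(control_weight=1.5)                    # out of range
except ValidationError as err:
    print("rejected:", err.errors()[0]["msg"])
```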
@@ -58,7 +72,7 @@ class PreprocessedControlInvocation(BaseInvocation, PILInvocationConfig):
     # This super class handles invoke() call, which in turn calls run_processor(image)
     # subclasses override run_processor instead of implementing their own invoke()
     def run_processor(self, image):
-        # super class pass through of image
+        # superclass just passes through image without processing
         return image
 
     def invoke(self, context: InvocationContext) -> ControlOutput:
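This hunk spells out the template-method design: the base class owns invoke(), which loads the input image, calls run_processor(), and packages the result into a ControlOutput, so concrete preprocessors only override run_processor(). A hedged sketch of what such a subclass could look like if it lived in the same module (BlurControlInvocation and its field are placeholders, not part of this commit):

```python
class BlurControlInvocation(PreprocessedControlInvocation, PILInvocationConfig):
    """Hypothetical preprocessor: only run_processor is overridden."""
    # fmt: off
    type: Literal["blur_control"] = "blur_control"
    blur_radius: float = Field(default=2.0, ge=0, description="Gaussian blur radius")
    # fmt: on

    def run_processor(self, image):
        # the inherited invoke() supplies the PIL image and wraps this return value
        from PIL import ImageFilter
        return image.filter(ImageFilter.GaussianBlur(self.blur_radius))
```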
@@ -94,10 +108,11 @@ class CannyControlInvocation(PreprocessedControlInvocation, PILInvocationConfig)
     """Canny edge detection for ControlNet"""
 
     # fmt: off
-    type: Literal["cannycontrol"] = "cannycontrol"
+    type: Literal["canny_control"] = "canny_control"
     # Inputs
     low_threshold: float = Field(default=100, ge=0, description="low threshold of Canny pixel gradient")
     high_threshold: float = Field(default=200, ge=0, description="high threshold of Canny pixel gradient")
 
     # fmt: on
 
     def run_processor(self, image):
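The excerpt ends before the body of run_processor. Given the two threshold fields and the CannyDetector import above, a plausible body looks like the following; this is a sketch for orientation, not the code actually committed:

```python
def run_processor(self, image):
    # sketch only: the committed implementation is cut off in this excerpt
    canny = CannyDetector()
    processed_image = canny(image, self.low_threshold, self.high_threshold)
    return processed_image
```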