# Invocations for ControlNet image preprocessors
# initial implementation by Gregg Helt, 2023
# heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux
from builtins import bool, float
from typing import Dict, List, Literal, Optional, Union
import cv2
import numpy as np
from controlnet_aux import (
    CannyDetector,
    ContentShuffleDetector,
    HEDdetector,
    LeresDetector,
    LineartAnimeDetector,
    LineartDetector,
    MediapipeFaceDetector,
    MidasDetector,
    MLSDdetector,
    NormalBaeDetector,
    OpenposeDetector,
    PidiNetDetector,
    SamDetector,
    ZoeDetector,
)
from controlnet_aux.util import HWC3, ade_palette
from PIL import Image
from pydantic import BaseModel, Field, validator
from invokeai.app.invocations.primitives import ImageField, ImageOutput
from ...backend.model_management import BaseModelType
from ..models.image import ImageCategory, ResourceOrigin
from .baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    FieldDescriptions,
    Input,
    InputField,
    InvocationContext,
    OutputField,
    invocation,
    invocation_output,
)


CONTROLNET_MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"]
CONTROLNET_RESIZE_VALUES = Literal[
    "just_resize",
    "crop_resize",
    "fill_resize",
    "just_resize_simple",
]


class ControlNetModelField(BaseModel):
    """ControlNet model field"""

    model_name: str = Field(description="Name of the ControlNet model")
    base_model: BaseModelType = Field(description="Base model")
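    # Illustrative sketch (not part of the original module): this field simply pairs a
    # model name with the base-model family it targets. Assuming BaseModelType exposes a
    # StableDiffusion1 member, construction might look like:
    #   ControlNetModelField(model_name="canny", base_model=BaseModelType.StableDiffusion1)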


class ControlField(BaseModel):
    image: ImageField = Field(description="The control image")
    control_model: ControlNetModelField = Field(description="The ControlNet model to use")
    control_weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
    begin_step_percent: float = Field(
        default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
    )
    end_step_percent: float = Field(
        default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
    )
    control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use")
    resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")

    @validator("control_weight")
    def validate_control_weight(cls, v):
        """Validate that all control weights are within the valid range."""
        if isinstance(v, list):
            for i in v:
                if i < -1 or i > 2:
                    raise ValueError("Control weights must be within -1 to 2 range")
        else:
            if v < -1 or v > 2:
                raise ValueError("Control weights must be within -1 to 2 range")
|
Feat/easy param (#3504)
* Testing change to LatentsToText to allow setting different cfg_scale values per diffusion step.
* Adding first attempt at float param easing node, using Penner easing functions.
* Core implementation of ControlNet and MultiControlNet.
* Added support for ControlNet and MultiControlNet to legacy non-nodal Txt2Img in backend/generator. Although backend/generator will likely disappear by v3.x, right now they are very useful for testing core ControlNet and MultiControlNet functionality while node codebase is rapidly evolving.
* Added example of using ControlNet with legacy Txt2Img generator
* Resolving rebase conflict
* Added first controlnet preprocessor node for canny edge detection.
* Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node
* Switching to ControlField for output from controlnet nodes.
* Resolving conflicts in rebase to origin/main
* Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke())
* changes to base class for controlnet nodes
* Added HED, LineArt, and OpenPose ControlNet nodes
* Added an additional "raw_processed_image" output port to controlnets, mainly so could route ImageField to a ShowImage node
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* More rebase repair.
* Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this...
* Fixed use of ControlNet control_weight parameter
* Fixed lint-ish formatting error
* Core implementation of ControlNet and MultiControlNet.
* Added first controlnet preprocessor node for canny edge detection.
* Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node
* Switching to ControlField for output from controlnet nodes.
* Refactored controlnet node to output ControlField that bundles control info.
* changes to base class for controlnet nodes
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* Cleaning up TextToLatent arg testing
* Cleaning up mistakes after rebase.
* Removed last bits of dtype and and device hardwiring from controlnet section
* Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled.
* Added support for specifying which step iteration to start using
each ControlNet, and which step to end using each controlnet (specified as fraction of total steps)
* Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input.
* Added dependency on controlnet-aux v0.0.3
* Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it.
* Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names.
* Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle.
* Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier.
* Cleaning up after ControlNet refactor in TextToLatentsInvocation
* Extended node-based ControlNet support to LatentsToLatentsInvocation.
* chore(ui): regen api client
* fix(ui): add value to conditioning field
* fix(ui): add control field type
* fix(ui): fix node ui type hints
* fix(nodes): controlnet input accepts list or single controlnet
* Moved to controlnet_aux v0.0.4, reinstated Zoe controlnet preprocessor. Also in pyproject.toml had to specify downgrade of timm to 0.6.13 _after_ controlnet-aux installs timm >= 0.9.2, because timm >0.6.13 breaks Zoe preprocessor.
* Core implementation of ControlNet and MultiControlNet.
* Added first controlnet preprocessor node for canny edge detection.
* Switching to ControlField for output from controlnet nodes.
* Resolving conflicts in rebase to origin/main
* Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke())
* changes to base class for controlnet nodes
* Added HED, LineArt, and OpenPose ControlNet nodes
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this...
* Fixed use of ControlNet control_weight parameter
* Core implementation of ControlNet and MultiControlNet.
* Added first controlnet preprocessor node for canny edge detection.
* Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node
* Switching to ControlField for output from controlnet nodes.
* Refactored controlnet node to output ControlField that bundles control info.
* changes to base class for controlnet nodes
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* Cleaning up TextToLatent arg testing
* Cleaning up mistakes after rebase.
* Removed last bits of dtype and and device hardwiring from controlnet section
* Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled.
* Added support for specifying which step iteration to start using
each ControlNet, and which step to end using each controlnet (specified as fraction of total steps)
* Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input.
* Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it.
* Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names.
* Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle.
* Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier.
* Cleaning up after ControlNet refactor in TextToLatentsInvocation
* Extended node-based ControlNet support to LatentsToLatentsInvocation.
* chore(ui): regen api client
* fix(ui): fix node ui type hints
* fix(nodes): controlnet input accepts list or single controlnet
* Added Mediapipe image processor for use as ControlNet preprocessor.
Also hacked in ability to specify HF subfolder when loading ControlNet models from string.
* Fixed bug where MediapipFaceProcessorInvocation was ignoring max_faces and min_confidence params.
* Added nodes for float params: ParamFloatInvocation and FloatCollectionOutput. Also added FloatOutput.
* Added mediapipe install requirement. Should be able to remove once controlnet_aux package adds mediapipe to its requirements.
* Added float to FIELD_TYPE_MAP ins constants.ts
* Progress toward improvement in fieldTemplateBuilder.ts getFieldType()
* Fixed controlnet preprocessors and controlnet handling in TextToLatents to work with revised Image services.
* Cleaning up from merge, re-adding cfg_scale to FIELD_TYPE_MAP
* Making sure cfg_scale of type list[float] can be used in image metadata, to support param easing for cfg_scale
* Fixed math for per-step param easing.
* Added option to show plot of param value at each step
* Just cleaning up after adding param easing plot option, removing vestigial code.
* Modified control_weight ControlNet param to be polistmorphic --
can now be either a single float weight applied for all steps, or a list of floats of size total_steps, that specifies weight for each step.
* Added more informative error message when _validat_edge() throws an error.
* Just improving parm easing bar chart title to include easing type.
* Added requirement for easing-functions package
* Taking out some diagnostic prints.
* Added option to use both easing function and mirror of easing function together.
* Fixed a recently introduced problem (when main was pulled in), triggered by num_steps in StepParamEasingInvocation not having a default value -- just added a default.
---------
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2023-06-11 06:27:44 +00:00
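The step-window and per-step-weight semantics described in the changelog above can be read as follows. This is a minimal sketch, not code from the InvokeAI source, and the helper names are hypothetical:
```py
from typing import List, Union


def controlnet_is_active(step: int, total_steps: int, begin_step_percent: float, end_step_percent: float) -> bool:
    # begin/end are fractions of total steps; the ControlNet is only applied
    # while the current step falls inside that window.
    fraction = step / max(total_steps - 1, 1)
    return begin_step_percent <= fraction <= end_step_percent


def controlnet_weight_at(control_weight: Union[float, List[float]], step: int) -> float:
    # A single float is reused for every step; a list supplies one weight per step.
    if isinstance(control_weight, list):
        return control_weight[step]
    return control_weight
```
For example, `controlnet_weight_at([0.0, 0.5, 1.0], 1)` returns `0.5`, while a plain `1.0` applies to every step.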
|
|
|
return v
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-04 21:21:11 +00:00
|
|
|
|
feat(nodes): move all invocation metadata (type, title, tags, category) to decorator
All invocation metadata (type, title, tags and category) are now defined in decorators.
The decorators add the `type: Literal["invocation_type"] = "invocation_type"` field to the invocation.
Category is a new invocation metadata, but it is not used by the frontend just yet.
- `@invocation()` decorator for invocations
```py
@invocation(
"sdxl_compel_prompt",
title="SDXL Prompt",
tags=["sdxl", "compel", "prompt"],
category="conditioning",
)
class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
...
```
- `@invocation_output()` decorator for invocation outputs
```py
@invocation_output("clip_skip_output")
class ClipSkipInvocationOutput(BaseInvocationOutput):
...
```
- update invocation docs
- add category to decorator
- regen frontend types
2023-08-30 08:35:12 +00:00
|
|
|
@invocation_output("control_output")
|
2023-05-04 21:21:11 +00:00
|
|
|
class ControlOutput(BaseInvocationOutput):
|
2023-05-04 23:01:22 +00:00
|
|
|
"""node output for ControlNet info"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
# Outputs
|
|
|
|
control: ControlField = OutputField(description=FieldDescriptions.control)
|
2023-05-05 00:06:49 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
|
2023-09-04 08:11:56 +00:00
|
|
|
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.0.0")
|
2023-05-06 04:41:07 +00:00
|
|
|
class ControlNetInvocation(BaseInvocation):
|
|
|
|
"""Collects ControlNet info to pass to other nodes"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
image: ImageField = InputField(description="The control image")
|
feat: polymorphic fields
Initial support for polymorphic field types. Polymorphic types are a single instance of, or a list of, a specific type. For example, `Union[str, list[str]]`.
Polymorphics do not yet have support for direct input in the UI (will come in the future). They will be forcibly set as Connection-only fields, in which case users will not be able to provide direct input to the field.
If a polymorphic should present as a singleton type - which would allow direct input - the node must provide an explicit type hint.
For example, `DenoiseLatents`' `CFG Scale` is polymorphic, but in the node editor we want to present it as a number input. In the node definition, the field is given `ui_type=UIType.Float`, which tells the UI to treat this as a `float` field (a minimal sketch of such a declaration follows this commit message).
The connection validation logic will prevent connecting a collection to `CFG Scale` in this situation, because it is typed as `float`. The workaround is to disable validation from the settings to make this specific connection. A future improvement will resolve this.
This also introduces better support for collection field types. Like polymorphics, collection types are parsed automatically by the client and do not need any specific type hints.
Also like polymorphics, there is no support yet for direct input of collection types in the UI.
- Disabling validation in workflow editor now displays the visual hints for valid connections, but lets you connect to anything.
- Added `ui_order: int` to `InputField` and `OutputField`. The UI will use this, if present, to order fields in a node UI. See usage in `DenoiseLatents` for an example.
- Updated the field colors - duplicate colors have just been lightened a bit. It's not perfect but it was a quick fix.
- Field handles for collections are the same color as their single counterparts, but have a dark dot in the center of them.
- Field handles for polymorphics are a rounded square with dot in the middle.
- Removed all fields that just render `null` from `InputFieldRenderer`, replaced with a single fallback
- Removed logic in `zValidatedWorkflow`, which checked for existence of node templates for each node in a workflow. This logic introduced a circular dependency, due to importing the global redux `store` in order to get the node templates within a zod schema. It's actually fine to just leave this out entirely; The case of a missing node template is handled by the UI. Fixing it otherwise would introduce a substantial headache.
- Fixed the `ControlNetInvocation.control_model` field default, which was a string when it shouldn't have one.
2023-09-01 09:40:27 +00:00
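As a minimal sketch of such a polymorphic field declaration (the node class and default values here are hypothetical; `BaseInvocation`, `InputField` and `UIType` are assumed to be the helpers referenced in the commit message above and imported as in the rest of this file):
```py
class ExampleDenoiseInvocation(BaseInvocation):
    """Hypothetical node illustrating a polymorphic field with a singleton UI hint"""

    # Polymorphic: accepts either a single float or a list of per-step floats.
    # ui_type=UIType.Float asks the UI to present this as a plain number input,
    # and connection validation then treats the field as float-typed.
    # ui_order controls where the field appears in the node UI.
    cfg_scale: Union[float, List[float]] = InputField(
        default=7.5, ge=1, description="CFG scale", ui_type=UIType.Float, ui_order=1
    )
```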
|
|
|
control_model: ControlNetModelField = InputField(description=FieldDescriptions.controlnet_model, input=Input.Direct)
|
2023-08-14 03:23:09 +00:00
|
|
|
control_weight: Union[float, List[float]] = InputField(
|
2023-09-15 01:01:37 +00:00
|
|
|
default=1.0, description="The weight given to the ControlNet"
|
2023-08-14 03:23:09 +00:00
|
|
|
)
|
|
|
|
begin_step_percent: float = InputField(
|
|
|
|
default=0, ge=-1, le=2, description="When the ControlNet is first applied (% of total steps)"
|
|
|
|
)
|
|
|
|
end_step_percent: float = InputField(
|
|
|
|
default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
|
|
|
|
)
|
|
|
|
control_mode: CONTROLNET_MODE_VALUES = InputField(default="balanced", description="The control mode used")
|
|
|
|
resize_mode: CONTROLNET_RESIZE_VALUES = InputField(default="just_resize", description="The resize mode used")
|
2023-05-06 04:41:07 +00:00
|
|
|
|
2023-05-06 04:41:07 +00:00
|
|
|
def invoke(self, context: InvocationContext) -> ControlOutput:
|
|
|
|
return ControlOutput(
|
|
|
|
control=ControlField(
|
|
|
|
image=self.image,
|
2023-07-15 03:00:46 +00:00
|
|
|
control_model=self.control_model,
|
2023-05-12 11:01:35 +00:00
|
|
|
control_weight=self.control_weight,
|
|
|
|
begin_step_percent=self.begin_step_percent,
|
|
|
|
end_step_percent=self.end_step_percent,
|
2023-06-14 04:08:34 +00:00
|
|
|
control_mode=self.control_mode,
|
2023-07-20 02:17:24 +00:00
|
|
|
resize_mode=self.resize_mode,
|
2023-05-06 04:41:07 +00:00
|
|
|
),
|
|
|
|
)
|
|
|
|
|
2023-06-26 19:03:05 +00:00
|
|
|
|
2023-09-04 08:11:56 +00:00
|
|
|
@invocation(
|
|
|
|
"image_processor", title="Base Image Processor", tags=["controlnet"], category="controlnet", version="1.0.0"
|
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class ImageProcessorInvocation(BaseInvocation):
|
2023-05-06 04:41:07 +00:00
|
|
|
"""Base class for invocations that preprocess images for ControlNet"""
|
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
image: ImageField = InputField(description="The image to process")
|
2023-05-06 04:41:07 +00:00
|
|
|
|
2023-05-05 00:06:49 +00:00
|
|
|
def run_processor(self, image):
|
|
|
|
# superclass just passes through image without processing
|
|
|
|
return image
|
|
|
|
|
2023-05-06 04:41:07 +00:00
|
|
|
def invoke(self, context: InvocationContext) -> ImageOutput:
|
2023-06-14 11:40:09 +00:00
|
|
|
raw_image = context.services.images.get_pil_image(self.image.image_name)
|
2023-05-05 00:06:49 +00:00
|
|
|
# image type should be PIL.PngImagePlugin.PngImageFile ?
|
2023-05-06 00:11:31 +00:00
|
|
|
processed_image = self.run_processor(raw_image)
|
2023-05-26 23:47:27 +00:00
|
|
|
|
2023-05-05 21:12:19 +00:00
|
|
|
# currently can't see processed image in node UI without a showImage node,
|
|
|
|
# so for now setting image_type to RESULT instead of INTERMEDIATE so will get saved in gallery
|
2023-05-26 23:47:27 +00:00
|
|
|
image_dto = context.services.images.create(
|
|
|
|
image=processed_image,
|
2023-05-27 11:55:29 +00:00
|
|
|
image_origin=ResourceOrigin.INTERNAL,
|
|
|
|
image_category=ImageCategory.CONTROL,
|
2023-05-26 23:47:27 +00:00
|
|
|
session_id=context.graph_execution_state_id,
|
|
|
|
node_id=self.id,
|
|
|
|
is_intermediate=self.is_intermediate,
|
2023-08-24 11:42:32 +00:00
|
|
|
workflow=self.workflow,
|
2023-05-05 00:06:49 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
"""Builds an ImageOutput and its ImageField"""
|
2023-06-14 11:40:09 +00:00
|
|
|
processed_image_field = ImageField(image_name=image_dto.image_name)
|
2023-05-06 04:41:07 +00:00
|
|
|
return ImageOutput(
|
|
|
|
image=processed_image_field,
|
2023-05-26 23:47:27 +00:00
|
|
|
# width=processed_image.width,
|
2023-07-18 14:26:45 +00:00
|
|
|
width=image_dto.width,
|
2023-05-26 23:47:27 +00:00
|
|
|
# height=processed_image.height,
|
2023-07-18 14:26:45 +00:00
|
|
|
height=image_dto.height,
|
2023-05-26 23:47:27 +00:00
|
|
|
# mode=processed_image.mode,
|
2023-04-30 02:40:22 +00:00
|
|
|
)
|
2023-05-04 23:01:22 +00:00
|
|
|
|
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"canny_image_processor",
|
|
|
|
title="Canny Processor",
|
|
|
|
tags=["controlnet", "canny"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class CannyImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-04 21:21:11 +00:00
|
|
|
"""Canny edge detection for ControlNet"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
low_threshold: int = InputField(
|
|
|
|
default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
|
|
|
|
)
|
|
|
|
high_threshold: int = InputField(
|
|
|
|
default=200, ge=0, le=255, description="The high threshold of the Canny pixel gradient (0-255)"
|
|
|
|
)
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-04 23:01:22 +00:00
|
|
|
def run_processor(self, image):
|
2023-04-30 02:40:22 +00:00
|
|
|
canny_processor = CannyDetector()
|
2023-07-18 14:26:45 +00:00
|
|
|
processed_image = canny_processor(image, self.low_threshold, self.high_threshold)
|
2023-05-04 23:01:22 +00:00
|
|
|
return processed_image
|
|
|
|
|
2023-05-04 21:21:11 +00:00
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"hed_image_processor",
|
|
|
|
title="HED (softedge) Processor",
|
|
|
|
tags=["controlnet", "hed", "softedge"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class HedImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-05 05:40:50 +00:00
|
|
|
"""Applies HED edge detection to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
|
|
|
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
2023-05-17 02:44:45 +00:00
|
|
|
# safe not supported in controlnet_aux v0.0.3
|
2023-08-14 03:23:09 +00:00
|
|
|
# safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
|
|
|
|
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 05:40:50 +00:00
|
|
|
def run_processor(self, image):
|
|
|
|
hed_processor = HEDdetector.from_pretrained("lllyasviel/Annotators")
|
|
|
|
processed_image = hed_processor(
|
|
|
|
image,
|
|
|
|
detect_resolution=self.detect_resolution,
|
|
|
|
image_resolution=self.image_resolution,
|
2023-05-17 02:44:45 +00:00
|
|
|
# safe not supported in controlnet_aux v0.0.3
|
|
|
|
# safe=self.safe,
|
2023-05-05 05:40:50 +00:00
|
|
|
scribble=self.scribble,
|
|
|
|
)
|
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"lineart_image_processor",
|
|
|
|
title="Lineart Processor",
|
|
|
|
tags=["controlnet", "lineart"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class LineartImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-05 05:40:50 +00:00
|
|
|
"""Applies line art processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
|
|
|
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
|
|
|
coarse: bool = InputField(default=False, description="Whether to use coarse mode")
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 05:40:50 +00:00
|
|
|
def run_processor(self, image):
|
2023-07-18 14:26:45 +00:00
|
|
|
lineart_processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
|
|
|
|
processed_image = lineart_processor(
|
|
|
|
image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution, coarse=self.coarse
|
|
|
|
)
|
2023-05-05 05:40:50 +00:00
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"lineart_anime_image_processor",
|
|
|
|
title="Lineart Anime Processor",
|
|
|
|
tags=["controlnet", "lineart", "anime"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-05 21:12:19 +00:00
|
|
|
"""Applies line art anime processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
|
|
|
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 21:12:19 +00:00
|
|
|
def run_processor(self, image):
|
2023-07-18 14:26:45 +00:00
|
|
|
processor = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
|
2023-05-05 21:12:19 +00:00
|
|
|
processed_image = processor(
|
|
|
|
image,
|
|
|
|
detect_resolution=self.detect_resolution,
|
|
|
|
image_resolution=self.image_resolution,
|
|
|
|
)
|
|
|
|
return processed_image
|
|
|
|
|
2023-05-05 05:40:50 +00:00
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"openpose_image_processor",
|
|
|
|
title="Openpose Processor",
|
|
|
|
tags=["controlnet", "openpose", "pose"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-05 21:12:19 +00:00
|
|
|
"""Applies Openpose processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
hand_and_face: bool = InputField(default=False, description="Whether to use hands and face mode")
|
|
|
|
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
|
|
|
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 05:40:50 +00:00
|
|
|
def run_processor(self, image):
|
2023-07-18 14:26:45 +00:00
|
|
|
openpose_processor = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
|
|
|
|
processed_image = openpose_processor(
|
|
|
|
image,
|
|
|
|
detect_resolution=self.detect_resolution,
|
|
|
|
image_resolution=self.image_resolution,
|
|
|
|
hand_and_face=self.hand_and_face,
|
|
|
|
)
|
2023-05-05 21:12:19 +00:00
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"midas_depth_image_processor",
|
|
|
|
title="Midas Depth Processor",
|
|
|
|
tags=["controlnet", "midas"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-05 21:12:19 +00:00
|
|
|
"""Applies Midas depth processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
|
|
|
|
bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`")
|
2023-05-17 02:44:45 +00:00
|
|
|
# depth_and_normal not supported in controlnet_aux v0.0.3
|
2023-08-14 03:23:09 +00:00
|
|
|
# depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 21:12:19 +00:00
|
|
|
def run_processor(self, image):
|
|
|
|
midas_processor = MidasDetector.from_pretrained("lllyasviel/Annotators")
|
|
|
|
processed_image = midas_processor(
|
|
|
|
image,
|
|
|
|
a=np.pi * self.a_mult,
|
|
|
|
bg_th=self.bg_th,
|
2023-05-17 02:44:45 +00:00
|
|
|
# depth_and_normal not supported in controlnet_aux v0.0.3
|
|
|
|
# depth_and_normal=self.depth_and_normal,
|
|
|
|
)
|
2023-05-05 21:12:19 +00:00
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"normalbae_image_processor",
|
|
|
|
title="Normal BAE Processor",
|
|
|
|
tags=["controlnet"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-05 21:12:19 +00:00
|
|
|
"""Applies NormalBae processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
|
|
|
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 21:12:19 +00:00
|
|
|
def run_processor(self, image):
|
2023-07-18 14:26:45 +00:00
|
|
|
normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
|
|
|
|
processed_image = normalbae_processor(
|
|
|
|
image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution
|
|
|
|
)
|
2023-05-05 21:12:19 +00:00
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
2023-09-04 08:11:56 +00:00
|
|
|
@invocation(
|
|
|
|
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.0.0"
|
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class MlsdImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-05 21:12:19 +00:00
|
|
|
"""Applies MLSD processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
|
|
|
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
|
|
|
thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`")
|
|
|
|
thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`")
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 21:12:19 +00:00
|
|
|
def run_processor(self, image):
|
|
|
|
mlsd_processor = MLSDdetector.from_pretrained("lllyasviel/Annotators")
|
2023-07-18 14:26:45 +00:00
|
|
|
processed_image = mlsd_processor(
|
|
|
|
image,
|
|
|
|
detect_resolution=self.detect_resolution,
|
|
|
|
image_resolution=self.image_resolution,
|
|
|
|
thr_v=self.thr_v,
|
|
|
|
thr_d=self.thr_d,
|
|
|
|
)
|
2023-05-05 21:12:19 +00:00
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
2023-09-04 08:11:56 +00:00
|
|
|
@invocation(
|
|
|
|
"pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.0.0"
|
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class PidiImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-05 21:12:19 +00:00
|
|
|
"""Applies PIDI processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
|
|
|
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
|
|
|
safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
|
|
|
|
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 21:12:19 +00:00
|
|
|
def run_processor(self, image):
|
2023-07-18 14:26:45 +00:00
|
|
|
pidi_processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
|
|
|
|
processed_image = pidi_processor(
|
|
|
|
image,
|
|
|
|
detect_resolution=self.detect_resolution,
|
|
|
|
image_resolution=self.image_resolution,
|
|
|
|
safe=self.safe,
|
|
|
|
scribble=self.scribble,
|
|
|
|
)
|
2023-05-05 21:12:19 +00:00
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"content_shuffle_image_processor",
|
|
|
|
title="Content Shuffle Processor",
|
|
|
|
tags=["controlnet", "contentshuffle"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-05 21:12:19 +00:00
|
|
|
"""Applies content shuffle processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
|
|
|
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
|
|
|
h: Optional[int] = InputField(default=512, ge=0, description="Content shuffle `h` parameter")
|
|
|
|
w: Optional[int] = InputField(default=512, ge=0, description="Content shuffle `w` parameter")
|
|
|
|
f: Optional[int] = InputField(default=256, ge=0, description="Content shuffle `f` parameter")
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 21:12:19 +00:00
|
|
|
def run_processor(self, image):
|
|
|
|
content_shuffle_processor = ContentShuffleDetector()
|
|
|
|
processed_image = content_shuffle_processor(
|
|
|
|
image,
|
|
|
|
detect_resolution=self.detect_resolution,
|
|
|
|
image_resolution=self.image_resolution,
|
|
|
|
h=self.h,
|
|
|
|
w=self.w,
|
|
|
|
f=self.f,
|
|
|
|
)
|
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
2023-05-26 23:47:27 +00:00
|
|
|
# should work with controlnet_aux >= 0.0.4 and timm <= 0.6.13
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"zoe_depth_image_processor",
|
|
|
|
title="Zoe (Depth) Processor",
|
|
|
|
tags=["controlnet", "zoe", "depth"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
|
2023-05-26 23:47:27 +00:00
|
|
|
"""Applies Zoe depth processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-05-26 23:47:27 +00:00
|
|
|
def run_processor(self, image):
|
2023-07-18 14:26:45 +00:00
|
|
|
zoe_depth_processor = ZoeDetector.from_pretrained("lllyasviel/Annotators")
|
2023-05-26 23:47:27 +00:00
|
|
|
processed_image = zoe_depth_processor(image)
|
|
|
|
return processed_image
|
2023-04-30 02:40:22 +00:00
|
|
|
|
2023-05-23 23:21:13 +00:00
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"mediapipe_face_processor",
|
|
|
|
title="Mediapipe Face Processor",
|
|
|
|
tags=["controlnet", "mediapipe", "face"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
|
2023-05-23 23:21:13 +00:00
|
|
|
"""Applies mediapipe face processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
|
|
|
|
min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-23 23:21:13 +00:00
|
|
|
def run_processor(self, image):
|
2023-06-26 11:29:43 +00:00
|
|
|
# MediaPipeFaceDetector throws an error if image has alpha channel
|
|
|
|
# so convert to RGB if needed
|
|
|
|
if image.mode == "RGBA":
|
|
|
|
image = image.convert("RGB")
|
2023-05-23 23:21:13 +00:00
|
|
|
mediapipe_face_processor = MediapipeFaceDetector()
|
2023-07-18 14:26:45 +00:00
|
|
|
processed_image = mediapipe_face_processor(image, max_faces=self.max_faces, min_confidence=self.min_confidence)
|
2023-05-23 23:21:13 +00:00
|
|
|
return processed_image
|
2023-06-25 18:16:39 +00:00
|
|
|
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"leres_image_processor",
|
|
|
|
title="Leres (Depth) Processor",
|
|
|
|
tags=["controlnet", "leres", "depth"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
2023-06-28 03:45:47 +00:00
|
|
|
"""Applies leres processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
thr_a: float = InputField(default=0, description="Leres parameter `thr_a`")
|
|
|
|
thr_b: float = InputField(default=0, description="Leres parameter `thr_b`")
|
|
|
|
boost: bool = InputField(default=False, description="Whether to use boost mode")
|
|
|
|
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
|
|
|
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-06-28 03:45:47 +00:00
|
|
|
def run_processor(self, image):
|
|
|
|
leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
|
2023-07-18 14:26:45 +00:00
|
|
|
processed_image = leres_processor(
|
|
|
|
image,
|
|
|
|
thr_a=self.thr_a,
|
|
|
|
thr_b=self.thr_b,
|
|
|
|
boost=self.boost,
|
|
|
|
detect_resolution=self.detect_resolution,
|
|
|
|
image_resolution=self.image_resolution,
|
|
|
|
)
|
2023-06-28 03:45:47 +00:00
|
|
|
return processed_image
|
2023-06-25 18:16:39 +00:00
|
|
|
|
2023-06-26 11:27:26 +00:00
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"tile_image_processor",
|
|
|
|
title="Tile Resample Processor",
|
|
|
|
tags=["controlnet", "tile"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class TileResamplerProcessorInvocation(ImageProcessorInvocation):
|
|
|
|
"""Tile resampler processor"""
|
|
|
|
|
|
|
|
# res: int = InputField(default=512, ge=0, le=1024, description="The pixel resolution for each tile")
|
|
|
|
down_sampling_rate: float = InputField(default=1.0, ge=1.0, le=8.0, description="Down sampling rate")
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-06-26 11:27:26 +00:00
|
|
|
# tile_resample copied from sd-webui-controlnet/scripts/processor.py
|
|
|
|
def tile_resample(
|
|
|
|
self,
|
|
|
|
np_img: np.ndarray,
|
|
|
|
res=512, # never used?
|
|
|
|
down_sampling_rate=1.0,
|
|
|
|
):
|
|
|
|
np_img = HWC3(np_img)
|
|
|
|
if down_sampling_rate < 1.1:
|
|
|
|
return np_img
|
|
|
|
H, W, C = np_img.shape
|
|
|
|
H = int(float(H) / float(down_sampling_rate))
|
|
|
|
W = int(float(W) / float(down_sampling_rate))
|
|
|
|
np_img = cv2.resize(np_img, (W, H), interpolation=cv2.INTER_AREA)
|
|
|
|
return np_img
|
|
|
|
|
|
|
|
def run_processor(self, img):
|
|
|
|
np_img = np.array(img, dtype=np.uint8)
|
|
|
|
processed_np_image = self.tile_resample(
|
|
|
|
np_img,
|
2023-07-18 14:26:45 +00:00
|
|
|
# res=self.tile_size,
|
2023-06-26 11:27:26 +00:00
|
|
|
down_sampling_rate=self.down_sampling_rate,
|
|
|
|
)
|
|
|
|
processed_image = Image.fromarray(processed_np_image)
|
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
2023-08-30 08:35:12 +00:00
|
|
|
@invocation(
|
|
|
|
"segment_anything_processor",
|
|
|
|
title="Segment Anything Processor",
|
|
|
|
tags=["controlnet", "segmentanything"],
|
|
|
|
category="controlnet",
|
2023-09-04 08:11:56 +00:00
|
|
|
version="1.0.0",
|
2023-08-30 08:35:12 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
|
2023-06-25 18:16:39 +00:00
|
|
|
"""Applies segment anything processing to image"""
|
2023-07-27 14:54:01 +00:00
|
|
|
|
2023-06-25 18:16:39 +00:00
|
|
|
def run_processor(self, image):
|
2023-06-25 19:38:17 +00:00
|
|
|
# segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
|
2023-07-18 14:26:45 +00:00
|
|
|
segment_anything_processor = SamDetectorReproducibleColors.from_pretrained(
|
|
|
|
"ybelkada/segment-anything", subfolder="checkpoints"
|
|
|
|
)
|
2023-06-26 11:27:26 +00:00
|
|
|
np_img = np.array(image, dtype=np.uint8)
|
|
|
|
processed_image = segment_anything_processor(np_img)
|
2023-06-25 18:16:39 +00:00
|
|
|
return processed_image
|
2023-06-25 19:38:17 +00:00
|
|
|
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-06-25 19:38:17 +00:00
|
|
|
class SamDetectorReproducibleColors(SamDetector):
|
|
|
|
# overriding SamDetector.show_anns() method to use reproducible colors for segmentation image
|
|
|
|
# base class show_anns() method randomizes colors,
|
|
|
|
# which seems to also lead to non-reproducible image generation
|
|
|
|
# so using ADE20k color palette instead
|
|
|
|
def show_anns(self, anns: List[Dict]):
|
|
|
|
if len(anns) == 0:
|
|
|
|
return
|
|
|
|
sorted_anns = sorted(anns, key=(lambda x: x["area"]), reverse=True)
|
|
|
|
h, w = anns[0]["segmentation"].shape
|
2023-07-18 14:26:45 +00:00
|
|
|
final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode="RGB")
|
2023-06-25 19:38:17 +00:00
|
|
|
palette = ade_palette()
|
|
|
|
for i, ann in enumerate(sorted_anns):
|
|
|
|
m = ann["segmentation"]
|
|
|
|
img = np.empty((m.shape[0], m.shape[1], 3), dtype=np.uint8)
|
|
|
|
# doing modulo just in case number of annotated regions exceeds number of colors in palette
|
|
|
|
ann_color = palette[i % len(palette)]
|
2023-06-25 19:54:48 +00:00
|
|
|
img[:, :] = ann_color
|
2023-07-18 14:26:45 +00:00
|
|
|
final_img.paste(Image.fromarray(img, mode="RGB"), (0, 0), Image.fromarray(np.uint8(m * 255)))
|
2023-06-25 19:38:17 +00:00
|
|
|
return np.array(final_img, dtype=np.uint8)
|
2023-09-22 20:32:27 +00:00
|
|
|
|
|
|
|
|
|
|
|
@invocation(
|
|
|
|
"color_map_image_processor",
|
|
|
|
title="Color Map Processor",
|
|
|
|
tags=["controlnet"],
|
|
|
|
category="controlnet",
|
|
|
|
version="1.0.0",
|
|
|
|
)
|
|
|
|
class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
|
|
|
|
"""Generates a color map from the provided image"""
|
|
|
|
|
|
|
|
color_map_tile_size: int = InputField(default=64, ge=0, description=FieldDescriptions.tile_size)
|
|
|
|
|
|
|
|
def run_processor(self, image: Image.Image):
|
|
|
|
image = image.convert("RGB")
|
|
|
|
image = np.array(image, dtype=np.uint8)
|
|
|
|
height, width = image.shape[:2]
|
|
|
|
|
2023-09-22 20:48:59 +00:00
|
|
|
width_tile_size = min(self.color_map_tile_size, width)
|
|
|
|
height_tile_size = min(self.color_map_tile_size, height)
|
|
|
|
|
2023-09-22 20:32:27 +00:00
|
|
|
color_map = cv2.resize(
|
|
|
|
image,
|
2023-09-22 20:48:59 +00:00
|
|
|
(width // width_tile_size, height // height_tile_size),
|
2023-09-22 20:32:27 +00:00
|
|
|
interpolation=cv2.INTER_CUBIC,
|
|
|
|
)
|
|
|
|
color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
|
2023-09-22 20:48:59 +00:00
|
|
|
color_map = Image.fromarray(color_map)
|
2023-09-22 20:32:27 +00:00
|
|
|
return color_map
|