# Invocations for ControlNet image preprocessors
# initial implementation by Gregg Helt, 2023
# heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux

from builtins import bool, float
from typing import Dict, List, Literal, Union

import cv2
import numpy as np
from controlnet_aux import (
    ContentShuffleDetector,
    LeresDetector,
    MediapipeFaceDetector,
    MidasDetector,
    MLSDdetector,
    NormalBaeDetector,
    PidiNetDetector,
    SamDetector,
    ZoeDetector,
)
from controlnet_aux.util import HWC3, ade_palette
from PIL import Image
from pydantic import BaseModel, Field, field_validator, model_validator

from invokeai.app.invocations.fields import (
    FieldDescriptions,
    ImageField,
    Input,
    InputField,
    OutputField,
    UIType,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
from invokeai.backend.image_util.canny import get_canny_edges
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
from invokeai.backend.image_util.hed import HEDProcessor
from invokeai.backend.image_util.lineart import LineartProcessor
from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
from invokeai.backend.image_util.util import np_to_pil, pil_to_np

from .baseinvocation import BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output
class ControlField(BaseModel):
    image: ImageField = Field(description="The control image")
    control_model: ModelIdentifierField = Field(description="The ControlNet model to use")
    control_weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
    begin_step_percent: float = Field(
        default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
    )
    end_step_percent: float = Field(
        default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
    )
    control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use")
    resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")

    @field_validator("control_weight")
    @classmethod
    def validate_control_weight(cls, v):
        validate_weights(v)
        return v

    @model_validator(mode="after")
    def validate_begin_end_step_percent(self):
        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
        return self
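# Illustrative sketch (not part of the module): `control_weight` accepts either a single
# float applied to every step or a list of per-step weights; the validators reject
# out-of-range weights and a `begin_step_percent` greater than `end_step_percent`.
# The field values below are hypothetical.
#
#   uniform = ControlField(
#       image=ImageField(image_name="control.png"),
#       control_model=some_model,  # a ModelIdentifierField obtained elsewhere
#       control_weight=0.75,
#   )
#   ramped = ControlField(
#       image=ImageField(image_name="control.png"),
#       control_model=some_model,
#       control_weight=[0.2, 0.5, 1.0],  # one weight per step
#       begin_step_percent=0.0,
#       end_step_percent=0.8,
#   )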
@invocation_output("control_output")
class ControlOutput(BaseInvocationOutput):
    """node output for ControlNet info"""

    # Outputs
    control: ControlField = OutputField(description=FieldDescriptions.control)


@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.1")
class ControlNetInvocation(BaseInvocation):
    """Collects ControlNet info to pass to other nodes"""

    image: ImageField = InputField(description="The control image")
    control_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.controlnet_model, input=Input.Direct, ui_type=UIType.ControlNetModel
    )
    control_weight: Union[float, List[float]] = InputField(
        default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
    )
    begin_step_percent: float = InputField(
        default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
    )
    end_step_percent: float = InputField(
        default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
    )
    control_mode: CONTROLNET_MODE_VALUES = InputField(default="balanced", description="The control mode used")
    resize_mode: CONTROLNET_RESIZE_VALUES = InputField(default="just_resize", description="The resize mode used")

    @field_validator("control_weight")
    @classmethod
    def validate_control_weight(cls, v):
        validate_weights(v)
        return v

    @model_validator(mode="after")
    def validate_begin_end_step_percent(self) -> "ControlNetInvocation":
        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
        return self

    def invoke(self, context: InvocationContext) -> ControlOutput:
        return ControlOutput(
            control=ControlField(
                image=self.image,
                control_model=self.control_model,
                control_weight=self.control_weight,
                begin_step_percent=self.begin_step_percent,
                end_step_percent=self.end_step_percent,
                control_mode=self.control_mode,
                resize_mode=self.resize_mode,
            ),
        )
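# Note: the @invocation decorator above registers this node under the type "controlnet".
# invoke() simply bundles the node's inputs into a ControlField for downstream denoising
# nodes to consume; when several ControlNets are used at once, their outputs are
# typically gathered into a collection (e.g. via a Collect node) before being applied.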
# This invocation exists for other invocations to subclass it - do not register with @invocation!
class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Base class for invocations that preprocess images for ControlNet"""

    image: ImageField = InputField(description="The image to process")

    def run_processor(self, image: Image.Image) -> Image.Image:
        # superclass just passes through image without processing
        return image

    def load_image(self, context: InvocationContext) -> Image.Image:
        # allows override for any special formatting specific to the preprocessor
        return context.images.get_pil(self.image.image_name, "RGB")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        raw_image = self.load_image(context)
        # image type should be PIL.PngImagePlugin.PngImageFile ?
        processed_image = self.run_processor(raw_image)

        # currently can't see processed image in node UI without a showImage node,
        # so for now setting image_type to RESULT instead of INTERMEDIATE so will get saved in gallery
        image_dto = context.images.save(image=processed_image)

        """Builds an ImageOutput and its ImageField"""
        processed_image_field = ImageField(image_name=image_dto.image_name)
        return ImageOutput(
            image=processed_image_field,
            # width=processed_image.width,
            width=image_dto.width,
            # height=processed_image.height,
            height=image_dto.height,
            # mode=processed_image.mode,
        )
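# Subclassing sketch: the concrete preprocessors below only need to override run_processor()
# (and optionally load_image() for a different color mode); the base invoke() handles
# loading, saving, and building the ImageOutput. The invocation type, class name, and
# behavior here are hypothetical.
#
#   @invocation("example_image_processor", title="Example Processor", tags=["controlnet"],
#               category="controlnet", version="1.0.0")
#   class ExampleImageProcessorInvocation(ImageProcessorInvocation):
#       """Example preprocessor that returns a grayscale copy of the image"""
#
#       def run_processor(self, image: Image.Image) -> Image.Image:
#           return image.convert("L").convert("RGB")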
@invocation(
    "canny_image_processor",
    title="Canny Processor",
    tags=["controlnet", "canny"],
    category="controlnet",
    version="1.3.3",
)
class CannyImageProcessorInvocation(ImageProcessorInvocation):
    """Canny edge detection for ControlNet"""

    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    low_threshold: int = InputField(
        default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
    )
    high_threshold: int = InputField(
        default=200, ge=0, le=255, description="The high threshold of the Canny pixel gradient (0-255)"
    )

    def load_image(self, context: InvocationContext) -> Image.Image:
        # Keep alpha channel for Canny processing to detect edges of transparent areas
        return context.images.get_pil(self.image.image_name, "RGBA")

    def run_processor(self, image: Image.Image) -> Image.Image:
        processed_image = get_canny_edges(
            image,
            self.low_threshold,
            self.high_threshold,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
        )
        return processed_image
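# For reference: the two thresholds drive Canny's hysteresis step - gradients above
# high_threshold are kept as edges, gradients below low_threshold are discarded, and
# values in between survive only if connected to a strong edge. A rough standalone
# equivalent of the defaults (assuming a uint8 grayscale numpy array `gray`) would be
# cv2.Canny(gray, 100, 200).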
@invocation(
    "hed_image_processor",
    title="HED (softedge) Processor",
    tags=["controlnet", "hed", "softedge"],
    category="controlnet",
    version="1.2.3",
)
class HedImageProcessorInvocation(ImageProcessorInvocation):
    """Applies HED edge detection to image"""

    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    # safe not supported in controlnet_aux v0.0.3
    # safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
    scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)

    def run_processor(self, image: Image.Image) -> Image.Image:
        hed_processor = HEDProcessor()
        processed_image = hed_processor.run(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
            # safe not supported in controlnet_aux v0.0.3
            # safe=self.safe,
            scribble=self.scribble,
        )
        return processed_image
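# Note: in the underlying HED implementation (adapted from controlnet_aux), scribble=True
# post-processes the soft edge map into a thinner, scribble-style map; with the default
# False the full softedge map is returned.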
@invocation(
    "lineart_image_processor",
    title="Lineart Processor",
    tags=["controlnet", "lineart"],
    category="controlnet",
    version="1.2.3",
)
class LineartImageProcessorInvocation(ImageProcessorInvocation):
    """Applies line art processing to image"""

    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    coarse: bool = InputField(default=False, description="Whether to use coarse mode")

    def run_processor(self, image: Image.Image) -> Image.Image:
        lineart_processor = LineartProcessor()
        processed_image = lineart_processor.run(
            image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution, coarse=self.coarse
        )
        return processed_image
@invocation(
    "lineart_anime_image_processor",
    title="Lineart Anime Processor",
    tags=["controlnet", "lineart", "anime"],
    category="controlnet",
    version="1.2.3",
)
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
    """Applies line art anime processing to image"""

    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image) -> Image.Image:
        processor = LineartAnimeProcessor()
        processed_image = processor.run(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
        )
        return processed_image
@invocation(
    "midas_depth_image_processor",
    title="Midas Depth Processor",
    tags=["controlnet", "midas"],
    category="controlnet",
    version="1.2.4",
)
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
    """Applies Midas depth processing to image"""

    a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
    bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`")
    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    # depth_and_normal not supported in controlnet_aux v0.0.3
    # depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")

    def run_processor(self, image):
        midas_processor = MidasDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = midas_processor(
            image,
            a=np.pi * self.a_mult,
            bg_th=self.bg_th,
            image_resolution=self.image_resolution,
            detect_resolution=self.detect_resolution,
            # depth_and_normal not supported in controlnet_aux v0.0.3
            # depth_and_normal=self.depth_and_normal,
        )
        return processed_image
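# Worked example: with the default a_mult=2.0 the detector receives a = 2.0 * pi ≈ 6.28,
# matching the "a = a_mult * PI" note in the a_mult field description above.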
@invocation(
    "normalbae_image_processor",
    title="Normal BAE Processor",
    tags=["controlnet"],
    category="controlnet",
    version="1.2.3",
)
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
    """Applies NormalBae processing to image"""

    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image):
        normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = normalbae_processor(
            image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution
        )
        return processed_image
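# Note: the Detector.from_pretrained("lllyasviel/Annotators") calls used by the Midas,
# NormalBae, MLSD, and PIDI processors fetch their annotator checkpoints from the Hugging
# Face Hub on first use and cache them locally, so the first run of each of these
# processors is typically slower than later runs.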
@invocation(
    "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.3"
)
class MlsdImageProcessorInvocation(ImageProcessorInvocation):
    """Applies MLSD processing to image"""

    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`")
    thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`")

    def run_processor(self, image):
        mlsd_processor = MLSDdetector.from_pretrained("lllyasviel/Annotators")
        processed_image = mlsd_processor(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
            thr_v=self.thr_v,
            thr_d=self.thr_d,
        )
        return processed_image
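# Note: thr_v and thr_d are passed straight through to MLSDdetector; in the underlying
# implementation they appear to act as the line-segment score threshold and distance
# threshold, respectively, so raising them tends to keep fewer, more confident segments.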
@invocation(
    "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.3"
)
class PidiImageProcessorInvocation(ImageProcessorInvocation):
    """Applies PIDI processing to image"""

    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
    scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)

    def run_processor(self, image):
        pidi_processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = pidi_processor(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
            safe=self.safe,
            scribble=self.scribble,
        )
        return processed_image
@invocation(
    "content_shuffle_image_processor",
    title="Content Shuffle Processor",
    tags=["controlnet", "contentshuffle"],
    category="controlnet",
    version="1.2.3",
)
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
    """Applies content shuffle processing to image"""

    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
feat(api): chore: pydantic & fastapi upgrade
Upgrade pydantic and fastapi to latest.
- pydantic~=2.4.2
- fastapi~=103.2
- fastapi-events~=0.9.1
**Big Changes**
There are a number of logic changes needed to support pydantic v2. Most changes are very simple, like using the new methods to serialized and deserialize models, but there are a few more complex changes.
**Invocations**
The biggest change relates to invocation creation, instantiation and validation.
Because pydantic v2 moves all validation logic into the rust pydantic-core, we may no longer directly stick our fingers into the validation pie.
Previously, we (ab)used models and fields to allow invocation fields to be optional at instantiation, but required when `invoke()` is called. We directly manipulated the fields and invocation models when calling `invoke()`.
With pydantic v2, this is much more involved. Changes to the python wrapper do not propagate down to the rust validation logic - you have to rebuild the model. This causes problem with concurrent access to the invocation classes and is not a free operation.
This logic has been totally refactored and we do not need to change the model any more. The details are in `baseinvocation.py`, in the `InputField` function and `BaseInvocation.invoke_internal()` method.
In the end, this implementation is cleaner.
**Invocation Fields**
In pydantic v2, you can no longer directly add or remove fields from a model.
Previously, we did this to add the `type` field to invocations.
**Invocation Decorators**
With pydantic v2, we instead use the imperative `create_model()` API to create a new model with the additional field. This is done in `baseinvocation.py` in the `invocation()` wrapper.
A similar technique is used for `invocation_output()`.
**Minor Changes**
There are a number of minor changes around the pydantic v2 models API.
**Protected `model_` Namespace**
All models' pydantic-provided methods and attributes are prefixed with `model_` and this is considered a protected namespace. This causes some conflict, because "model" means something to us, and we have a ton of pydantic models with attributes starting with "model_".
Forunately, there are no direct conflicts. However, in any pydantic model where we define an attribute or method that starts with "model_", we must tell set the protected namespaces to an empty tuple.
```py
class IPAdapterModelField(BaseModel):
model_name: str = Field(description="Name of the IP-Adapter model")
base_model: BaseModelType = Field(description="Base model")
model_config = ConfigDict(protected_namespaces=())
```
**Model Serialization**
Pydantic models no longer have `Model.dict()` or `Model.json()`.
Instead, we use `Model.model_dump()` or `Model.model_dump_json()`.
**Model Deserialization**
Pydantic models no longer have `Model.parse_obj()` or `Model.parse_raw()`, and there are no `parse_raw_as()` or `parse_obj_as()` functions.
Instead, you need to create a `TypeAdapter` object to parse python objects or JSON into a model.
```py
adapter_graph = TypeAdapter(Graph)
deserialized_graph_from_json = adapter_graph.validate_json(graph_json)
deserialized_graph_from_dict = adapter_graph.validate_python(graph_dict)
```
**Field Customisation**
Pydantic `Field`s no longer accept arbitrary args.
Now, you must put all additional arbitrary args in a `json_schema_extra` arg on the field.
**Schema Customisation**
FastAPI and pydantic schema generation now follows the OpenAPI version 3.1 spec.
This necessitates two changes:
- Our schema customization logic has been revised
- Schema parsing to build node templates has been revised
The specific aren't important, but this does present additional surface area for bugs.
**Performance Improvements**
Pydantic v2 is a full rewrite with a rust backend. This offers a substantial performance improvement (pydantic claims 5x to 50x depending on the task). We'll notice this the most during serialization and deserialization of sessions/graphs, which happens very very often - a couple times per node.
I haven't done any benchmarks, but anecdotally, graph execution is much faster. Also, very larges graphs - like with massive iterators - are much, much faster.
2023-09-24 08:11:07 +00:00
|
|
|
h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter")
|
|
|
|
w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter")
|
|
|
|
f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter")
|
2023-07-18 14:26:45 +00:00
|
|
|
|
2023-05-05 21:12:19 +00:00
|
|
|
def run_processor(self, image):
|
|
|
|
content_shuffle_processor = ContentShuffleDetector()
|
|
|
|
processed_image = content_shuffle_processor(
|
|
|
|
image,
|
|
|
|
detect_resolution=self.detect_resolution,
|
|
|
|
image_resolution=self.image_resolution,
|
|
|
|
h=self.h,
|
|
|
|
w=self.w,
|
|
|
|
f=self.f,
|
|
|
|
)
|
|
|
|
return processed_image
|
|
|
|
|
|
|
|
|
# should work with controlnet_aux >= 0.0.4 and timm <= 0.6.13
@invocation(
    "zoe_depth_image_processor",
    title="Zoe (Depth) Processor",
    tags=["controlnet", "zoe", "depth"],
    category="controlnet",
    version="1.2.3",
)
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
    """Applies Zoe depth processing to image"""

    def run_processor(self, image):
        zoe_depth_processor = ZoeDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = zoe_depth_processor(image)
        return processed_image

@invocation(
    "mediapipe_face_processor",
    title="Mediapipe Face Processor",
    tags=["controlnet", "mediapipe", "face"],
    category="controlnet",
    version="1.2.4",
)
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
    """Applies mediapipe face processing to image"""

    max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
    min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image):
        mediapipe_face_processor = MediapipeFaceDetector()
        processed_image = mediapipe_face_processor(
            image,
            max_faces=self.max_faces,
            min_confidence=self.min_confidence,
            image_resolution=self.image_resolution,
            detect_resolution=self.detect_resolution,
        )
        return processed_image

@invocation(
    "leres_image_processor",
    title="Leres (Depth) Processor",
    tags=["controlnet", "leres", "depth"],
    category="controlnet",
    version="1.2.3",
)
class LeresImageProcessorInvocation(ImageProcessorInvocation):
    """Applies leres processing to image"""

    thr_a: float = InputField(default=0, description="Leres parameter `thr_a`")
    thr_b: float = InputField(default=0, description="Leres parameter `thr_b`")
    boost: bool = InputField(default=False, description="Whether to use boost mode")
    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image):
        leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
        processed_image = leres_processor(
            image,
            thr_a=self.thr_a,
            thr_b=self.thr_b,
            boost=self.boost,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
        )
        return processed_image

@invocation(
    "tile_image_processor",
    title="Tile Resample Processor",
    tags=["controlnet", "tile"],
    category="controlnet",
    version="1.2.3",
)
class TileResamplerProcessorInvocation(ImageProcessorInvocation):
    """Tile resampler processor"""

    # res: int = InputField(default=512, ge=0, le=1024, description="The pixel resolution for each tile")
    down_sampling_rate: float = InputField(default=1.0, ge=1.0, le=8.0, description="Down sampling rate")

    # tile_resample copied from sd-webui-controlnet/scripts/processor.py
    def tile_resample(
        self,
        np_img: np.ndarray,
        res=512,  # never used?
        down_sampling_rate=1.0,
    ):
        np_img = HWC3(np_img)
        if down_sampling_rate < 1.1:
            return np_img
        H, W, C = np_img.shape
        H = int(float(H) / float(down_sampling_rate))
        W = int(float(W) / float(down_sampling_rate))
        np_img = cv2.resize(np_img, (W, H), interpolation=cv2.INTER_AREA)
        return np_img

    def run_processor(self, img):
        np_img = np.array(img, dtype=np.uint8)
        processed_np_image = self.tile_resample(
            np_img,
            # res=self.tile_size,
            down_sampling_rate=self.down_sampling_rate,
        )
        processed_image = Image.fromarray(processed_np_image)
        return processed_image

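# Worked example of the resampling above (illustrative values only): with
# down_sampling_rate=2.0 a 1024x768 input is shrunk to 512x384 using
# cv2.INTER_AREA, while any rate below 1.1 (including the default of 1.0)
# returns the image unchanged.
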
@invocation(
    "segment_anything_processor",
    title="Segment Anything Processor",
    tags=["controlnet", "segmentanything"],
    category="controlnet",
    version="1.2.4",
)
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
    """Applies segment anything processing to image"""

    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image):
        # segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
        segment_anything_processor = SamDetectorReproducibleColors.from_pretrained(
            "ybelkada/segment-anything", subfolder="checkpoints"
        )
        np_img = np.array(image, dtype=np.uint8)
        processed_image = segment_anything_processor(
            np_img, image_resolution=self.image_resolution, detect_resolution=self.detect_resolution
        )
        return processed_image

class SamDetectorReproducibleColors(SamDetector):
    # overriding SamDetector.show_anns() method to use reproducible colors for segmentation image
    # base class show_anns() method randomizes colors,
    # which seems to also lead to non-reproducible image generation
    # so using ADE20k color palette instead
    def show_anns(self, anns: List[Dict]):
        if len(anns) == 0:
            return
        sorted_anns = sorted(anns, key=(lambda x: x["area"]), reverse=True)
        h, w = anns[0]["segmentation"].shape
        final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode="RGB")
        palette = ade_palette()
        for i, ann in enumerate(sorted_anns):
            m = ann["segmentation"]
            img = np.empty((m.shape[0], m.shape[1], 3), dtype=np.uint8)
            # doing modulo just in case number of annotated regions exceeds number of colors in palette
            ann_color = palette[i % len(palette)]
            img[:, :] = ann_color
            final_img.paste(Image.fromarray(img, mode="RGB"), (0, 0), Image.fromarray(np.uint8(m * 255)))
        return np.array(final_img, dtype=np.uint8)

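# Illustration of the palette indexing above: assuming the ~150-entry ADE20k palette
# returned by ade_palette(), annotation 0 gets palette[0], annotation 149 gets
# palette[149], and annotation 150 wraps back around to palette[0], so colors remain
# deterministic no matter how many regions SAM produces.
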
@invocation(
    "color_map_image_processor",
    title="Color Map Processor",
    tags=["controlnet"],
    category="controlnet",
    version="1.2.3",
)
class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
    """Generates a color map from the provided image"""

    color_map_tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size)

    def run_processor(self, image: Image.Image):
        np_image = np.array(image, dtype=np.uint8)
        height, width = np_image.shape[:2]

        width_tile_size = min(self.color_map_tile_size, width)
        height_tile_size = min(self.color_map_tile_size, height)

        color_map = cv2.resize(
            np_image,
            (width // width_tile_size, height // height_tile_size),
            interpolation=cv2.INTER_CUBIC,
        )
        color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
        color_map = Image.fromarray(color_map)
        return color_map

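# The two-step resize above first shrinks the image so that each
# color_map_tile_size x color_map_tile_size region collapses to roughly one pixel
# (bicubic), then scales it back to the original size with nearest-neighbour
# interpolation, producing a blocky map of per-tile colors.
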
DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]

@invocation(
    "depth_anything_image_processor",
    title="Depth Anything Processor",
    tags=["controlnet", "depth", "depth anything"],
    category="controlnet",
    version="1.1.2",
)
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
    """Generates a depth map based on the Depth Anything algorithm"""

    model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
        default="small", description="The size of the depth model to use"
    )
    resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image):
        depth_anything_detector = DepthAnythingDetector()
        depth_anything_detector.load_model(model_size=self.model_size)

        processed_image = depth_anything_detector(image=image, resolution=self.resolution)
        return processed_image

@invocation(
    "dw_openpose_image_processor",
    title="DW Openpose Image Processor",
    tags=["controlnet", "dwpose", "openpose"],
    category="controlnet",
    version="1.1.1",
)
class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
    """Generates an openpose pose from an image using DWPose"""

    draw_body: bool = InputField(default=True)
    draw_face: bool = InputField(default=False)
    draw_hands: bool = InputField(default=False)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image):
        dw_openpose = DWOpenposeDetector()
        processed_image = dw_openpose(
            image,
            draw_face=self.draw_face,
            draw_hands=self.draw_hands,
            draw_body=self.draw_body,
            resolution=self.image_resolution,
        )
        return processed_image

@invocation(
    "heuristic_resize",
    title="Heuristic Resize",
    tags=["image", "controlnet"],
    category="image",
    version="1.0.1",
    classification=Classification.Prototype,
)
class HeuristicResizeInvocation(BaseInvocation):
    """Resize an image using a heuristic method. Preserves edge maps."""

    image: ImageField = InputField(description="The image to resize")
    width: int = InputField(default=512, ge=1, description="The width to resize to (px)")
    height: int = InputField(default=512, ge=1, description="The height to resize to (px)")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.images.get_pil(self.image.image_name, "RGB")
        np_img = pil_to_np(image)
        np_resized = heuristic_resize(np_img, (self.width, self.height))
        resized = np_to_pil(np_resized)
        image_dto = context.images.save(image=resized)
        return ImageOutput.build(image_dto)
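
# Minimal standalone sketch of the resize helper used above (assumes a local PIL
# image on disk; pil_to_np, np_to_pil and heuristic_resize are the same utilities
# imported by this module):
#
#     edges = Image.open("canny_edges.png").convert("RGB")
#     resized = np_to_pil(heuristic_resize(pil_to_np(edges), (768, 768)))
#     resized.save("canny_edges_768.png")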