chore: rename DWPose to DW Openpose

blessedcoolant 2024-02-11 13:30:51 +05:30 committed by Kent Keirsey
parent 50b93992cf
commit e82c21b5ba
14 changed files with 69 additions and 68 deletions


@@ -81,7 +81,7 @@ their descriptions.
| ONNX Text to Latents | Generates latents from conditionings. |
| ONNX Model Loader | Loads a main model, outputting its submodels. |
| OpenCV Inpaint | Simple inpaint using opencv. |
-| DWPose Processor | Applies Openpose processing to image |
+| DW Openpose Processor | Applies Openpose processing to image |
| PIDI Processor | Applies PIDI processing to image |
| Prompts from File | Loads prompts from a text file |
| Random Integer | Outputs a single random integer. |


@@ -30,7 +30,7 @@ from invokeai.app.invocations.util import validate_begin_end_step, validate_weig
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
-from invokeai.backend.image_util.dwpose import DWPoseDetector
+from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
from ...backend.model_management import BaseModelType
from .baseinvocation import (
@@ -611,13 +611,13 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
@invocation(
"dwpose_image_processor",
title="DWPose Image Processor",
"dw_openpose_image_processor",
title="DW Openpose Image Processor",
tags=["controlnet", "dwpose", "openpose"],
category="controlnet",
version="1.0.0",
)
-class DWPoseImageProcessorInvocation(ImageProcessorInvocation):
+class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
"""Generates an openpose pose from an image using DWPose"""
draw_body: bool = InputField(default=True)
@@ -626,8 +626,8 @@ class DWPoseImageProcessorInvocation(ImageProcessorInvocation):
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
def run_processor(self, image):
-dwpose = DWPoseDetector()
-processed_image = dwpose(
+dw_openpose = DWOpenposeDetector()
+processed_image = dw_openpose(
image,
draw_face=self.draw_face,
draw_hands=self.draw_hands,
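Note: because the invocation's type string changes from "dwpose_image_processor" to "dw_openpose_image_processor", saved workflows or graph payloads that reference the old string need to be updated. A minimal, hypothetical node entry is sketched below; the field names mirror the InputFields above, while the id value and the flat dictionary shape are illustrative assumptions rather than part of this commit.

```python
# Hypothetical node entry in a workflow/graph payload after the rename.
# Only the "type" string and the field names come from this commit; the id
# and the surrounding payload structure are assumptions for illustration.
dw_openpose_node = {
    "id": "dw_openpose_1",                  # arbitrary example id
    "type": "dw_openpose_image_processor",  # was "dwpose_image_processor"
    "draw_body": True,
    "draw_face": False,
    "draw_hands": True,
    "image_resolution": 512,
}
```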


@@ -3,8 +3,8 @@ import torch
from controlnet_aux.util import resize_image
from PIL import Image
-from invokeai.backend.image_util.dwpose.utils import draw_bodypose, draw_facepose, draw_handpose
-from invokeai.backend.image_util.dwpose.wholebody import Wholebody
+from invokeai.backend.image_util.dw_openpose.utils import draw_bodypose, draw_facepose, draw_handpose
+from invokeai.backend.image_util.dw_openpose.wholebody import Wholebody
def draw_pose(pose, H, W, draw_face=True, draw_body=True, draw_hands=True, resolution=512):
@@ -33,9 +33,9 @@ def draw_pose(pose, H, W, draw_face=True, draw_body=True, draw_hands=True, resol
return dwpose_image
-class DWPoseDetector:
+class DWOpenposeDetector:
"""
-Code from the original implementation of the DWPose Detector.
+Code from the original implementation of the DW Openpose Detector.
Credits: https://github.com/IDEA-Research/DWPose
"""


@@ -264,8 +264,8 @@
"noneDescription": "No processing applied",
"normalBae": "Normal BAE",
"normalBaeDescription": "Normal BAE processing",
"dwPose": "DWPose",
"dwPoseDescription": "Human pose estimation using DWPose",
"dwOpenpose": "DW Openpose",
"dwOpenposeDescription": "Human pose estimation using DW Openpose",
"pidi": "PIDI",
"pidiDescription": "PIDI image processing",
"processor": "Processor",


@@ -6,7 +6,7 @@ import CannyProcessor from './processors/CannyProcessor';
import ColorMapProcessor from './processors/ColorMapProcessor';
import ContentShuffleProcessor from './processors/ContentShuffleProcessor';
import DepthAnyThingProcessor from './processors/DepthAnyThingProcessor';
-import DWPoseProcessor from './processors/DWPoseProcessor';
+import DWOpenposeProcessor from './processors/DWOpenposeProcessor';
import HedProcessor from './processors/HedProcessor';
import LineartAnimeProcessor from './processors/LineartAnimeProcessor';
import LineartProcessor from './processors/LineartProcessor';
@@ -73,8 +73,8 @@ const ControlAdapterProcessorComponent = ({ id }: Props) => {
return <NormalBaeProcessor controlNetId={id} processorNode={processorNode} isEnabled={isEnabled} />;
}
-if (processorNode.type === 'dwpose_image_processor') {
-return <DWPoseProcessor controlNetId={id} processorNode={processorNode} isEnabled={isEnabled} />;
+if (processorNode.type === 'dw_openpose_image_processor') {
+return <DWOpenposeProcessor controlNetId={id} processorNode={processorNode} isEnabled={isEnabled} />;
}
if (processorNode.type === 'pidi_image_processor') {


@@ -1,22 +1,23 @@
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import { useProcessorNodeChanged } from 'features/controlAdapters/components/hooks/useProcessorNodeChanged';
import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
-import type { RequiredDWPoseImageProcessorInvocation } from 'features/controlAdapters/store/types';
+import type { RequiredDWOpenposeImageProcessorInvocation } from 'features/controlAdapters/store/types';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import ProcessorWrapper from './common/ProcessorWrapper';
-const DEFAULTS = CONTROLNET_PROCESSORS.dwpose_image_processor.default as RequiredDWPoseImageProcessorInvocation;
+const DEFAULTS = CONTROLNET_PROCESSORS.dw_openpose_image_processor
+.default as RequiredDWOpenposeImageProcessorInvocation;
type Props = {
controlNetId: string;
-processorNode: RequiredDWPoseImageProcessorInvocation;
+processorNode: RequiredDWOpenposeImageProcessorInvocation;
isEnabled: boolean;
};
-const DWPoseProcessor = (props: Props) => {
+const DWOpenposeProcessor = (props: Props) => {
const { controlNetId, processorNode, isEnabled } = props;
const { image_resolution, draw_body, draw_face, draw_hands } = processorNode;
const processorChanged = useProcessorNodeChanged();
@@ -88,4 +89,4 @@ const DWPoseProcessor = (props: Props) => {
);
};
-export default memo(DWPoseProcessor);
+export default memo(DWOpenposeProcessor);


@@ -205,17 +205,17 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
image_resolution: 512,
},
},
-dwpose_image_processor: {
-type: 'dwpose_image_processor',
+dw_openpose_image_processor: {
+type: 'dw_openpose_image_processor',
get label() {
-return i18n.t('controlnet.dwPose');
+return i18n.t('controlnet.dwOpenpose');
},
get description() {
-return i18n.t('controlnet.dwPoseDescription');
+return i18n.t('controlnet.dwOpenposeDescription');
},
default: {
-id: 'dwpose_image_processor',
-type: 'dwpose_image_processor',
+id: 'dw_openpose_image_processor',
+type: 'dw_openpose_image_processor',
image_resolution: 512,
draw_body: true,
draw_face: false,
@@ -267,7 +267,7 @@ export const CONTROLNET_MODEL_DEFAULT_PROCESSORS: {
lineart_anime: 'lineart_anime_image_processor',
softedge: 'hed_image_processor',
shuffle: 'content_shuffle_image_processor',
-openpose: 'dwpose_image_processor',
+openpose: 'dw_openpose_image_processor',
mediapipe: 'mediapipe_face_processor',
pidi: 'pidi_image_processor',
zoe: 'zoe_depth_image_processor',


@@ -11,7 +11,7 @@ import type {
ColorMapImageProcessorInvocation,
ContentShuffleImageProcessorInvocation,
DepthAnythingImageProcessorInvocation,
-DWPoseImageProcessorInvocation,
+DWOpenposeImageProcessorInvocation,
HedImageProcessorInvocation,
LineartAnimeImageProcessorInvocation,
LineartImageProcessorInvocation,
@@ -40,7 +40,7 @@ export type ControlAdapterProcessorNode =
| MidasDepthImageProcessorInvocation
| MlsdImageProcessorInvocation
| NormalbaeImageProcessorInvocation
-| DWPoseImageProcessorInvocation
+| DWOpenposeImageProcessorInvocation
| PidiImageProcessorInvocation
| ZoeDepthImageProcessorInvocation;
@@ -143,10 +143,10 @@ export type RequiredNormalbaeImageProcessorInvocation = O.Required<
>;
/**
-* The DWPose processor node, with parameters flagged as required
+* The DW Openpose processor node, with parameters flagged as required
*/
-export type RequiredDWPoseImageProcessorInvocation = O.Required<
-DWPoseImageProcessorInvocation,
+export type RequiredDWOpenposeImageProcessorInvocation = O.Required<
+DWOpenposeImageProcessorInvocation,
'type' | 'image_resolution' | 'draw_body' | 'draw_face' | 'draw_hands'
>;
@@ -179,7 +179,7 @@ export type RequiredControlAdapterProcessorNode =
| RequiredMidasDepthImageProcessorInvocation
| RequiredMlsdImageProcessorInvocation
| RequiredNormalbaeImageProcessorInvocation
-| RequiredDWPoseImageProcessorInvocation
+| RequiredDWOpenposeImageProcessorInvocation
| RequiredPidiImageProcessorInvocation
| RequiredZoeDepthImageProcessorInvocation,
'id'
@@ -299,10 +299,10 @@ export const isNormalbaeImageProcessorInvocation = (obj: unknown): obj is Normal
};
/**
-* Type guard for DWPoseImageProcessorInvocation
+* Type guard for DWOpenposeImageProcessorInvocation
*/
-export const isDWPoseImageProcessorInvocation = (obj: unknown): obj is DWPoseImageProcessorInvocation => {
-if (isObject(obj) && 'type' in obj && obj.type === 'dwpose_image_processor') {
+export const isDWOpenposeImageProcessorInvocation = (obj: unknown): obj is DWOpenposeImageProcessorInvocation => {
+if (isObject(obj) && 'type' in obj && obj.type === 'dw_openpose_image_processor') {
return true;
}
return false;

File diff suppressed because one or more lines are too long


@@ -156,7 +156,7 @@ export type MediapipeFaceProcessorInvocation = s['MediapipeFaceProcessorInvocati
export type MidasDepthImageProcessorInvocation = s['MidasDepthImageProcessorInvocation'];
export type MlsdImageProcessorInvocation = s['MlsdImageProcessorInvocation'];
export type NormalbaeImageProcessorInvocation = s['NormalbaeImageProcessorInvocation'];
-export type DWPoseImageProcessorInvocation = s['DWPoseImageProcessorInvocation'];
+export type DWOpenposeImageProcessorInvocation = s['DWOpenposeImageProcessorInvocation'];
export type PidiImageProcessorInvocation = s['PidiImageProcessorInvocation'];
export type ZoeDepthImageProcessorInvocation = s['ZoeDepthImageProcessorInvocation'];