Merge branch 'main' into release/invokeai-3-0-beta

commit 54bd7c7f04
blessedcoolant, 2023-07-19 03:59:10 +12:00, committed by GitHub
16 changed files with 611 additions and 146 deletions

View File

@@ -4,17 +4,12 @@ from typing import Literal
import numpy as np
from pydantic import Field, validator
from invokeai.app.models.image import ImageField
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from .baseinvocation import (
BaseInvocation,
InvocationConfig,
InvocationContext,
BaseInvocationOutput,
UIConfig,
)
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
InvocationConfig, InvocationContext, UIConfig)
class IntCollectionOutput(BaseInvocationOutput):
@@ -32,7 +27,8 @@ class FloatCollectionOutput(BaseInvocationOutput):
type: Literal["float_collection"] = "float_collection"
# Outputs
collection: list[float] = Field(default=[], description="The float collection")
collection: list[float] = Field(
default=[], description="The float collection")
class ImageCollectionOutput(BaseInvocationOutput):
@@ -41,7 +37,8 @@ class ImageCollectionOutput(BaseInvocationOutput):
type: Literal["image_collection"] = "image_collection"
# Outputs
collection: list[ImageField] = Field(default=[], description="The output images")
collection: list[ImageField] = Field(
default=[], description="The output images")
class Config:
schema_extra = {"required": ["type", "collection"]}
@@ -57,6 +54,14 @@ class RangeInvocation(BaseInvocation):
stop: int = Field(default=10, description="The stop of the range")
step: int = Field(default=1, description="The step of the range")
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Range",
"tags": ["range", "integer", "collection"]
},
}
@validator("stop")
def stop_gt_start(cls, v, values):
if "start" in values and v <= values["start"]:
@@ -79,10 +84,20 @@ class RangeOfSizeInvocation(BaseInvocation):
size: int = Field(default=1, description="The number of values")
step: int = Field(default=1, description="The step of the range")
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Sized Range",
"tags": ["range", "integer", "size", "collection"]
},
}
def invoke(self, context: InvocationContext) -> IntCollectionOutput:
return IntCollectionOutput(
collection=list(range(self.start, self.start + self.size, self.step))
)
collection=list(
range(
self.start, self.start + self.size,
self.step)))
class RandomRangeInvocation(BaseInvocation):
@@ -103,11 +118,21 @@ class RandomRangeInvocation(BaseInvocation):
default_factory=get_random_seed,
)
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Random Range",
"tags": ["range", "integer", "random", "collection"]
},
}
def invoke(self, context: InvocationContext) -> IntCollectionOutput:
rng = np.random.default_rng(self.seed)
return IntCollectionOutput(
collection=list(rng.integers(low=self.low, high=self.high, size=self.size))
)
collection=list(
rng.integers(
low=self.low, high=self.high,
size=self.size)))
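Seeding np.random.default_rng is what makes this node reproducible: two runs with the same seed, low, high, and size yield identical collections. A minimal sketch of that property, in plain NumPy outside the invocation machinery:

import numpy as np

# Same seed -> same generator state -> same draws.
a = list(np.random.default_rng(1234).integers(low=0, high=10, size=5))
b = list(np.random.default_rng(1234).integers(low=0, high=10, size=5))
assert a == b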
class ImageCollectionInvocation(BaseInvocation):
@@ -121,6 +146,7 @@ class ImageCollectionInvocation(BaseInvocation):
default=[], description="The image collection to load"
)
# fmt: on
def invoke(self, context: InvocationContext) -> ImageCollectionOutput:
return ImageCollectionOutput(collection=self.images)
@@ -128,6 +154,7 @@ class ImageCollectionInvocation(BaseInvocation):
schema_extra = {
"ui": {
"type_hints": {
"title": "Image Collection",
"images": "image_collection",
}
},

View File

@@ -571,6 +571,14 @@ class ClipSkipInvocation(BaseInvocation):
clip: ClipField = Field(None, description="Clip to use")
skipped_layers: int = Field(0, description="Number of layers to skip in text_encoder")
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "CLIP Skip",
"tags": ["clip", "skip"]
},
}
def invoke(self, context: InvocationContext) -> ClipSkipInvocationOutput:
self.clip.skipped_layers += self.skipped_layers
return ClipSkipInvocationOutput(

View File

@@ -1,43 +1,25 @@
# Invocations for ControlNet image preprocessors
# initial implementation by Gregg Helt, 2023
# heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux
from builtins import float, bool
from builtins import bool, float
from typing import Dict, List, Literal, Optional, Union
import cv2
import numpy as np
from typing import Literal, Optional, Union, List, Dict
from controlnet_aux import (CannyDetector, ContentShuffleDetector, HEDdetector,
LeresDetector, LineartAnimeDetector,
LineartDetector, MediapipeFaceDetector,
MidasDetector, MLSDdetector, NormalBaeDetector,
OpenposeDetector, PidiNetDetector, SamDetector,
ZoeDetector)
from controlnet_aux.util import HWC3, ade_palette
from PIL import Image
from pydantic import BaseModel, Field, validator
from ...backend.model_management import BaseModelType, ModelType
from ..models.image import ImageField, ImageCategory, ResourceOrigin
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
InvocationContext,
InvocationConfig,
)
from controlnet_aux import (
CannyDetector,
HEDdetector,
LineartDetector,
LineartAnimeDetector,
MidasDetector,
MLSDdetector,
NormalBaeDetector,
OpenposeDetector,
PidiNetDetector,
ContentShuffleDetector,
ZoeDetector,
MediapipeFaceDetector,
SamDetector,
LeresDetector,
)
from ..models.image import ImageCategory, ImageField, ResourceOrigin
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
InvocationConfig, InvocationContext)
from .image import ImageOutput, PILInvocationConfig
CONTROLNET_DEFAULT_MODELS = [
@@ -75,33 +57,34 @@ CONTROLNET_DEFAULT_MODELS = [
"lllyasviel/control_v11e_sd15_ip2p",
"lllyasviel/control_v11f1e_sd15_tile",
#################################################
# thibaud sd v2.1 models (ControlNet v1.0? or v1.1?)
##################################################
"thibaud/controlnet-sd21-openpose-diffusers",
"thibaud/controlnet-sd21-canny-diffusers",
"thibaud/controlnet-sd21-depth-diffusers",
"thibaud/controlnet-sd21-scribble-diffusers",
"thibaud/controlnet-sd21-hed-diffusers",
"thibaud/controlnet-sd21-zoedepth-diffusers",
"thibaud/controlnet-sd21-color-diffusers",
"thibaud/controlnet-sd21-openposev2-diffusers",
"thibaud/controlnet-sd21-lineart-diffusers",
"thibaud/controlnet-sd21-normalbae-diffusers",
"thibaud/controlnet-sd21-ade20k-diffusers",
##############################################
# ControlNetMediaPipeface, ControlNet v1.1
##############################################
# ["CrucibleAI/ControlNetMediaPipeFace", "diffusion_sd15"], # SD 1.5
# diffusion_sd15 needs to be passed to from_pretrained() as subfolder arg
# hacked t2l to split to model & subfolder if format is "model,subfolder"
"CrucibleAI/ControlNetMediaPipeFace,diffusion_sd15", # SD 1.5
"CrucibleAI/ControlNetMediaPipeFace", # SD 2.1?
]
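The "model,subfolder" entries above rely on the split described in the comments: identifiers of that form are divided so the subfolder can be passed to from_pretrained(). A minimal sketch of that split, assuming a plain comma partition; the helper name here is hypothetical, and the actual code lives in the t2l loader the comment refers to:

def split_model_id(model_id: str):
    # Split "repo,subfolder" into its parts; subfolder is None when absent.
    repo, _, subfolder = model_id.partition(",")
    return repo, (subfolder or None)

# split_model_id("CrucibleAI/ControlNetMediaPipeFace,diffusion_sd15")
# -> ("CrucibleAI/ControlNetMediaPipeFace", "diffusion_sd15"),
# which would map to from_pretrained(repo, subfolder=subfolder).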
CONTROLNET_NAME_VALUES = Literal[tuple(CONTROLNET_DEFAULT_MODELS)]
CONTROLNET_MODE_VALUES = Literal[tuple(["balanced", "more_prompt", "more_control", "unbalanced"])]
CONTROLNET_MODE_VALUES = Literal[tuple(
["balanced", "more_prompt", "more_control", "unbalanced"])]
# crop and fill options not ready yet
# CONTROLNET_RESIZE_VALUES = Literal[tuple(["just_resize", "crop_resize", "fill_resize"])]
@@ -112,16 +95,22 @@ class ControlNetModelField(BaseModel):
model_name: str = Field(description="Name of the ControlNet model")
base_model: BaseModelType = Field(description="Base model")
class ControlField(BaseModel):
image: ImageField = Field(default=None, description="The control image")
control_model: Optional[ControlNetModelField] = Field(default=None, description="The ControlNet model to use")
control_model: Optional[ControlNetModelField] = Field(
default=None, description="The ControlNet model to use")
# control_weight: Optional[float] = Field(default=1, description="weight given to controlnet")
control_weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
begin_step_percent: float = Field(default=0, ge=0, le=1,
description="When the ControlNet is first applied (% of total steps)")
end_step_percent: float = Field(default=1, ge=0, le=1,
description="When the ControlNet is last applied (% of total steps)")
control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use")
control_weight: Union[float, List[float]] = Field(
default=1, description="The weight given to the ControlNet")
begin_step_percent: float = Field(
default=0, ge=0, le=1,
description="When the ControlNet is first applied (% of total steps)")
end_step_percent: float = Field(
default=1, ge=0, le=1,
description="When the ControlNet is last applied (% of total steps)")
control_mode: CONTROLNET_MODE_VALUES = Field(
default="balanced", description="The control mode to use")
# resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
@validator("control_weight")
@@ -130,11 +119,13 @@ class ControlField(BaseModel):
if isinstance(v, list):
for i in v:
if i < -1 or i > 2:
raise ValueError('Control weights must be within -1 to 2 range')
raise ValueError(
'Control weights must be within -1 to 2 range')
else:
if v < -1 or v > 2:
raise ValueError('Control weights must be within -1 to 2 range')
return v
class Config:
schema_extra = {
"required": ["image", "control_model", "control_weight", "begin_step_percent", "end_step_percent"],
@@ -175,13 +166,14 @@ class ControlNetInvocation(BaseInvocation):
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["latents"],
"title": "ControlNet",
"tags": ["controlnet", "latents"],
"type_hints": {
"model": "model",
"control": "control",
# "cfg_scale": "float",
"cfg_scale": "number",
"control_weight": "float",
"model": "model",
"control": "control",
# "cfg_scale": "float",
"cfg_scale": "number",
"control_weight": "float",
}
},
}
@@ -208,6 +200,13 @@ class ImageProcessorInvocation(BaseInvocation, PILInvocationConfig):
image: ImageField = Field(default=None, description="The image to process")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Image Processor",
"tags": ["image", "processor"]
},
}
def run_processor(self, image):
# superclass just passes through image without processing
@@ -239,14 +238,15 @@ class ImageProcessorInvocation(BaseInvocation, PILInvocationConfig):
return ImageOutput(
image=processed_image_field,
# width=processed_image.width,
width = image_dto.width,
width=image_dto.width,
# height=processed_image.height,
height = image_dto.height,
height=image_dto.height,
# mode=processed_image.mode,
)
class CannyImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class CannyImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Canny edge detection for ControlNet"""
# fmt: off
type: Literal["canny_image_processor"] = "canny_image_processor"
@@ -255,13 +255,23 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfi
high_threshold: int = Field(default=200, ge=0, le=255, description="The high threshold of the Canny pixel gradient (0-255)")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Canny Processor",
"tags": ["controlnet", "canny", "image", "processor"]
},
}
def run_processor(self, image):
canny_processor = CannyDetector()
processed_image = canny_processor(image, self.low_threshold, self.high_threshold)
processed_image = canny_processor(
image, self.low_threshold, self.high_threshold)
return processed_image
class HedImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class HedImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies HED edge detection to image"""
# fmt: off
type: Literal["hed_image_processor"] = "hed_image_processor"
@@ -273,6 +283,14 @@ class HedImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig)
scribble: bool = Field(default=False, description="Whether to use scribble mode")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Softedge(HED) Processor",
"tags": ["controlnet", "softedge", "hed", "image", "processor"]
},
}
def run_processor(self, image):
hed_processor = HEDdetector.from_pretrained("lllyasviel/Annotators")
processed_image = hed_processor(image,
@@ -285,7 +303,8 @@ class HedImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig)
return processed_image
class LineartImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class LineartImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies line art processing to image"""
# fmt: off
type: Literal["lineart_image_processor"] = "lineart_image_processor"
@@ -295,16 +314,25 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation, PILInvocationCon
coarse: bool = Field(default=False, description="Whether to use coarse mode")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Lineart Processor",
"tags": ["controlnet", "lineart", "image", "processor"]
},
}
def run_processor(self, image):
lineart_processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
processed_image = lineart_processor(image,
detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution,
coarse=self.coarse)
lineart_processor = LineartDetector.from_pretrained(
"lllyasviel/Annotators")
processed_image = lineart_processor(
image, detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution, coarse=self.coarse)
return processed_image
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class LineartAnimeImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies line art anime processing to image"""
# fmt: off
type: Literal["lineart_anime_image_processor"] = "lineart_anime_image_processor"
@@ -313,8 +341,17 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation, PILInvocati
image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Lineart Anime Processor",
"tags": ["controlnet", "lineart", "anime", "image", "processor"]
},
}
def run_processor(self, image):
processor = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
processor = LineartAnimeDetector.from_pretrained(
"lllyasviel/Annotators")
processed_image = processor(image,
detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution,
@@ -322,7 +359,8 @@ class LineartAnimeImageProcessorInvocati
return processed_image
class OpenposeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class OpenposeImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies Openpose processing to image"""
# fmt: off
type: Literal["openpose_image_processor"] = "openpose_image_processor"
@@ -332,17 +370,26 @@ class OpenposeImageProcessorInvocationCo
image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Openpose Processor",
"tags": ["controlnet", "openpose", "image", "processor"]
},
}
def run_processor(self, image):
openpose_processor = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
processed_image = openpose_processor(image,
detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution,
hand_and_face=self.hand_and_face,
)
openpose_processor = OpenposeDetector.from_pretrained(
"lllyasviel/Annotators")
processed_image = openpose_processor(
image, detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution,
hand_and_face=self.hand_and_face,)
return processed_image
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class MidasDepthImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies Midas depth processing to image"""
# fmt: off
type: Literal["midas_depth_image_processor"] = "midas_depth_image_processor"
@@ -353,6 +400,14 @@ class MidasDepthImageProcessorInvocation
# depth_and_normal: bool = Field(default=False, description="whether to use depth and normal mode")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Midas (Depth) Processor",
"tags": ["controlnet", "midas", "depth", "image", "processor"]
},
}
def run_processor(self, image):
midas_processor = MidasDetector.from_pretrained("lllyasviel/Annotators")
processed_image = midas_processor(image,
@@ -364,7 +419,8 @@ class MidasDepthImageProcessorInvocation
return processed_image
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class NormalbaeImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies NormalBae processing to image"""
# fmt: off
type: Literal["normalbae_image_processor"] = "normalbae_image_processor"
@@ -373,15 +429,25 @@ class NormalbaeImageProcessorInvocationC
image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Normal BAE Processor",
"tags": ["controlnet", "normal", "bae", "image", "processor"]
},
}
def run_processor(self, image):
normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
processed_image = normalbae_processor(image,
detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution)
normalbae_processor = NormalBaeDetector.from_pretrained(
"lllyasviel/Annotators")
processed_image = normalbae_processor(
image, detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution)
return processed_image
class MlsdImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class MlsdImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies MLSD processing to image"""
# fmt: off
type: Literal["mlsd_image_processor"] = "mlsd_image_processor"
@@ -392,17 +458,25 @@ class MlsdImageProcessorInvocationConfig
thr_d: float = Field(default=0.1, ge=0, description="MLSD parameter `thr_d`")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "MLSD Processor",
"tags": ["controlnet", "mlsd", "image", "processor"]
},
}
def run_processor(self, image):
mlsd_processor = MLSDdetector.from_pretrained("lllyasviel/Annotators")
processed_image = mlsd_processor(image,
detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution,
thr_v=self.thr_v,
thr_d=self.thr_d)
processed_image = mlsd_processor(
image, detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution, thr_v=self.thr_v,
thr_d=self.thr_d)
return processed_image
class PidiImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class PidiImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies PIDI processing to image"""
# fmt: off
type: Literal["pidi_image_processor"] = "pidi_image_processor"
@@ -413,17 +487,26 @@ class PidiImageProcessorInvocationConfig
scribble: bool = Field(default=False, description="Whether to use scribble mode")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "PIDI Processor",
"tags": ["controlnet", "pidi", "image", "processor"]
},
}
def run_processor(self, image):
pidi_processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
processed_image = pidi_processor(image,
detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution,
safe=self.safe,
scribble=self.scribble)
pidi_processor = PidiNetDetector.from_pretrained(
"lllyasviel/Annotators")
processed_image = pidi_processor(
image, detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution, safe=self.safe,
scribble=self.scribble)
return processed_image
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class ContentShuffleImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies content shuffle processing to image"""
# fmt: off
type: Literal["content_shuffle_image_processor"] = "content_shuffle_image_processor"
@@ -435,6 +518,14 @@ class ContentShuffleImageProcessorInvoca
f: Optional[int] = Field(default=256, ge=0, description="Content shuffle `f` parameter")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Content Shuffle Processor",
"tags": ["controlnet", "contentshuffle", "image", "processor"]
},
}
def run_processor(self, image):
content_shuffle_processor = ContentShuffleDetector()
processed_image = content_shuffle_processor(image,
@@ -448,19 +539,30 @@ class ContentShuffleImageProcessorInvoca
# should work with controlnet_aux >= 0.0.4 and timm <= 0.6.13
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class ZoeDepthImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies Zoe depth processing to image"""
# fmt: off
type: Literal["zoe_depth_image_processor"] = "zoe_depth_image_processor"
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Zoe (Depth) Processor",
"tags": ["controlnet", "zoe", "depth", "image", "processor"]
},
}
def run_processor(self, image):
zoe_depth_processor = ZoeDetector.from_pretrained("lllyasviel/Annotators")
zoe_depth_processor = ZoeDetector.from_pretrained(
"lllyasviel/Annotators")
processed_image = zoe_depth_processor(image)
return processed_image
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class MediapipeFaceProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies mediapipe face processing to image"""
# fmt: off
type: Literal["mediapipe_face_processor"] = "mediapipe_face_processor"
@@ -469,16 +571,27 @@ class MediapipeFaceProcessorInvocationCo
min_confidence: float = Field(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Mediapipe Processor",
"tags": ["controlnet", "mediapipe", "image", "processor"]
},
}
def run_processor(self, image):
# MediaPipeFaceDetector throws an error if image has alpha channel
# so convert to RGB if needed
if image.mode == 'RGBA':
image = image.convert('RGB')
mediapipe_face_processor = MediapipeFaceDetector()
processed_image = mediapipe_face_processor(image, max_faces=self.max_faces, min_confidence=self.min_confidence)
processed_image = mediapipe_face_processor(
image, max_faces=self.max_faces, min_confidence=self.min_confidence)
return processed_image
class LeresImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class LeresImageProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies leres processing to image"""
# fmt: off
type: Literal["leres_image_processor"] = "leres_image_processor"
@@ -490,18 +603,25 @@ class LeresImageProcessorInvocationConfi
image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Leres (Depth) Processor",
"tags": ["controlnet", "leres", "depth", "image", "processor"]
},
}
def run_processor(self, image):
leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
processed_image = leres_processor(image,
thr_a=self.thr_a,
thr_b=self.thr_b,
boost=self.boost,
detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution)
processed_image = leres_processor(
image, thr_a=self.thr_a, thr_b=self.thr_b, boost=self.boost,
detect_resolution=self.detect_resolution,
image_resolution=self.image_resolution)
return processed_image
class TileResamplerProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class TileResamplerProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
# fmt: off
type: Literal["tile_image_processor"] = "tile_image_processor"
@@ -510,6 +630,14 @@ class TileResamplerProcessorInvocationCo
down_sampling_rate: float = Field(default=1.0, ge=1.0, le=8.0, description="Down sampling rate")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Tile Resample Processor",
"tags": ["controlnet", "tile", "resample", "image", "processor"]
},
}
# tile_resample copied from sd-webui-controlnet/scripts/processor.py
def tile_resample(self,
np_img: np.ndarray,
@@ -528,28 +656,33 @@ class TileResamplerProcessorInvocationCo
def run_processor(self, img):
np_img = np.array(img, dtype=np.uint8)
processed_np_image = self.tile_resample(np_img,
#res=self.tile_size,
# res=self.tile_size,
down_sampling_rate=self.down_sampling_rate
)
processed_image = Image.fromarray(processed_np_image)
return processed_image
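The body of tile_resample is elided by the hunk above. A sketch of the likely behavior, assuming it matches the upstream sd-webui-controlnet helper the comment says it was copied from: normalize to 3-channel HWC, then downscale by down_sampling_rate with area interpolation, treating rates near 1 as a no-op.

import cv2
import numpy as np
from controlnet_aux.util import HWC3

def tile_resample_sketch(np_img: np.ndarray, down_sampling_rate: float = 1.0) -> np.ndarray:
    np_img = HWC3(np_img)  # ensure uint8 HxWx3
    if down_sampling_rate < 1.1:
        return np_img  # near-unity rates: pass the image through
    h, w, _ = np_img.shape
    new_size = (int(w / down_sampling_rate), int(h / down_sampling_rate))
    # INTER_AREA is the usual interpolation choice when shrinking an image.
    return cv2.resize(np_img, new_size, interpolation=cv2.INTER_AREA)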
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
class SegmentAnythingProcessorInvocation(
ImageProcessorInvocation, PILInvocationConfig):
"""Applies segment anything processing to image"""
# fmt: off
type: Literal["segment_anything_processor"] = "segment_anything_processor"
# fmt: on
class Config(InvocationConfig):
schema_extra = {"ui": {"title": "Segment Anything Processor", "tags": [
"controlnet", "segment", "anything", "sam", "image", "processor"]}, }
def run_processor(self, image):
# segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
segment_anything_processor = SamDetectorReproducibleColors.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
segment_anything_processor = SamDetectorReproducibleColors.from_pretrained(
"ybelkada/segment-anything", subfolder="checkpoints")
np_img = np.array(image, dtype=np.uint8)
processed_image = segment_anything_processor(np_img)
return processed_image
class SamDetectorReproducibleColors(SamDetector):
# overriding SamDetector.show_anns() method to use reproducible colors for segmentation image
@@ -561,7 +694,8 @@ class SamDetectorReproducibleColors(SamDetector):
return
sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
h, w = anns[0]['segmentation'].shape
final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode="RGB")
final_img = Image.fromarray(
np.zeros((h, w, 3), dtype=np.uint8), mode="RGB")
palette = ade_palette()
for i, ann in enumerate(sorted_anns):
m = ann['segmentation']
@@ -569,5 +703,8 @@ class SamDetectorReproducibleColors(SamDetector):
# doing modulo just in case number of annotated regions exceeds number of colors in palette
ann_color = palette[i % len(palette)]
img[:, :] = ann_color
final_img.paste(Image.fromarray(img, mode="RGB"), (0, 0), Image.fromarray(np.uint8(m * 255)))
final_img.paste(
Image.fromarray(img, mode="RGB"),
(0, 0),
Image.fromarray(np.uint8(m * 255)))
return np.array(final_img, dtype=np.uint8)

View File

@@ -35,6 +35,14 @@ class CvInpaintInvocation(BaseInvocation, CvInvocationConfig):
mask: ImageField = Field(default=None, description="The mask to use when inpainting")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "OpenCV Inpaint",
"tags": ["opencv", "inpaint"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
mask = context.services.images.get_pil_image(self.mask.image_name)

View File

@@ -130,6 +130,7 @@ class InpaintInvocation(BaseInvocation):
schema_extra = {
"ui": {
"tags": ["stable-diffusion", "image"],
"title": "Inpaint"
},
}

View File

@@ -71,6 +71,15 @@ class LoadImageInvocation(BaseInvocation):
default=None, description="The image to load"
)
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Load Image",
"tags": ["image", "load"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -91,6 +100,14 @@ class ShowImageInvocation(BaseInvocation):
default=None, description="The image to show"
)
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Show Image",
"tags": ["image", "show"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
if image:
@@ -119,6 +136,14 @@ class ImageCropInvocation(BaseInvocation, PILInvocationConfig):
height: int = Field(default=512, gt=0, description="The height of the crop rectangle")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Crop Image",
"tags": ["image", "crop"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -157,6 +182,14 @@ class ImagePasteInvocation(BaseInvocation, PILInvocationConfig):
y: int = Field(default=0, description="The top y coordinate at which to paste the image")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Paste Image",
"tags": ["image", "paste"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
base_image = context.services.images.get_pil_image(self.base_image.image_name)
image = context.services.images.get_pil_image(self.image.image_name)
@@ -207,6 +240,14 @@ class MaskFromAlphaInvocation(BaseInvocation, PILInvocationConfig):
invert: bool = Field(default=False, description="Whether or not to invert the mask")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Mask From Alpha",
"tags": ["image", "mask", "alpha"]
},
}
def invoke(self, context: InvocationContext) -> MaskOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -241,6 +282,14 @@ class ImageMultiplyInvocation(BaseInvocation, PILInvocationConfig):
image2: Optional[ImageField] = Field(default=None, description="The second image to multiply")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Multiply Images",
"tags": ["image", "multiply"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image1 = context.services.images.get_pil_image(self.image1.image_name)
image2 = context.services.images.get_pil_image(self.image2.image_name)
@@ -277,6 +326,14 @@ class ImageChannelInvocation(BaseInvocation, PILInvocationConfig):
channel: IMAGE_CHANNELS = Field(default="A", description="The channel to get")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Image Channel",
"tags": ["image", "channel"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -312,6 +369,14 @@ class ImageConvertInvocation(BaseInvocation, PILInvocationConfig):
mode: IMAGE_MODES = Field(default="L", description="The mode to convert to")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Convert Image",
"tags": ["image", "convert"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -345,6 +410,14 @@ class ImageBlurInvocation(BaseInvocation, PILInvocationConfig):
blur_type: Literal["gaussian", "box"] = Field(default="gaussian", description="The type of blur")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Blur Image",
"tags": ["image", "blur"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -404,6 +477,14 @@ class ImageResizeInvocation(BaseInvocation, PILInvocationConfig):
resample_mode: PIL_RESAMPLING_MODES = Field(default="bicubic", description="The resampling mode")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Resize Image",
"tags": ["image", "resize"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -442,6 +523,14 @@ class ImageScaleInvocation(BaseInvocation, PILInvocationConfig):
resample_mode: PIL_RESAMPLING_MODES = Field(default="bicubic", description="The resampling mode")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Scale Image",
"tags": ["image", "scale"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -482,6 +571,14 @@ class ImageLerpInvocation(BaseInvocation, PILInvocationConfig):
max: int = Field(default=255, ge=0, le=255, description="The maximum output value")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Image Linear Interpolation",
"tags": ["image", "linear", "interpolation", "lerp"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -518,6 +615,14 @@ class ImageInverseLerpInvocation(BaseInvocation, PILInvocationConfig):
max: int = Field(default=255, ge=0, le=255, description="The maximum input value")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Image Inverse Linear Interpolation",
"tags": ["image", "linear", "interpolation", "inverse"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)

View File

@@ -14,6 +14,7 @@ from invokeai.backend.image_util.patchmatch import PatchMatch
from ..models.image import ColorField, ImageCategory, ImageField, ResourceOrigin
from .baseinvocation import (
BaseInvocation,
InvocationConfig,
InvocationContext,
)
@@ -133,6 +134,14 @@ class InfillColorInvocation(BaseInvocation):
description="The color to use to infill",
)
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Color Infill",
"tags": ["image", "inpaint", "color", "infill"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -173,6 +182,14 @@ class InfillTileInvocation(BaseInvocation):
default_factory=get_random_seed,
)
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Tile Infill",
"tags": ["image", "inpaint", "tile", "infill"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
@@ -206,6 +223,14 @@ class InfillPatchMatchInvocation(BaseInvocation):
default=None, description="The image to infill"
)
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Patch Match Infill",
"tags": ["image", "inpaint", "patchmatch", "infill"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)

View File

@@ -139,6 +139,7 @@ class TextToLatentsInvocation(BaseInvocation):
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Text To Latents",
"tags": ["latents"],
"type_hints": {
"model": "model",
@@ -389,6 +390,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Latent To Latents",
"tags": ["latents"],
"type_hints": {
"model": "model",
@@ -498,6 +500,7 @@ class LatentsToImageInvocation(BaseInvocation):
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Latents To Image",
"tags": ["latents", "image"],
},
}
@@ -595,6 +598,14 @@ class ResizeLatentsInvocation(BaseInvocation):
antialias: bool = Field(
default=False,
description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Resize Latents",
"tags": ["latents", "resize"]
},
}
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.services.latents.get(self.latents.latents_name)
@@ -633,6 +644,14 @@ class ScaleLatentsInvocation(BaseInvocation):
antialias: bool = Field(
default=False,
description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Scale Latents",
"tags": ["latents", "scale"]
},
}
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.services.latents.get(self.latents.latents_name)
@@ -675,7 +694,8 @@ class ImageToLatentsInvocation(BaseInvocation):
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["latents", "image"],
"title": "Image To Latents",
"tags": ["latents", "image"]
},
}

View File

@@ -52,6 +52,14 @@ class AddInvocation(BaseInvocation, MathInvocationConfig):
b: int = Field(default=0, description="The second number")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Add",
"tags": ["math", "add"]
},
}
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=self.a + self.b)
@@ -65,6 +73,14 @@ class SubtractInvocation(BaseInvocation, MathInvocationConfig):
b: int = Field(default=0, description="The second number")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Subtract",
"tags": ["math", "subtract"]
},
}
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=self.a - self.b)
@@ -78,6 +94,14 @@ class MultiplyInvocation(BaseInvocation, MathInvocationConfig):
b: int = Field(default=0, description="The second number")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Multiply",
"tags": ["math", "multiply"]
},
}
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=self.a * self.b)
@@ -91,6 +115,14 @@ class DivideInvocation(BaseInvocation, MathInvocationConfig):
b: int = Field(default=0, description="The second number")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Divide",
"tags": ["math", "divide"]
},
}
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=int(self.a / self.b))
@@ -105,5 +137,14 @@ class RandomIntInvocation(BaseInvocation):
default=np.iinfo(np.int32).max, description="The exclusive high value"
)
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Random Integer",
"tags": ["math", "random", "integer"]
},
}
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=np.random.randint(self.low, self.high))

View File

@@ -3,7 +3,7 @@ from typing import Literal, Optional, Union
from pydantic import BaseModel, Field
from invokeai.app.invocations.baseinvocation import (BaseInvocation,
BaseInvocationOutput,
BaseInvocationOutput, InvocationConfig,
InvocationContext)
from invokeai.app.invocations.controlnet_image_processors import ControlField
from invokeai.app.invocations.model import (LoRAModelField, MainModelField,
@@ -97,6 +97,14 @@ class MetadataAccumulatorInvocation(BaseInvocation):
description="The VAE used for decoding, if the main model's default was not used",
)
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Metadata Accumulator",
"tags": ["image", "metadata", "generation"]
},
}
def invoke(self, context: InvocationContext) -> MetadataAccumulatorOutput:
"""Collects and outputs a CoreMetadata object"""

View File

@@ -112,6 +112,7 @@ class NoiseInvocation(BaseInvocation):
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Noise",
"tags": ["latents", "noise"],
},
}

View File

@@ -43,6 +43,14 @@ class FloatLinearRangeInvocation(BaseInvocation):
stop: float = Field(default=10, description="The last value of the range")
steps: int = Field(default=30, description="number of values to interpolate over (including start and stop)")
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Linear Range (Float)",
"tags": ["math", "float", "linear", "range"]
},
}
def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
param_list = list(np.linspace(self.start, self.stop, self.steps))
return FloatCollectionOutput(
@@ -113,6 +121,14 @@ class StepParamEasingInvocation(BaseInvocation):
show_easing_plot: bool = Field(default=False, description="show easing plot")
# fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Param Easing By Step",
"tags": ["param", "step", "easing"]
},
}
def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
log_diagnostics = False

View File

@@ -1,9 +1,12 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
from typing import Literal
from pydantic import Field
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
from .math import IntOutput, FloatOutput
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
InvocationConfig, InvocationContext)
from .math import FloatOutput, IntOutput
# Pass-through parameter nodes - used by subgraphs
@@ -14,6 +17,14 @@ class ParamIntInvocation(BaseInvocation):
a: int = Field(default=0, description="The integer value")
#fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["param", "integer"],
"title": "Integer Parameter"
},
}
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=self.a)
@@ -24,5 +35,36 @@ class ParamFloatInvocation(BaseInvocation):
param: float = Field(default=0.0, description="The float value")
#fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["param", "float"],
"title": "Float Parameter"
},
}
def invoke(self, context: InvocationContext) -> FloatOutput:
return FloatOutput(param=self.param)
class StringOutput(BaseInvocationOutput):
"""A string output"""
type: Literal["string_output"] = "string_output"
text: str = Field(default=None, description="The output string")
class ParamStringInvocation(BaseInvocation):
"""A string parameter"""
type: Literal['param_string'] = 'param_string'
text: str = Field(default='', description='The string value')
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["param", "string"],
"title": "String Parameter"
},
}
def invoke(self, context: InvocationContext) -> StringOutput:
return StringOutput(text=self.text)

View File

@@ -4,7 +4,7 @@ from typing import Literal, Optional
import numpy as np
from pydantic import Field, validator
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext
from dynamicprompts.generators import RandomPromptGenerator, CombinatorialPromptGenerator
class PromptOutput(BaseInvocationOutput):
@@ -48,6 +48,14 @@ class DynamicPromptInvocation(BaseInvocation):
default=False, description="Whether to use the combinatorial generator"
)
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Dynamic Prompt",
"tags": ["prompt", "dynamic"]
},
}
def invoke(self, context: InvocationContext) -> PromptCollectionOutput:
if self.combinatorial:
generator = CombinatorialPromptGenerator()
@@ -72,6 +80,14 @@ class PromptsFromFileInvocation(BaseInvocation):
max_prompts: int = Field(default=1, ge=0, description="Max lines to read from file (0=all)")
#fmt: on
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Prompts From File",
"tags": ["prompt", "file"]
},
}
@validator("file_path")
def file_path_exists(cls, v):
if not exists(v):

View File

@@ -233,6 +233,7 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "SDXL Text To Latents",
"tags": ["latents"],
"type_hints": {
"model": "model",
@@ -462,6 +463,7 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "SDXL Latents to Latents",
"tags": ["latents"],
"type_hints": {
"model": "model",

View File

@@ -11,7 +11,7 @@ from realesrgan import RealESRGANer
from invokeai.app.models.image import ImageCategory, ImageField, ResourceOrigin
from .baseinvocation import BaseInvocation, InvocationContext
from .baseinvocation import BaseInvocation, InvocationConfig, InvocationContext
from .image import ImageOutput
# TODO: Populate this from disk?
@@ -32,6 +32,14 @@ class RealESRGANInvocation(BaseInvocation):
default="RealESRGAN_x4plus.pth", description="The Real-ESRGAN model to use"
)
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Upscale (RealESRGAN)",
"tags": ["image", "upscale", "realesrgan"]
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)
models_path = context.services.configuration.models_path