Mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
044d4c107a
All invocation metadata (type, title, tags, and category) are now defined in decorators. The decorators add the `type: Literal["invocation_type"] = "invocation_type"` field to the invocation. Category is a new piece of invocation metadata, but it is not used by the frontend just yet.

- `@invocation()` decorator for invocations

  ```py
  @invocation(
      "sdxl_compel_prompt",
      title="SDXL Prompt",
      tags=["sdxl", "compel", "prompt"],
      category="conditioning",
  )
  class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
      ...
  ```

- `@invocation_output()` decorator for invocation outputs

  ```py
  @invocation_output("clip_skip_output")
  class ClipSkipInvocationOutput(BaseInvocationOutput):
      ...
  ```

- update invocation docs
- add category to decorator
- regen frontend types
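For context, here is a minimal sketch of how a metadata decorator like this could work. It is illustrative only, assuming plain Python classes; the names `ExampleBlurInvocation` and `_metadata` are invented for the example, and the real decorator lives in `baseinvocation.py` (imported by the file below) and operates on pydantic models.

```py
# Hypothetical sketch of the decorator pattern described above -- not the
# actual InvokeAI implementation.
from typing import Literal


def invocation(invocation_type: str, title: str = "", tags: list[str] | None = None, category: str = ""):
    """Class decorator that stamps an invocation class with its type and UI metadata."""

    def wrapper(cls):
        # Record the discriminator annotation and its default value on the class.
        cls.__annotations__ = {**getattr(cls, "__annotations__", {}), "type": Literal[invocation_type]}
        cls.type = invocation_type
        # Keep the UI metadata alongside the class so a frontend could read it later.
        cls._metadata = {"title": title, "tags": tags or [], "category": category}
        return cls

    return wrapper


@invocation("example_blur", title="Blur", tags=["image"], category="image")
class ExampleBlurInvocation:
    radius: float = 2.0


print(ExampleBlurInvocation.type)       # "example_blur"
print(ExampleBlurInvocation._metadata)  # {'title': 'Blur', 'tags': ['image'], 'category': 'image'}
```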
56 lines
1.9 KiB
Python
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import cv2 as cv
import numpy
from PIL import Image, ImageOps

from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.models.image import ImageCategory, ResourceOrigin

from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation


@invocation(
    "cv_inpaint",
    title="OpenCV Inpaint",
    tags=["opencv", "inpaint"],
    category="inpaint",
)
class CvInpaintInvocation(BaseInvocation):
    """Simple inpaint using opencv."""

    image: ImageField = InputField(description="The image to inpaint")
    mask: ImageField = InputField(description="The mask to use when inpainting")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get_pil_image(self.image.image_name)
        mask = context.services.images.get_pil_image(self.mask.image_name)

        # Convert to cv image/mask
        # TODO: consider making these utility functions
        cv_image = cv.cvtColor(numpy.array(image.convert("RGB")), cv.COLOR_RGB2BGR)
        cv_mask = numpy.array(ImageOps.invert(mask.convert("L")))

        # Inpaint
        cv_inpainted = cv.inpaint(cv_image, cv_mask, 3, cv.INPAINT_TELEA)

        # Convert back to Pillow
        # TODO: consider making a utility function
        image_inpainted = Image.fromarray(cv.cvtColor(cv_inpainted, cv.COLOR_BGR2RGB))

        image_dto = context.services.images.create(
            image=image_inpainted,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
            workflow=self.workflow,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )