diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index 5f970ab04f..b4ab48c4f9 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -198,22 +198,15 @@ class CreateGradientMaskInvocation(BaseInvocation):
     minimum_denoise: float = InputField(
         default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4
     )
-    unet: Optional[UNetField] = InputField(
-        description="OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE",
-        default=None,
-        input=Input.Connection,
-        title="[OPTIONAL] UNet",
-        ui_order=5,
-    )
     image: Optional[ImageField] = InputField(
         default=None,
-        description="OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE",
+        description="OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE",
         title="[OPTIONAL] Image",
         ui_order=6
     )
     vae: Optional[VAEField] = InputField(
         default=None,
-        description="OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE",
+        description="OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE",
         title="[OPTIONAL] VAE",
         input=Input.Connection,
         ui_order=7
@@ -262,22 +255,18 @@ class CreateGradientMaskInvocation(BaseInvocation):
 
         masked_latents_name = None
         # Check for Inpaint model and generate masked_latents
-        if self.unet is not None and self.vae is not None and self.image is not None:
-            #all three fields must be present at the same time
-            unet_info = context.models.load(self.unet.unet)
-            assert isinstance(unet_info.model, UNet2DConditionModel)
-            is_inpaint = unet_info.model.conv_in.in_channels == 9
-            if is_inpaint:
-                mask = blur_tensor
-                vae_info: LoadedModel = context.models.load(self.vae.vae)
-                image = context.images.get_pil(self.image.image_name)
-                image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
-                if image_tensor.dim() == 3:
-                    image_tensor = image_tensor.unsqueeze(0)
-                img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
-                masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0) # <1 to include gradient area
-                masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())
-                masked_latents_name = context.tensors.save(tensor=masked_latents)
+        if self.vae is not None and self.image is not None:
+            #both fields must be present at the same time
+            mask = blur_tensor
+            vae_info: LoadedModel = context.models.load(self.vae.vae)
+            image = context.images.get_pil(self.image.image_name)
+            image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
+            if image_tensor.dim() == 3:
+                image_tensor = image_tensor.unsqueeze(0)
+            img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
+            masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0) # <1 to include gradient area
+            masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())
+            masked_latents_name = context.tensors.save(tensor=masked_latents)
 
         return GradientMaskOutput(
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasInpaintGraph.ts
index e4f24d531c..d5b826c43e 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasInpaintGraph.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasInpaintGraph.ts
@@ -175,16 +175,6 @@ export const buildCanvasInpaintGraph = async (
           field: 'unet',
         },
       },
-      {
-        source: {
-          node_id: modelLoaderNodeId,
-          field: 'unet',
-        },
-        destination: {
-          node_id: INPAINT_CREATE_MASK,
-          field: 'unet',
-        },
-      },
       {
         source: {
           node_id: modelLoaderNodeId,
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasOutpaintGraph.ts
index af8aaa7ea2..705b03e6c0 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasOutpaintGraph.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasOutpaintGraph.ts
@@ -200,16 +200,6 @@ export const buildCanvasOutpaintGraph = async (
           field: 'unet',
         },
       },
-      {
-        source: {
-          node_id: modelLoaderNodeId,
-          field: 'unet',
-        },
-        destination: {
-          node_id: INPAINT_CREATE_MASK,
-          field: 'unet',
-        },
-      },
       {
         source: {
           node_id: modelLoaderNodeId,
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasSDXLInpaintGraph.ts
index b6e6876503..be21d1481d 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasSDXLInpaintGraph.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasSDXLInpaintGraph.ts
@@ -177,16 +177,6 @@ export const buildCanvasSDXLInpaintGraph = async (
           field: 'unet',
         },
       },
-      {
-        source: {
-          node_id: modelLoaderNodeId,
-          field: 'unet',
-        },
-        destination: {
-          node_id: INPAINT_CREATE_MASK,
-          field: 'unet',
-        },
-      },
       {
         source: {
           node_id: modelLoaderNodeId,
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasSDXLOutpaintGraph.ts
index 21955905f3..d2bf3d3896 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasSDXLOutpaintGraph.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildCanvasSDXLOutpaintGraph.ts
@@ -199,16 +199,6 @@ export const buildCanvasSDXLOutpaintGraph = async (
          field: 'unet',
        },
      },
-      {
-        source: {
-          node_id: modelLoaderNodeId,
-          field: 'unet',
-        },
-        destination: {
-          node_id: INPAINT_CREATE_MASK,
-          field: 'unet',
-        },
-      },
      {
        source: {
          node_id: SDXL_MODEL_LOADER,
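Note: with the `unet` input removed, `CreateGradientMaskInvocation` no longer inspects the model to decide whether it is a specialized inpainting UNet; it simply produces `masked_latents` whenever both `image` and `vae` are connected, and the field descriptions now tell the user to connect them only for inpainting models. A caller or graph builder that still wants the old automatic behavior could perform the same 9-channel check that was removed from the node before wiring those inputs. The sketch below is a minimal, hypothetical helper (the name `is_inpainting_unet` is not part of the codebase), assuming a diffusers `UNet2DConditionModel` such as the one the deleted code asserted on:

```python
from diffusers import UNet2DConditionModel


def is_inpainting_unet(unet: UNet2DConditionModel) -> bool:
    # Mirrors the check removed from CreateGradientMaskInvocation: specialized
    # inpainting UNets take a 9-channel input (4 noisy-latent channels +
    # 4 masked-image latent channels + 1 mask channel) instead of the usual 4.
    return unet.conv_in.in_channels == 9
```

If this returns True for the loaded UNet, the caller would connect `image` and `vae` to the gradient mask node so that `masked_latents` are generated; otherwise it would leave those inputs unconnected, matching the behavior of the updated graph builders above.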