Merge remote-tracking branch 'origin/main' into lstein/feat/simple-mm2-api

psychedelicious 2024-06-07 14:23:41 +10:00
commit fde58ce0a3
42 changed files with 1659 additions and 828 deletions

View File

@@ -1366,12 +1366,20 @@ the in-memory loaded model:
| `model` | AnyModel | The instantiated model (details below) |
| `locker` | ModelLockerBase | A context manager that mediates the movement of the model into VRAM |

-Because the loader can return multiple model types, it is typed to
-return `AnyModel`, a Union `ModelMixin`, `torch.nn.Module`,
-`IAIOnnxRuntimeModel`, `IPAdapter`, `IPAdapterPlus`, and
-`EmbeddingModelRaw`. `ModelMixin` is the base class of all diffusers
-models, `EmbeddingModelRaw` is used for LoRA and TextualInversion
-models. The others are obvious.
+### get_model_by_key(key, [submodel]) -> LoadedModel
+
+The `get_model_by_key()` method will retrieve the model using its
+unique database key. For example:
+
+loaded_model = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
+
+`get_model_by_key()` may raise any of the following exceptions:
+
+* `UnknownModelException` -- key not in database
+* `ModelNotFoundException` -- key in database but model not found at path
+* `NotImplementedException` -- the loader doesn't know how to load this type of model
+
+### Using the Loaded Model in Inference

`LoadedModel` acts as a context manager. The context loads the model
into the execution device (e.g. VRAM on CUDA systems), locks the model
@@ -1379,16 +1387,32 @@ in the execution device for the duration of the context, and returns
the model. Use it like this:

```
-model_info = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
-with model_info as vae:
+loaded_model = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
+with loaded_model as vae:
  image = vae.decode(latents)[0]
```

-`get_model_by_key()` may raise any of the following exceptions:
-
-* `UnknownModelException` -- key not in database
-* `ModelNotFoundException` -- key in database but model not found at path
-* `NotImplementedException` -- the loader doesn't know how to load this type of model
+The object returned by the LoadedModel context manager is an
+`AnyModel`, which is a Union of `ModelMixin`, `torch.nn.Module`,
+`IAIOnnxRuntimeModel`, `IPAdapter`, `IPAdapterPlus`, and
+`EmbeddingModelRaw`. `ModelMixin` is the base class of all diffusers
+models, `EmbeddingModelRaw` is used for LoRA and TextualInversion
+models. The others are obvious.
+
+In addition, you may call `LoadedModel.model_on_device()`, a context
+manager that returns a tuple of the model's state dict in CPU and the
+model itself in VRAM. It is used to optimize the LoRA patching and
+unpatching process:
+
+```
+loaded_model = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
+with loaded_model.model_on_device() as (state_dict, vae):
+  image = vae.decode(latents)[0]
+```
+
+Since not all models have state dicts, the `state_dict` return value
+can be None.

### Emitting model loading events
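Putting the documented pieces together, a minimal sketch of a caller that handles the exceptions listed above and uses the newer `model_on_device()` API might look like the following. The `loader` object, the `latents` tensor, and the exception classes are assumed to be in scope, and the `SubModelType` import path is an assumption.

```python
from invokeai.backend.model_manager.config import SubModelType

# Minimal sketch; `loader`, `latents`, and the exception classes named in the
# docs above are assumed to be available in the calling context.
try:
    loaded_model = loader.get_model_by_key("f13dd932c0c35c22dcb8d6cda4203764", SubModelType("vae"))
except UnknownModelException:
    raise  # key not in the database
except ModelNotFoundException:
    raise  # key known, but the model files are missing on disk

with loaded_model.model_on_device() as (state_dict, vae):
    # Not all loadable models carry a state dict; guard against None.
    if state_dict is not None:
        pass  # e.g. hand the CPU copy to a patcher for later restoration
    image = vae.decode(latents)[0]
```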

View File

@@ -81,9 +81,13 @@ class CompelInvocation(BaseInvocation):
        with (
            # apply all patches while the model is on the target device
-            text_encoder_info as text_encoder,
+            text_encoder_info.model_on_device() as (model_state_dict, text_encoder),
            tokenizer_info as tokenizer,
-            ModelPatcher.apply_lora_text_encoder(text_encoder, _lora_loader()),
+            ModelPatcher.apply_lora_text_encoder(
+                text_encoder,
+                loras=_lora_loader(),
+                model_state_dict=model_state_dict,
+            ),
            # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
            ModelPatcher.apply_clip_skip(text_encoder, self.clip.skipped_layers),
            ModelPatcher.apply_ti(tokenizer, text_encoder, ti_list) as (

@@ -172,9 +176,14 @@ class SDXLPromptInvocationBase:
        with (
            # apply all patches while the model is on the target device
-            text_encoder_info as text_encoder,
+            text_encoder_info.model_on_device() as (state_dict, text_encoder),
            tokenizer_info as tokenizer,
-            ModelPatcher.apply_lora(text_encoder, _lora_loader(), lora_prefix),
+            ModelPatcher.apply_lora(
+                text_encoder,
+                loras=_lora_loader(),
+                prefix=lora_prefix,
+                model_state_dict=state_dict,
+            ),
            # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
            ModelPatcher.apply_clip_skip(text_encoder, clip_field.skipped_layers),
            ModelPatcher.apply_ti(tokenizer, text_encoder, ti_list) as (

View File

@@ -50,7 +50,7 @@ from invokeai.app.invocations.primitives import DenoiseMaskOutput, ImageOutput,
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import prepare_control_image
-from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
+from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.lora import LoRAModelRaw
from invokeai.backend.model_manager import BaseModelType, LoadedModel
from invokeai.backend.model_manager.config import MainConfigBase, ModelVariantType
@@ -672,34 +672,16 @@ class DenoiseLatentsInvocation(BaseInvocation):
        return controlnet_data

-    def prep_ip_adapter_data(
+    def prep_ip_adapter_image_prompts(
        self,
        context: InvocationContext,
-        ip_adapter: Optional[Union[IPAdapterField, list[IPAdapterField]]],
-        exit_stack: ExitStack,
-        latent_height: int,
-        latent_width: int,
-        dtype: torch.dtype,
-    ) -> Optional[list[IPAdapterData]]:
-        """If IP-Adapter is enabled, then this function loads the requisite models, and adds the image prompt embeddings
-        to the `conditioning_data` (in-place).
-        """
-        if ip_adapter is None:
-            return None
-        # ip_adapter could be a list or a single IPAdapterField. Normalize to a list here.
-        if not isinstance(ip_adapter, list):
-            ip_adapter = [ip_adapter]
-        if len(ip_adapter) == 0:
-            return None
-        ip_adapter_data_list = []
-        for single_ip_adapter in ip_adapter:
-            ip_adapter_model: Union[IPAdapter, IPAdapterPlus] = exit_stack.enter_context(
-                context.models.load(single_ip_adapter.ip_adapter_model)
-            )
+        ip_adapters: List[IPAdapterField],
+    ) -> List[Tuple[torch.Tensor, torch.Tensor]]:
+        """Run the IPAdapter CLIPVisionModel, returning image prompt embeddings."""
+        image_prompts = []
+        for single_ip_adapter in ip_adapters:
+            with context.models.load(single_ip_adapter.ip_adapter_model) as ip_adapter_model:
+                assert isinstance(ip_adapter_model, IPAdapter)
            image_encoder_model_info = context.models.load(single_ip_adapter.image_encoder_model)
            # `single_ip_adapter.image` could be a list or a single ImageField. Normalize to a list here.
            single_ipa_image_fields = single_ip_adapter.image
@@ -707,19 +689,35 @@ class DenoiseLatentsInvocation(BaseInvocation):
                single_ipa_image_fields = [single_ipa_image_fields]
            single_ipa_images = [context.images.get_pil(image.image_name) for image in single_ipa_image_fields]

-            # TODO(ryand): With some effort, the step of running the CLIP Vision encoder could be done before any other
-            # models are needed in memory. This would help to reduce peak memory utilization in low-memory environments.
            with image_encoder_model_info as image_encoder_model:
                assert isinstance(image_encoder_model, CLIPVisionModelWithProjection)
                # Get image embeddings from CLIP and ImageProjModel.
                image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds(
                    single_ipa_images, image_encoder_model
                )
+                image_prompts.append((image_prompt_embeds, uncond_image_prompt_embeds))

-            mask = single_ip_adapter.mask
-            if mask is not None:
-                mask = context.tensors.load(mask.tensor_name)
+        return image_prompts
+
+    def prep_ip_adapter_data(
+        self,
+        context: InvocationContext,
+        ip_adapters: List[IPAdapterField],
+        image_prompts: List[Tuple[torch.Tensor, torch.Tensor]],
+        exit_stack: ExitStack,
+        latent_height: int,
+        latent_width: int,
+        dtype: torch.dtype,
+    ) -> Optional[List[IPAdapterData]]:
+        """If IP-Adapter is enabled, then this function loads the requisite models and adds the image prompt conditioning data."""
+        ip_adapter_data_list = []
+        for single_ip_adapter, (image_prompt_embeds, uncond_image_prompt_embeds) in zip(
+            ip_adapters, image_prompts, strict=True
+        ):
+            ip_adapter_model = exit_stack.enter_context(context.models.load(single_ip_adapter.ip_adapter_model))
+            mask_field = single_ip_adapter.mask
+            mask = context.tensors.load(mask_field.tensor_name) if mask_field is not None else None
            mask = self._preprocess_regional_prompt_mask(mask, latent_height, latent_width, dtype=dtype)

            ip_adapter_data_list.append(
@@ -734,7 +732,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
                )
            )

-        return ip_adapter_data_list
+        return ip_adapter_data_list if len(ip_adapter_data_list) > 0 else None

    def run_t2i_adapters(
        self,
@@ -855,6 +853,16 @@ class DenoiseLatentsInvocation(BaseInvocation):
            # At some point, someone decided that schedulers that accept a generator should use the original seed with
            # all bits flipped. I don't know the original rationale for this, but now we must keep it like this for
            # reproducibility.
+            #
+            # These Invoke-supported schedulers accept a generator as of 2024-06-04:
+            # - DDIMScheduler
+            # - DDPMScheduler
+            # - DPMSolverMultistepScheduler
+            # - EulerAncestralDiscreteScheduler
+            # - EulerDiscreteScheduler
+            # - KDPM2AncestralDiscreteScheduler
+            # - LCMScheduler
+            # - TCDScheduler
            scheduler_step_kwargs.update({"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)})
        if isinstance(scheduler, TCDScheduler):
            scheduler_step_kwargs.update({"eta": 1.0})
@@ -912,6 +920,20 @@ class DenoiseLatentsInvocation(BaseInvocation):
            do_classifier_free_guidance=True,
        )

+        ip_adapters: List[IPAdapterField] = []
+        if self.ip_adapter is not None:
+            # ip_adapter could be a list or a single IPAdapterField. Normalize to a list here.
+            if isinstance(self.ip_adapter, list):
+                ip_adapters = self.ip_adapter
+            else:
+                ip_adapters = [self.ip_adapter]
+
+        # If there are IP adapters, the following line runs the adapters' CLIPVision image encoders to return
+        # a series of image conditioning embeddings. This is being done here rather than in the
+        # big model context below in order to use less VRAM on low-VRAM systems.
+        # The image prompts are then passed to prep_ip_adapter_data().
+        image_prompts = self.prep_ip_adapter_image_prompts(context=context, ip_adapters=ip_adapters)
+
        # get the unet's config so that we can pass the base to dispatch_progress()
        unet_config = context.models.get_config(self.unet.unet.key)
@@ -930,11 +952,15 @@ class DenoiseLatentsInvocation(BaseInvocation):
        assert isinstance(unet_info.model, UNet2DConditionModel)
        with (
            ExitStack() as exit_stack,
-            unet_info as unet,
+            unet_info.model_on_device() as (model_state_dict, unet),
            ModelPatcher.apply_freeu(unet, self.unet.freeu_config),
            set_seamless(unet, self.unet.seamless_axes),  # FIXME
            # Apply the LoRA after unet has been moved to its target device for faster patching.
-            ModelPatcher.apply_lora_unet(unet, _lora_loader()),
+            ModelPatcher.apply_lora_unet(
+                unet,
+                loras=_lora_loader(),
+                model_state_dict=model_state_dict,
+            ),
        ):
            assert isinstance(unet, UNet2DConditionModel)
            latents = latents.to(device=unet.device, dtype=unet.dtype)
@@ -970,7 +996,8 @@ class DenoiseLatentsInvocation(BaseInvocation):
            ip_adapter_data = self.prep_ip_adapter_data(
                context=context,
-                ip_adapter=self.ip_adapter,
+                ip_adapters=ip_adapters,
+                image_prompts=image_prompts,
                exit_stack=exit_stack,
                latent_height=latent_height,
                latent_width=latent_width,
@@ -1285,7 +1312,7 @@ class ImageToLatentsInvocation(BaseInvocation):
    title="Blend Latents",
    tags=["latents", "blend"],
    category="latents",
-    version="1.0.2",
+    version="1.0.3",
)
class BlendLatentsInvocation(BaseInvocation):
    """Blend two latents using a given alpha. Latents must have same size."""
@@ -1364,7 +1391,7 @@ class BlendLatentsInvocation(BaseInvocation):
        TorchDevice.empty_cache()

        name = context.tensors.save(tensor=blended_latents)
-        return LatentsOutput.build(latents_name=name, latents=blended_latents)
+        return LatentsOutput.build(latents_name=name, latents=blended_latents, seed=self.latents_a.seed)

# The Crop Latents node was copied from @skunkworxdark's implementation here:

View File

@@ -4,10 +4,13 @@ Base class for model loading in InvokeAI.
"""

from abc import ABC, abstractmethod
+from contextlib import contextmanager
from dataclasses import dataclass
from logging import Logger
from pathlib import Path
-from typing import Any, Optional
+from typing import Any, Dict, Generator, Optional, Tuple
+
+import torch

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_manager.config import (
@@ -21,7 +24,42 @@ from invokeai.backend.model_manager.load.model_cache.model_cache_base import Mod
@dataclass
class LoadedModelWithoutConfig:
-    """Context manager object that mediates transfer from RAM<->VRAM."""
+    """
+    Context manager object that mediates transfer from RAM<->VRAM.
+
+    This is a context manager object that has two distinct APIs:
+
+    1. Older API (deprecated):
+       Use the LoadedModel object directly as a context manager.
+       It will move the model into VRAM (on CUDA devices), and
+       return the model in a form suitable for passing to torch.
+       Example:
+       ```
+       loaded_model = loader.get_model_by_key('f13dd932', SubModelType('vae'))
+       with loaded_model as vae:
+           image = vae.decode(latents)[0]
+       ```
+
+    2. Newer API (recommended):
+       Call the LoadedModel's `model_on_device()` method in a
+       context. It returns a tuple consisting of a copy of
+       the model's state dict in CPU RAM followed by a copy
+       of the model in VRAM. The state dict is provided to allow
+       LoRAs and other model patchers to return the model to
+       its unpatched state without expensive copy and restore
+       operations.
+
+       Example:
+       ```
+       loaded_model = loader.get_model_by_key('f13dd932', SubModelType('vae'))
+       with loaded_model.model_on_device() as (state_dict, vae):
+           image = vae.decode(latents)[0]
+       ```
+
+    The state_dict should be treated as a read-only object and
+    never modified. Also be aware that some loadable models do
+    not have a state_dict, in which case this value will be None.
+    """

    _locker: ModelLockerBase
@@ -34,6 +72,16 @@ class LoadedModelWithoutConfig:
        """Context exit."""
        self._locker.unlock()

+    @contextmanager
+    def model_on_device(self) -> Generator[Tuple[Optional[Dict[str, torch.Tensor]], AnyModel], None, None]:
+        """Return a tuple consisting of the model's state dict (if it exists) and the locked model on execution device."""
+        locked_model = self._locker.lock()
+        try:
+            state_dict = self._locker.get_state_dict()
+            yield (state_dict, locked_model)
+        finally:
+            self._locker.unlock()
+
    @property
    def model(self) -> AnyModel:
        """Return the model without locking it."""

View File

@@ -30,6 +30,11 @@ class ModelLockerBase(ABC):
        """Unlock the contained model, and remove it from VRAM."""
        pass

+    @abstractmethod
+    def get_state_dict(self) -> Optional[Dict[str, torch.Tensor]]:
+        """Return the state dict (if any) for the cached model."""
+        pass
+
    @property
    @abstractmethod
    def model(self) -> AnyModel:
@@ -56,6 +61,11 @@ class CacheRecord(Generic[T]):
    and then injected into the model. When the model is finished, the VRAM
    copy of the state dict is deleted, and the RAM version is reinjected
    into the model.
+
+    The state_dict should be treated as a read-only attribute. Do not attempt
+    to patch or otherwise modify it. Instead, patch the copy of the state_dict
+    after it is loaded into the execution device (e.g. CUDA) using the `LoadedModel`
+    context manager call `model_on_device()`.
    """

    key: str
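In other words, callers patch the copy that `model_on_device()` hands back on the execution device and, if they need to restore anything, they read from the CPU state dict rather than writing to it. A rough sketch of that pattern, with a hypothetical module key and `loaded_model` assumed to be a `LoadedModel`:

```python
import torch

# Illustrative only: "some.layer" is a hypothetical module key and the patch is
# a placeholder; `loaded_model` is assumed to be a LoadedModel instance.
with loaded_model.model_on_device() as (cpu_state_dict, model):
    module = model.get_submodule("some.layer")
    with torch.no_grad():
        # Patch the VRAM copy in place...
        module.weight += 0.01 * torch.randn_like(module.weight)
        # ...and restore it from the read-only CPU copy instead of keeping a
        # second backup of the weight.
        if cpu_state_dict is not None:
            module.weight.copy_(cpu_state_dict["some.layer.weight"])
```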

View File

@@ -2,6 +2,8 @@
Base class and implementation of a class that moves models in and out of VRAM.
"""

+from typing import Dict, Optional
+
import torch

from invokeai.backend.model_manager import AnyModel
@@ -27,16 +29,18 @@ class ModelLocker(ModelLockerBase):
        """Return the model without moving it around."""
        return self._cache_entry.model

+    def get_state_dict(self) -> Optional[Dict[str, torch.Tensor]]:
+        """Return the state dict (if any) for the cached model."""
+        return self._cache_entry.state_dict
+
    def lock(self) -> AnyModel:
        """Move the model into the execution device (GPU) and lock it."""
        self._cache_entry.lock()
        try:
            if self._cache.lazy_offloading:
                self._cache.offload_unlocked_models(self._cache_entry.size)
            self._cache.move_model_to_device(self._cache_entry, self._cache.execution_device)
            self._cache_entry.loaded = True
            self._cache.logger.debug(f"Locking {self._cache_entry.key} in {self._cache.execution_device}")
            self._cache.print_cuda_stats()
        except torch.cuda.OutOfMemoryError:

View File

@@ -5,7 +5,7 @@ from __future__ import annotations
import pickle
from contextlib import contextmanager
-from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
+from typing import Any, Dict, Generator, Iterator, List, Optional, Tuple, Union

import numpy as np
import torch
@@ -66,8 +66,14 @@ class ModelPatcher:
        cls,
        unet: UNet2DConditionModel,
        loras: Iterator[Tuple[LoRAModelRaw, float]],
+        model_state_dict: Optional[Dict[str, torch.Tensor]] = None,
    ) -> None:
-        with cls.apply_lora(unet, loras, "lora_unet_"):
+        with cls.apply_lora(
+            unet,
+            loras=loras,
+            prefix="lora_unet_",
+            model_state_dict=model_state_dict,
+        ):
            yield

    @classmethod
@@ -76,28 +82,9 @@ class ModelPatcher:
        cls,
        text_encoder: CLIPTextModel,
        loras: Iterator[Tuple[LoRAModelRaw, float]],
+        model_state_dict: Optional[Dict[str, torch.Tensor]] = None,
    ) -> None:
-        with cls.apply_lora(text_encoder, loras, "lora_te_"):
-            yield
-
-    @classmethod
-    @contextmanager
-    def apply_sdxl_lora_text_encoder(
-        cls,
-        text_encoder: CLIPTextModel,
-        loras: List[Tuple[LoRAModelRaw, float]],
-    ) -> None:
-        with cls.apply_lora(text_encoder, loras, "lora_te1_"):
-            yield
-
-    @classmethod
-    @contextmanager
-    def apply_sdxl_lora_text_encoder2(
-        cls,
-        text_encoder: CLIPTextModel,
-        loras: List[Tuple[LoRAModelRaw, float]],
-    ) -> None:
-        with cls.apply_lora(text_encoder, loras, "lora_te2_"):
+        with cls.apply_lora(text_encoder, loras=loras, prefix="lora_te_", model_state_dict=model_state_dict):
            yield

    @classmethod
@@ -107,7 +94,16 @@ class ModelPatcher:
        model: AnyModel,
        loras: Iterator[Tuple[LoRAModelRaw, float]],
        prefix: str,
-    ) -> None:
+        model_state_dict: Optional[Dict[str, torch.Tensor]] = None,
+    ) -> Generator[Any, None, None]:
+        """
+        Apply one or more LoRAs to a model.
+
+        :param model: The model to patch.
+        :param loras: An iterator that returns the LoRA to patch in and its patch weight.
+        :param prefix: A string prefix that precedes keys used in the LoRAs weight layers.
+        :model_state_dict: Read-only copy of the model's state dict in CPU, for unpatching purposes.
+        """
        original_weights = {}
        try:
            with torch.no_grad():
@@ -133,6 +129,9 @@ class ModelPatcher:
                    dtype = module.weight.dtype

                    if module_key not in original_weights:
-                        original_weights[module_key] = module.weight.detach().to(device="cpu", copy=True)
+                        if model_state_dict is not None:  # we were provided with the CPU copy of the state dict
+                            original_weights[module_key] = model_state_dict[module_key + ".weight"]
+                        else:
+                            original_weights[module_key] = module.weight.detach().to(device="cpu", copy=True)

                    layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0
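The point of `model_state_dict` is that `original_weights` can be filled from the CPU state dict that `model_on_device()` already holds, instead of detaching and copying each patched weight back to CPU. The matching unpatch step is not part of this hunk; a minimal sketch of it, assuming the `model` and `original_weights` names from `apply_lora`, looks like this:

```python
# Minimal sketch of the unpatch step that pairs with the hunk above; in the
# real ModelPatcher it lives in the `finally` block of `apply_lora`.
with torch.no_grad():
    for module_key, weight in original_weights.items():
        # Copy the saved CPU weight back over the patched weight on the
        # execution device (a cross-device copy_ handles the transfer).
        model.get_submodule(module_key).weight.copy_(weight)
```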

View File

@@ -1021,7 +1021,8 @@
"float": "Kommazahlen",
"enum": "Aufzählung",
"fullyContainNodes": "Vollständig ausgewählte Nodes auswählen",
-"editMode": "Im Workflow-Editor bearbeiten"
+"editMode": "Im Workflow-Editor bearbeiten",
+"resetToDefaultValue": "Auf Standardwert zurücksetzen"
},
"hrf": {
"enableHrf": "Korrektur für hohe Auflösungen",

View File

@@ -6,7 +6,7 @@
"settingsLabel": "Ajustes",
"img2img": "Imagen a Imagen",
"unifiedCanvas": "Lienzo Unificado",
-"nodes": "Editor del flujo de trabajo",
+"nodes": "Flujos de trabajo",
"upload": "Subir imagen",
"load": "Cargar",
"statusDisconnected": "Desconectado",
@@ -14,7 +14,7 @@
"discordLabel": "Discord",
"back": "Atrás",
"loading": "Cargando",
-"postprocessing": "Tratamiento posterior",
+"postprocessing": "Postprocesado",
"txt2img": "De texto a imagen",
"accept": "Aceptar",
"cancel": "Cancelar",
@@ -42,7 +42,42 @@
"copy": "Copiar",
"beta": "Beta",
"on": "En",
-"aboutDesc": "¿Utilizas Invoke para trabajar? Mira aquí:"
+"aboutDesc": "¿Utilizas Invoke para trabajar? Mira aquí:",
"installed": "Instalado",
"green": "Verde",
"editor": "Editor",
"orderBy": "Ordenar por",
"file": "Archivo",
"goTo": "Ir a",
"imageFailedToLoad": "No se puede cargar la imagen",
"saveAs": "Guardar Como",
"somethingWentWrong": "Algo salió mal",
"nextPage": "Página Siguiente",
"selected": "Seleccionado",
"tab": "Tabulador",
"positivePrompt": "Prompt Positivo",
"negativePrompt": "Prompt Negativo",
"error": "Error",
"format": "formato",
"unknown": "Desconocido",
"input": "Entrada",
"nodeEditor": "Editor de nodos",
"template": "Plantilla",
"prevPage": "Página Anterior",
"red": "Rojo",
"alpha": "Transparencia",
"outputs": "Salidas",
"editing": "Editando",
"learnMore": "Aprende más",
"enabled": "Activado",
"disabled": "Desactivado",
"folder": "Carpeta",
"updated": "Actualizado",
"created": "Creado",
"save": "Guardar",
"unknownError": "Error Desconocido",
"blue": "Azul",
"viewingDesc": "Revisar imágenes en una vista de galería grande"
},
"gallery": {
"galleryImageSize": "Tamaño de la imagen",
@@ -467,7 +502,8 @@
"about": "Acerca de",
"createIssue": "Crear un problema",
"resetUI": "Interfaz de usuario $t(accessibility.reset)",
-"mode": "Modo"
+"mode": "Modo",
+"submitSupportTicket": "Enviar Ticket de Soporte"
},
"nodes": {
"zoomInNodes": "Acercar",
@@ -543,5 +579,17 @@
"layers_one": "Capa",
"layers_many": "Capas",
"layers_other": "Capas"
-}
+},
"controlnet": {
"crop": "Cortar",
"delete": "Eliminar",
"depthAnythingDescription": "Generación de mapa de profundidad usando la técnica de Depth Anything",
"duplicate": "Duplicar",
"colorMapDescription": "Genera un mapa de color desde la imagen",
"depthMidasDescription": "Crea un mapa de profundidad con Midas",
"balanced": "Equilibrado",
"beginEndStepPercent": "Inicio / Final Porcentaje de pasos",
"detectResolution": "Detectar resolución",
"beginEndStepPercentShort": "Inicio / Final %"
}
}

View File

@@ -45,7 +45,7 @@
"outputs": "Risultati",
"data": "Dati",
"somethingWentWrong": "Qualcosa è andato storto",
-"copyError": "$t(gallery.copy) Errore",
+"copyError": "Errore $t(gallery.copy)",
"input": "Ingresso",
"notInstalled": "Non $t(common.installed)",
"unknownError": "Errore sconosciuto",
@@ -85,7 +85,11 @@
"viewing": "Visualizza",
"viewingDesc": "Rivedi le immagini in un'ampia vista della galleria",
"editing": "Modifica",
-"editingDesc": "Modifica nell'area Livelli di controllo"
+"editingDesc": "Modifica nell'area Livelli di controllo",
"enabled": "Abilitato",
"disabled": "Disabilitato",
"comparingDesc": "Confronta due immagini",
"comparing": "Confronta"
},
"gallery": {
"galleryImageSize": "Dimensione dell'immagine",
@@ -122,14 +126,30 @@
"bulkDownloadRequestedDesc": "La tua richiesta di download è in preparazione. L'operazione potrebbe richiedere alcuni istanti.",
"bulkDownloadRequestFailed": "Problema durante la preparazione del download",
"bulkDownloadFailed": "Scaricamento fallito",
-"alwaysShowImageSizeBadge": "Mostra sempre le dimensioni dell'immagine"
+"alwaysShowImageSizeBadge": "Mostra sempre le dimensioni dell'immagine",
"openInViewer": "Apri nel visualizzatore",
"selectForCompare": "Seleziona per il confronto",
"selectAnImageToCompare": "Seleziona un'immagine da confrontare",
"slider": "Cursore",
"sideBySide": "Fianco a Fianco",
"compareImage": "Immagine di confronto",
"viewerImage": "Immagine visualizzata",
"hover": "Al passaggio del mouse",
"swapImages": "Scambia le immagini",
"compareOptions": "Opzioni di confronto",
"stretchToFit": "Scala per adattare",
"exitCompare": "Esci dal confronto",
"compareHelp1": "Tieni premuto <Kbd>Alt</Kbd> mentre fai clic su un'immagine della galleria o usi i tasti freccia per cambiare l'immagine di confronto.",
"compareHelp2": "Premi <Kbd>M</Kbd> per scorrere le modalità di confronto.",
"compareHelp3": "Premi <Kbd>C</Kbd> per scambiare le immagini confrontate.",
"compareHelp4": "Premi <Kbd>Z</Kbd> o <Kbd>Esc</Kbd> per uscire."
},
"hotkeys": {
"keyboardShortcuts": "Tasti di scelta rapida",
"appHotkeys": "Applicazione",
"generalHotkeys": "Generale",
"galleryHotkeys": "Galleria",
-"unifiedCanvasHotkeys": "Tela Unificata",
+"unifiedCanvasHotkeys": "Tela",
"invoke": {
"title": "Invoke",
"desc": "Genera un'immagine"
@@ -147,8 +167,8 @@
"desc": "Apre e chiude il pannello delle opzioni"
},
"pinOptions": {
-"title": "Appunta le opzioni",
-"desc": "Blocca il pannello delle opzioni"
+"title": "Fissa le opzioni",
+"desc": "Fissa il pannello delle opzioni"
},
"toggleGallery": {
"title": "Attiva/disattiva galleria",
@@ -332,14 +352,14 @@
"title": "Annulla e cancella"
},
"resetOptionsAndGallery": {
-"title": "Ripristina Opzioni e Galleria",
-"desc": "Reimposta le opzioni e i pannelli della galleria"
+"title": "Ripristina le opzioni e la galleria",
+"desc": "Reimposta i pannelli delle opzioni e della galleria"
},
"searchHotkeys": "Cerca tasti di scelta rapida",
"noHotkeysFound": "Nessun tasto di scelta rapida trovato",
"toggleOptionsAndGallery": {
"desc": "Apre e chiude le opzioni e i pannelli della galleria",
-"title": "Attiva/disattiva le Opzioni e la Galleria"
+"title": "Attiva/disattiva le opzioni e la galleria"
},
"clearSearch": "Cancella ricerca",
"remixImage": {
@@ -348,7 +368,7 @@
},
"toggleViewer": {
"title": "Attiva/disattiva il visualizzatore di immagini",
-"desc": "Passa dal Visualizzatore immagini all'area di lavoro per la scheda corrente."
+"desc": "Passa dal visualizzatore immagini all'area di lavoro per la scheda corrente."
}
},
"modelManager": {
@@ -378,7 +398,7 @@
"convertToDiffusers": "Converti in Diffusori",
"convertToDiffusersHelpText2": "Questo processo sostituirà la voce in Gestione Modelli con la versione Diffusori dello stesso modello.",
"convertToDiffusersHelpText4": "Questo è un processo una tantum. Potrebbero essere necessari circa 30-60 secondi a seconda delle specifiche del tuo computer.",
-"convertToDiffusersHelpText5": "Assicurati di avere spazio su disco sufficiente. I modelli generalmente variano tra 2 GB e 7 GB di dimensioni.",
+"convertToDiffusersHelpText5": "Assicurati di avere spazio su disco sufficiente. I modelli generalmente variano tra 2 GB e 7 GB in dimensione.",
"convertToDiffusersHelpText6": "Vuoi convertire questo modello?",
"modelConverted": "Modello convertito",
"alpha": "Alpha",
@@ -528,7 +548,7 @@
"layer": {
"initialImageNoImageSelected": "Nessuna immagine iniziale selezionata",
"t2iAdapterIncompatibleDimensions": "L'adattatore T2I richiede che la dimensione dell'immagine sia un multiplo di {{multiple}}",
-"controlAdapterNoModelSelected": "Nessun modello di Adattatore di Controllo selezionato",
+"controlAdapterNoModelSelected": "Nessun modello di adattatore di controllo selezionato",
"controlAdapterIncompatibleBaseModel": "Il modello base dell'adattatore di controllo non è compatibile",
"controlAdapterNoImageSelected": "Nessuna immagine dell'adattatore di controllo selezionata",
"controlAdapterImageNotProcessed": "Immagine dell'adattatore di controllo non elaborata",
@@ -606,25 +626,25 @@
"canvasMerged": "Tela unita",
"sentToImageToImage": "Inviato a Generazione da immagine",
"sentToUnifiedCanvas": "Inviato alla Tela",
-"parametersNotSet": "Parametri non impostati",
+"parametersNotSet": "Parametri non richiamati",
"metadataLoadFailed": "Impossibile caricare i metadati",
"serverError": "Errore del Server",
-"connected": "Connesso al Server",
+"connected": "Connesso al server",
"canceled": "Elaborazione annullata",
"uploadFailedInvalidUploadDesc": "Deve essere una singola immagine PNG o JPEG",
-"parameterSet": "{{parameter}} impostato",
-"parameterNotSet": "{{parameter}} non impostato",
+"parameterSet": "Parametro richiamato",
+"parameterNotSet": "Parametro non richiamato",
"problemCopyingImage": "Impossibile copiare l'immagine",
-"baseModelChangedCleared_one": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modello incompatibile",
-"baseModelChangedCleared_many": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modelli incompatibili",
-"baseModelChangedCleared_other": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modelli incompatibili",
+"baseModelChangedCleared_one": "Cancellato o disabilitato {{count}} sottomodello incompatibile",
+"baseModelChangedCleared_many": "Cancellati o disabilitati {{count}} sottomodelli incompatibili",
+"baseModelChangedCleared_other": "Cancellati o disabilitati {{count}} sottomodelli incompatibili",
"imageSavingFailed": "Salvataggio dell'immagine non riuscito",
"canvasSentControlnetAssets": "Tela inviata a ControlNet & Risorse",
"problemCopyingCanvasDesc": "Impossibile copiare la tela",
"loadedWithWarnings": "Flusso di lavoro caricato con avvisi",
"canvasCopiedClipboard": "Tela copiata negli appunti",
"maskSavedAssets": "Maschera salvata nelle risorse",
-"problemDownloadingCanvas": "Problema durante il download della tela",
+"problemDownloadingCanvas": "Problema durante lo scarico della tela",
"problemMergingCanvas": "Problema nell'unione delle tele",
"imageUploaded": "Immagine caricata",
"addedToBoard": "Aggiunto alla bacheca",
@@ -658,7 +678,17 @@
"problemDownloadingImage": "Impossibile scaricare l'immagine",
"prunedQueue": "Coda ripulita",
"modelImportCanceled": "Importazione del modello annullata",
-"parameters": "Parametri"
+"parameters": "Parametri",
"parameterSetDesc": "{{parameter}} richiamato",
"parameterNotSetDesc": "Impossibile richiamare {{parameter}}",
"parameterNotSetDescWithMessage": "Impossibile richiamare {{parameter}}: {{message}}",
"parametersSet": "Parametri richiamati",
"errorCopied": "Errore copiato",
"outOfMemoryError": "Errore di memoria esaurita",
"baseModelChanged": "Modello base modificato",
"sessionRef": "Sessione: {{sessionId}}",
"somethingWentWrong": "Qualcosa è andato storto",
"outOfMemoryErrorDesc": "Le impostazioni della generazione attuale superano la capacità del sistema. Modifica le impostazioni e riprova."
},
"tooltip": {
"feature": {
@@ -674,7 +704,7 @@
"layer": "Livello",
"base": "Base",
"mask": "Maschera",
-"maskingOptions": "Opzioni di mascheramento",
+"maskingOptions": "Opzioni maschera",
"enableMask": "Abilita maschera",
"preserveMaskedArea": "Mantieni area mascherata",
"clearMask": "Cancella maschera (Shift+C)",
@@ -745,7 +775,8 @@
"mode": "Modalità",
"resetUI": "$t(accessibility.reset) l'Interfaccia Utente",
"createIssue": "Segnala un problema",
-"about": "Informazioni"
+"about": "Informazioni",
"submitSupportTicket": "Invia ticket di supporto"
},
"nodes": {
"zoomOutNodes": "Rimpicciolire",
@@ -790,7 +821,7 @@
"workflowNotes": "Note",
"versionUnknown": " Versione sconosciuta",
"unableToValidateWorkflow": "Impossibile convalidare il flusso di lavoro",
-"updateApp": "Aggiorna App",
+"updateApp": "Aggiorna Applicazione",
"unableToLoadWorkflow": "Impossibile caricare il flusso di lavoro",
"updateNode": "Aggiorna nodo",
"version": "Versione",
@@ -882,11 +913,14 @@
"missingNode": "Nodo di invocazione mancante",
"missingInvocationTemplate": "Modello di invocazione mancante",
"missingFieldTemplate": "Modello di campo mancante",
-"singleFieldType": "{{name}} (Singola)"
+"singleFieldType": "{{name}} (Singola)",
"imageAccessError": "Impossibile trovare l'immagine {{image_name}}, ripristino delle impostazioni predefinite",
"boardAccessError": "Impossibile trovare la bacheca {{board_id}}, ripristino ai valori predefiniti",
"modelAccessError": "Impossibile trovare il modello {{key}}, ripristino ai valori predefiniti"
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
-"menuItemAutoAdd": "Aggiungi automaticamente a questa Bacheca",
+"menuItemAutoAdd": "Aggiungi automaticamente a questa bacheca",
"cancel": "Annulla",
"addBoard": "Aggiungi Bacheca",
"bottomMessage": "L'eliminazione di questa bacheca e delle sue immagini ripristinerà tutte le funzionalità che le stanno attualmente utilizzando.",
@@ -898,7 +932,7 @@
"myBoard": "Bacheca",
"searchBoard": "Cerca bacheche ...",
"noMatching": "Nessuna bacheca corrispondente",
-"selectBoard": "Seleziona una Bacheca",
+"selectBoard": "Seleziona una bacheca",
"uncategorized": "Non categorizzato",
"downloadBoard": "Scarica la bacheca",
"deleteBoardOnly": "solo la Bacheca",
@@ -919,7 +953,7 @@
"control": "Controllo",
"crop": "Ritaglia",
"depthMidas": "Profondità (Midas)",
-"detectResolution": "Rileva risoluzione",
+"detectResolution": "Rileva la risoluzione",
"controlMode": "Modalità di controllo",
"cannyDescription": "Canny rilevamento bordi",
"depthZoe": "Profondità (Zoe)",
@@ -930,7 +964,7 @@
"showAdvanced": "Mostra opzioni Avanzate",
"bgth": "Soglia rimozione sfondo",
"importImageFromCanvas": "Importa immagine dalla Tela",
-"lineartDescription": "Converte l'immagine in lineart",
+"lineartDescription": "Converte l'immagine in linea",
"importMaskFromCanvas": "Importa maschera dalla Tela",
"hideAdvanced": "Nascondi opzioni avanzate",
"resetControlImage": "Reimposta immagine di controllo",
@@ -946,7 +980,7 @@
"pidiDescription": "Elaborazione immagini PIDI",
"fill": "Riempie",
"colorMapDescription": "Genera una mappa dei colori dall'immagine",
-"lineartAnimeDescription": "Elaborazione lineart in stile anime",
+"lineartAnimeDescription": "Elaborazione linea in stile anime",
"imageResolution": "Risoluzione dell'immagine",
"colorMap": "Colore",
"lowThreshold": "Soglia inferiore",

View File

@@ -87,7 +87,11 @@
"viewing": "Просмотр",
"editing": "Редактирование",
"viewingDesc": "Просмотр изображений в режиме большой галереи",
-"editingDesc": "Редактировать на холсте слоёв управления"
+"editingDesc": "Редактировать на холсте слоёв управления",
"enabled": "Включено",
"disabled": "Отключено",
"comparingDesc": "Сравнение двух изображений",
"comparing": "Сравнение"
},
"gallery": {
"galleryImageSize": "Размер изображений",
@@ -124,7 +128,23 @@
"bulkDownloadRequested": "Подготовка к скачиванию",
"bulkDownloadRequestedDesc": "Ваш запрос на скачивание готовится. Это может занять несколько минут.",
"bulkDownloadRequestFailed": "Возникла проблема при подготовке скачивания",
-"alwaysShowImageSizeBadge": "Всегда показывать значок размера изображения"
+"alwaysShowImageSizeBadge": "Всегда показывать значок размера изображения",
"openInViewer": "Открыть в просмотрщике",
"selectForCompare": "Выбрать для сравнения",
"hover": "Наведение",
"swapImages": "Поменять местами",
"stretchToFit": "Растягивание до нужного размера",
"exitCompare": "Выйти из сравнения",
"compareHelp4": "Нажмите <Kbd>Z</Kbd> или <Kbd>Esc</Kbd> для выхода.",
"compareImage": "Сравнить изображение",
"viewerImage": "Изображение просмотрщика",
"selectAnImageToCompare": "Выберите изображение для сравнения",
"slider": "Слайдер",
"sideBySide": "Бок о бок",
"compareOptions": "Варианты сравнения",
"compareHelp1": "Удерживайте <Kbd>Alt</Kbd> при нажатии на изображение в галерее или при помощи клавиш со стрелками, чтобы изменить сравниваемое изображение.",
"compareHelp2": "Нажмите <Kbd>M</Kbd>, чтобы переключиться между режимами сравнения.",
"compareHelp3": "Нажмите <Kbd>C</Kbd>, чтобы поменять местами сравниваемые изображения."
},
"hotkeys": {
"keyboardShortcuts": "Горячие клавиши",
@@ -528,7 +548,20 @@
"missingFieldTemplate": "Отсутствует шаблон поля",
"addingImagesTo": "Добавление изображений в",
"invoke": "Создать",
-"imageNotProcessedForControlAdapter": "Изображение адаптера контроля №{{number}} не обрабатывается"
+"imageNotProcessedForControlAdapter": "Изображение адаптера контроля №{{number}} не обрабатывается",
"layer": {
"controlAdapterImageNotProcessed": "Изображение адаптера контроля не обработано",
"ipAdapterNoModelSelected": "IP адаптер не выбран",
"controlAdapterNoModelSelected": "не выбрана модель адаптера контроля",
"controlAdapterIncompatibleBaseModel": "несовместимая базовая модель адаптера контроля",
"controlAdapterNoImageSelected": "не выбрано изображение контрольного адаптера",
"initialImageNoImageSelected": "начальное изображение не выбрано",
"rgNoRegion": "регион не выбран",
"rgNoPromptsOrIPAdapters": "нет текстовых запросов или IP-адаптеров",
"ipAdapterIncompatibleBaseModel": "несовместимая базовая модель IP-адаптера",
"t2iAdapterIncompatibleDimensions": "Адаптер T2I требует, чтобы размеры изображения были кратны {{multiple}}",
"ipAdapterNoImageSelected": "изображение IP-адаптера не выбрано"
}
},
"isAllowedToUpscale": {
"useX2Model": "Изображение слишком велико для увеличения с помощью модели x4. Используйте модель x2",
@@ -606,12 +639,12 @@
"connected": "Подключено к серверу",
"canceled": "Обработка отменена",
"uploadFailedInvalidUploadDesc": "Должно быть одно изображение в формате PNG или JPEG",
-"parameterNotSet": "Параметр {{parameter}} не задан",
-"parameterSet": "Параметр {{parameter}} задан",
+"parameterNotSet": "Параметр не задан",
+"parameterSet": "Параметр задан",
"problemCopyingImage": "Не удается скопировать изображение",
-"baseModelChangedCleared_one": "Базовая модель изменила, очистила или отключила {{count}} несовместимую подмодель",
-"baseModelChangedCleared_few": "Базовая модель изменила, очистила или отключила {{count}} несовместимые подмодели",
-"baseModelChangedCleared_many": "Базовая модель изменила, очистила или отключила {{count}} несовместимых подмоделей",
+"baseModelChangedCleared_one": "Очищена или отключена {{count}} несовместимая подмодель",
+"baseModelChangedCleared_few": "Очищены или отключены {{count}} несовместимые подмодели",
+"baseModelChangedCleared_many": "Очищены или отключены {{count}} несовместимых подмоделей",
"imageSavingFailed": "Не удалось сохранить изображение",
"canvasSentControlnetAssets": "Холст отправлен в ControlNet и ресурсы",
"problemCopyingCanvasDesc": "Невозможно экспортировать базовый слой",
@@ -652,7 +685,17 @@
"resetInitialImage": "Сбросить начальное изображение",
"prunedQueue": "Урезанная очередь",
"modelImportCanceled": "Импорт модели отменен",
-"parameters": "Параметры"
+"parameters": "Параметры",
"parameterSetDesc": "Задан {{parameter}}",
"parameterNotSetDesc": "Невозможно задать {{parameter}}",
"baseModelChanged": "Базовая модель сменена",
"parameterNotSetDescWithMessage": "Не удалось задать {{parameter}}: {{message}}",
"parametersSet": "Параметры заданы",
"errorCopied": "Ошибка скопирована",
"sessionRef": "Сессия: {{sessionId}}",
"outOfMemoryError": "Ошибка нехватки памяти",
"outOfMemoryErrorDesc": "Ваши текущие настройки генерации превышают возможности системы. Пожалуйста, измените настройки и повторите попытку.",
"somethingWentWrong": "Что-то пошло не так"
},
"tooltip": {
"feature": {
@@ -739,7 +782,8 @@
"loadMore": "Загрузить больше",
"resetUI": "$t(accessibility.reset) интерфейс",
"createIssue": "Сообщить о проблеме",
-"about": "Об этом"
+"about": "Об этом",
"submitSupportTicket": "Отправить тикет в службу поддержки"
},
"nodes": {
"zoomInNodes": "Увеличьте масштаб",
@@ -832,7 +876,7 @@
"workflowName": "Название",
"collection": "Коллекция",
"unknownErrorValidatingWorkflow": "Неизвестная ошибка при проверке рабочего процесса",
-"collectionFieldType": "Коллекция {{name}}",
+"collectionFieldType": "{{name}} (Коллекция)",
"workflowNotes": "Примечания",
"string": "Строка",
"unknownNodeType": "Неизвестный тип узла",
@@ -848,7 +892,7 @@
"targetNodeDoesNotExist": "Недопустимое ребро: целевой/входной узел {{node}} не существует",
"mismatchedVersion": "Недопустимый узел: узел {{node}} типа {{type}} имеет несоответствующую версию (попробовать обновить?)",
"unknownFieldType": "$t(nodes.unknownField) тип: {{type}}",
-"collectionOrScalarFieldType": "Коллекция | Скаляр {{name}}",
+"collectionOrScalarFieldType": "{{name}} (Один или коллекция)",
"betaDesc": "Этот вызов находится в бета-версии. Пока он не станет стабильным, в нем могут происходить изменения при обновлении приложений. Мы планируем поддерживать этот вызов в течение длительного времени.",
"nodeVersion": "Версия узла",
"loadingNodes": "Загрузка узлов...",
@@ -870,7 +914,16 @@
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений.",
"graph": "График",
"showEdgeLabels": "Показать метки на ребрах",
-"showEdgeLabelsHelp": "Показать метки на ребрах, указывающие на соединенные узлы"
+"showEdgeLabelsHelp": "Показать метки на ребрах, указывающие на соединенные узлы",
"cannotMixAndMatchCollectionItemTypes": "Невозможно смешивать и сопоставлять типы элементов коллекции",
"missingNode": "Отсутствует узел вызова",
"missingInvocationTemplate": "Отсутствует шаблон вызова",
"missingFieldTemplate": "Отсутствующий шаблон поля",
"singleFieldType": "{{name}} (Один)",
"noGraph": "Нет графика",
"imageAccessError": "Невозможно найти изображение {{image_name}}, сбрасываем на значение по умолчанию",
"boardAccessError": "Невозможно найти доску {{board_id}}, сбрасываем на значение по умолчанию",
"modelAccessError": "Невозможно найти модель {{key}}, сброс на модель по умолчанию"
},
"controlnet": {
"amult": "a_mult",
@@ -1441,7 +1494,16 @@
"clearQueueAlertDialog2": "Вы уверены, что хотите очистить очередь?",
"item": "Элемент",
"graphFailedToQueue": "Не удалось поставить график в очередь",
-"openQueue": "Открыть очередь"
+"openQueue": "Открыть очередь",
"prompts_one": "Запрос",
"prompts_few": "Запроса",
"prompts_many": "Запросов",
"iterations_one": "Итерация",
"iterations_few": "Итерации",
"iterations_many": "Итераций",
"generations_one": "Генерация",
"generations_few": "Генерации",
"generations_many": "Генераций"
},
"sdxl": {
"refinerStart": "Запуск доработчика",

View File

@@ -1,6 +1,6 @@
{
"common": {
-"nodes": "節點",
+"nodes": "工作流程",
"img2img": "圖片轉圖片",
"statusDisconnected": "已中斷連線",
"back": "返回",

@@ -11,17 +11,239 @@
"reportBugLabel": "回報錯誤",
"githubLabel": "GitHub",
"hotkeysLabel": "快捷鍵",
-"languagePickerLabel": "切換語言",
+"languagePickerLabel": "語言",
"unifiedCanvas": "統一畫布",
"cancel": "取消",
-"txt2img": "文字轉圖片"
+"txt2img": "文字轉圖片",
"controlNet": "ControlNet",
"advanced": "進階",
"folder": "資料夾",
"installed": "已安裝",
"accept": "接受",
"goTo": "前往",
"input": "輸入",
"random": "隨機",
"selected": "已選擇",
"communityLabel": "社群",
"loading": "載入中",
"delete": "刪除",
"copy": "複製",
"error": "錯誤",
"file": "檔案",
"format": "格式",
"imageFailedToLoad": "無法載入圖片"
},
"accessibility": {
"invokeProgressBar": "Invoke 進度條",
"uploadImage": "上傳圖片",
-"reset": "重設",
+"reset": "重",
"nextImage": "下一張圖片",
"previousImage": "上一張圖片",
-"menu": "選單"
+"menu": "選單",
"loadMore": "載入更多",
"about": "關於",
"createIssue": "建立問題",
"resetUI": "$t(accessibility.reset) 介面",
"submitSupportTicket": "提交支援工單",
"mode": "模式"
},
"boards": {
"loading": "載入中…",
"movingImagesToBoard_other": "正在移動 {{count}} 張圖片至板上:",
"move": "移動",
"uncategorized": "未分類",
"cancel": "取消"
},
"metadata": {
"workflow": "工作流程",
"steps": "步數",
"model": "模型",
"seed": "種子",
"vae": "VAE",
"seamless": "無縫",
"metadata": "元數據",
"width": "寬度",
"height": "高度"
},
"accordions": {
"control": {
"title": "控制"
},
"compositing": {
"title": "合成"
},
"advanced": {
"title": "進階",
"options": "$t(accordions.advanced.title) 選項"
}
},
"hotkeys": {
"nodesHotkeys": "節點",
"cancel": {
"title": "取消"
},
"generalHotkeys": "一般",
"keyboardShortcuts": "快捷鍵",
"appHotkeys": "應用程式"
},
"modelManager": {
"advanced": "進階",
"allModels": "全部模型",
"variant": "變體",
"config": "配置",
"model": "模型",
"selected": "已選擇",
"huggingFace": "HuggingFace",
"install": "安裝",
"metadata": "元數據",
"delete": "刪除",
"description": "描述",
"cancel": "取消",
"convert": "轉換",
"manual": "手動",
"none": "無",
"name": "名稱",
"load": "載入",
"height": "高度",
"width": "寬度",
"search": "搜尋",
"vae": "VAE",
"settings": "設定"
},
"controlnet": {
"mlsd": "M-LSD",
"canny": "Canny",
"duplicate": "重複",
"none": "無",
"pidi": "PIDI",
"h": "H",
"balanced": "平衡",
"crop": "裁切",
"processor": "處理器",
"control": "控制",
"f": "F",
"lineart": "線條藝術",
"w": "W",
"hed": "HED",
"delete": "刪除"
},
"queue": {
"queue": "佇列",
"canceled": "已取消",
"failed": "已失敗",
"completed": "已完成",
"cancel": "取消",
"session": "工作階段",
"batch": "批量",
"item": "項目",
"completedIn": "完成於",
"notReady": "無法排隊"
},
"parameters": {
"cancel": {
"cancel": "取消"
},
"height": "高度",
"type": "類型",
"symmetry": "對稱性",
"images": "圖片",
"width": "寬度",
"coherenceMode": "模式",
"seed": "種子",
"general": "一般",
"strength": "強度",
"steps": "步數",
"info": "資訊"
},
"settings": {
"beta": "Beta",
"developer": "開發者",
"general": "一般",
"models": "模型"
},
"popovers": {
"paramModel": {
"heading": "模型"
},
"compositingCoherenceMode": {
"heading": "模式"
},
"paramSteps": {
"heading": "步數"
},
"controlNetProcessor": {
"heading": "處理器"
},
"paramVAE": {
"heading": "VAE"
},
"paramHeight": {
"heading": "高度"
},
"paramSeed": {
"heading": "種子"
},
"paramWidth": {
"heading": "寬度"
},
"refinerSteps": {
"heading": "步數"
}
},
"unifiedCanvas": {
"undo": "復原",
"mask": "遮罩",
"eraser": "橡皮擦",
"antialiasing": "抗鋸齒",
"redo": "重做",
"layer": "圖層",
"accept": "接受",
"brush": "刷子",
"move": "移動",
"brushSize": "大小"
},
"nodes": {
"workflowName": "名稱",
"notes": "註釋",
"workflowVersion": "版本",
"workflowNotes": "註釋",
"executionStateError": "錯誤",
"unableToUpdateNodes_other": "無法更新 {{count}} 個節點",
"integer": "整數",
"workflow": "工作流程",
"enum": "枚舉",
"edit": "編輯",
"string": "字串",
"workflowTags": "標籤",
"node": "節點",
"boolean": "布林值",
"workflowAuthor": "作者",
"version": "版本",
"executionStateCompleted": "已完成",
"edge": "邊緣",
"versionUnknown": " 版本未知"
},
"sdxl": {
"steps": "步數",
"loading": "載入中…",
"refiner": "精煉器"
},
"gallery": {
"copy": "複製",
"download": "下載",
"loading": "載入中"
},
"ui": {
"tabs": {
"models": "模型",
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
"queue": "佇列"
}
},
"models": {
"loading": "載入中"
},
"workflows": {
"name": "名稱"
} }
} }

View File

@ -22,7 +22,13 @@ import type { BatchConfig } from 'services/api/types';
import { socketInvocationComplete } from 'services/events/actions'; import { socketInvocationComplete } from 'services/events/actions';
import { assert } from 'tsafe'; import { assert } from 'tsafe';
const matcher = isAnyOf(caLayerImageChanged, caLayerProcessorConfigChanged, caLayerModelChanged, caLayerRecalled); const matcher = isAnyOf(
caLayerImageChanged,
caLayerProcessedImageChanged,
caLayerProcessorConfigChanged,
caLayerModelChanged,
caLayerRecalled
);
const DEBOUNCE_MS = 300; const DEBOUNCE_MS = 300;
const log = logger('session'); const log = logger('session');
@ -73,9 +79,10 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni
const originalConfig = originalLayer?.controlAdapter.processorConfig; const originalConfig = originalLayer?.controlAdapter.processorConfig;
const image = layer.controlAdapter.image; const image = layer.controlAdapter.image;
const processedImage = layer.controlAdapter.processedImage;
const config = layer.controlAdapter.processorConfig; const config = layer.controlAdapter.processorConfig;
if (isEqual(config, originalConfig) && isEqual(image, originalImage)) { if (isEqual(config, originalConfig) && isEqual(image, originalImage) && processedImage) {
// Neither config nor image have changed, we can bail // Neither config nor image have changed, we can bail
return; return;
} }

View File

@ -4,6 +4,7 @@ import {
caLayerControlModeChanged, caLayerControlModeChanged,
caLayerImageChanged, caLayerImageChanged,
caLayerModelChanged, caLayerModelChanged,
caLayerProcessedImageChanged,
caLayerProcessorConfigChanged, caLayerProcessorConfigChanged,
caOrIPALayerBeginEndStepPctChanged, caOrIPALayerBeginEndStepPctChanged,
caOrIPALayerWeightChanged, caOrIPALayerWeightChanged,
@ -84,6 +85,14 @@ export const CALayerControlAdapterWrapper = memo(({ layerId }: Props) => {
[dispatch, layerId] [dispatch, layerId]
); );
const onErrorLoadingImage = useCallback(() => {
dispatch(caLayerImageChanged({ layerId, imageDTO: null }));
}, [dispatch, layerId]);
const onErrorLoadingProcessedImage = useCallback(() => {
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO: null }));
}, [dispatch, layerId]);
const droppableData = useMemo<CALayerImageDropData>( const droppableData = useMemo<CALayerImageDropData>(
() => ({ () => ({
actionType: 'SET_CA_LAYER_IMAGE', actionType: 'SET_CA_LAYER_IMAGE',
@ -114,6 +123,8 @@ export const CALayerControlAdapterWrapper = memo(({ layerId }: Props) => {
onChangeImage={onChangeImage} onChangeImage={onChangeImage}
droppableData={droppableData} droppableData={droppableData}
postUploadAction={postUploadAction} postUploadAction={postUploadAction}
onErrorLoadingImage={onErrorLoadingImage}
onErrorLoadingProcessedImage={onErrorLoadingProcessedImage}
/> />
); );
}); });

View File

@ -28,6 +28,8 @@ type Props = {
onChangeProcessorConfig: (processorConfig: ProcessorConfig | null) => void; onChangeProcessorConfig: (processorConfig: ProcessorConfig | null) => void;
onChangeModel: (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => void; onChangeModel: (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => void;
onChangeImage: (imageDTO: ImageDTO | null) => void; onChangeImage: (imageDTO: ImageDTO | null) => void;
onErrorLoadingImage: () => void;
onErrorLoadingProcessedImage: () => void;
droppableData: TypesafeDroppableData; droppableData: TypesafeDroppableData;
postUploadAction: PostUploadAction; postUploadAction: PostUploadAction;
}; };
@ -41,6 +43,8 @@ export const ControlAdapter = memo(
onChangeProcessorConfig, onChangeProcessorConfig,
onChangeModel, onChangeModel,
onChangeImage, onChangeImage,
onErrorLoadingImage,
onErrorLoadingProcessedImage,
droppableData, droppableData,
postUploadAction, postUploadAction,
}: Props) => { }: Props) => {
@ -91,6 +95,8 @@ export const ControlAdapter = memo(
onChangeImage={onChangeImage} onChangeImage={onChangeImage}
droppableData={droppableData} droppableData={droppableData}
postUploadAction={postUploadAction} postUploadAction={postUploadAction}
onErrorLoadingImage={onErrorLoadingImage}
onErrorLoadingProcessedImage={onErrorLoadingProcessedImage}
/> />
</Flex> </Flex>
</Flex> </Flex>

View File

@ -27,10 +27,19 @@ type Props = {
onChangeImage: (imageDTO: ImageDTO | null) => void; onChangeImage: (imageDTO: ImageDTO | null) => void;
droppableData: TypesafeDroppableData; droppableData: TypesafeDroppableData;
postUploadAction: PostUploadAction; postUploadAction: PostUploadAction;
onErrorLoadingImage: () => void;
onErrorLoadingProcessedImage: () => void;
}; };
export const ControlAdapterImagePreview = memo( export const ControlAdapterImagePreview = memo(
({ controlAdapter, onChangeImage, droppableData, postUploadAction }: Props) => { ({
controlAdapter,
onChangeImage,
droppableData,
postUploadAction,
onErrorLoadingImage,
onErrorLoadingProcessedImage,
}: Props) => {
const { t } = useTranslation(); const { t } = useTranslation();
const dispatch = useAppDispatch(); const dispatch = useAppDispatch();
const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId); const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
@ -128,10 +137,23 @@ export const ControlAdapterImagePreview = memo(
controlAdapter.processorConfig !== null; controlAdapter.processorConfig !== null;
useEffect(() => { useEffect(() => {
if (isConnected && (isErrorControlImage || isErrorProcessedControlImage)) { if (!isConnected) {
handleResetControlImage(); return;
} }
}, [handleResetControlImage, isConnected, isErrorControlImage, isErrorProcessedControlImage]); if (isErrorControlImage) {
onErrorLoadingImage();
}
if (isErrorProcessedControlImage) {
onErrorLoadingProcessedImage();
}
}, [
handleResetControlImage,
isConnected,
isErrorControlImage,
isErrorProcessedControlImage,
onErrorLoadingImage,
onErrorLoadingProcessedImage,
]);
return ( return (
<Flex <Flex
@ -167,6 +189,7 @@ export const ControlAdapterImagePreview = memo(
droppableData={droppableData} droppableData={droppableData}
imageDTO={processedControlImage} imageDTO={processedControlImage}
isUploadDisabled={true} isUploadDisabled={true}
onError={handleResetControlImage}
/> />
</Box> </Box>

View File

@ -4,20 +4,35 @@ import { createSelector } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger'; import { logger } from 'app/logging/logger';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useMouseEvents } from 'features/controlLayers/hooks/mouseEventHooks'; import { BRUSH_SPACING_PCT, MAX_BRUSH_SPACING_PX, MIN_BRUSH_SPACING_PX } from 'features/controlLayers/konva/constants';
import { setStageEventHandlers } from 'features/controlLayers/konva/events';
import { debouncedRenderers, renderers as normalRenderers } from 'features/controlLayers/konva/renderers';
import { import {
$brushSize,
$brushSpacingPx,
$isDrawing,
$lastAddedPoint,
$lastCursorPos, $lastCursorPos,
$lastMouseDownPos, $lastMouseDownPos,
$selectedLayerId,
$selectedLayerType,
$shouldInvertBrushSizeScrollDirection,
$tool, $tool,
brushSizeChanged,
isRegionalGuidanceLayer, isRegionalGuidanceLayer,
layerBboxChanged, layerBboxChanged,
layerTranslated, layerTranslated,
rgLayerLineAdded,
rgLayerPointsAdded,
rgLayerRectAdded,
selectControlLayersSlice, selectControlLayersSlice,
} from 'features/controlLayers/store/controlLayersSlice'; } from 'features/controlLayers/store/controlLayersSlice';
import { debouncedRenderers, renderers as normalRenderers } from 'features/controlLayers/util/renderers'; import type { AddLineArg, AddPointToLineArg, AddRectArg } from 'features/controlLayers/store/types';
import Konva from 'konva'; import Konva from 'konva';
import type { IRect } from 'konva/lib/types'; import type { IRect } from 'konva/lib/types';
import { clamp } from 'lodash-es';
import { memo, useCallback, useLayoutEffect, useMemo, useState } from 'react'; import { memo, useCallback, useLayoutEffect, useMemo, useState } from 'react';
import { getImageDTO } from 'services/api/endpoints/images';
import { useDevicePixelRatio } from 'use-device-pixel-ratio'; import { useDevicePixelRatio } from 'use-device-pixel-ratio';
import { v4 as uuidv4 } from 'uuid'; import { v4 as uuidv4 } from 'uuid';
@ -47,7 +62,6 @@ const useStageRenderer = (
const dispatch = useAppDispatch(); const dispatch = useAppDispatch();
const state = useAppSelector((s) => s.controlLayers.present); const state = useAppSelector((s) => s.controlLayers.present);
const tool = useStore($tool); const tool = useStore($tool);
const mouseEventHandlers = useMouseEvents();
const lastCursorPos = useStore($lastCursorPos); const lastCursorPos = useStore($lastCursorPos);
const lastMouseDownPos = useStore($lastMouseDownPos); const lastMouseDownPos = useStore($lastMouseDownPos);
const selectedLayerIdColor = useAppSelector(selectSelectedLayerColor); const selectedLayerIdColor = useAppSelector(selectSelectedLayerColor);
@ -56,6 +70,26 @@ const useStageRenderer = (
const layerCount = useMemo(() => state.layers.length, [state.layers]); const layerCount = useMemo(() => state.layers.length, [state.layers]);
const renderers = useMemo(() => (asPreview ? debouncedRenderers : normalRenderers), [asPreview]); const renderers = useMemo(() => (asPreview ? debouncedRenderers : normalRenderers), [asPreview]);
const dpr = useDevicePixelRatio({ round: false }); const dpr = useDevicePixelRatio({ round: false });
const shouldInvertBrushSizeScrollDirection = useAppSelector((s) => s.canvas.shouldInvertBrushSizeScrollDirection);
const brushSpacingPx = useMemo(
() => clamp(state.brushSize / BRUSH_SPACING_PCT, MIN_BRUSH_SPACING_PX, MAX_BRUSH_SPACING_PX),
[state.brushSize]
);
useLayoutEffect(() => {
$brushSize.set(state.brushSize);
$brushSpacingPx.set(brushSpacingPx);
$selectedLayerId.set(state.selectedLayerId);
$selectedLayerType.set(selectedLayerType);
$shouldInvertBrushSizeScrollDirection.set(shouldInvertBrushSizeScrollDirection);
}, [
brushSpacingPx,
selectedLayerIdColor,
selectedLayerType,
shouldInvertBrushSizeScrollDirection,
state.brushSize,
state.selectedLayerId,
]);
const onLayerPosChanged = useCallback( const onLayerPosChanged = useCallback(
(layerId: string, x: number, y: number) => { (layerId: string, x: number, y: number) => {
@ -71,6 +105,31 @@ const useStageRenderer = (
[dispatch] [dispatch]
); );
const onRGLayerLineAdded = useCallback(
(arg: AddLineArg) => {
dispatch(rgLayerLineAdded(arg));
},
[dispatch]
);
const onRGLayerPointAddedToLine = useCallback(
(arg: AddPointToLineArg) => {
dispatch(rgLayerPointsAdded(arg));
},
[dispatch]
);
const onRGLayerRectAdded = useCallback(
(arg: AddRectArg) => {
dispatch(rgLayerRectAdded(arg));
},
[dispatch]
);
const onBrushSizeChanged = useCallback(
(size: number) => {
dispatch(brushSizeChanged(size));
},
[dispatch]
);
useLayoutEffect(() => { useLayoutEffect(() => {
log.trace('Initializing stage'); log.trace('Initializing stage');
if (!container) { if (!container) {
@ -88,21 +147,29 @@ const useStageRenderer = (
if (asPreview) { if (asPreview) {
return; return;
} }
stage.on('mousedown', mouseEventHandlers.onMouseDown); const cleanup = setStageEventHandlers({
stage.on('mouseup', mouseEventHandlers.onMouseUp); stage,
stage.on('mousemove', mouseEventHandlers.onMouseMove); $tool,
stage.on('mouseleave', mouseEventHandlers.onMouseLeave); $isDrawing,
stage.on('wheel', mouseEventHandlers.onMouseWheel); $lastMouseDownPos,
$lastCursorPos,
$lastAddedPoint,
$brushSize,
$brushSpacingPx,
$selectedLayerId,
$selectedLayerType,
$shouldInvertBrushSizeScrollDirection,
onRGLayerLineAdded,
onRGLayerPointAddedToLine,
onRGLayerRectAdded,
onBrushSizeChanged,
});
return () => { return () => {
log.trace('Cleaning up stage listeners'); log.trace('Removing stage listeners');
stage.off('mousedown', mouseEventHandlers.onMouseDown); cleanup();
stage.off('mouseup', mouseEventHandlers.onMouseUp);
stage.off('mousemove', mouseEventHandlers.onMouseMove);
stage.off('mouseleave', mouseEventHandlers.onMouseLeave);
stage.off('wheel', mouseEventHandlers.onMouseWheel);
}; };
}, [stage, asPreview, mouseEventHandlers]); }, [asPreview, onBrushSizeChanged, onRGLayerLineAdded, onRGLayerPointAddedToLine, onRGLayerRectAdded, stage]);
useLayoutEffect(() => { useLayoutEffect(() => {
log.trace('Updating stage dimensions'); log.trace('Updating stage dimensions');
@ -160,7 +227,7 @@ const useStageRenderer = (
useLayoutEffect(() => { useLayoutEffect(() => {
log.trace('Rendering layers'); log.trace('Rendering layers');
renderers.renderLayers(stage, state.layers, state.globalMaskLayerOpacity, tool, onLayerPosChanged); renderers.renderLayers(stage, state.layers, state.globalMaskLayerOpacity, tool, getImageDTO, onLayerPosChanged);
}, [ }, [
stage, stage,
state.layers, state.layers,

View File

@ -1,233 +0,0 @@
import { $ctrl, $meta } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { calculateNewBrushSize } from 'features/canvas/hooks/useCanvasZoom';
import {
$isDrawing,
$lastCursorPos,
$lastMouseDownPos,
$tool,
brushSizeChanged,
rgLayerLineAdded,
rgLayerPointsAdded,
rgLayerRectAdded,
} from 'features/controlLayers/store/controlLayersSlice';
import type Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
import type { Vector2d } from 'konva/lib/types';
import { clamp } from 'lodash-es';
import { useCallback, useMemo, useRef } from 'react';
const getIsFocused = (stage: Konva.Stage) => {
return stage.container().contains(document.activeElement);
};
const getIsMouseDown = (e: KonvaEventObject<MouseEvent>) => e.evt.buttons === 1;
const SNAP_PX = 10;
export const snapPosToStage = (pos: Vector2d, stage: Konva.Stage) => {
const snappedPos = { ...pos };
// Get the normalized threshold for snapping to the edge of the stage
const thresholdX = SNAP_PX / stage.scaleX();
const thresholdY = SNAP_PX / stage.scaleY();
const stageWidth = stage.width() / stage.scaleX();
const stageHeight = stage.height() / stage.scaleY();
// Snap to the edge of the stage if within threshold
if (pos.x - thresholdX < 0) {
snappedPos.x = 0;
} else if (pos.x + thresholdX > stageWidth) {
snappedPos.x = Math.floor(stageWidth);
}
if (pos.y - thresholdY < 0) {
snappedPos.y = 0;
} else if (pos.y + thresholdY > stageHeight) {
snappedPos.y = Math.floor(stageHeight);
}
return snappedPos;
};
export const getScaledFlooredCursorPosition = (stage: Konva.Stage) => {
const pointerPosition = stage.getPointerPosition();
const stageTransform = stage.getAbsoluteTransform().copy();
if (!pointerPosition) {
return;
}
const scaledCursorPosition = stageTransform.invert().point(pointerPosition);
return {
x: Math.floor(scaledCursorPosition.x),
y: Math.floor(scaledCursorPosition.y),
};
};
const syncCursorPos = (stage: Konva.Stage): Vector2d | null => {
const pos = getScaledFlooredCursorPosition(stage);
if (!pos) {
return null;
}
$lastCursorPos.set(pos);
return pos;
};
const BRUSH_SPACING_PCT = 10;
const MIN_BRUSH_SPACING_PX = 5;
const MAX_BRUSH_SPACING_PX = 15;
export const useMouseEvents = () => {
const dispatch = useAppDispatch();
const selectedLayerId = useAppSelector((s) => s.controlLayers.present.selectedLayerId);
const selectedLayerType = useAppSelector((s) => {
const selectedLayer = s.controlLayers.present.layers.find((l) => l.id === s.controlLayers.present.selectedLayerId);
if (!selectedLayer) {
return null;
}
return selectedLayer.type;
});
const tool = useStore($tool);
const lastCursorPosRef = useRef<[number, number] | null>(null);
const shouldInvertBrushSizeScrollDirection = useAppSelector((s) => s.canvas.shouldInvertBrushSizeScrollDirection);
const brushSize = useAppSelector((s) => s.controlLayers.present.brushSize);
const brushSpacingPx = useMemo(
() => clamp(brushSize / BRUSH_SPACING_PCT, MIN_BRUSH_SPACING_PX, MAX_BRUSH_SPACING_PX),
[brushSize]
);
const onMouseDown = useCallback(
(e: KonvaEventObject<MouseEvent>) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = syncCursorPos(stage);
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (tool === 'brush' || tool === 'eraser') {
dispatch(
rgLayerLineAdded({
layerId: selectedLayerId,
points: [pos.x, pos.y, pos.x, pos.y],
tool,
})
);
$isDrawing.set(true);
$lastMouseDownPos.set(pos);
} else if (tool === 'rect') {
$lastMouseDownPos.set(snapPosToStage(pos, stage));
}
},
[dispatch, selectedLayerId, selectedLayerType, tool]
);
const onMouseUp = useCallback(
(e: KonvaEventObject<MouseEvent>) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = $lastCursorPos.get();
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
const lastPos = $lastMouseDownPos.get();
const tool = $tool.get();
if (lastPos && selectedLayerId && tool === 'rect') {
const snappedPos = snapPosToStage(pos, stage);
dispatch(
rgLayerRectAdded({
layerId: selectedLayerId,
rect: {
x: Math.min(snappedPos.x, lastPos.x),
y: Math.min(snappedPos.y, lastPos.y),
width: Math.abs(snappedPos.x - lastPos.x),
height: Math.abs(snappedPos.y - lastPos.y),
},
})
);
}
$isDrawing.set(false);
$lastMouseDownPos.set(null);
},
[dispatch, selectedLayerId, selectedLayerType]
);
const onMouseMove = useCallback(
(e: KonvaEventObject<MouseEvent>) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = syncCursorPos(stage);
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (getIsFocused(stage) && getIsMouseDown(e) && (tool === 'brush' || tool === 'eraser')) {
if ($isDrawing.get()) {
// Continue the last line
if (lastCursorPosRef.current) {
// Dispatching redux events impacts perf substantially - using brush spacing keeps dispatches to a reasonable number
if (Math.hypot(lastCursorPosRef.current[0] - pos.x, lastCursorPosRef.current[1] - pos.y) < brushSpacingPx) {
return;
}
}
lastCursorPosRef.current = [pos.x, pos.y];
dispatch(rgLayerPointsAdded({ layerId: selectedLayerId, point: lastCursorPosRef.current }));
} else {
// Start a new line
dispatch(rgLayerLineAdded({ layerId: selectedLayerId, points: [pos.x, pos.y, pos.x, pos.y], tool }));
}
$isDrawing.set(true);
}
},
[brushSpacingPx, dispatch, selectedLayerId, selectedLayerType, tool]
);
const onMouseLeave = useCallback(
(e: KonvaEventObject<MouseEvent>) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = syncCursorPos(stage);
$isDrawing.set(false);
$lastCursorPos.set(null);
$lastMouseDownPos.set(null);
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (getIsFocused(stage) && getIsMouseDown(e) && (tool === 'brush' || tool === 'eraser')) {
dispatch(rgLayerPointsAdded({ layerId: selectedLayerId, point: [pos.x, pos.y] }));
}
},
[selectedLayerId, selectedLayerType, tool, dispatch]
);
const onMouseWheel = useCallback(
(e: KonvaEventObject<WheelEvent>) => {
e.evt.preventDefault();
if (selectedLayerType !== 'regional_guidance_layer' || (tool !== 'brush' && tool !== 'eraser')) {
return;
}
// Check whether the ctrl key is pressed so that brush size can be controlled using ctrl + scroll up/down
// Invert the delta if the property is set to true
let delta = e.evt.deltaY;
if (shouldInvertBrushSizeScrollDirection) {
delta = -delta;
}
if ($ctrl.get() || $meta.get()) {
dispatch(brushSizeChanged(calculateNewBrushSize(brushSize, delta)));
}
},
[selectedLayerType, tool, shouldInvertBrushSizeScrollDirection, dispatch, brushSize]
);
const handlers = useMemo(
() => ({ onMouseDown, onMouseUp, onMouseMove, onMouseLeave, onMouseWheel }),
[onMouseDown, onMouseUp, onMouseMove, onMouseLeave, onMouseWheel]
);
return handlers;
};

View File

@ -1,11 +1,10 @@
import openBase64ImageInTab from 'common/util/openBase64ImageInTab'; import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { imageDataToDataURL } from 'features/canvas/util/blobToDataURL'; import { imageDataToDataURL } from 'features/canvas/util/blobToDataURL';
import { RG_LAYER_OBJECT_GROUP_NAME } from 'features/controlLayers/store/controlLayersSlice';
import Konva from 'konva'; import Konva from 'konva';
import type { IRect } from 'konva/lib/types'; import type { IRect } from 'konva/lib/types';
import { assert } from 'tsafe'; import { assert } from 'tsafe';
const GET_CLIENT_RECT_CONFIG = { skipTransform: true }; import { RG_LAYER_OBJECT_GROUP_NAME } from './naming';
type Extents = { type Extents = {
minX: number; minX: number;
@ -14,10 +13,13 @@ type Extents = {
maxY: number; maxY: number;
}; };
const GET_CLIENT_RECT_CONFIG = { skipTransform: true };
//#region getImageDataBbox
/** /**
* Get the bounding box of an image. * Get the bounding box of an image.
* @param imageData The ImageData object to get the bounding box of. * @param imageData The ImageData object to get the bounding box of.
* @returns The minimum and maximum x and y values of the image's bounding box. * @returns The minimum and maximum x and y values of the image's bounding box, or null if the image has no pixels.
*/ */
const getImageDataBbox = (imageData: ImageData): Extents | null => { const getImageDataBbox = (imageData: ImageData): Extents | null => {
const { data, width, height } = imageData; const { data, width, height } = imageData;
@ -51,7 +53,9 @@ const getImageDataBbox = (imageData: ImageData): Extents | null => {
return isEmpty ? null : { minX, minY, maxX, maxY }; return isEmpty ? null : { minX, minY, maxX, maxY };
}; };
//#endregion
//#region getIsolatedRGLayerClone
/** /**
* Clones a regional guidance konva layer onto an offscreen stage/canvas. This allows the pixel data for a given layer * Clones a regional guidance konva layer onto an offscreen stage/canvas. This allows the pixel data for a given layer
* to be captured, manipulated or analyzed without interference from other layers. * to be captured, manipulated or analyzed without interference from other layers.
@ -88,7 +92,9 @@ const getIsolatedRGLayerClone = (layer: Konva.Layer): { stageClone: Konva.Stage;
return { stageClone, layerClone }; return { stageClone, layerClone };
}; };
//#endregion
//#region getLayerBboxPixels
/** /**
* Get the bounding box of a regional prompt konva layer. This function has special handling for regional prompt layers. * Get the bounding box of a regional prompt konva layer. This function has special handling for regional prompt layers.
* @param layer The konva layer to get the bounding box of. * @param layer The konva layer to get the bounding box of.
@ -137,7 +143,9 @@ export const getLayerBboxPixels = (layer: Konva.Layer, preview: boolean = false)
return correctedLayerBbox; return correctedLayerBbox;
}; };
//#endregion
//#region getLayerBboxFast
/** /**
* Get the bounding box of a konva layer. This function is faster than `getLayerBboxPixels` but less accurate. It * Get the bounding box of a konva layer. This function is faster than `getLayerBboxPixels` but less accurate. It
* should only be used when there are no eraser strokes or shapes in the layer. * should only be used when there are no eraser strokes or shapes in the layer.
@ -153,3 +161,4 @@ export const getLayerBboxFast = (layer: Konva.Layer): IRect => {
height: Math.floor(bbox.height), height: Math.floor(bbox.height),
}; };
}; };
//#endregion
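
A minimal sketch, not part of the diff, of how a caller might choose between the two bbox helpers per the doc comments above; the `getBbox` helper and the `hasEraserStrokes` flag are hypothetical names used only for illustration:

```ts
import type Konva from 'konva';
import type { IRect } from 'konva/lib/types';

import { getLayerBboxFast, getLayerBboxPixels } from 'features/controlLayers/konva/bbox';

// Hypothetical helper: use the accurate pixel-based bbox only when the layer may
// contain eraser strokes or shapes, otherwise fall back to the cheap Konva-native bbox.
const getBbox = (layer: Konva.Layer, hasEraserStrokes: boolean): IRect | null =>
  hasEraserStrokes ? getLayerBboxPixels(layer) : getLayerBboxFast(layer);
```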

View File

@ -0,0 +1,36 @@
/**
* A transparency checker pattern image.
* This is invokeai/frontend/web/public/assets/images/transparent_bg.png as a dataURL
*/
export const TRANSPARENCY_CHECKER_PATTERN =
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAAEsmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS41LjAiPgogPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iCiAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyIKICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIgogICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgZXhpZjpQaXhlbFhEaW1lbnNpb249IjIwIgogICBleGlmOlBpeGVsWURpbWVuc2lvbj0iMjAiCiAgIGV4aWY6Q29sb3JTcGFjZT0iMSIKICAgdGlmZjpJbWFnZVdpZHRoPSIyMCIKICAgdGlmZjpJbWFnZUxlbmd0aD0iMjAiCiAgIHRpZmY6UmVzb2x1dGlvblVuaXQ9IjIiCiAgIHRpZmY6WFJlc29sdXRpb249IjMwMC8xIgogICB0aWZmOllSZXNvbHV0aW9uPSIzMDAvMSIKICAgcGhvdG9zaG9wOkNvbG9yTW9kZT0iMyIKICAgcGhvdG9zaG9wOklDQ1Byb2ZpbGU9InNSR0IgSUVDNjE5NjYtMi4xIgogICB4bXA6TW9kaWZ5RGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCIKICAgeG1wOk1ldGFkYXRhRGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCI+CiAgIDx4bXBNTTpIaXN0b3J5PgogICAgPHJkZjpTZXE+CiAgICAgPHJkZjpsaQogICAgICBzdEV2dDphY3Rpb249InByb2R1Y2VkIgogICAgICBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZmZpbml0eSBQaG90byAxLjEwLjgiCiAgICAgIHN0RXZ0OndoZW49IjIwMjQtMDQtMjNUMDg6MjA6NDcrMTA6MDAiLz4KICAgIDwvcmRmOlNlcT4KICAgPC94bXBNTTpIaXN0b3J5PgogIDwvcmRmOkRlc2NyaXB0aW9uPgogPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KPD94cGFja2V0IGVuZD0iciI/Pn9pdVgAAAGBaUNDUHNSR0IgSUVDNjE5NjYtMi4xAAAokXWR3yuDURjHP5uJmKghFy6WxpVpqMWNMgm1tGbKr5vt3S+1d3t73y3JrXKrKHHj1wV/AbfKtVJESq53TdywXs9rakv2nJ7zfM73nOfpnOeAPZJRVMPhAzWb18NTAffC4pK7oYiDTjpw4YgqhjYeCgWpaR8P2Kx457Vq1T73rzXHE4YCtkbhMUXT88LTwsG1vGbxrnC7ko7Ghc+F+3W5oPC9pcfKXLQ4VeYvi/VIeALsbcLuVBXHqlhJ66qwvByPmikov/exXuJMZOfnJPaId2MQZooAbmaYZAI/g4zK7MfLEAOyoka+7yd/lpzkKjJrrKOzSoo0efpFLUj1hMSk6AkZGdat/v/tq5EcHipXdwag/sU033qhYQdK26b5eWyapROoe4arbCU/dwQj76JvVzTPIbRuwsV1RYvtweUWdD1pUT36I9WJ25NJeD2DlkVw3ULTcrlnv/ucPkJkQ77qBvYPoE/Ot658AxagZ8FoS/a7AAAACXBIWXMAAC4jAAAuIwF4pT92AAAAL0lEQVQ4jWM8ffo0A25gYmKCR5YJjxxBMKp5ZGhm/P//Px7pM2fO0MrmUc0jQzMAB2EIhZC3pUYAAAAASUVORK5CYII=';
/**
* The color of a bounding box stroke when its object is selected.
*/
export const BBOX_SELECTED_STROKE = 'rgba(78, 190, 255, 1)';
/**
* The inner border color for the brush preview.
*/
export const BRUSH_BORDER_INNER_COLOR = 'rgba(0,0,0,1)';
/**
* The outer border color for the brush preview.
*/
export const BRUSH_BORDER_OUTER_COLOR = 'rgba(255,255,255,0.8)';
/**
* The target spacing of individual points of brush strokes, as a percentage of the brush size.
*/
export const BRUSH_SPACING_PCT = 10;
/**
* The minimum brush spacing in pixels.
*/
export const MIN_BRUSH_SPACING_PX = 5;
/**
* The maximum brush spacing in pixels.
*/
export const MAX_BRUSH_SPACING_PX = 15;
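
As a quick illustration, not part of the diff, these spacing constants combine with the current brush size exactly as in the stage renderer above; `getBrushSpacingPx` is a hypothetical helper name:

```ts
import { clamp } from 'lodash-es';

import { BRUSH_SPACING_PCT, MAX_BRUSH_SPACING_PX, MIN_BRUSH_SPACING_PX } from 'features/controlLayers/konva/constants';

// Convert the brush size (px) into the spacing between recorded stroke points (px),
// clamped so very small or very large brushes still produce a sensible point density.
const getBrushSpacingPx = (brushSize: number): number =>
  clamp(brushSize / BRUSH_SPACING_PCT, MIN_BRUSH_SPACING_PX, MAX_BRUSH_SPACING_PX);

// e.g. a 100px brush yields 10px spacing; a 300px brush is clamped to 15px.
```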

View File

@ -0,0 +1,201 @@
import { calculateNewBrushSize } from 'features/canvas/hooks/useCanvasZoom';
import {
getIsFocused,
getIsMouseDown,
getScaledFlooredCursorPosition,
snapPosToStage,
} from 'features/controlLayers/konva/util';
import type { AddLineArg, AddPointToLineArg, AddRectArg, Layer, Tool } from 'features/controlLayers/store/types';
import type Konva from 'konva';
import type { Vector2d } from 'konva/lib/types';
import type { WritableAtom } from 'nanostores';
import { TOOL_PREVIEW_LAYER_ID } from './naming';
type SetStageEventHandlersArg = {
stage: Konva.Stage;
$tool: WritableAtom<Tool>;
$isDrawing: WritableAtom<boolean>;
$lastMouseDownPos: WritableAtom<Vector2d | null>;
$lastCursorPos: WritableAtom<Vector2d | null>;
$lastAddedPoint: WritableAtom<Vector2d | null>;
$brushSize: WritableAtom<number>;
$brushSpacingPx: WritableAtom<number>;
$selectedLayerId: WritableAtom<string | null>;
$selectedLayerType: WritableAtom<Layer['type'] | null>;
$shouldInvertBrushSizeScrollDirection: WritableAtom<boolean>;
onRGLayerLineAdded: (arg: AddLineArg) => void;
onRGLayerPointAddedToLine: (arg: AddPointToLineArg) => void;
onRGLayerRectAdded: (arg: AddRectArg) => void;
onBrushSizeChanged: (size: number) => void;
};
const syncCursorPos = (stage: Konva.Stage, $lastCursorPos: WritableAtom<Vector2d | null>) => {
const pos = getScaledFlooredCursorPosition(stage);
if (!pos) {
return null;
}
$lastCursorPos.set(pos);
return pos;
};
export const setStageEventHandlers = ({
stage,
$tool,
$isDrawing,
$lastMouseDownPos,
$lastCursorPos,
$lastAddedPoint,
$brushSize,
$brushSpacingPx,
$selectedLayerId,
$selectedLayerType,
$shouldInvertBrushSizeScrollDirection,
onRGLayerLineAdded,
onRGLayerPointAddedToLine,
onRGLayerRectAdded,
onBrushSizeChanged,
}: SetStageEventHandlersArg): (() => void) => {
stage.on('mouseenter', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const tool = $tool.get();
stage.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.visible(tool === 'brush' || tool === 'eraser');
});
stage.on('mousedown', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const tool = $tool.get();
const pos = syncCursorPos(stage, $lastCursorPos);
const selectedLayerId = $selectedLayerId.get();
const selectedLayerType = $selectedLayerType.get();
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (tool === 'brush' || tool === 'eraser') {
onRGLayerLineAdded({
layerId: selectedLayerId,
points: [pos.x, pos.y, pos.x, pos.y],
tool,
});
$isDrawing.set(true);
$lastMouseDownPos.set(pos);
} else if (tool === 'rect') {
$lastMouseDownPos.set(snapPosToStage(pos, stage));
}
});
stage.on('mouseup', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = $lastCursorPos.get();
const selectedLayerId = $selectedLayerId.get();
const selectedLayerType = $selectedLayerType.get();
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
const lastPos = $lastMouseDownPos.get();
const tool = $tool.get();
if (lastPos && selectedLayerId && tool === 'rect') {
const snappedPos = snapPosToStage(pos, stage);
onRGLayerRectAdded({
layerId: selectedLayerId,
rect: {
x: Math.min(snappedPos.x, lastPos.x),
y: Math.min(snappedPos.y, lastPos.y),
width: Math.abs(snappedPos.x - lastPos.x),
height: Math.abs(snappedPos.y - lastPos.y),
},
});
}
$isDrawing.set(false);
$lastMouseDownPos.set(null);
});
stage.on('mousemove', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const tool = $tool.get();
const pos = syncCursorPos(stage, $lastCursorPos);
const selectedLayerId = $selectedLayerId.get();
const selectedLayerType = $selectedLayerType.get();
stage.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.visible(tool === 'brush' || tool === 'eraser');
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (getIsFocused(stage) && getIsMouseDown(e) && (tool === 'brush' || tool === 'eraser')) {
if ($isDrawing.get()) {
// Continue the last line
const lastAddedPoint = $lastAddedPoint.get();
if (lastAddedPoint) {
// Dispatching redux events impacts perf substantially - using brush spacing keeps dispatches to a reasonable number
if (Math.hypot(lastAddedPoint.x - pos.x, lastAddedPoint.y - pos.y) < $brushSpacingPx.get()) {
return;
}
}
$lastAddedPoint.set({ x: pos.x, y: pos.y });
onRGLayerPointAddedToLine({ layerId: selectedLayerId, point: [pos.x, pos.y] });
} else {
// Start a new line
onRGLayerLineAdded({ layerId: selectedLayerId, points: [pos.x, pos.y, pos.x, pos.y], tool });
}
$isDrawing.set(true);
}
});
stage.on('mouseleave', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = syncCursorPos(stage, $lastCursorPos);
$isDrawing.set(false);
$lastCursorPos.set(null);
$lastMouseDownPos.set(null);
const selectedLayerId = $selectedLayerId.get();
const selectedLayerType = $selectedLayerType.get();
const tool = $tool.get();
stage.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.visible(false);
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (getIsFocused(stage) && getIsMouseDown(e) && (tool === 'brush' || tool === 'eraser')) {
onRGLayerPointAddedToLine({ layerId: selectedLayerId, point: [pos.x, pos.y] });
}
});
stage.on('wheel', (e) => {
e.evt.preventDefault();
const selectedLayerType = $selectedLayerType.get();
const tool = $tool.get();
if (selectedLayerType !== 'regional_guidance_layer' || (tool !== 'brush' && tool !== 'eraser')) {
return;
}
// Invert the delta if the property is set to true
let delta = e.evt.deltaY;
if ($shouldInvertBrushSizeScrollDirection.get()) {
delta = -delta;
}
if (e.evt.ctrlKey || e.evt.metaKey) {
onBrushSizeChanged(calculateNewBrushSize($brushSize.get(), delta));
}
});
return () => stage.off('mousedown mouseup mousemove mouseenter mouseleave wheel');
};

View File

@ -0,0 +1,21 @@
/**
* Konva filters
* https://konvajs.org/docs/filters/Custom_Filter.html
*/
/**
* Calculates the lightness (HSL) of a given pixel and sets the alpha channel to that value.
* This is useful for edge maps and other masks, to make the black areas transparent.
* @param imageData The image data to apply the filter to
*/
export const LightnessToAlphaFilter = (imageData: ImageData): void => {
const len = imageData.data.length / 4;
for (let i = 0; i < len; i++) {
const r = imageData.data[i * 4 + 0] as number;
const g = imageData.data[i * 4 + 1] as number;
const b = imageData.data[i * 4 + 2] as number;
const cMin = Math.min(r, g, b);
const cMax = Math.max(r, g, b);
imageData.data[i * 4 + 3] = (cMin + cMax) / 2;
}
};
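
A minimal usage sketch, not part of the diff, assuming the processed control image (e.g. a Canny edge map) is available as an HTML image element; Konva applies custom filters via `filters()` and only runs them on cached nodes:

```ts
import Konva from 'konva';

import { LightnessToAlphaFilter } from 'features/controlLayers/konva/filters';

// Hypothetical source image, e.g. an edge map produced by a control adapter processor.
const imageEl = new Image();
imageEl.src = 'edge_map.png';

imageEl.onload = () => {
  const konvaImage = new Konva.Image({ image: imageEl, listening: false });
  // Custom filters only take effect on cached nodes.
  konvaImage.filters([LightnessToAlphaFilter]);
  konvaImage.cache();
};
```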

View File

@ -0,0 +1,38 @@
/**
* This file contains IDs, names, and ID getters for konva layers and objects.
*/
// IDs for singleton Konva layers and objects
export const TOOL_PREVIEW_LAYER_ID = 'tool_preview_layer';
export const TOOL_PREVIEW_BRUSH_GROUP_ID = 'tool_preview_layer.brush_group';
export const TOOL_PREVIEW_BRUSH_FILL_ID = 'tool_preview_layer.brush_fill';
export const TOOL_PREVIEW_BRUSH_BORDER_INNER_ID = 'tool_preview_layer.brush_border_inner';
export const TOOL_PREVIEW_BRUSH_BORDER_OUTER_ID = 'tool_preview_layer.brush_border_outer';
export const TOOL_PREVIEW_RECT_ID = 'tool_preview_layer.rect';
export const BACKGROUND_LAYER_ID = 'background_layer';
export const BACKGROUND_RECT_ID = 'background_layer.rect';
export const NO_LAYERS_MESSAGE_LAYER_ID = 'no_layers_message';
// Names for Konva layers and objects (comparable to CSS classes)
export const CA_LAYER_NAME = 'control_adapter_layer';
export const CA_LAYER_IMAGE_NAME = 'control_adapter_layer.image';
export const RG_LAYER_NAME = 'regional_guidance_layer';
export const RG_LAYER_LINE_NAME = 'regional_guidance_layer.line';
export const RG_LAYER_OBJECT_GROUP_NAME = 'regional_guidance_layer.object_group';
export const RG_LAYER_RECT_NAME = 'regional_guidance_layer.rect';
export const INITIAL_IMAGE_LAYER_ID = 'singleton_initial_image_layer';
export const INITIAL_IMAGE_LAYER_NAME = 'initial_image_layer';
export const INITIAL_IMAGE_LAYER_IMAGE_NAME = 'initial_image_layer.image';
export const LAYER_BBOX_NAME = 'layer.bbox';
export const COMPOSITING_RECT_NAME = 'compositing-rect';
// Getters for non-singleton layer and object IDs
export const getRGLayerId = (layerId: string) => `${RG_LAYER_NAME}_${layerId}`;
export const getRGLayerLineId = (layerId: string, lineId: string) => `${layerId}.line_${lineId}`;
export const getRGLayerRectId = (layerId: string, lineId: string) => `${layerId}.rect_${lineId}`;
export const getRGLayerObjectGroupId = (layerId: string, groupId: string) => `${layerId}.objectGroup_${groupId}`;
export const getLayerBboxId = (layerId: string) => `${layerId}.bbox`;
export const getCALayerId = (layerId: string) => `control_adapter_layer_${layerId}`;
export const getCALayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIILayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIPALayerId = (layerId: string) => `ip_adapter_layer_${layerId}`;
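
To illustrate the ID/name convention, not part of the diff: IDs select a single node with Konva's `#` selector, while names act like CSS classes and select groups of nodes with the `.` selector. The `stage` below is a hypothetical existing stage:

```ts
import type Konva from 'konva';

import { getRGLayerId, RG_LAYER_NAME } from 'features/controlLayers/konva/naming';

declare const stage: Konva.Stage; // hypothetical, assumed to already exist

// getRGLayerId('abc123') -> 'regional_guidance_layer_abc123' (unique ID)
const layer = stage.findOne<Konva.Layer>(`#${getRGLayerId('abc123')}`);

// Names behave like CSS classes - select every regional guidance layer at once.
const allRGLayers = stage.find<Konva.Layer>(`.${RG_LAYER_NAME}`);
```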

View File

@ -1,8 +1,7 @@
import { getStore } from 'app/store/nanostores/store';
import { rgbaColorToString, rgbColorToString } from 'features/canvas/util/colorToString'; import { rgbaColorToString, rgbColorToString } from 'features/canvas/util/colorToString';
import { getScaledFlooredCursorPosition, snapPosToStage } from 'features/controlLayers/hooks/mouseEventHooks'; import { getLayerBboxFast, getLayerBboxPixels } from 'features/controlLayers/konva/bbox';
import { LightnessToAlphaFilter } from 'features/controlLayers/konva/filters';
import { import {
$tool,
BACKGROUND_LAYER_ID, BACKGROUND_LAYER_ID,
BACKGROUND_RECT_ID, BACKGROUND_RECT_ID,
CA_LAYER_IMAGE_NAME, CA_LAYER_IMAGE_NAME,
@ -14,10 +13,6 @@ import {
getRGLayerObjectGroupId, getRGLayerObjectGroupId,
INITIAL_IMAGE_LAYER_IMAGE_NAME, INITIAL_IMAGE_LAYER_IMAGE_NAME,
INITIAL_IMAGE_LAYER_NAME, INITIAL_IMAGE_LAYER_NAME,
isControlAdapterLayer,
isInitialImageLayer,
isRegionalGuidanceLayer,
isRenderableLayer,
LAYER_BBOX_NAME, LAYER_BBOX_NAME,
NO_LAYERS_MESSAGE_LAYER_ID, NO_LAYERS_MESSAGE_LAYER_ID,
RG_LAYER_LINE_NAME, RG_LAYER_LINE_NAME,
@ -30,6 +25,13 @@ import {
TOOL_PREVIEW_BRUSH_GROUP_ID, TOOL_PREVIEW_BRUSH_GROUP_ID,
TOOL_PREVIEW_LAYER_ID, TOOL_PREVIEW_LAYER_ID,
TOOL_PREVIEW_RECT_ID, TOOL_PREVIEW_RECT_ID,
} from 'features/controlLayers/konva/naming';
import { getScaledFlooredCursorPosition, snapPosToStage } from 'features/controlLayers/konva/util';
import {
isControlAdapterLayer,
isInitialImageLayer,
isRegionalGuidanceLayer,
isRenderableLayer,
} from 'features/controlLayers/store/controlLayersSlice'; } from 'features/controlLayers/store/controlLayersSlice';
import type { import type {
ControlAdapterLayer, ControlAdapterLayer,
@ -40,61 +42,46 @@ import type {
VectorMaskLine, VectorMaskLine,
VectorMaskRect, VectorMaskRect,
} from 'features/controlLayers/store/types'; } from 'features/controlLayers/store/types';
import { getLayerBboxFast, getLayerBboxPixels } from 'features/controlLayers/util/bbox';
import { t } from 'i18next'; import { t } from 'i18next';
import Konva from 'konva'; import Konva from 'konva';
import type { IRect, Vector2d } from 'konva/lib/types'; import type { IRect, Vector2d } from 'konva/lib/types';
import { debounce } from 'lodash-es'; import { debounce } from 'lodash-es';
import type { RgbColor } from 'react-colorful'; import type { RgbColor } from 'react-colorful';
import { imagesApi } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types';
import { assert } from 'tsafe'; import { assert } from 'tsafe';
import { v4 as uuidv4 } from 'uuid'; import { v4 as uuidv4 } from 'uuid';
const BBOX_SELECTED_STROKE = 'rgba(78, 190, 255, 1)'; import {
const BRUSH_BORDER_INNER_COLOR = 'rgba(0,0,0,1)'; BBOX_SELECTED_STROKE,
const BRUSH_BORDER_OUTER_COLOR = 'rgba(255,255,255,0.8)'; BRUSH_BORDER_INNER_COLOR,
// This is invokeai/frontend/web/public/assets/images/transparent_bg.png as a dataURL BRUSH_BORDER_OUTER_COLOR,
export const STAGE_BG_DATAURL = TRANSPARENCY_CHECKER_PATTERN,
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAAEsmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS41LjAiPgogPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iCiAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyIKICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIgogICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgZXhpZjpQaXhlbFhEaW1lbnNpb249IjIwIgogICBleGlmOlBpeGVsWURpbWVuc2lvbj0iMjAiCiAgIGV4aWY6Q29sb3JTcGFjZT0iMSIKICAgdGlmZjpJbWFnZVdpZHRoPSIyMCIKICAgdGlmZjpJbWFnZUxlbmd0aD0iMjAiCiAgIHRpZmY6UmVzb2x1dGlvblVuaXQ9IjIiCiAgIHRpZmY6WFJlc29sdXRpb249IjMwMC8xIgogICB0aWZmOllSZXNvbHV0aW9uPSIzMDAvMSIKICAgcGhvdG9zaG9wOkNvbG9yTW9kZT0iMyIKICAgcGhvdG9zaG9wOklDQ1Byb2ZpbGU9InNSR0IgSUVDNjE5NjYtMi4xIgogICB4bXA6TW9kaWZ5RGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCIKICAgeG1wOk1ldGFkYXRhRGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCI+CiAgIDx4bXBNTTpIaXN0b3J5PgogICAgPHJkZjpTZXE+CiAgICAgPHJkZjpsaQogICAgICBzdEV2dDphY3Rpb249InByb2R1Y2VkIgogICAgICBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZmZpbml0eSBQaG90byAxLjEwLjgiCiAgICAgIHN0RXZ0OndoZW49IjIwMjQtMDQtMjNUMDg6MjA6NDcrMTA6MDAiLz4KICAgIDwvcmRmOlNlcT4KICAgPC94bXBNTTpIaXN0b3J5PgogIDwvcmRmOkRlc2NyaXB0aW9uPgogPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KPD94cGFja2V0IGVuZD0iciI/Pn9pdVgAAAGBaUNDUHNSR0IgSUVDNjE5NjYtMi4xAAAokXWR3yuDURjHP5uJmKghFy6WxpVpqMWNMgm1tGbKr5vt3S+1d3t73y3JrXKrKHHj1wV/AbfKtVJESq53TdywXs9rakv2nJ7zfM73nOfpnOeAPZJRVMPhAzWb18NTAffC4pK7oYiDTjpw4YgqhjYeCgWpaR8P2Kx457Vq1T73rzXHE4YCtkbhMUXT88LTwsG1vGbxrnC7ko7Ghc+F+3W5oPC9pcfKXLQ4VeYvi/VIeALsbcLuVBXHqlhJ66qwvByPmikov/exXuJMZOfnJPaId2MQZooAbmaYZAI/g4zK7MfLEAOyoka+7yd/lpzkKjJrrKOzSoo0efpFLUj1hMSk6AkZGdat/v/tq5EcHipXdwag/sU033qhYQdK26b5eWyapROoe4arbCU/dwQj76JvVzTPIbRuwsV1RYvtweUWdD1pUT36I9WJ25NJeD2DlkVw3ULTcrlnv/ucPkJkQ77qBvYPoE/Ot658AxagZ8FoS/a7AAAACXBIWXMAAC4jAAAuIwF4pT92AAAAL0lEQVQ4jWM8ffo0A25gYmKCR5YJjxxBMKp5ZGhm/P//Px7pM2fO0MrmUc0jQzMAB2EIhZC3pUYAAAAASUVORK5CYII='; } from './constants';
const mapId = (object: { id: string }) => object.id; const mapId = (object: { id: string }): string => object.id;
const selectRenderableLayers = (n: Konva.Node) => /**
* Konva selection callback to select all renderable layers. This includes RG, CA and II layers.
*/
const selectRenderableLayers = (n: Konva.Node): boolean =>
n.name() === RG_LAYER_NAME || n.name() === CA_LAYER_NAME || n.name() === INITIAL_IMAGE_LAYER_NAME; n.name() === RG_LAYER_NAME || n.name() === CA_LAYER_NAME || n.name() === INITIAL_IMAGE_LAYER_NAME;
const selectVectorMaskObjects = (node: Konva.Node) => { /**
* Konva selection callback to select RG mask objects. This includes lines and rects.
*/
const selectVectorMaskObjects = (node: Konva.Node): boolean => {
return node.name() === RG_LAYER_LINE_NAME || node.name() === RG_LAYER_RECT_NAME; return node.name() === RG_LAYER_LINE_NAME || node.name() === RG_LAYER_RECT_NAME;
}; };
/** /**
* Creates the brush preview layer. * Creates the singleton tool preview layer and all its objects.
* @param stage The konva stage to render on. * @param stage The konva stage
* @returns The brush preview layer.
*/ */
const createToolPreviewLayer = (stage: Konva.Stage) => { const createToolPreviewLayer = (stage: Konva.Stage): Konva.Layer => {
// Initialize the brush preview layer & add to the stage // Initialize the brush preview layer & add to the stage
const toolPreviewLayer = new Konva.Layer({ id: TOOL_PREVIEW_LAYER_ID, visible: false, listening: false }); const toolPreviewLayer = new Konva.Layer({ id: TOOL_PREVIEW_LAYER_ID, visible: false, listening: false });
stage.add(toolPreviewLayer); stage.add(toolPreviewLayer);
// Add handlers to show/hide the brush preview layer
stage.on('mousemove', (e) => {
const tool = $tool.get();
e.target
.getStage()
?.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)
?.visible(tool === 'brush' || tool === 'eraser');
});
stage.on('mouseleave', (e) => {
e.target.getStage()?.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.visible(false);
});
stage.on('mouseenter', (e) => {
const tool = $tool.get();
e.target
.getStage()
?.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)
?.visible(tool === 'brush' || tool === 'eraser');
});
// Create the brush preview group & circles // Create the brush preview group & circles
const brushPreviewGroup = new Konva.Group({ id: TOOL_PREVIEW_BRUSH_GROUP_ID }); const brushPreviewGroup = new Konva.Group({ id: TOOL_PREVIEW_BRUSH_GROUP_ID });
const brushPreviewFill = new Konva.Circle({ const brushPreviewFill = new Konva.Circle({
@ -121,7 +108,7 @@ const createToolPreviewLayer = (stage: Konva.Stage) => {
brushPreviewGroup.add(brushPreviewBorderOuter); brushPreviewGroup.add(brushPreviewBorderOuter);
toolPreviewLayer.add(brushPreviewGroup); toolPreviewLayer.add(brushPreviewGroup);
// Create the rect preview // Create the rect preview - this is a rectangle drawn from the last mouse down position to the current cursor position
const rectPreview = new Konva.Rect({ id: TOOL_PREVIEW_RECT_ID, listening: false, stroke: 'white', strokeWidth: 1 }); const rectPreview = new Konva.Rect({ id: TOOL_PREVIEW_RECT_ID, listening: false, stroke: 'white', strokeWidth: 1 });
toolPreviewLayer.add(rectPreview); toolPreviewLayer.add(rectPreview);
@ -130,12 +117,14 @@ const createToolPreviewLayer = (stage: Konva.Stage) => {
/** /**
* Renders the brush preview for the selected tool. * Renders the brush preview for the selected tool.
* @param stage The konva stage to render on. * @param stage The konva stage
* @param tool The selected tool. * @param tool The selected tool
* @param color The selected layer's color. * @param color The selected layer's color
* @param cursorPos The cursor position. * @param selectedLayerType The selected layer's type
* @param lastMouseDownPos The position of the last mouse down event - used for the rect tool. * @param globalMaskLayerOpacity The global mask layer opacity
* @param brushSize The brush size. * @param cursorPos The cursor position
* @param lastMouseDownPos The position of the last mouse down event - used for the rect tool
* @param brushSize The brush size
*/ */
const renderToolPreview = ( const renderToolPreview = (
stage: Konva.Stage, stage: Konva.Stage,
@ -146,7 +135,7 @@ const renderToolPreview = (
cursorPos: Vector2d | null, cursorPos: Vector2d | null,
lastMouseDownPos: Vector2d | null, lastMouseDownPos: Vector2d | null,
brushSize: number brushSize: number
) => { ): void => {
const layerCount = stage.find(selectRenderableLayers).length; const layerCount = stage.find(selectRenderableLayers).length;
// Update the stage's pointer style // Update the stage's pointer style
if (layerCount === 0) { if (layerCount === 0) {
@ -162,7 +151,7 @@ const renderToolPreview = (
// Move rect gets a crosshair // Move rect gets a crosshair
stage.container().style.cursor = 'crosshair'; stage.container().style.cursor = 'crosshair';
} else { } else {
// Else we use the brush preview // Else we hide the native cursor and use the konva-rendered brush preview
stage.container().style.cursor = 'none'; stage.container().style.cursor = 'none';
} }
@ -227,28 +216,29 @@ const renderToolPreview = (
}; };
/** /**
* Creates a vector mask layer. * Creates a regional guidance layer.
* @param stage The konva stage to attach the layer to. * @param stage The konva stage
* @param reduxLayer The redux layer to create the konva layer from. * @param layerState The regional guidance layer state
* @param onLayerPosChanged Callback for when the layer's position changes. * @param onLayerPosChanged Callback for when the layer's position changes
*/ */
const createRegionalGuidanceLayer = ( const createRGLayer = (
stage: Konva.Stage, stage: Konva.Stage,
reduxLayer: RegionalGuidanceLayer, layerState: RegionalGuidanceLayer,
onLayerPosChanged?: (layerId: string, x: number, y: number) => void onLayerPosChanged?: (layerId: string, x: number, y: number) => void
) => { ): Konva.Layer => {
// This layer hasn't been added to the konva state yet // This layer hasn't been added to the konva state yet
const konvaLayer = new Konva.Layer({ const konvaLayer = new Konva.Layer({
id: reduxLayer.id, id: layerState.id,
name: RG_LAYER_NAME, name: RG_LAYER_NAME,
draggable: true, draggable: true,
dragDistance: 0, dragDistance: 0,
}); });
// Create a `dragmove` listener for this layer // When a drag on the layer finishes, update the layer's position in state. During the drag, konva handles changing
// the position - we do not need to call this on the `dragmove` event.
if (onLayerPosChanged) { if (onLayerPosChanged) {
konvaLayer.on('dragend', function (e) { konvaLayer.on('dragend', function (e) {
onLayerPosChanged(reduxLayer.id, Math.floor(e.target.x()), Math.floor(e.target.y())); onLayerPosChanged(layerState.id, Math.floor(e.target.x()), Math.floor(e.target.y()));
}); });
} }
@ -258,7 +248,7 @@ const createRegionalGuidanceLayer = (
if (!cursorPos) { if (!cursorPos) {
return this.getAbsolutePosition(); return this.getAbsolutePosition();
} }
// Prevent the user from dragging the layer out of the stage bounds. // Prevent the user from dragging the layer out of the stage bounds by constraining the cursor position to the stage bounds
if ( if (
cursorPos.x < 0 || cursorPos.x < 0 ||
cursorPos.x > stage.width() / stage.scaleX() || cursorPos.x > stage.width() / stage.scaleX() ||
@ -272,7 +262,7 @@ const createRegionalGuidanceLayer = (
// The object group holds all of the layer's objects (e.g. lines and rects) // The object group holds all of the layer's objects (e.g. lines and rects)
const konvaObjectGroup = new Konva.Group({ const konvaObjectGroup = new Konva.Group({
id: getRGLayerObjectGroupId(reduxLayer.id, uuidv4()), id: getRGLayerObjectGroupId(layerState.id, uuidv4()),
name: RG_LAYER_OBJECT_GROUP_NAME, name: RG_LAYER_OBJECT_GROUP_NAME,
listening: false, listening: false,
}); });
@ -284,47 +274,51 @@ const createRegionalGuidanceLayer = (
}; };
/** /**
* Creates a konva line from a redux vector mask line. * Creates a konva line from a vector mask line.
* @param reduxObject The redux object to create the konva line from. * @param vectorMaskLine The vector mask line state
* @param konvaGroup The konva group to add the line to. * @param layerObjectGroup The konva layer's object group to add the line to
*/ */
const createVectorMaskLine = (reduxObject: VectorMaskLine, konvaGroup: Konva.Group): Konva.Line => { const createVectorMaskLine = (vectorMaskLine: VectorMaskLine, layerObjectGroup: Konva.Group): Konva.Line => {
const vectorMaskLine = new Konva.Line({ const konvaLine = new Konva.Line({
id: reduxObject.id, id: vectorMaskLine.id,
key: reduxObject.id, key: vectorMaskLine.id,
name: RG_LAYER_LINE_NAME, name: RG_LAYER_LINE_NAME,
strokeWidth: reduxObject.strokeWidth, strokeWidth: vectorMaskLine.strokeWidth,
tension: 0, tension: 0,
lineCap: 'round', lineCap: 'round',
lineJoin: 'round', lineJoin: 'round',
shadowForStrokeEnabled: false, shadowForStrokeEnabled: false,
globalCompositeOperation: reduxObject.tool === 'brush' ? 'source-over' : 'destination-out', globalCompositeOperation: vectorMaskLine.tool === 'brush' ? 'source-over' : 'destination-out',
listening: false, listening: false,
}); });
konvaGroup.add(vectorMaskLine); layerObjectGroup.add(konvaLine);
return vectorMaskLine; return konvaLine;
}; };
/** /**
* Creates a konva rect from a redux vector mask rect. * Creates a konva rect from a vector mask rect.
* @param reduxObject The redux object to create the konva rect from. * @param vectorMaskRect The vector mask rect state
* @param konvaGroup The konva group to add the rect to. * @param layerObjectGroup The konva layer's object group to add the line to
*/ */
const createVectorMaskRect = (reduxObject: VectorMaskRect, konvaGroup: Konva.Group): Konva.Rect => { const createVectorMaskRect = (vectorMaskRect: VectorMaskRect, layerObjectGroup: Konva.Group): Konva.Rect => {
const vectorMaskRect = new Konva.Rect({ const konvaRect = new Konva.Rect({
id: reduxObject.id, id: vectorMaskRect.id,
key: reduxObject.id, key: vectorMaskRect.id,
name: RG_LAYER_RECT_NAME, name: RG_LAYER_RECT_NAME,
x: reduxObject.x, x: vectorMaskRect.x,
y: reduxObject.y, y: vectorMaskRect.y,
width: reduxObject.width, width: vectorMaskRect.width,
height: reduxObject.height, height: vectorMaskRect.height,
listening: false, listening: false,
}); });
konvaGroup.add(vectorMaskRect); layerObjectGroup.add(konvaRect);
return vectorMaskRect; return konvaRect;
}; };
/**
* Creates the "compositing rect" for a layer.
* @param konvaLayer The konva layer
*/
const createCompositingRect = (konvaLayer: Konva.Layer): Konva.Rect => { const createCompositingRect = (konvaLayer: Konva.Layer): Konva.Rect => {
const compositingRect = new Konva.Rect({ name: COMPOSITING_RECT_NAME, listening: false }); const compositingRect = new Konva.Rect({ name: COMPOSITING_RECT_NAME, listening: false });
konvaLayer.add(compositingRect); konvaLayer.add(compositingRect);
@ -332,41 +326,41 @@ const createCompositingRect = (konvaLayer: Konva.Layer): Konva.Rect => {
}; };
/** /**
* Renders a vector mask layer. * Renders a regional guidance layer.
* @param stage The konva stage to render on. * @param stage The konva stage
* @param reduxLayer The redux vector mask layer to render. * @param layerState The regional guidance layer state
* @param reduxLayerIndex The index of the layer in the redux store. * @param globalMaskLayerOpacity The global mask layer opacity
* @param globalMaskLayerOpacity The opacity of the global mask layer. * @param tool The current tool
* @param tool The current tool. * @param onLayerPosChanged Callback for when the layer's position changes
*/ */
const renderRegionalGuidanceLayer = ( const renderRGLayer = (
stage: Konva.Stage, stage: Konva.Stage,
reduxLayer: RegionalGuidanceLayer, layerState: RegionalGuidanceLayer,
globalMaskLayerOpacity: number, globalMaskLayerOpacity: number,
tool: Tool, tool: Tool,
onLayerPosChanged?: (layerId: string, x: number, y: number) => void onLayerPosChanged?: (layerId: string, x: number, y: number) => void
): void => { ): void => {
const konvaLayer = const konvaLayer =
stage.findOne<Konva.Layer>(`#${reduxLayer.id}`) ?? stage.findOne<Konva.Layer>(`#${layerState.id}`) ?? createRGLayer(stage, layerState, onLayerPosChanged);
createRegionalGuidanceLayer(stage, reduxLayer, onLayerPosChanged);
// Update the layer's position and listening state // Update the layer's position and listening state
konvaLayer.setAttrs({ konvaLayer.setAttrs({
listening: tool === 'move', // The layer only listens when using the move tool - otherwise the stage is handling mouse events listening: tool === 'move', // The layer only listens when using the move tool - otherwise the stage is handling mouse events
x: Math.floor(reduxLayer.x), x: Math.floor(layerState.x),
y: Math.floor(reduxLayer.y), y: Math.floor(layerState.y),
}); });
// Convert the color to a string, stripping the alpha - the object group will handle opacity. // Convert the color to a string, stripping the alpha - the object group will handle opacity.
const rgbColor = rgbColorToString(reduxLayer.previewColor); const rgbColor = rgbColorToString(layerState.previewColor);
const konvaObjectGroup = konvaLayer.findOne<Konva.Group>(`.${RG_LAYER_OBJECT_GROUP_NAME}`); const konvaObjectGroup = konvaLayer.findOne<Konva.Group>(`.${RG_LAYER_OBJECT_GROUP_NAME}`);
assert(konvaObjectGroup, `Object group not found for layer ${reduxLayer.id}`); assert(konvaObjectGroup, `Object group not found for layer ${layerState.id}`);
// We use caching to handle "global" layer opacity, but caching is expensive and we should only do it when required. // We use caching to handle "global" layer opacity, but caching is expensive and we should only do it when required.
let groupNeedsCache = false; let groupNeedsCache = false;
const objectIds = reduxLayer.maskObjects.map(mapId); const objectIds = layerState.maskObjects.map(mapId);
// Destroy any objects that are no longer in the redux state
for (const objectNode of konvaObjectGroup.find(selectVectorMaskObjects)) { for (const objectNode of konvaObjectGroup.find(selectVectorMaskObjects)) {
if (!objectIds.includes(objectNode.id())) { if (!objectIds.includes(objectNode.id())) {
objectNode.destroy(); objectNode.destroy();
@ -374,15 +368,15 @@ const renderRegionalGuidanceLayer = (
} }
} }
for (const reduxObject of reduxLayer.maskObjects) { for (const maskObject of layerState.maskObjects) {
if (reduxObject.type === 'vector_mask_line') { if (maskObject.type === 'vector_mask_line') {
const vectorMaskLine = const vectorMaskLine =
stage.findOne<Konva.Line>(`#${reduxObject.id}`) ?? createVectorMaskLine(reduxObject, konvaObjectGroup); stage.findOne<Konva.Line>(`#${maskObject.id}`) ?? createVectorMaskLine(maskObject, konvaObjectGroup);
// Only update the points if they have changed. The point values are never mutated, they are only added to the // Only update the points if they have changed. The point values are never mutated, they are only added to the
// array, so checking the length is sufficient to determine if we need to re-cache. // array, so checking the length is sufficient to determine if we need to re-cache.
if (vectorMaskLine.points().length !== reduxObject.points.length) { if (vectorMaskLine.points().length !== maskObject.points.length) {
vectorMaskLine.points(reduxObject.points); vectorMaskLine.points(maskObject.points);
groupNeedsCache = true; groupNeedsCache = true;
} }
// Only update the color if it has changed. // Only update the color if it has changed.
@ -390,9 +384,9 @@ const renderRegionalGuidanceLayer = (
vectorMaskLine.stroke(rgbColor); vectorMaskLine.stroke(rgbColor);
groupNeedsCache = true; groupNeedsCache = true;
} }
} else if (reduxObject.type === 'vector_mask_rect') { } else if (maskObject.type === 'vector_mask_rect') {
const konvaObject = const konvaObject =
stage.findOne<Konva.Rect>(`#${reduxObject.id}`) ?? createVectorMaskRect(reduxObject, konvaObjectGroup); stage.findOne<Konva.Rect>(`#${maskObject.id}`) ?? createVectorMaskRect(maskObject, konvaObjectGroup);
// Only update the color if it has changed. // Only update the color if it has changed.
if (konvaObject.fill() !== rgbColor) { if (konvaObject.fill() !== rgbColor) {
@ -403,8 +397,8 @@ const renderRegionalGuidanceLayer = (
} }
// Only update layer visibility if it has changed. // Only update layer visibility if it has changed.
if (konvaLayer.visible() !== reduxLayer.isEnabled) { if (konvaLayer.visible() !== layerState.isEnabled) {
konvaLayer.visible(reduxLayer.isEnabled); konvaLayer.visible(layerState.isEnabled);
groupNeedsCache = true; groupNeedsCache = true;
} }
@ -428,7 +422,7 @@ const renderRegionalGuidanceLayer = (
* Instead, with the special handling, the effect is as if you drew all the shapes at 100% opacity, flattened them to * Instead, with the special handling, the effect is as if you drew all the shapes at 100% opacity, flattened them to
* a single raster image, and _then_ applied the 50% opacity. * a single raster image, and _then_ applied the 50% opacity.
*/ */
if (reduxLayer.isSelected && tool !== 'move') { if (layerState.isSelected && tool !== 'move') {
// We must clear the cache first so Konva will re-draw the group with the new compositing rect // We must clear the cache first so Konva will re-draw the group with the new compositing rect
if (konvaObjectGroup.isCached()) { if (konvaObjectGroup.isCached()) {
konvaObjectGroup.clearCache(); konvaObjectGroup.clearCache();
@ -438,7 +432,7 @@ const renderRegionalGuidanceLayer = (
compositingRect.setAttrs({ compositingRect.setAttrs({
// The rect should be the size of the layer - use the fast method if we don't have a pixel-perfect bbox already // The rect should be the size of the layer - use the fast method if we don't have a pixel-perfect bbox already
...(!reduxLayer.bboxNeedsUpdate && reduxLayer.bbox ? reduxLayer.bbox : getLayerBboxFast(konvaLayer)), ...(!layerState.bboxNeedsUpdate && layerState.bbox ? layerState.bbox : getLayerBboxFast(konvaLayer)),
fill: rgbColor, fill: rgbColor,
opacity: globalMaskLayerOpacity, opacity: globalMaskLayerOpacity,
// Draw this rect only where there are non-transparent pixels under it (e.g. the mask shapes) // Draw this rect only where there are non-transparent pixels under it (e.g. the mask shapes)
@ -459,9 +453,14 @@ const renderRegionalGuidanceLayer = (
} }
}; };
const createInitialImageLayer = (stage: Konva.Stage, reduxLayer: InitialImageLayer): Konva.Layer => { /**
* Creates an initial image konva layer.
* @param stage The konva stage
* @param layerState The initial image layer state
*/
const createIILayer = (stage: Konva.Stage, layerState: InitialImageLayer): Konva.Layer => {
const konvaLayer = new Konva.Layer({ const konvaLayer = new Konva.Layer({
id: reduxLayer.id, id: layerState.id,
name: INITIAL_IMAGE_LAYER_NAME, name: INITIAL_IMAGE_LAYER_NAME,
imageSmoothingEnabled: true, imageSmoothingEnabled: true,
listening: false, listening: false,
@ -470,20 +469,27 @@ const createInitialImageLayer = (stage: Konva.Stage, reduxLayer: InitialImageLay
return konvaLayer; return konvaLayer;
}; };
const createInitialImageLayerImage = (konvaLayer: Konva.Layer, image: HTMLImageElement): Konva.Image => { /**
* Creates the konva image for an initial image layer.
* @param konvaLayer The konva layer
* @param imageEl The image element
*/
const createIILayerImage = (konvaLayer: Konva.Layer, imageEl: HTMLImageElement): Konva.Image => {
const konvaImage = new Konva.Image({ const konvaImage = new Konva.Image({
name: INITIAL_IMAGE_LAYER_IMAGE_NAME, name: INITIAL_IMAGE_LAYER_IMAGE_NAME,
image, image: imageEl,
}); });
konvaLayer.add(konvaImage); konvaLayer.add(konvaImage);
return konvaImage; return konvaImage;
}; };
const updateInitialImageLayerImageAttrs = ( /**
stage: Konva.Stage, * Updates an initial image layer's attributes (width, height, opacity, visibility).
konvaImage: Konva.Image, * @param stage The konva stage
reduxLayer: InitialImageLayer * @param konvaImage The konva image
) => { * @param layerState The initial image layer state
*/
const updateIILayerImageAttrs = (stage: Konva.Stage, konvaImage: Konva.Image, layerState: InitialImageLayer): void => {
// Konva erroneously reports NaN for width and height when the stage is hidden. This causes errors when caching, // Konva erroneously reports NaN for width and height when the stage is hidden. This causes errors when caching,
// but it doesn't seem to break anything. // but it doesn't seem to break anything.
// TODO(psyche): Investigate and report upstream. // TODO(psyche): Investigate and report upstream.
@ -492,46 +498,55 @@ const updateInitialImageLayerImageAttrs = (
if ( if (
konvaImage.width() !== newWidth || konvaImage.width() !== newWidth ||
konvaImage.height() !== newHeight || konvaImage.height() !== newHeight ||
konvaImage.visible() !== reduxLayer.isEnabled konvaImage.visible() !== layerState.isEnabled
) { ) {
konvaImage.setAttrs({ konvaImage.setAttrs({
opacity: reduxLayer.opacity, opacity: layerState.opacity,
scaleX: 1, scaleX: 1,
scaleY: 1, scaleY: 1,
width: stage.width() / stage.scaleX(), width: stage.width() / stage.scaleX(),
height: stage.height() / stage.scaleY(), height: stage.height() / stage.scaleY(),
visible: reduxLayer.isEnabled, visible: layerState.isEnabled,
}); });
} }
if (konvaImage.opacity() !== reduxLayer.opacity) { if (konvaImage.opacity() !== layerState.opacity) {
konvaImage.opacity(reduxLayer.opacity); konvaImage.opacity(layerState.opacity);
} }
}; };
const updateInitialImageLayerImageSource = async ( /**
* Update an initial image layer's image source when the image changes.
* @param stage The konva stage
* @param konvaLayer The konva layer
* @param layerState The initial image layer state
* @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
*/
const updateIILayerImageSource = async (
stage: Konva.Stage, stage: Konva.Stage,
konvaLayer: Konva.Layer, konvaLayer: Konva.Layer,
reduxLayer: InitialImageLayer layerState: InitialImageLayer,
) => { getImageDTO: (imageName: string) => Promise<ImageDTO | null>
if (reduxLayer.image) { ): Promise<void> => {
const imageName = reduxLayer.image.name; if (layerState.image) {
const req = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName)); const imageName = layerState.image.name;
const imageDTO = await req.unwrap(); const imageDTO = await getImageDTO(imageName);
req.unsubscribe(); if (!imageDTO) {
return;
}
const imageEl = new Image(); const imageEl = new Image();
const imageId = getIILayerImageId(reduxLayer.id, imageName); const imageId = getIILayerImageId(layerState.id, imageName);
imageEl.onload = () => { imageEl.onload = () => {
// Find the existing image or create a new one - must find using the name, bc the id may have just changed // Find the existing image or create a new one - must find using the name, bc the id may have just changed
const konvaImage = const konvaImage =
konvaLayer.findOne<Konva.Image>(`.${INITIAL_IMAGE_LAYER_IMAGE_NAME}`) ?? konvaLayer.findOne<Konva.Image>(`.${INITIAL_IMAGE_LAYER_IMAGE_NAME}`) ??
createInitialImageLayerImage(konvaLayer, imageEl); createIILayerImage(konvaLayer, imageEl);
// Update the image's attributes // Update the image's attributes
konvaImage.setAttrs({ konvaImage.setAttrs({
id: imageId, id: imageId,
image: imageEl, image: imageEl,
}); });
updateInitialImageLayerImageAttrs(stage, konvaImage, reduxLayer); updateIILayerImageAttrs(stage, konvaImage, layerState);
imageEl.id = imageId; imageEl.id = imageId;
}; };
imageEl.src = imageDTO.image_url; imageEl.src = imageDTO.image_url;
@ -540,14 +555,24 @@ const updateInitialImageLayerImageSource = async (
} }
}; };
const renderInitialImageLayer = (stage: Konva.Stage, reduxLayer: InitialImageLayer) => { /**
const konvaLayer = stage.findOne<Konva.Layer>(`#${reduxLayer.id}`) ?? createInitialImageLayer(stage, reduxLayer); * Renders an initial image layer.
* @param stage The konva stage
* @param layerState The initial image layer state
* @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
*/
const renderIILayer = (
stage: Konva.Stage,
layerState: InitialImageLayer,
getImageDTO: (imageName: string) => Promise<ImageDTO | null>
): void => {
const konvaLayer = stage.findOne<Konva.Layer>(`#${layerState.id}`) ?? createIILayer(stage, layerState);
const konvaImage = konvaLayer.findOne<Konva.Image>(`.${INITIAL_IMAGE_LAYER_IMAGE_NAME}`); const konvaImage = konvaLayer.findOne<Konva.Image>(`.${INITIAL_IMAGE_LAYER_IMAGE_NAME}`);
const canvasImageSource = konvaImage?.image(); const canvasImageSource = konvaImage?.image();
let imageSourceNeedsUpdate = false; let imageSourceNeedsUpdate = false;
if (canvasImageSource instanceof HTMLImageElement) { if (canvasImageSource instanceof HTMLImageElement) {
const image = reduxLayer.image; const image = layerState.image;
if (image && canvasImageSource.id !== getCALayerImageId(reduxLayer.id, image.name)) { if (image && canvasImageSource.id !== getCALayerImageId(layerState.id, image.name)) {
imageSourceNeedsUpdate = true; imageSourceNeedsUpdate = true;
} else if (!image) { } else if (!image) {
imageSourceNeedsUpdate = true; imageSourceNeedsUpdate = true;
@ -557,15 +582,20 @@ const renderInitialImageLayer = (stage: Konva.Stage, reduxLayer: InitialImageLay
} }
if (imageSourceNeedsUpdate) { if (imageSourceNeedsUpdate) {
updateInitialImageLayerImageSource(stage, konvaLayer, reduxLayer); updateIILayerImageSource(stage, konvaLayer, layerState, getImageDTO);
} else if (konvaImage) { } else if (konvaImage) {
updateInitialImageLayerImageAttrs(stage, konvaImage, reduxLayer); updateIILayerImageAttrs(stage, konvaImage, layerState);
} }
}; };
const createControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLayer): Konva.Layer => { /**
* Creates a control adapter layer.
* @param stage The konva stage
* @param layerState The control adapter layer state
*/
const createCALayer = (stage: Konva.Stage, layerState: ControlAdapterLayer): Konva.Layer => {
const konvaLayer = new Konva.Layer({ const konvaLayer = new Konva.Layer({
id: reduxLayer.id, id: layerState.id,
name: CA_LAYER_NAME, name: CA_LAYER_NAME,
imageSmoothingEnabled: true, imageSmoothingEnabled: true,
listening: false, listening: false,
@ -574,39 +604,53 @@ const createControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLay
return konvaLayer; return konvaLayer;
}; };
const createControlNetLayerImage = (konvaLayer: Konva.Layer, image: HTMLImageElement): Konva.Image => { /**
* Creates a control adapter layer image.
* @param konvaLayer The konva layer
* @param imageEl The image element
*/
const createCALayerImage = (konvaLayer: Konva.Layer, imageEl: HTMLImageElement): Konva.Image => {
const konvaImage = new Konva.Image({ const konvaImage = new Konva.Image({
name: CA_LAYER_IMAGE_NAME, name: CA_LAYER_IMAGE_NAME,
image, image: imageEl,
}); });
konvaLayer.add(konvaImage); konvaLayer.add(konvaImage);
return konvaImage; return konvaImage;
}; };
const updateControlNetLayerImageSource = async ( /**
* Updates the image source for a control adapter layer. This includes loading the image from the server and updating the konva image.
* @param stage The konva stage
* @param konvaLayer The konva layer
* @param layerState The control adapter layer state
* @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
*/
const updateCALayerImageSource = async (
stage: Konva.Stage, stage: Konva.Stage,
konvaLayer: Konva.Layer, konvaLayer: Konva.Layer,
reduxLayer: ControlAdapterLayer layerState: ControlAdapterLayer,
) => { getImageDTO: (imageName: string) => Promise<ImageDTO | null>
const image = reduxLayer.controlAdapter.processedImage ?? reduxLayer.controlAdapter.image; ): Promise<void> => {
const image = layerState.controlAdapter.processedImage ?? layerState.controlAdapter.image;
if (image) { if (image) {
const imageName = image.name; const imageName = image.name;
const req = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName)); const imageDTO = await getImageDTO(imageName);
const imageDTO = await req.unwrap(); if (!imageDTO) {
req.unsubscribe(); return;
}
const imageEl = new Image(); const imageEl = new Image();
const imageId = getCALayerImageId(reduxLayer.id, imageName); const imageId = getCALayerImageId(layerState.id, imageName);
imageEl.onload = () => { imageEl.onload = () => {
// Find the existing image or create a new one - must find using the name, bc the id may have just changed // Find the existing image or create a new one - must find using the name, bc the id may have just changed
const konvaImage = const konvaImage =
konvaLayer.findOne<Konva.Image>(`.${CA_LAYER_IMAGE_NAME}`) ?? createControlNetLayerImage(konvaLayer, imageEl); konvaLayer.findOne<Konva.Image>(`.${CA_LAYER_IMAGE_NAME}`) ?? createCALayerImage(konvaLayer, imageEl);
// Update the image's attributes // Update the image's attributes
konvaImage.setAttrs({ konvaImage.setAttrs({
id: imageId, id: imageId,
image: imageEl, image: imageEl,
}); });
updateControlNetLayerImageAttrs(stage, konvaImage, reduxLayer); updateCALayerImageAttrs(stage, konvaImage, layerState);
// Must cache after this to apply the filters // Must cache after this to apply the filters
konvaImage.cache(); konvaImage.cache();
imageEl.id = imageId; imageEl.id = imageId;
@ -617,11 +661,17 @@ const updateControlNetLayerImageSource = async (
} }
}; };
const updateControlNetLayerImageAttrs = ( /**
* Updates the image attributes for a control adapter layer's image (width, height, visibility, opacity, filters).
* @param stage The konva stage
* @param konvaImage The konva image
* @param layerState The control adapter layer state
*/
const updateCALayerImageAttrs = (
stage: Konva.Stage, stage: Konva.Stage,
konvaImage: Konva.Image, konvaImage: Konva.Image,
reduxLayer: ControlAdapterLayer layerState: ControlAdapterLayer
) => { ): void => {
let needsCache = false; let needsCache = false;
// Konva erroneously reports NaN for width and height when the stage is hidden. This causes errors when caching, // Konva erroneously reports NaN for width and height when the stage is hidden. This causes errors when caching,
// but it doesn't seem to break anything. // but it doesn't seem to break anything.
@ -632,36 +682,47 @@ const updateControlNetLayerImageAttrs = (
if ( if (
konvaImage.width() !== newWidth || konvaImage.width() !== newWidth ||
konvaImage.height() !== newHeight || konvaImage.height() !== newHeight ||
konvaImage.visible() !== reduxLayer.isEnabled || konvaImage.visible() !== layerState.isEnabled ||
hasFilter !== reduxLayer.isFilterEnabled hasFilter !== layerState.isFilterEnabled
) { ) {
konvaImage.setAttrs({ konvaImage.setAttrs({
opacity: reduxLayer.opacity, opacity: layerState.opacity,
scaleX: 1, scaleX: 1,
scaleY: 1, scaleY: 1,
width: stage.width() / stage.scaleX(), width: stage.width() / stage.scaleX(),
height: stage.height() / stage.scaleY(), height: stage.height() / stage.scaleY(),
visible: reduxLayer.isEnabled, visible: layerState.isEnabled,
filters: reduxLayer.isFilterEnabled ? [LightnessToAlphaFilter] : [], filters: layerState.isFilterEnabled ? [LightnessToAlphaFilter] : [],
}); });
needsCache = true; needsCache = true;
} }
if (konvaImage.opacity() !== reduxLayer.opacity) { if (konvaImage.opacity() !== layerState.opacity) {
konvaImage.opacity(reduxLayer.opacity); konvaImage.opacity(layerState.opacity);
} }
if (needsCache) { if (needsCache) {
konvaImage.cache(); konvaImage.cache();
} }
}; };
const renderControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLayer) => { /**
const konvaLayer = stage.findOne<Konva.Layer>(`#${reduxLayer.id}`) ?? createControlNetLayer(stage, reduxLayer); * Renders a control adapter layer. If the layer doesn't already exist, it is created. Otherwise, the layer is updated
* with the current image source and attributes.
* @param stage The konva stage
* @param layerState The control adapter layer state
* @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
*/
const renderCALayer = (
stage: Konva.Stage,
layerState: ControlAdapterLayer,
getImageDTO: (imageName: string) => Promise<ImageDTO | null>
): void => {
const konvaLayer = stage.findOne<Konva.Layer>(`#${layerState.id}`) ?? createCALayer(stage, layerState);
const konvaImage = konvaLayer.findOne<Konva.Image>(`.${CA_LAYER_IMAGE_NAME}`); const konvaImage = konvaLayer.findOne<Konva.Image>(`.${CA_LAYER_IMAGE_NAME}`);
const canvasImageSource = konvaImage?.image(); const canvasImageSource = konvaImage?.image();
let imageSourceNeedsUpdate = false; let imageSourceNeedsUpdate = false;
if (canvasImageSource instanceof HTMLImageElement) { if (canvasImageSource instanceof HTMLImageElement) {
const image = reduxLayer.controlAdapter.processedImage ?? reduxLayer.controlAdapter.image; const image = layerState.controlAdapter.processedImage ?? layerState.controlAdapter.image;
if (image && canvasImageSource.id !== getCALayerImageId(reduxLayer.id, image.name)) { if (image && canvasImageSource.id !== getCALayerImageId(layerState.id, image.name)) {
imageSourceNeedsUpdate = true; imageSourceNeedsUpdate = true;
} else if (!image) { } else if (!image) {
imageSourceNeedsUpdate = true; imageSourceNeedsUpdate = true;
@ -671,44 +732,46 @@ const renderControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLay
} }
if (imageSourceNeedsUpdate) { if (imageSourceNeedsUpdate) {
updateControlNetLayerImageSource(stage, konvaLayer, reduxLayer); updateCALayerImageSource(stage, konvaLayer, layerState, getImageDTO);
} else if (konvaImage) { } else if (konvaImage) {
updateControlNetLayerImageAttrs(stage, konvaImage, reduxLayer); updateCALayerImageAttrs(stage, konvaImage, layerState);
} }
}; };
/** /**
* Renders the layers on the stage. * Renders the layers on the stage.
* @param stage The konva stage to render on. * @param stage The konva stage
* @param reduxLayers Array of the layers from the redux store. * @param layerStates Array of all layer states
* @param layerOpacity The opacity of the layer. * @param globalMaskLayerOpacity The global mask layer opacity
* @param onLayerPosChanged Callback for when the layer's position changes. This is optional to allow for offscreen rendering. * @param tool The current tool
* @returns * @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
* @param onLayerPosChanged Callback for when the layer's position changes
*/ */
const renderLayers = ( const renderLayers = (
stage: Konva.Stage, stage: Konva.Stage,
reduxLayers: Layer[], layerStates: Layer[],
globalMaskLayerOpacity: number, globalMaskLayerOpacity: number,
tool: Tool, tool: Tool,
getImageDTO: (imageName: string) => Promise<ImageDTO | null>,
onLayerPosChanged?: (layerId: string, x: number, y: number) => void onLayerPosChanged?: (layerId: string, x: number, y: number) => void
) => { ): void => {
const reduxLayerIds = reduxLayers.filter(isRenderableLayer).map(mapId); const layerIds = layerStates.filter(isRenderableLayer).map(mapId);
// Remove un-rendered layers // Remove un-rendered layers
for (const konvaLayer of stage.find<Konva.Layer>(selectRenderableLayers)) { for (const konvaLayer of stage.find<Konva.Layer>(selectRenderableLayers)) {
if (!reduxLayerIds.includes(konvaLayer.id())) { if (!layerIds.includes(konvaLayer.id())) {
konvaLayer.destroy(); konvaLayer.destroy();
} }
} }
for (const reduxLayer of reduxLayers) { for (const layer of layerStates) {
if (isRegionalGuidanceLayer(reduxLayer)) { if (isRegionalGuidanceLayer(layer)) {
renderRegionalGuidanceLayer(stage, reduxLayer, globalMaskLayerOpacity, tool, onLayerPosChanged); renderRGLayer(stage, layer, globalMaskLayerOpacity, tool, onLayerPosChanged);
} }
if (isControlAdapterLayer(reduxLayer)) { if (isControlAdapterLayer(layer)) {
renderControlNetLayer(stage, reduxLayer); renderCALayer(stage, layer, getImageDTO);
} }
if (isInitialImageLayer(reduxLayer)) { if (isInitialImageLayer(layer)) {
renderInitialImageLayer(stage, reduxLayer); renderIILayer(stage, layer, getImageDTO);
} }
// IP Adapter layers are not rendered // IP Adapter layers are not rendered
} }
@ -716,13 +779,12 @@ const renderLayers = (
/** /**
* Creates a bounding box rect for a layer. * Creates a bounding box rect for a layer.
* @param reduxLayer The redux layer to create the bounding box for. * @param layerState The layer state for the layer to create the bounding box for
* @param konvaLayer The konva layer to attach the bounding box to. * @param konvaLayer The konva layer to attach the bounding box to
* @param onBboxMouseDown Callback for when the bounding box is clicked.
*/ */
const createBboxRect = (reduxLayer: Layer, konvaLayer: Konva.Layer) => { const createBboxRect = (layerState: Layer, konvaLayer: Konva.Layer): Konva.Rect => {
const rect = new Konva.Rect({ const rect = new Konva.Rect({
id: getLayerBboxId(reduxLayer.id), id: getLayerBboxId(layerState.id),
name: LAYER_BBOX_NAME, name: LAYER_BBOX_NAME,
strokeWidth: 1, strokeWidth: 1,
visible: false, visible: false,
@ -733,12 +795,12 @@ const createBboxRect = (reduxLayer: Layer, konvaLayer: Konva.Layer) => {
/** /**
* Renders the bounding boxes for the layers. * Renders the bounding boxes for the layers.
* @param stage The konva stage to render on * @param stage The konva stage
* @param reduxLayers An array of all redux layers to draw bboxes for * @param layerStates An array of layers to draw bboxes for
* @param tool The current tool * @param tool The current tool
* @returns * @returns
*/ */
const renderBboxes = (stage: Konva.Stage, reduxLayers: Layer[], tool: Tool) => { const renderBboxes = (stage: Konva.Stage, layerStates: Layer[], tool: Tool): void => {
// Hide all bboxes so they don't interfere with getClientRect // Hide all bboxes so they don't interfere with getClientRect
for (const bboxRect of stage.find<Konva.Rect>(`.${LAYER_BBOX_NAME}`)) { for (const bboxRect of stage.find<Konva.Rect>(`.${LAYER_BBOX_NAME}`)) {
bboxRect.visible(false); bboxRect.visible(false);
@ -749,39 +811,39 @@ const renderBboxes = (stage: Konva.Stage, reduxLayers: Layer[], tool: Tool) => {
return; return;
} }
for (const reduxLayer of reduxLayers.filter(isRegionalGuidanceLayer)) { for (const layer of layerStates.filter(isRegionalGuidanceLayer)) {
if (!reduxLayer.bbox) { if (!layer.bbox) {
continue; continue;
} }
const konvaLayer = stage.findOne<Konva.Layer>(`#${reduxLayer.id}`); const konvaLayer = stage.findOne<Konva.Layer>(`#${layer.id}`);
assert(konvaLayer, `Layer ${reduxLayer.id} not found in stage`); assert(konvaLayer, `Layer ${layer.id} not found in stage`);
const bboxRect = konvaLayer.findOne<Konva.Rect>(`.${LAYER_BBOX_NAME}`) ?? createBboxRect(reduxLayer, konvaLayer); const bboxRect = konvaLayer.findOne<Konva.Rect>(`.${LAYER_BBOX_NAME}`) ?? createBboxRect(layer, konvaLayer);
bboxRect.setAttrs({ bboxRect.setAttrs({
visible: !reduxLayer.bboxNeedsUpdate, visible: !layer.bboxNeedsUpdate,
listening: reduxLayer.isSelected, listening: layer.isSelected,
x: reduxLayer.bbox.x, x: layer.bbox.x,
y: reduxLayer.bbox.y, y: layer.bbox.y,
width: reduxLayer.bbox.width, width: layer.bbox.width,
height: reduxLayer.bbox.height, height: layer.bbox.height,
stroke: reduxLayer.isSelected ? BBOX_SELECTED_STROKE : '', stroke: layer.isSelected ? BBOX_SELECTED_STROKE : '',
}); });
} }
}; };
/** /**
* Calculates the bbox of each regional guidance layer. Only calculates if the mask has changed. * Calculates the bbox of each regional guidance layer. Only calculates if the mask has changed.
* @param stage The konva stage to render on. * @param stage The konva stage
* @param reduxLayers An array of redux layers to calculate bboxes for * @param layerStates An array of layers to calculate bboxes for
* @param onBboxChanged Callback for when the bounding box changes * @param onBboxChanged Callback for when the bounding box changes
*/ */
const updateBboxes = ( const updateBboxes = (
stage: Konva.Stage, stage: Konva.Stage,
reduxLayers: Layer[], layerStates: Layer[],
onBboxChanged: (layerId: string, bbox: IRect | null) => void onBboxChanged: (layerId: string, bbox: IRect | null) => void
) => { ): void => {
for (const rgLayer of reduxLayers.filter(isRegionalGuidanceLayer)) { for (const rgLayer of layerStates.filter(isRegionalGuidanceLayer)) {
const konvaLayer = stage.findOne<Konva.Layer>(`#${rgLayer.id}`); const konvaLayer = stage.findOne<Konva.Layer>(`#${rgLayer.id}`);
assert(konvaLayer, `Layer ${rgLayer.id} not found in stage`); assert(konvaLayer, `Layer ${rgLayer.id} not found in stage`);
// We only need to recalculate the bbox if the layer has changed // We only need to recalculate the bbox if the layer has changed
@ -808,7 +870,7 @@ const updateBboxes = (
/** /**
* Creates the background layer for the stage. * Creates the background layer for the stage.
* @param stage The konva stage to render on * @param stage The konva stage
*/ */
const createBackgroundLayer = (stage: Konva.Stage): Konva.Layer => { const createBackgroundLayer = (stage: Konva.Stage): Konva.Layer => {
const layer = new Konva.Layer({ const layer = new Konva.Layer({
@ -829,17 +891,17 @@ const createBackgroundLayer = (stage: Konva.Stage): Konva.Layer => {
image.onload = () => { image.onload = () => {
background.fillPatternImage(image); background.fillPatternImage(image);
}; };
image.src = STAGE_BG_DATAURL; image.src = TRANSPARENCY_CHECKER_PATTERN;
return layer; return layer;
}; };
/** /**
* Renders the background layer for the stage. * Renders the background layer for the stage.
* @param stage The konva stage to render on * @param stage The konva stage
* @param width The unscaled width of the canvas * @param width The unscaled width of the canvas
* @param height The unscaled height of the canvas * @param height The unscaled height of the canvas
*/ */
const renderBackground = (stage: Konva.Stage, width: number, height: number) => { const renderBackground = (stage: Konva.Stage, width: number, height: number): void => {
const layer = stage.findOne<Konva.Layer>(`#${BACKGROUND_LAYER_ID}`) ?? createBackgroundLayer(stage); const layer = stage.findOne<Konva.Layer>(`#${BACKGROUND_LAYER_ID}`) ?? createBackgroundLayer(stage);
const background = layer.findOne<Konva.Rect>(`#${BACKGROUND_RECT_ID}`); const background = layer.findOne<Konva.Rect>(`#${BACKGROUND_RECT_ID}`);
@ -880,6 +942,10 @@ const arrangeLayers = (stage: Konva.Stage, layerIds: string[]): void => {
stage.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.zIndex(nextZIndex++); stage.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.zIndex(nextZIndex++);
}; };
/**
* Creates the "no layers" fallback layer
* @param stage The konva stage
*/
const createNoLayersMessageLayer = (stage: Konva.Stage): Konva.Layer => { const createNoLayersMessageLayer = (stage: Konva.Stage): Konva.Layer => {
const noLayersMessageLayer = new Konva.Layer({ const noLayersMessageLayer = new Konva.Layer({
id: NO_LAYERS_MESSAGE_LAYER_ID, id: NO_LAYERS_MESSAGE_LAYER_ID,
@ -891,7 +957,7 @@ const createNoLayersMessageLayer = (stage: Konva.Stage): Konva.Layer => {
y: 0, y: 0,
align: 'center', align: 'center',
verticalAlign: 'middle', verticalAlign: 'middle',
text: t('controlLayers.noLayersAdded'), text: t('controlLayers.noLayersAdded', 'No Layers Added'),
fontFamily: '"Inter Variable", sans-serif', fontFamily: '"Inter Variable", sans-serif',
fontStyle: '600', fontStyle: '600',
fill: 'white', fill: 'white',
@ -901,7 +967,14 @@ const createNoLayersMessageLayer = (stage: Konva.Stage): Konva.Layer => {
return noLayersMessageLayer; return noLayersMessageLayer;
}; };
const renderNoLayersMessage = (stage: Konva.Stage, layerCount: number, width: number, height: number) => { /**
* Renders the "no layers" message when there are no layers to render
* @param stage The konva stage
* @param layerCount The current number of layers
* @param width The target width of the text
* @param height The target height of the text
*/
const renderNoLayersMessage = (stage: Konva.Stage, layerCount: number, width: number, height: number): void => {
const noLayersMessageLayer = const noLayersMessageLayer =
stage.findOne<Konva.Layer>(`#${NO_LAYERS_MESSAGE_LAYER_ID}`) ?? createNoLayersMessageLayer(stage); stage.findOne<Konva.Layer>(`#${NO_LAYERS_MESSAGE_LAYER_ID}`) ?? createNoLayersMessageLayer(stage);
if (layerCount === 0) { if (layerCount === 0) {
@ -936,20 +1009,3 @@ export const debouncedRenderers = {
arrangeLayers: debounce(arrangeLayers, DEBOUNCE_MS), arrangeLayers: debounce(arrangeLayers, DEBOUNCE_MS),
updateBboxes: debounce(updateBboxes, DEBOUNCE_MS), updateBboxes: debounce(updateBboxes, DEBOUNCE_MS),
}; };
/**
* Calculates the lightness (HSL) of a given pixel and sets the alpha channel to that value.
* This is useful for edge maps and other masks, to make the black areas transparent.
* @param imageData The image data to apply the filter to
*/
const LightnessToAlphaFilter = (imageData: ImageData) => {
const len = imageData.data.length / 4;
for (let i = 0; i < len; i++) {
const r = imageData.data[i * 4 + 0] as number;
const g = imageData.data[i * 4 + 1] as number;
const b = imageData.data[i * 4 + 2] as number;
const cMin = Math.min(r, g, b);
const cMax = Math.max(r, g, b);
imageData.data[i * 4 + 3] = (cMin + cMax) / 2;
}
};
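For context on the renderer change above (the image fetcher is now injected into `renderLayers` instead of being pulled from the redux store inside the module), here is a minimal usage sketch. The import paths, the `renderers` aggregate export, and the RTK Query adapter are assumptions based on the pre-move code, not part of this commit:
```ts
import type Konva from 'konva';
import { getStore } from 'app/store/nanostores/store';
import { imagesApi } from 'services/api/endpoints/images'; // path assumed
import type { ImageDTO } from 'services/api/types'; // path assumed
import type { Layer } from 'features/controlLayers/store/types';
// Path assumed; this commit moves the renderers under features/controlLayers/konva/
import { renderers } from 'features/controlLayers/konva/renderers';

// One plausible adapter: resolve an image name to its DTO via RTK Query, returning null on failure
// so the renderer can simply skip drawing that layer's image.
const getImageDTO = async (imageName: string): Promise<ImageDTO | null> => {
  const req = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName));
  try {
    return await req.unwrap();
  } catch {
    return null;
  } finally {
    req.unsubscribe();
  }
};

// Render all layers with the fetcher injected; headless callers (e.g. exporting masks offscreen)
// can pass a different implementation instead of reaching into the redux store.
export const renderAll = (stage: Konva.Stage, layerStates: Layer[], globalMaskLayerOpacity: number): void => {
  renderers.renderLayers(stage, layerStates, globalMaskLayerOpacity, 'brush', getImageDTO);
};
```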

View File

@ -0,0 +1,67 @@
import type Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
import type { Vector2d } from 'konva/lib/types';
//#region getScaledFlooredCursorPosition
/**
* Gets the scaled and floored cursor position on the stage. If the cursor is not currently over the stage, returns null.
* @param stage The konva stage
*/
export const getScaledFlooredCursorPosition = (stage: Konva.Stage): Vector2d | null => {
const pointerPosition = stage.getPointerPosition();
const stageTransform = stage.getAbsoluteTransform().copy();
if (!pointerPosition) {
return null;
}
const scaledCursorPosition = stageTransform.invert().point(pointerPosition);
return {
x: Math.floor(scaledCursorPosition.x),
y: Math.floor(scaledCursorPosition.y),
};
};
//#endregion
//#region snapPosToStage
/**
* Snaps a position to the edge of the stage if within a threshold of the edge
* @param pos The position to snap
* @param stage The konva stage
* @param snapPx The snap threshold in pixels
*/
export const snapPosToStage = (pos: Vector2d, stage: Konva.Stage, snapPx = 10): Vector2d => {
const snappedPos = { ...pos };
// Get the normalized threshold for snapping to the edge of the stage
const thresholdX = snapPx / stage.scaleX();
const thresholdY = snapPx / stage.scaleY();
const stageWidth = stage.width() / stage.scaleX();
const stageHeight = stage.height() / stage.scaleY();
// Snap to the edge of the stage if within threshold
if (pos.x - thresholdX < 0) {
snappedPos.x = 0;
} else if (pos.x + thresholdX > stageWidth) {
snappedPos.x = Math.floor(stageWidth);
}
if (pos.y - thresholdY < 0) {
snappedPos.y = 0;
} else if (pos.y + thresholdY > stageHeight) {
snappedPos.y = Math.floor(stageHeight);
}
return snappedPos;
};
//#endregion
//#region getIsMouseDown
/**
* Checks if the left mouse button is currently pressed
* @param e The konva event
*/
export const getIsMouseDown = (e: KonvaEventObject<MouseEvent>): boolean => e.evt.buttons === 1;
//#endregion
//#region getIsFocused
/**
* Checks if the stage is currently focused
* @param stage The konva stage
*/
export const getIsFocused = (stage: Konva.Stage): boolean => stage.container().contains(document.activeElement);
//#endregion
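The new konva utility module above is self-contained; a short sketch of how these helpers compose in a pointer handler follows (the module path is an assumption based on the new `konva/` folder):
```ts
import type Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
import {
  getIsFocused,
  getIsMouseDown,
  getScaledFlooredCursorPosition,
  snapPosToStage,
} from 'features/controlLayers/konva/util'; // path assumed

// Only draw while the stage is focused and the left mouse button is down.
export const onPointerMove = (stage: Konva.Stage) => (e: KonvaEventObject<MouseEvent>): void => {
  if (!getIsFocused(stage) || !getIsMouseDown(e)) {
    return;
  }
  const pos = getScaledFlooredCursorPosition(stage);
  if (!pos) {
    return; // cursor is not over the stage
  }
  // Snap to a stage edge when within the default 10px threshold (scaled to stage coordinates)
  const snapped = snapPosToStage(pos, stage);
  console.log('drawing at', snapped.x, snapped.y);
};
```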

View File

@ -4,6 +4,14 @@ import type { PersistConfig, RootState } from 'app/store/store';
import { moveBackward, moveForward, moveToBack, moveToFront } from 'common/util/arrayUtils'; import { moveBackward, moveForward, moveToBack, moveToFront } from 'common/util/arrayUtils';
import { deepClone } from 'common/util/deepClone'; import { deepClone } from 'common/util/deepClone';
import { roundDownToMultiple } from 'common/util/roundDownToMultiple'; import { roundDownToMultiple } from 'common/util/roundDownToMultiple';
import {
getCALayerId,
getIPALayerId,
getRGLayerId,
getRGLayerLineId,
getRGLayerRectId,
INITIAL_IMAGE_LAYER_ID,
} from 'features/controlLayers/konva/naming';
import type { import type {
CLIPVisionModelV2, CLIPVisionModelV2,
ControlModeV2, ControlModeV2,
@ -36,6 +44,9 @@ import { assert } from 'tsafe';
import { v4 as uuidv4 } from 'uuid'; import { v4 as uuidv4 } from 'uuid';
import type { import type {
AddLineArg,
AddPointToLineArg,
AddRectArg,
ControlAdapterLayer, ControlAdapterLayer,
ControlLayersState, ControlLayersState,
DrawingTool, DrawingTool,
@ -492,11 +503,11 @@ export const controlLayersSlice = createSlice({
layer.bboxNeedsUpdate = true; layer.bboxNeedsUpdate = true;
layer.uploadedMaskImage = null; layer.uploadedMaskImage = null;
}, },
prepare: (payload: { layerId: string; points: [number, number, number, number]; tool: DrawingTool }) => ({ prepare: (payload: AddLineArg) => ({
payload: { ...payload, lineUuid: uuidv4() }, payload: { ...payload, lineUuid: uuidv4() },
}), }),
}, },
rgLayerPointsAdded: (state, action: PayloadAction<{ layerId: string; point: [number, number] }>) => { rgLayerPointsAdded: (state, action: PayloadAction<AddPointToLineArg>) => {
const { layerId, point } = action.payload; const { layerId, point } = action.payload;
const layer = selectRGLayerOrThrow(state, layerId); const layer = selectRGLayerOrThrow(state, layerId);
const lastLine = layer.maskObjects.findLast(isLine); const lastLine = layer.maskObjects.findLast(isLine);
@ -529,7 +540,7 @@ export const controlLayersSlice = createSlice({
layer.bboxNeedsUpdate = true; layer.bboxNeedsUpdate = true;
layer.uploadedMaskImage = null; layer.uploadedMaskImage = null;
}, },
prepare: (payload: { layerId: string; rect: IRect }) => ({ payload: { ...payload, rectUuid: uuidv4() } }), prepare: (payload: AddRectArg) => ({ payload: { ...payload, rectUuid: uuidv4() } }),
}, },
rgLayerMaskImageUploaded: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO }>) => { rgLayerMaskImageUploaded: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO }>) => {
const { layerId, imageDTO } = action.payload; const { layerId, imageDTO } = action.payload;
@ -883,45 +894,21 @@ const migrateControlLayersState = (state: any): any => {
return state; return state;
}; };
// Ephemeral interaction state
export const $isDrawing = atom(false); export const $isDrawing = atom(false);
export const $lastMouseDownPos = atom<Vector2d | null>(null); export const $lastMouseDownPos = atom<Vector2d | null>(null);
export const $tool = atom<Tool>('brush'); export const $tool = atom<Tool>('brush');
export const $lastCursorPos = atom<Vector2d | null>(null); export const $lastCursorPos = atom<Vector2d | null>(null);
export const $isPreviewVisible = atom(true);
export const $lastAddedPoint = atom<Vector2d | null>(null);
// IDs for singleton Konva layers and objects // Some nanostores that are manually synced to redux state to provide imperative access
export const TOOL_PREVIEW_LAYER_ID = 'tool_preview_layer'; // TODO(psyche): This is a hack, figure out another way to handle this...
export const TOOL_PREVIEW_BRUSH_GROUP_ID = 'tool_preview_layer.brush_group'; export const $brushSize = atom<number>(0);
export const TOOL_PREVIEW_BRUSH_FILL_ID = 'tool_preview_layer.brush_fill'; export const $brushSpacingPx = atom<number>(0);
export const TOOL_PREVIEW_BRUSH_BORDER_INNER_ID = 'tool_preview_layer.brush_border_inner'; export const $selectedLayerId = atom<string | null>(null);
export const TOOL_PREVIEW_BRUSH_BORDER_OUTER_ID = 'tool_preview_layer.brush_border_outer'; export const $selectedLayerType = atom<Layer['type'] | null>(null);
export const TOOL_PREVIEW_RECT_ID = 'tool_preview_layer.rect'; export const $shouldInvertBrushSizeScrollDirection = atom(false);
export const BACKGROUND_LAYER_ID = 'background_layer';
export const BACKGROUND_RECT_ID = 'background_layer.rect';
export const NO_LAYERS_MESSAGE_LAYER_ID = 'no_layers_message';
// Names (aka classes) for Konva layers and objects
export const CA_LAYER_NAME = 'control_adapter_layer';
export const CA_LAYER_IMAGE_NAME = 'control_adapter_layer.image';
export const RG_LAYER_NAME = 'regional_guidance_layer';
export const RG_LAYER_LINE_NAME = 'regional_guidance_layer.line';
export const RG_LAYER_OBJECT_GROUP_NAME = 'regional_guidance_layer.object_group';
export const RG_LAYER_RECT_NAME = 'regional_guidance_layer.rect';
export const INITIAL_IMAGE_LAYER_ID = 'singleton_initial_image_layer';
export const INITIAL_IMAGE_LAYER_NAME = 'initial_image_layer';
export const INITIAL_IMAGE_LAYER_IMAGE_NAME = 'initial_image_layer.image';
export const LAYER_BBOX_NAME = 'layer.bbox';
export const COMPOSITING_RECT_NAME = 'compositing-rect';
// Getters for non-singleton layer and object IDs
export const getRGLayerId = (layerId: string) => `${RG_LAYER_NAME}_${layerId}`;
const getRGLayerLineId = (layerId: string, lineId: string) => `${layerId}.line_${lineId}`;
const getRGLayerRectId = (layerId: string, lineId: string) => `${layerId}.rect_${lineId}`;
export const getRGLayerObjectGroupId = (layerId: string, groupId: string) => `${layerId}.objectGroup_${groupId}`;
export const getLayerBboxId = (layerId: string) => `${layerId}.bbox`;
export const getCALayerId = (layerId: string) => `control_adapter_layer_${layerId}`;
export const getCALayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIILayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIPALayerId = (layerId: string) => `ip_adapter_layer_${layerId}`;
export const controlLayersPersistConfig: PersistConfig<ControlLayersState> = { export const controlLayersPersistConfig: PersistConfig<ControlLayersState> = {
name: controlLayersSlice.name, name: controlLayersSlice.name,

View File

@ -17,6 +17,7 @@ import {
zParameterPositivePrompt, zParameterPositivePrompt,
zParameterStrength, zParameterStrength,
} from 'features/parameters/types/parameterSchemas'; } from 'features/parameters/types/parameterSchemas';
import type { IRect } from 'konva/lib/types';
import { z } from 'zod'; import { z } from 'zod';
const zTool = z.enum(['brush', 'eraser', 'move', 'rect']); const zTool = z.enum(['brush', 'eraser', 'move', 'rect']);
@ -129,3 +130,7 @@ export type ControlLayersState = {
aspectRatio: AspectRatioState; aspectRatio: AspectRatioState;
}; };
}; };
export type AddLineArg = { layerId: string; points: [number, number, number, number]; tool: DrawingTool };
export type AddPointToLineArg = { layerId: string; point: [number, number] };
export type AddRectArg = { layerId: string; rect: IRect };
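These arg types back the slice's prepare callbacks and reducers shown earlier; a minimal sketch of dispatching one of them (assuming `rgLayerPointsAdded` is exported from the slice as usual):
```ts
import { getStore } from 'app/store/nanostores/store';
import { rgLayerPointsAdded } from 'features/controlLayers/store/controlLayersSlice';
import type { AddPointToLineArg } from 'features/controlLayers/store/types';

// Append a point to the in-progress brush line on a regional guidance layer.
// The line/rect "added" actions attach uuids in their prepare callbacks; this one just takes the typed arg.
export const addPointToActiveLine = (layerId: string, x: number, y: number): void => {
  const arg: AddPointToLineArg = { layerId, point: [x, y] };
  getStore().dispatch(rgLayerPointsAdded(arg));
};
```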

View File

@ -1,66 +0,0 @@
import { getStore } from 'app/store/nanostores/store';
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
import { isRegionalGuidanceLayer, RG_LAYER_NAME } from 'features/controlLayers/store/controlLayersSlice';
import { renderers } from 'features/controlLayers/util/renderers';
import Konva from 'konva';
import { assert } from 'tsafe';
/**
* Get the blobs of all regional prompt layers. Only visible layers are returned.
* @param layerIds The IDs of the layers to get blobs for. If not provided, all regional prompt layers are used.
* @param preview Whether to open a new tab displaying each layer.
* @returns A map of layer IDs to blobs.
*/
export const getRegionalPromptLayerBlobs = async (
layerIds?: string[],
preview: boolean = false
): Promise<Record<string, Blob>> => {
const state = getStore().getState();
const { layers } = state.controlLayers.present;
const { width, height } = state.controlLayers.present.size;
const reduxLayers = layers.filter(isRegionalGuidanceLayer);
const container = document.createElement('div');
const stage = new Konva.Stage({ container, width, height });
renderers.renderLayers(stage, reduxLayers, 1, 'brush');
const konvaLayers = stage.find<Konva.Layer>(`.${RG_LAYER_NAME}`);
const blobs: Record<string, Blob> = {};
// First remove all layers
for (const layer of konvaLayers) {
layer.remove();
}
// Next render each layer to a blob
for (const layer of konvaLayers) {
if (layerIds && !layerIds.includes(layer.id())) {
continue;
}
const reduxLayer = reduxLayers.find((l) => l.id === layer.id());
assert(reduxLayer, `Redux layer ${layer.id()} not found`);
stage.add(layer);
const blob = await new Promise<Blob>((resolve) => {
stage.toBlob({
callback: (blob) => {
assert(blob, 'Blob is null');
resolve(blob);
},
});
});
if (preview) {
const base64 = await blobToDataURL(blob);
openBase64ImageInTab([
{
base64,
caption: `${reduxLayer.id}: ${reduxLayer.positivePrompt} / ${reduxLayer.negativePrompt}`,
},
]);
}
layer.remove();
blobs[layer.id()] = blob;
}
return blobs;
};

View File

@ -28,7 +28,9 @@ const ImageMetadataGraphTabContent = ({ image }: Props) => {
return <IAINoContentFallback label={t('nodes.noGraph')} />; return <IAINoContentFallback label={t('nodes.noGraph')} />;
} }
return <DataViewer data={graph} label={t('nodes.graph')} />; return (
<DataViewer fileName={`${image.image_name.replace('.png', '')}_graph`} data={graph} label={t('nodes.graph')} />
);
}; };
export default memo(ImageMetadataGraphTabContent); export default memo(ImageMetadataGraphTabContent);

View File

@ -68,14 +68,22 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => {
</TabPanel> </TabPanel>
<TabPanel> <TabPanel>
{metadata ? ( {metadata ? (
<DataViewer data={metadata} label={t('metadata.metadata')} /> <DataViewer
fileName={`${image.image_name.replace('.png', '')}_metadata`}
data={metadata}
label={t('metadata.metadata')}
/>
) : ( ) : (
<IAINoContentFallback label={t('metadata.noMetaData')} /> <IAINoContentFallback label={t('metadata.noMetaData')} />
)} )}
</TabPanel> </TabPanel>
<TabPanel> <TabPanel>
{image ? ( {image ? (
<DataViewer data={image} label={t('metadata.imageDetails')} /> <DataViewer
fileName={`${image.image_name.replace('.png', '')}_details`}
data={image}
label={t('metadata.imageDetails')}
/>
) : ( ) : (
<IAINoContentFallback label={t('metadata.noImageDetails')} /> <IAINoContentFallback label={t('metadata.noImageDetails')} />
)} )}

View File

@ -28,7 +28,13 @@ const ImageMetadataWorkflowTabContent = ({ image }: Props) => {
return <IAINoContentFallback label={t('nodes.noWorkflow')} />; return <IAINoContentFallback label={t('nodes.noWorkflow')} />;
} }
return <DataViewer data={workflow} label={t('metadata.workflow')} />; return (
<DataViewer
fileName={`${image.image_name.replace('.png', '')}_workflow`}
data={workflow}
label={t('metadata.workflow')}
/>
);
}; };
export default memo(ImageMetadataWorkflowTabContent); export default memo(ImageMetadataWorkflowTabContent);

View File

@ -3,7 +3,7 @@ import { useAppSelector } from 'app/store/storeHooks';
import { useBoolean } from 'common/hooks/useBoolean'; import { useBoolean } from 'common/hooks/useBoolean';
import { preventDefault } from 'common/util/stopPropagation'; import { preventDefault } from 'common/util/stopPropagation';
import type { Dimensions } from 'features/canvas/store/canvasTypes'; import type { Dimensions } from 'features/canvas/store/canvasTypes';
import { STAGE_BG_DATAURL } from 'features/controlLayers/util/renderers'; import { TRANSPARENCY_CHECKER_PATTERN } from 'features/controlLayers/konva/constants';
import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel'; import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel';
import { memo, useMemo, useRef } from 'react'; import { memo, useMemo, useRef } from 'react';
@ -78,7 +78,7 @@ export const ImageComparisonHover = memo(({ firstImage, secondImage, containerDi
left={0} left={0}
right={0} right={0}
bottom={0} bottom={0}
backgroundImage={STAGE_BG_DATAURL} backgroundImage={TRANSPARENCY_CHECKER_PATTERN}
backgroundRepeat="repeat" backgroundRepeat="repeat"
opacity={0.2} opacity={0.2}
/> />

View File

@ -2,7 +2,7 @@ import { Box, Flex, Icon, Image } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks'; import { useAppSelector } from 'app/store/storeHooks';
import { preventDefault } from 'common/util/stopPropagation'; import { preventDefault } from 'common/util/stopPropagation';
import type { Dimensions } from 'features/canvas/store/canvasTypes'; import type { Dimensions } from 'features/canvas/store/canvasTypes';
import { STAGE_BG_DATAURL } from 'features/controlLayers/util/renderers'; import { TRANSPARENCY_CHECKER_PATTERN } from 'features/controlLayers/konva/constants';
import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel'; import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel';
import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react'; import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi';
@ -120,7 +120,7 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage, containerD
left={0} left={0}
right={0} right={0}
bottom={0} bottom={0}
backgroundImage={STAGE_BG_DATAURL} backgroundImage={TRANSPARENCY_CHECKER_PATTERN}
backgroundRepeat="repeat" backgroundRepeat="repeat"
opacity={0.2} opacity={0.2}
/> />

View File

@ -1,4 +1,7 @@
import { getStore } from 'app/store/nanostores/store';
import { deepClone } from 'common/util/deepClone';
import { objectKeys } from 'common/util/objectKeys'; import { objectKeys } from 'common/util/objectKeys';
import { shouldConcatPromptsChanged } from 'features/controlLayers/store/controlLayersSlice';
import type { Layer } from 'features/controlLayers/store/types'; import type { Layer } from 'features/controlLayers/store/types';
import type { LoRA } from 'features/lora/store/loraSlice'; import type { LoRA } from 'features/lora/store/loraSlice';
import type { import type {
@ -16,6 +19,7 @@ import { validators } from 'features/metadata/util/validators';
import type { ModelIdentifierField } from 'features/nodes/types/common'; import type { ModelIdentifierField } from 'features/nodes/types/common';
import { toast } from 'features/toast/toast'; import { toast } from 'features/toast/toast';
import { t } from 'i18next'; import { t } from 'i18next';
import { size } from 'lodash-es';
import { assert } from 'tsafe'; import { assert } from 'tsafe';
import { parsers } from './parsers'; import { parsers } from './parsers';
@ -376,54 +380,25 @@ export const handlers = {
}), }),
} as const; } as const;
type ParsedValue = Awaited<ReturnType<(typeof handlers)[keyof typeof handlers]['parse']>>;
type RecallResults = Partial<Record<keyof typeof handlers, ParsedValue>>;
export const parseAndRecallPrompts = async (metadata: unknown) => { export const parseAndRecallPrompts = async (metadata: unknown) => {
const results = await Promise.allSettled([ const keysToRecall: (keyof typeof handlers)[] = [
handlers.positivePrompt.parse(metadata).then((positivePrompt) => { 'positivePrompt',
if (!handlers.positivePrompt.recall) { 'negativePrompt',
return; 'sdxlPositiveStylePrompt',
} 'sdxlNegativeStylePrompt',
handlers.positivePrompt?.recall(positivePrompt); ];
}), const recalled = await recallKeys(keysToRecall, metadata);
handlers.negativePrompt.parse(metadata).then((negativePrompt) => { if (size(recalled) > 0) {
if (!handlers.negativePrompt.recall) {
return;
}
handlers.negativePrompt?.recall(negativePrompt);
}),
handlers.sdxlPositiveStylePrompt.parse(metadata).then((sdxlPositiveStylePrompt) => {
if (!handlers.sdxlPositiveStylePrompt.recall) {
return;
}
handlers.sdxlPositiveStylePrompt?.recall(sdxlPositiveStylePrompt);
}),
handlers.sdxlNegativeStylePrompt.parse(metadata).then((sdxlNegativeStylePrompt) => {
if (!handlers.sdxlNegativeStylePrompt.recall) {
return;
}
handlers.sdxlNegativeStylePrompt?.recall(sdxlNegativeStylePrompt);
}),
]);
if (results.some((result) => result.status === 'fulfilled')) {
parameterSetToast(t('metadata.allPrompts')); parameterSetToast(t('metadata.allPrompts'));
} }
}; };
export const parseAndRecallImageDimensions = async (metadata: unknown) => { export const parseAndRecallImageDimensions = async (metadata: unknown) => {
const results = await Promise.allSettled([ const recalled = await recallKeys(['width', 'height'], metadata);
handlers.width.parse(metadata).then((width) => { if (size(recalled) > 0) {
if (!handlers.width.recall) {
return;
}
handlers.width?.recall(width);
}),
handlers.height.parse(metadata).then((height) => {
if (!handlers.height.recall) {
return;
}
handlers.height?.recall(height);
}),
]);
if (results.some((result) => result.status === 'fulfilled')) {
parameterSetToast(t('metadata.imageDimensions')); parameterSetToast(t('metadata.imageDimensions'));
} }
}; };
@ -438,28 +413,20 @@ export const parseAndRecallAllMetadata = async (
toControlLayers: boolean, toControlLayers: boolean,
skip: (keyof typeof handlers)[] = [] skip: (keyof typeof handlers)[] = []
) => { ) => {
const skipKeys = skip ?? []; const skipKeys = deepClone(skip);
if (toControlLayers) { if (toControlLayers) {
skipKeys.push(...TO_CONTROL_LAYERS_SKIP_KEYS); skipKeys.push(...TO_CONTROL_LAYERS_SKIP_KEYS);
} else { } else {
skipKeys.push(...NOT_TO_CONTROL_LAYERS_SKIP_KEYS); skipKeys.push(...NOT_TO_CONTROL_LAYERS_SKIP_KEYS);
} }
const results = await Promise.allSettled(
objectKeys(handlers)
.filter((key) => !skipKeys.includes(key))
.map((key) => {
const { parse, recall } = handlers[key];
return parse(metadata).then((value) => {
if (!recall) {
return;
}
/* @ts-expect-error The return type of parse and the input type of recall are guaranteed to be compatible. */
recall(value);
});
})
);
if (results.some((result) => result.status === 'fulfilled')) { // We may need to take some further action depending on what was recalled. For example, we need to disable SDXL prompt
// concat if the negative or positive style prompt was set. Because the recalling is all async, we need to collect all
// results
const keysToRecall = objectKeys(handlers).filter((key) => !skipKeys.includes(key));
const recalled = await recallKeys(keysToRecall, metadata);
if (size(recalled) > 0) {
toast({ toast({
id: 'PARAMETER_SET', id: 'PARAMETER_SET',
title: t('toast.parametersSet'), title: t('toast.parametersSet'),
@ -473,3 +440,43 @@ export const parseAndRecallAllMetadata = async (
}); });
} }
}; };
/**
* Recalls a set of keys from metadata.
* Includes special handling for some metadata where recalling may have side effects. For example, recalling a "style"
* prompt that is different from the "positive" or "negative" prompt should disable prompt concatenation.
* @param keysToRecall An array of keys to recall.
* @param metadata The metadata to recall from
* @returns A promise that resolves to an object containing the recalled values.
*/
const recallKeys = async (keysToRecall: (keyof typeof handlers)[], metadata: unknown): Promise<RecallResults> => {
const { dispatch } = getStore();
const recalled: RecallResults = {};
for (const key of keysToRecall) {
const { parse, recall } = handlers[key];
if (!recall) {
continue;
}
try {
const value = await parse(metadata);
/* @ts-expect-error The return type of parse and the input type of recall are guaranteed to be compatible. */
await recall(value);
recalled[key] = value;
} catch {
// no-op
}
}
if (
(recalled['sdxlPositiveStylePrompt'] && recalled['sdxlPositiveStylePrompt'] !== recalled['positivePrompt']) ||
(recalled['sdxlNegativeStylePrompt'] && recalled['sdxlNegativeStylePrompt'] !== recalled['negativePrompt'])
) {
// If we set the negative style prompt or positive style prompt, we should disable prompt concat
dispatch(shouldConcatPromptsChanged(false));
} else {
// Otherwise, we should enable prompt concat
dispatch(shouldConcatPromptsChanged(true));
}
return recalled;
};
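The refactored recall flow above funnels everything through `recallKeys`; a brief sketch of a caller (the handlers module path is an assumption):
```ts
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers'; // path assumed

// Recall an image's metadata into the Control Layers tab, skipping the dimension keys
// (the skipped key names here are illustrative; any keyof the handlers map is valid).
export const recallAllButDimensions = async (metadata: unknown): Promise<void> => {
  await parseAndRecallAllMetadata(metadata, /* toControlLayers */ true, ['width', 'height']);
};
```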

View File

@ -1,6 +1,7 @@
import { getStore } from 'app/store/nanostores/store'; import { getStore } from 'app/store/nanostores/store';
import type { ModelIdentifierField } from 'features/nodes/types/common'; import type { ModelIdentifierField } from 'features/nodes/types/common';
import { isModelIdentifier, isModelIdentifierV2 } from 'features/nodes/types/common'; import { isModelIdentifier, isModelIdentifierV2 } from 'features/nodes/types/common';
import type { ModelIdentifier } from 'features/nodes/types/v2/common';
import { modelsApi } from 'services/api/endpoints/models'; import { modelsApi } from 'services/api/endpoints/models';
import type { AnyModelConfig, BaseModelType, ModelType } from 'services/api/types'; import type { AnyModelConfig, BaseModelType, ModelType } from 'services/api/types';
@ -107,19 +108,30 @@ export const fetchModelConfigWithTypeGuard = async <T extends AnyModelConfig>(
/**
* Fetches the model key from a model identifier. This includes fetching the key for MM1 format model identifiers.
- * @param modelIdentifier The model identifier. The MM2 format `{key: string}` simply extracts the key. The MM1 format
- * `{model_name: string, base_model: BaseModelType}` must do a network request to fetch the key.
+ * @param modelIdentifier The model identifier. This can be a MM1 or MM2 identifier. In every case, we attempt to fetch
+ * the model config from the server to ensure that the model identifier is valid and represents an installed model.
* @param type The type of model to fetch. This is used to fetch the key for MM1 format model identifiers.
* @param message An optional custom message to include in the error if the model identifier is invalid.
* @returns A promise that resolves to the model key.
* @throws {InvalidModelConfigError} If the model identifier is invalid.
*/
- export const getModelKey = async (modelIdentifier: unknown, type: ModelType, message?: string): Promise<string> => {
+ export const getModelKey = async (
+ modelIdentifier: unknown | ModelIdentifierField | ModelIdentifier,
+ type: ModelType,
+ message?: string
+ ): Promise<string> => {
if (isModelIdentifier(modelIdentifier)) {
- return modelIdentifier.key;
+ try {
+ // Check if the model exists by key
+ return (await fetchModelConfig(modelIdentifier.key)).key;
+ } catch {
+ // If not, fetch the model key by name and base model
+ return (await fetchModelConfigByAttrs(modelIdentifier.name, modelIdentifier.base, type)).key;
+ }
}
- if (isModelIdentifierV2(modelIdentifier)) {
+ } else if (isModelIdentifierV2(modelIdentifier)) {
+ // Try by old-format model identifier
return (await fetchModelConfigByAttrs(modelIdentifier.model_name, modelIdentifier.base_model, type)).key;
}
+ // Nope, couldn't find it
throw new InvalidModelConfigError(message || `Invalid model identifier: ${modelIdentifier}`);
};
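
As a usage illustration (an editor's sketch under assumptions, not part of the change), a caller holding either identifier format might resolve and fetch a config like this; `metadata.vae` and the error message are hypothetical:

```
// Illustrative only: `metadata.vae` may be an MM2 `{ key }` object or an old MM1
// `{ model_name, base_model }` object; getModelKey validates it against the server.
const key = await getModelKey(metadata.vae, 'vae', 'Unable to resolve VAE model');
const vaeConfig = await fetchModelConfig(key);
```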

View File

@ -4,7 +4,7 @@ import {
initialT2IAdapter,
} from 'features/controlAdapters/util/buildControlAdapter';
import { buildControlAdapterProcessor } from 'features/controlAdapters/util/buildControlAdapterProcessor';
- import { getCALayerId, getIPALayerId, INITIAL_IMAGE_LAYER_ID } from 'features/controlLayers/store/controlLayersSlice';
+ import { getCALayerId, getIPALayerId, INITIAL_IMAGE_LAYER_ID } from 'features/controlLayers/konva/naming';
import type { ControlAdapterLayer, InitialImageLayer, IPAdapterLayer, Layer } from 'features/controlLayers/store/types';
import { zLayer } from 'features/controlLayers/store/types';
import {

View File

@ -6,12 +6,10 @@ import {
ipAdaptersReset,
t2iAdaptersReset,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import { getCALayerId, getIPALayerId, getRGLayerId } from 'features/controlLayers/konva/naming';
import {
allLayersDeleted,
caLayerRecalled,
getCALayerId,
getIPALayerId,
getRGLayerId,
heightChanged,
iiLayerRecalled,
ipaLayerRecalled,

View File

@ -1,6 +1,10 @@
import { getStore } from 'app/store/nanostores/store';
import type { RootState } from 'app/store/store';
import { deepClone } from 'common/util/deepClone';
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
import { RG_LAYER_NAME } from 'features/controlLayers/konva/naming';
import { renderers } from 'features/controlLayers/konva/renderers';
import {
isControlAdapterLayer,
isInitialImageLayer,
@ -16,7 +20,6 @@ import type {
ProcessorConfig,
T2IAdapterConfigV2,
} from 'features/controlLayers/util/controlAdapters';
import { getRegionalPromptLayerBlobs } from 'features/controlLayers/util/getLayerBlobs';
import type { ImageField } from 'features/nodes/types/common';
import {
CONTROL_NET_COLLECT,
@ -31,11 +34,13 @@ import {
T2I_ADAPTER_COLLECT,
} from 'features/nodes/util/graph/constants';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import Konva from 'konva';
import { size } from 'lodash-es';
import { getImageDTO, imagesApi } from 'services/api/endpoints/images';
import type { BaseModelType, ImageDTO, Invocation } from 'services/api/types';
import { assert } from 'tsafe';
//#region addControlLayers
/**
* Adds the control layers to the graph
* @param state The app root state
@ -90,7 +95,7 @@ export const addControlLayers = async (
const validRGLayers = validLayers.filter(isRegionalGuidanceLayer);
const layerIds = validRGLayers.map((l) => l.id);
- const blobs = await getRegionalPromptLayerBlobs(layerIds);
+ const blobs = await getRGLayerBlobs(layerIds);
assert(size(blobs) === size(layerIds), 'Mismatch between layer IDs and blobs');
for (const layer of validRGLayers) {
@ -257,6 +262,7 @@ export const addControlLayers = async (
g.upsertMetadata({ control_layers: { layers: validLayers, version: state.controlLayers.present._version } });
return validLayers;
};
//#endregion
//#region Control Adapters
const addGlobalControlAdapterToGraph = (
@ -509,7 +515,7 @@ const isValidLayer = (layer: Layer, base: BaseModelType) => {
};
//#endregion
- //#region Helpers
+ //#region getMaskImage
const getMaskImage = async (layer: RegionalGuidanceLayer, blob: Blob): Promise<ImageDTO> => {
if (layer.uploadedMaskImage) {
const imageDTO = await getImageDTO(layer.uploadedMaskImage.name);
@ -529,7 +535,9 @@ const getMaskImage = async (layer: RegionalGuidanceLayer, blob: Blob): Promise<I
dispatch(rgLayerMaskImageUploaded({ layerId: layer.id, imageDTO }));
return imageDTO;
};
//#endregion
//#region buildControlImage
const buildControlImage = (
image: ImageWithDims | null,
processedImage: ImageWithDims | null,
@ -549,3 +557,61 @@ const buildControlImage = (
assert(false, 'Attempted to add unprocessed control image');
};
//#endregion
//#region getRGLayerBlobs
/**
* Get the blobs of all regional prompt layers. Only visible layers are returned.
* @param layerIds The IDs of the layers to get blobs for. If not provided, all regional prompt layers are used.
* @param preview Whether to open a new tab displaying each layer.
* @returns A map of layer IDs to blobs.
*/
const getRGLayerBlobs = async (layerIds?: string[], preview: boolean = false): Promise<Record<string, Blob>> => {
const state = getStore().getState();
const { layers } = state.controlLayers.present;
const { width, height } = state.controlLayers.present.size;
const reduxLayers = layers.filter(isRegionalGuidanceLayer);
const container = document.createElement('div');
const stage = new Konva.Stage({ container, width, height });
renderers.renderLayers(stage, reduxLayers, 1, 'brush', getImageDTO);
const konvaLayers = stage.find<Konva.Layer>(`.${RG_LAYER_NAME}`);
const blobs: Record<string, Blob> = {};
// First remove all layers
for (const layer of konvaLayers) {
layer.remove();
}
// Next render each layer to a blob
for (const layer of konvaLayers) {
if (layerIds && !layerIds.includes(layer.id())) {
continue;
}
const reduxLayer = reduxLayers.find((l) => l.id === layer.id());
assert(reduxLayer, `Redux layer ${layer.id()} not found`);
stage.add(layer);
const blob = await new Promise<Blob>((resolve) => {
stage.toBlob({
callback: (blob) => {
assert(blob, 'Blob is null');
resolve(blob);
},
});
});
if (preview) {
const base64 = await blobToDataURL(blob);
openBase64ImageInTab([
{
base64,
caption: `${reduxLayer.id}: ${reduxLayer.positivePrompt} / ${reduxLayer.negativePrompt}`,
},
]);
}
layer.remove();
blobs[layer.id()] = blob;
}
return blobs;
};
//#endregion
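
For orientation, a small sketch of how this helper is typically consumed (an editor's illustration, not part of the change; the layer IDs are hypothetical):

```
// Illustrative only: render the visible regional guidance layers to blobs.
// The layer IDs are hypothetical; omitting the argument processes all RG layers.
const blobs = await getRGLayerBlobs(['rg_layer_1', 'rg_layer_2']);
for (const [layerId, blob] of Object.entries(blobs)) {
  console.log(`${layerId}: ${blob.size} bytes`);
}
```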

View File

@ -1,8 +1,17 @@
import { Flex } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { StageComponent } from 'features/controlLayers/components/StageComponent';
import { $isPreviewVisible } from 'features/controlLayers/store/controlLayersSlice';
import { AspectRatioIconPreview } from 'features/parameters/components/ImageSize/AspectRatioIconPreview';
import { memo } from 'react';
export const AspectRatioCanvasPreview = memo(() => {
const isPreviewVisible = useStore($isPreviewVisible);
if (!isPreviewVisible) {
return <AspectRatioIconPreview />;
}
return (
<Flex w="full" h="full" alignItems="center" justifyContent="center" position="relative">
<StageComponent asPreview />

View File

@ -3,15 +3,12 @@ import { aspectRatioChanged, heightChanged, widthChanged } from 'features/contro
import { ParamHeight } from 'features/parameters/components/Core/ParamHeight';
import { ParamWidth } from 'features/parameters/components/Core/ParamWidth';
import { AspectRatioCanvasPreview } from 'features/parameters/components/ImageSize/AspectRatioCanvasPreview';
import { AspectRatioIconPreview } from 'features/parameters/components/ImageSize/AspectRatioIconPreview';
import { ImageSize } from 'features/parameters/components/ImageSize/ImageSize';
import type { AspectRatioState } from 'features/parameters/components/ImageSize/types';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { memo, useCallback } from 'react';
export const ImageSizeLinear = memo(() => {
const dispatch = useAppDispatch();
const tab = useAppSelector(activeTabNameSelector);
const width = useAppSelector((s) => s.controlLayers.present.size.width);
const height = useAppSelector((s) => s.controlLayers.present.size.height);
const aspectRatioState = useAppSelector((s) => s.controlLayers.present.size.aspectRatio);
@ -50,7 +47,7 @@ export const ImageSizeLinear = memo(() => {
aspectRatioState={aspectRatioState}
heightComponent={<ParamHeight />}
widthComponent={<ParamWidth />}
- previewComponent={tab === 'generation' ? <AspectRatioCanvasPreview /> : <AspectRatioIconPreview />}
+ previewComponent={<AspectRatioCanvasPreview />}
onChangeAspectRatioState={onChangeAspectRatioState}
onChangeWidth={onChangeWidth}
onChangeHeight={onChangeHeight}

View File

@ -3,6 +3,7 @@ import { Box, Flex, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/u
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants';
import { ControlLayersPanelContent } from 'features/controlLayers/components/ControlLayersPanelContent';
import { $isPreviewVisible } from 'features/controlLayers/store/controlLayersSlice';
import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
import { Prompts } from 'features/parameters/components/Prompts/Prompts';
import QueueControls from 'features/queue/components/QueueControls';
@ -53,6 +54,7 @@ const ParametersPanelTextToImage = () => {
if (i === 1) {
dispatch(isImageViewerOpenChanged(false));
}
$isPreviewVisible.set(i === 0);
},
[dispatch]
);
@ -66,6 +68,7 @@ const ParametersPanelTextToImage = () => {
<Flex gap={2} flexDirection="column" h="full" w="full">
{isSDXL ? <SDXLPrompts /> : <Prompts />}
<Tabs
defaultIndex={0}
variant="enclosed"
display="flex"
flexDir="column"

View File

@ -1 +1 @@
- __version__ = "4.2.3"
+ __version__ = "4.2.4"