diff --git a/docs/features/LOGGING.md b/docs/features/LOGGING.md
new file mode 100644
index 0000000000..bda968140b
--- /dev/null
+++ b/docs/features/LOGGING.md
@@ -0,0 +1,171 @@
+---
+title: Controlling Logging
+---
+
+# :material-image-off: Controlling Logging
+
+## Controlling How InvokeAI Logs Status Messages
+
+InvokeAI logs status messages using a configurable logging system. You
+can log to the terminal window, to a designated file on the local
+machine, to the syslog facility on Linux or macOS, or to a properly
+configured web server. You can configure several logs at the same
+time, and control the level of messages logged and the logging format
+(to a limited extent).
+
+Three command-line options control logging:
+
+### `--log_handlers <handler1> <handler2> ...`
+
+This option activates one or more log handlers. Options are "console",
+"file", "syslog" and "http". To specify more than one, separate them
+by spaces:
+
+```bash
+invokeai-web --log_handlers console syslog=/dev/log file=C:\Users\fred\invokeai.log
+```
+
+The format of these options is described below.
+
+### `--log_format {plain|color|legacy|syslog}`
+
+This controls the format of log messages written to the console. Only
+the "console" log handler is currently affected by this setting.
+
+* "plain" provides formatted messages like this:
+
+```bash
+[2023-05-24 23:18:50,352]::[InvokeAI]::DEBUG --> this is a debug message
+[2023-05-24 23:18:50,352]::[InvokeAI]::INFO --> this is an informational message
+[2023-05-24 23:18:50,352]::[InvokeAI]::WARNING --> this is a warning
+[2023-05-24 23:18:50,352]::[InvokeAI]::ERROR --> this is an error
+[2023-05-24 23:18:50,352]::[InvokeAI]::CRITICAL --> this is a critical error
+```
+
+* "color" produces similar output, but the text will be color coded to
+indicate the severity of the message.
+
+* "legacy" produces output similar to InvokeAI versions 2.3 and earlier:
+
+```bash
+### this is a critical error
+*** this is an error
+** this is a warning
+>> this is an informational message
+ | this is a debug message
+```
+
+* "syslog" produces messages suitable for syslog entries:
+
+```bash
+InvokeAI [2691178] this is a critical error
+InvokeAI [2691178] this is an error
+InvokeAI [2691178] this is a warning
+InvokeAI [2691178] this is an informational message
+InvokeAI [2691178] this is a debug message
+```
+
+(Note that the date, time and hostname will be added by the syslog
+system.)
+
+### `--log_level {debug|info|warning|error|critical}`
+
+Providing this command-line option will cause only messages at the
+specified level or above to be emitted.
+
+## Console logging
+
+When "console" is provided to `--log_handlers`, messages will be
+written to the command line window in which InvokeAI was launched. By
+default, the color formatter will be used unless overridden by
+`--log_format`.
+
+## File logging
+
+When "file" is provided to `--log_handlers`, entries will be written
+to the file indicated in the path argument. By default, the "plain"
+format will be used:
+
+```bash
+invokeai-web --log_handlers file=/var/log/invokeai.log
+```
+
+## Syslog logging
+
+When "syslog" is requested, entries will be sent to the syslog
+system. There are a variety of ways to control where the log message
+is sent:
+
+* Send to the local machine using the `/dev/log` socket:
+
+```
+invokeai-web --log_handlers syslog=/dev/log
+```
+
+* Send to the local machine using a UDP message:
+
+```
+invokeai-web --log_handlers syslog=localhost
+```
+
+* Send to the local machine using a UDP message on a nonstandard
+  port:
+
+```
+invokeai-web --log_handlers syslog=localhost:512
+```
+
+* Send to a remote machine named "loghost" on the local LAN using
+  facility LOG_USER and UDP packets:
+
+```
+invokeai-web --log_handlers syslog=loghost,facility=LOG_USER,socktype=SOCK_DGRAM
+```
+
+This can be abbreviated `syslog=loghost`, as LOG_USER and SOCK_DGRAM
+are the defaults.
+
+* Send to a remote machine named "loghost" using the facility LOCAL0
+  and a TCP socket:
+
+```
+invokeai-web --log_handlers syslog=loghost,facility=LOG_LOCAL0,socktype=SOCK_STREAM
+```
+
+If no arguments are specified (just a bare "syslog"), then the logging
+system will look for a UNIX socket named `/dev/log`, and if not found
+try to send a UDP message to `localhost`. macOS used to support
+logging to a socket named `/var/run/syslog`, but this feature has
+since been disabled.
+
+## Web logging
+
+If you have access to a web server that is configured to log messages
+when a particular URL is requested, you can log using the "http"
+method:
+
+```
+invokeai-web --log_handlers http=http://my.server/path/to/logger,method=POST
+```
+
+The optional `,method=` part can be used to specify whether the URL
+accepts GET (default) or POST messages.
+
+Password authentication and SSL are not currently supported.
+
+## Using the configuration file
+
+You can set and forget logging options by adding a "Logging" section
+to `invokeai.yaml`:
+
+```
+InvokeAI:
+  [... other settings...]
+  Logging:
+    log_handlers:
+    - console
+    - syslog=/dev/log
+    log_level: info
+    log_format: color
+```
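For reference, the handlers, format and level described above apply to any message emitted through the logging module added later in this diff. Below is a minimal usage sketch, assuming this PR's `invokeai.backend.util.logging` module and its module-level helpers; it is not a stable public API.

```python
# Sketch only -- assumes the invokeai.backend.util.logging module introduced in this PR.
import invokeai.backend.util.logging as logger

# Module-level helpers route messages through whatever handlers, format and
# level were configured via --log_handlers / --log_format / --log_level
# or the Logging section of invokeai.yaml.
logger.info("loading model weights")
logger.debug("emitted only when the configured log_level is debug")

# A named logger can also be requested; InvokeAILogger.getLogger() attaches
# the configured handlers on first use.
model_log = logger.getLogger("ModelManager")
model_log.warning("model not found, falling back to the default")
```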
diff --git a/docs/features/index.md b/docs/features/index.md
index d9b0e1fd7c..53d380f3fb 100644
--- a/docs/features/index.md
+++ b/docs/features/index.md
@@ -57,6 +57,9 @@ Personalize models by adding your own style or subjects.
 ## * [The NSFW Checker](NSFW.md)
 Prevent InvokeAI from displaying unwanted racy images.
 
+## * [Controlling Logging](LOGGING.md)
+Control how InvokeAI logs status messages.
+ ## * [Miscellaneous](OTHER.md) Run InvokeAI on Google Colab, generate images with repeating patterns, batch process a file of prompts, increase the "creativity" of image diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 076ce81021..58dc661baf 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -3,6 +3,7 @@ from pydantic import BaseModel, Field from invokeai.app.invocations.util.choose_model import choose_model from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig +from ...backend.prompting.conditioning import try_parse_legacy_blend from ...backend.util.devices import choose_torch_device, torch_dtype from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent @@ -13,7 +14,7 @@ from compel.prompt_parser import ( Blend, CrossAttentionControlSubstitute, FlattenedPrompt, - Fragment, + Fragment, Conjunction, ) @@ -93,25 +94,22 @@ class CompelInvocation(BaseInvocation): text_encoder=text_encoder, textual_inversion_manager=pipeline.textual_inversion_manager, dtype_for_device_getter=torch_dtype, - truncate_long_prompts=True, # TODO: + truncate_long_prompts=False, ) - # TODO: support legacy blend? - - conjunction = Compel.parse_prompt_string(prompt_str) - prompt: Union[FlattenedPrompt, Blend] = conjunction.prompts[0] + legacy_blend = try_parse_legacy_blend(prompt_str, skip_normalize=False) + if legacy_blend is not None: + conjunction = legacy_blend + else: + conjunction = Compel.parse_prompt_string(prompt_str) if context.services.configuration.log_tokenization: - log_tokenization_for_prompt_object(prompt, tokenizer) + log_tokenization_for_conjunction(conjunction, tokenizer) - c, options = compel.build_conditioning_tensor_for_prompt_object(prompt) - - # TODO: long prompt support - #if not self.truncate_long_prompts: - # [c, uc] = compel.pad_conditioning_tensors_to_same_length([c, uc]) + c, options = compel.build_conditioning_tensor_for_conjunction(conjunction) ec = InvokeAIDiffuserComponent.ExtraConditioningInfo( - tokens_count_including_eos_bos=get_max_token_count(tokenizer, prompt), + tokens_count_including_eos_bos=get_max_token_count(tokenizer, conjunction), cross_attention_control_args=options.get("cross_attention_control", None), ) @@ -128,14 +126,22 @@ class CompelInvocation(BaseInvocation): def get_max_token_count( - tokenizer, prompt: Union[FlattenedPrompt, Blend], truncate_if_too_long=False + tokenizer, prompt: Union[FlattenedPrompt, Blend, Conjunction], truncate_if_too_long=False ) -> int: if type(prompt) is Blend: blend: Blend = prompt return max( [ - get_max_token_count(tokenizer, c, truncate_if_too_long) - for c in blend.prompts + get_max_token_count(tokenizer, p, truncate_if_too_long) + for p in blend.prompts + ] + ) + elif type(prompt) is Conjunction: + conjunction: Conjunction = prompt + return sum( + [ + get_max_token_count(tokenizer, p, truncate_if_too_long) + for p in conjunction.prompts ] ) else: @@ -170,6 +176,22 @@ def get_tokens_for_prompt_object( return tokens +def log_tokenization_for_conjunction( + c: Conjunction, tokenizer, display_label_prefix=None +): + display_label_prefix = display_label_prefix or "" + for i, p in enumerate(c.prompts): + if len(c.prompts)>1: + this_display_label_prefix = f"{display_label_prefix}(conjunction part {i + 1}, weight={c.weights[i]})" + else: + this_display_label_prefix = display_label_prefix + log_tokenization_for_prompt_object( + p, + tokenizer, + display_label_prefix=this_display_label_prefix + ) 
+ + def log_tokenization_for_prompt_object( p: Union[Blend, FlattenedPrompt], tokenizer, display_label_prefix=None ): diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 7d5160a491..15aecde851 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -94,13 +94,13 @@ CONTROLNET_DEFAULT_MODELS = [ CONTROLNET_NAME_VALUES = Literal[tuple(CONTROLNET_DEFAULT_MODELS)] class ControlField(BaseModel): - image: ImageField = Field(default=None, description="processed image") - control_model: Optional[str] = Field(default=None, description="control model used") - control_weight: Optional[float] = Field(default=1, description="weight given to controlnet") + image: ImageField = Field(default=None, description="The control image") + control_model: Optional[str] = Field(default=None, description="The ControlNet model to use") + control_weight: Optional[float] = Field(default=1, description="The weight given to the ControlNet") begin_step_percent: float = Field(default=0, ge=0, le=1, - description="% of total steps at which controlnet is first applied") + description="When the ControlNet is first applied (% of total steps)") end_step_percent: float = Field(default=1, ge=0, le=1, - description="% of total steps at which controlnet is last applied") + description="When the ControlNet is last applied (% of total steps)") class Config: schema_extra = { @@ -112,7 +112,7 @@ class ControlOutput(BaseInvocationOutput): """node output for ControlNet info""" # fmt: off type: Literal["control_output"] = "control_output" - control: ControlField = Field(default=None, description="The control info dict") + control: ControlField = Field(default=None, description="The output control image") # fmt: on @@ -121,15 +121,15 @@ class ControlNetInvocation(BaseInvocation): # fmt: off type: Literal["controlnet"] = "controlnet" # Inputs - image: ImageField = Field(default=None, description="image to process") + image: ImageField = Field(default=None, description="The control image") control_model: CONTROLNET_NAME_VALUES = Field(default="lllyasviel/sd-controlnet-canny", - description="control model used") - control_weight: float = Field(default=1.0, ge=0, le=1, description="weight given to controlnet") + description="The ControlNet model to use") + control_weight: float = Field(default=1.0, ge=0, le=1, description="The weight given to the ControlNet") # TODO: add support in backend core for begin_step_percent, end_step_percent, guess_mode begin_step_percent: float = Field(default=0, ge=0, le=1, - description="% of total steps at which controlnet is first applied") + description="When the ControlNet is first applied (% of total steps)") end_step_percent: float = Field(default=1, ge=0, le=1, - description="% of total steps at which controlnet is last applied") + description="When the ControlNet is last applied (% of total steps)") # fmt: on @@ -152,7 +152,7 @@ class ImageProcessorInvocation(BaseInvocation, PILInvocationConfig): # fmt: off type: Literal["image_processor"] = "image_processor" # Inputs - image: ImageField = Field(default=None, description="image to process") + image: ImageField = Field(default=None, description="The image to process") # fmt: on @@ -204,8 +204,8 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfi # fmt: off type: Literal["canny_image_processor"] = "canny_image_processor" # Input - low_threshold: float = 
Field(default=100, ge=0, description="low threshold of Canny pixel gradient") - high_threshold: float = Field(default=200, ge=0, description="high threshold of Canny pixel gradient") + low_threshold: int = Field(default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)") + high_threshold: int = Field(default=200, ge=0, le=255, description="The high threshold of the Canny pixel gradient (0-255)") # fmt: on def run_processor(self, image): @@ -214,16 +214,16 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfi return processed_image -class HedImageprocessorInvocation(ImageProcessorInvocation, PILInvocationConfig): +class HedImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): """Applies HED edge detection to image""" # fmt: off type: Literal["hed_image_processor"] = "hed_image_processor" # Inputs - detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection") - image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") # safe not supported in controlnet_aux v0.0.3 # safe: bool = Field(default=False, description="whether to use safe mode") - scribble: bool = Field(default=False, description="whether to use scribble mode") + scribble: bool = Field(default=False, description="Whether to use scribble mode") # fmt: on def run_processor(self, image): @@ -243,9 +243,9 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation, PILInvocationCon # fmt: off type: Literal["lineart_image_processor"] = "lineart_image_processor" # Inputs - detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection") - image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image") - coarse: bool = Field(default=False, description="whether to use coarse mode") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") + coarse: bool = Field(default=False, description="Whether to use coarse mode") # fmt: on def run_processor(self, image): @@ -262,8 +262,8 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation, PILInvocati # fmt: off type: Literal["lineart_anime_image_processor"] = "lineart_anime_image_processor" # Inputs - detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection") - image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") # fmt: on def run_processor(self, image): @@ -280,9 +280,9 @@ class OpenposeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationCo # fmt: off type: Literal["openpose_image_processor"] = "openpose_image_processor" # Inputs - hand_and_face: bool = Field(default=False, description="whether to use hands and face mode") - detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection") - image_resolution: int = Field(default=512, ge=0, 
description="pixel resolution for output image") + hand_and_face: bool = Field(default=False, description="Whether to use hands and face mode") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") # fmt: on def run_processor(self, image): @@ -300,8 +300,8 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation, PILInvocation # fmt: off type: Literal["midas_depth_image_processor"] = "midas_depth_image_processor" # Inputs - a_mult: float = Field(default=2.0, ge=0, description="Midas parameter a = amult * PI") - bg_th: float = Field(default=0.1, ge=0, description="Midas parameter bg_th") + a_mult: float = Field(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)") + bg_th: float = Field(default=0.1, ge=0, description="Midas parameter `bg_th`") # depth_and_normal not supported in controlnet_aux v0.0.3 # depth_and_normal: bool = Field(default=False, description="whether to use depth and normal mode") # fmt: on @@ -322,8 +322,8 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationC # fmt: off type: Literal["normalbae_image_processor"] = "normalbae_image_processor" # Inputs - detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection") - image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") # fmt: on def run_processor(self, image): @@ -339,10 +339,10 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig # fmt: off type: Literal["mlsd_image_processor"] = "mlsd_image_processor" # Inputs - detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection") - image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image") - thr_v: float = Field(default=0.1, ge=0, description="MLSD parameter thr_v") - thr_d: float = Field(default=0.1, ge=0, description="MLSD parameter thr_d") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") + thr_v: float = Field(default=0.1, ge=0, description="MLSD parameter `thr_v`") + thr_d: float = Field(default=0.1, ge=0, description="MLSD parameter `thr_d`") # fmt: on def run_processor(self, image): @@ -360,10 +360,10 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig # fmt: off type: Literal["pidi_image_processor"] = "pidi_image_processor" # Inputs - detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection") - image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image") - safe: bool = Field(default=False, description="whether to use safe mode") - scribble: bool = Field(default=False, description="whether to use scribble mode") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") + safe: bool = Field(default=False, description="Whether to use safe mode") + 
scribble: bool = Field(default=False, description="Whether to use scribble mode") # fmt: on def run_processor(self, image): @@ -381,11 +381,11 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation, PILInvoca # fmt: off type: Literal["content_shuffle_image_processor"] = "content_shuffle_image_processor" # Inputs - detect_resolution: int = Field(default=512, ge=0, description="pixel resolution for edge detection") - image_resolution: int = Field(default=512, ge=0, description="pixel resolution for output image") - h: Union[int | None] = Field(default=512, ge=0, description="content shuffle h parameter") - w: Union[int | None] = Field(default=512, ge=0, description="content shuffle w parameter") - f: Union[int | None] = Field(default=256, ge=0, description="cont") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") + h: Union[int, None] = Field(default=512, ge=0, description="Content shuffle `h` parameter") + w: Union[int, None] = Field(default=512, ge=0, description="Content shuffle `w` parameter") + f: Union[int, None] = Field(default=256, ge=0, description="Content shuffle `f` parameter") # fmt: on def run_processor(self, image): @@ -418,8 +418,8 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation, PILInvocationCo # fmt: off type: Literal["mediapipe_face_processor"] = "mediapipe_face_processor" # Inputs - max_faces: int = Field(default=1, ge=1, description="maximum number of faces to detect") - min_confidence: float = Field(default=0.5, ge=0, le=1, description="minimum confidence for face detection") + max_faces: int = Field(default=1, ge=1, description="Maximum number of faces to detect") + min_confidence: float = Field(default=0.5, ge=0, le=1, description="Minimum confidence for face detection") # fmt: on def run_processor(self, image): diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 4dc1f6456c..ba65e214c3 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -4,6 +4,7 @@ import random import einops from typing import Literal, Optional, Union, List +from compel import Compel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from pydantic import BaseModel, Field, validator @@ -233,6 +234,15 @@ class TextToLatentsInvocation(BaseInvocation): c, extra_conditioning_info = context.services.latents.get(self.positive_conditioning.conditioning_name) uc, _ = context.services.latents.get(self.negative_conditioning.conditioning_name) + compel = Compel( + tokenizer=model.tokenizer, + text_encoder=model.text_encoder, + textual_inversion_manager=model.textual_inversion_manager, + dtype_for_device_getter=torch_dtype, + truncate_long_prompts=False, + ) + [c, uc] = compel.pad_conditioning_tensors_to_same_length([c, uc]) + conditioning_data = ConditioningData( uc, c, diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py index 208f6d9949..0dabde1c8c 100644 --- a/invokeai/app/services/config.py +++ b/invokeai/app/services/config.py @@ -165,14 +165,13 @@ two configs are kept in separate sections of the config file: from __future__ import annotations import argparse import pydoc -import typing import os import sys from argparse import ArgumentParser from omegaconf import OmegaConf, DictConfig from pathlib import Path from pydantic import BaseSettings, Field, 
parse_obj_as -from typing import Any, ClassVar, Dict, List, Literal, Type, Union, get_origin, get_type_hints, get_args +from typing import ClassVar, Dict, List, Literal, Type, Union, get_origin, get_type_hints, get_args INIT_FILE = Path('invokeai.yaml') LEGACY_INIT_FILE = Path('invokeai.init') @@ -187,7 +186,7 @@ class InvokeAISettings(BaseSettings): def parse_args(self, argv: list=sys.argv[1:]): parser = self.get_parser() - opt, _ = parser.parse_known_args(argv) + opt = parser.parse_args(argv) for name in self.__fields__: if name not in self._excluded(): setattr(self, name, getattr(opt,name)) @@ -389,6 +388,11 @@ setting environment variables INVOKEAI_. model : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models') embeddings : bool = Field(default=True, description='Load contents of embeddings directory', category='Models') + + log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=", "syslog=path|address:host:port", "http="', category="Logging") + # note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues + log_format : Literal[tuple(['plain','color','syslog','legacy'])] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging") + log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="debug", description="Emit logging messages at this level or higher", category="Logging") #fmt: on def parse_args(self, argv: List[str]=None, conf: DictConfig = None, clobber=False): diff --git a/invokeai/backend/config/invokeai_configure.py b/invokeai/backend/config/invokeai_configure.py index 4c0b0e3641..c4d43e4758 100755 --- a/invokeai/backend/config/invokeai_configure.py +++ b/invokeai/backend/config/invokeai_configure.py @@ -35,15 +35,19 @@ from transformers import ( CLIPTextModel, CLIPTokenizer, ) - import invokeai.configs as configs +from invokeai.app.services.config import ( + get_invokeai_config, + InvokeAIAppConfig, +) from invokeai.frontend.install.model_install import addModelsForm, process_and_execute from invokeai.frontend.install.widgets import ( CenteredButtonPress, IntTitleSlider, set_min_terminal_size, ) + from invokeai.backend.config.legacy_arg_parsing import legacy_parser from invokeai.backend.config.model_install_backend import ( default_dataset, @@ -51,6 +55,7 @@ from invokeai.backend.config.model_install_backend import ( hf_download_with_resume, recommended_datasets, ) + from invokeai.app.services.config import InvokeAIAppConfig warnings.filterwarnings("ignore") @@ -59,6 +64,7 @@ transformers.logging.set_verbosity_error() # --------------------------globals----------------------- + config = InvokeAIAppConfig.get_config() Model_dir = "models" @@ -817,6 +823,7 @@ def main(): if old_init_file.exists() and not new_init_file.exists(): print('** Migrating invokeai.init to invokeai.yaml') migrate_init_file(old_init_file) + # Load new init file into config config.parse_args(argv=[],conf=OmegaConf.load(new_init_file)) diff --git a/invokeai/backend/config/model_install_backend.py b/invokeai/backend/config/model_install_backend.py index 96468dee4b..25fe10a57b 100644 --- a/invokeai/backend/config/model_install_backend.py +++ b/invokeai/backend/config/model_install_backend.py @@ -28,6 +28,7 @@ warnings.filterwarnings("ignore") # 
--------------------------globals----------------------- config = InvokeAIAppConfig.get_config() + Model_dir = "models" Weights_dir = "ldm/stable-diffusion-v1/" diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py index 7a26be9800..2a2bdd9e83 100644 --- a/invokeai/backend/prompting/conditioning.py +++ b/invokeai/backend/prompting/conditioning.py @@ -39,8 +39,8 @@ def get_uc_and_c_and_ec(prompt_string, textual_inversion_manager=model.textual_inversion_manager, dtype_for_device_getter=torch_dtype, truncate_long_prompts=False, - ) - + ) + # get rid of any newline characters prompt_string = prompt_string.replace("\n", " ") positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string) @@ -282,6 +282,8 @@ def split_weighted_subprompts(text, skip_normalize=False) -> list: (match.group("prompt").replace("\\:", ":"), float(match.group("weight") or 1)) for match in re.finditer(prompt_parser, text) ] + if len(parsed_prompts) == 0: + return [] if skip_normalize: return parsed_prompts weight_sum = sum(map(lambda x: x[1], parsed_prompts)) diff --git a/invokeai/backend/util/__init__.py b/invokeai/backend/util/__init__.py index ca42f86fd6..84720b1854 100644 --- a/invokeai/backend/util/__init__.py +++ b/invokeai/backend/util/__init__.py @@ -17,3 +17,5 @@ from .util import ( instantiate_from_config, url_attachment_name, ) + + diff --git a/invokeai/backend/util/logging.py b/invokeai/backend/util/logging.py index 9d1262d5c6..16efd56c03 100644 --- a/invokeai/backend/util/logging.py +++ b/invokeai/backend/util/logging.py @@ -31,7 +31,20 @@ IAILogger.debug('this is a debugging message') """ import logging +import logging.handlers +import socket +import urllib.parse +from abc import abstractmethod +from pathlib import Path + +from invokeai.app.services.config import InvokeAIAppConfig, get_invokeai_config + +try: + import syslog + SYSLOG_AVAILABLE = True +except: + SYSLOG_AVAILABLE = False # module level functions def debug(msg, *args, **kwargs): @@ -62,11 +75,77 @@ def getLogger(name: str = None) -> logging.Logger: return InvokeAILogger.getLogger(name) -class InvokeAILogFormatter(logging.Formatter): +_FACILITY_MAP = dict( + LOG_KERN = syslog.LOG_KERN, + LOG_USER = syslog.LOG_USER, + LOG_MAIL = syslog.LOG_MAIL, + LOG_DAEMON = syslog.LOG_DAEMON, + LOG_AUTH = syslog.LOG_AUTH, + LOG_LPR = syslog.LOG_LPR, + LOG_NEWS = syslog.LOG_NEWS, + LOG_UUCP = syslog.LOG_UUCP, + LOG_CRON = syslog.LOG_CRON, + LOG_SYSLOG = syslog.LOG_SYSLOG, + LOG_LOCAL0 = syslog.LOG_LOCAL0, + LOG_LOCAL1 = syslog.LOG_LOCAL1, + LOG_LOCAL2 = syslog.LOG_LOCAL2, + LOG_LOCAL3 = syslog.LOG_LOCAL3, + LOG_LOCAL4 = syslog.LOG_LOCAL4, + LOG_LOCAL5 = syslog.LOG_LOCAL5, + LOG_LOCAL6 = syslog.LOG_LOCAL6, + LOG_LOCAL7 = syslog.LOG_LOCAL7, +) if SYSLOG_AVAILABLE else dict() + +_SOCK_MAP = dict( + SOCK_STREAM = socket.SOCK_STREAM, + SOCK_DGRAM = socket.SOCK_DGRAM, +) + +class InvokeAIFormatter(logging.Formatter): + ''' + Base class for logging formatter + + ''' + def format(self, record): + formatter = logging.Formatter(self.log_fmt(record.levelno)) + return formatter.format(record) + + @abstractmethod + def log_fmt(self, levelno: int)->str: + pass + +class InvokeAISyslogFormatter(InvokeAIFormatter): + ''' + Formatting for syslog + ''' + def log_fmt(self, levelno: int)->str: + return '%(name)s [%(process)d] <%(levelname)s> %(message)s' + +class InvokeAILegacyLogFormatter(InvokeAIFormatter): + ''' + Formatting for the InvokeAI Logger (legacy version) + ''' + FORMATS = { + 
logging.DEBUG: " | %(message)s", + logging.INFO: ">> %(message)s", + logging.WARNING: "** %(message)s", + logging.ERROR: "*** %(message)s", + logging.CRITICAL: "### %(message)s", + } + def log_fmt(self,levelno:int)->str: + return self.FORMATS.get(levelno) + +class InvokeAIPlainLogFormatter(InvokeAIFormatter): + ''' + Custom Formatting for the InvokeAI Logger (plain version) + ''' + def log_fmt(self, levelno: int)->str: + return "[%(asctime)s]::[%(name)s]::%(levelname)s --> %(message)s" + +class InvokeAIColorLogFormatter(InvokeAIFormatter): ''' Custom Formatting for the InvokeAI Logger ''' - # Color Codes grey = "\x1b[38;20m" yellow = "\x1b[33;20m" @@ -88,23 +167,109 @@ class InvokeAILogFormatter(logging.Formatter): logging.CRITICAL: bold_red + log_format + reset } - def format(self, record): - log_fmt = self.FORMATS.get(record.levelno) - formatter = logging.Formatter(log_fmt, datefmt="%d-%m-%Y %H:%M:%S") - return formatter.format(record) + def log_fmt(self, levelno: int)->str: + return self.FORMATS.get(levelno) +LOG_FORMATTERS = { + 'plain': InvokeAIPlainLogFormatter, + 'color': InvokeAIColorLogFormatter, + 'syslog': InvokeAISyslogFormatter, + 'legacy': InvokeAILegacyLogFormatter, +} class InvokeAILogger(object): loggers = dict() @classmethod def getLogger(cls, name: str = 'InvokeAI') -> logging.Logger: + config = get_invokeai_config() + if name not in cls.loggers: logger = logging.getLogger(name) - logger.setLevel(logging.DEBUG) - ch = logging.StreamHandler() - fmt = InvokeAILogFormatter() - ch.setFormatter(fmt) - logger.addHandler(ch) + logger.setLevel(config.log_level.upper()) # yes, strings work here + for ch in cls.getLoggers(config): + logger.addHandler(ch) cls.loggers[name] = logger return cls.loggers[name] + + @classmethod + def getLoggers(cls, config: InvokeAIAppConfig) -> list[logging.Handler]: + handler_strs = config.log_handlers + print(f'handler_strs={handler_strs}') + handlers = list() + for handler in handler_strs: + handler_name,*args = handler.split('=',2) + args = args[0] if len(args) > 0 else None + + # console is the only handler that gets a custom formatter + if handler_name=='console': + formatter = LOG_FORMATTERS[config.log_format] + ch = logging.StreamHandler() + ch.setFormatter(formatter()) + handlers.append(ch) + + elif handler_name=='syslog': + ch = cls._parse_syslog_args(args) + ch.setFormatter(InvokeAISyslogFormatter()) + handlers.append(ch) + + elif handler_name=='file': + handlers.append(cls._parse_file_args(args)) + + elif handler_name=='http': + handlers.append(cls._parse_http_args(args)) + return handlers + + @staticmethod + def _parse_syslog_args( + args: str=None + )-> logging.Handler: + if not SYSLOG_AVAILABLE: + raise ValueError("syslog is not available on this system") + if not args: + args='/dev/log' if Path('/dev/log').exists() else 'address:localhost:514' + syslog_args = dict() + try: + for a in args.split(','): + arg_name,*arg_value = a.split(':',2) + if arg_name=='address': + host,*port = arg_value + port = 514 if len(port)==0 else int(port[0]) + syslog_args['address'] = (host,port) + elif arg_name=='facility': + syslog_args['facility'] = _FACILITY_MAP[arg_value[0]] + elif arg_name=='socktype': + syslog_args['socktype'] = _SOCK_MAP[arg_value[0]] + else: + syslog_args['address'] = arg_name + except: + raise ValueError(f"{args} is not a value argument list for syslog logging") + return logging.handlers.SysLogHandler(**syslog_args) + + @staticmethod + def _parse_file_args(args: str=None)-> logging.Handler: + if not args: + raise ValueError("please 
provide filename for file logging using format 'file=/path/to/logfile.txt'") + return logging.FileHandler(args) + + @staticmethod + def _parse_http_args(args: str=None)-> logging.Handler: + if not args: + raise ValueError("please provide destination for http logging using format 'http=url'") + arg_list = args.split(',') + url = urllib.parse.urlparse(arg_list.pop(0)) + if url.scheme != 'http': + raise ValueError(f"the http logging module can only log to HTTP URLs, but {url.scheme} was specified") + host = url.hostname + path = url.path + port = url.port or 80 + + syslog_args = dict() + for a in arg_list: + arg_name, *arg_value = a.split(':',2) + if arg_name=='method': + arg_value = arg_value[0] if len(arg_value)>0 else 'GET' + syslog_args[arg_name] = arg_value + else: # TODO: Provide support for SSL context and credentials + pass + return logging.handlers.HTTPHandler(f'{host}:{port}',path,**syslog_args) diff --git a/invokeai/frontend/web/docs/API_CLIENT.md b/invokeai/frontend/web/docs/API_CLIENT.md index 51f3a6510c..5072aa2c42 100644 --- a/invokeai/frontend/web/docs/API_CLIENT.md +++ b/invokeai/frontend/web/docs/API_CLIENT.md @@ -26,10 +26,10 @@ We need to start the nodes web server, which serves the OpenAPI schema to the ge ```bash # from the repo root -python scripts/invoke-new.py --web +python scripts/invokeai-web.py ``` -2. Generate the API client. +2. Generate the API client. ```bash # from invokeai/frontend/web/ diff --git a/invokeai/frontend/web/docs/README.md b/invokeai/frontend/web/docs/README.md index 323dcc5bc7..e8b150e71e 100644 --- a/invokeai/frontend/web/docs/README.md +++ b/invokeai/frontend/web/docs/README.md @@ -12,7 +12,14 @@ Code in `invokeai/frontend/web/` if you want to have a look. ## Stack -State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). Communication with server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a custom redux middleware to help). +State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK: +- `createAsyncThunk` for HTTP requests +- `createEntityAdapter` for fetching images and models +- `createListenerMiddleware` for workflows + +The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md. + +Communication with server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help). [Chakra-UI](https://github.com/chakra-ui/chakra-ui) for components and styling. @@ -37,9 +44,15 @@ From `invokeai/frontend/web/` run `yarn install` to get everything set up. Start everything in dev mode: 1. Start the dev server: `yarn dev` -2. Start the InvokeAI Nodes backend: `python scripts/invokeai-new.py --web # run from the repo root` +2. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root` 3. Point your browser to the dev server address e.g. +#### VSCode Remote Dev + +We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. Suggest disabling the IDE's port forwarding feature and doing it manually via SSH: + +`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host` + ### Production builds For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo. 
diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index dd1c87effb..64b9a828cd 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -60,6 +60,8 @@ "@chakra-ui/styled-system": "^2.9.0", "@chakra-ui/theme-tools": "^2.0.16", "@dagrejs/graphlib": "^2.1.12", + "@dnd-kit/core": "^6.0.8", + "@dnd-kit/modifiers": "^6.0.1", "@emotion/react": "^11.10.6", "@emotion/styled": "^11.10.6", "@floating-ui/react-dom": "^2.0.0", @@ -87,7 +89,7 @@ "react-dropzone": "^14.2.3", "react-hotkeys-hook": "4.4.0", "react-i18next": "^12.2.2", - "react-icons": "^4.7.1", + "react-icons": "^4.9.0", "react-konva": "^18.2.7", "react-redux": "^8.0.5", "react-resizable-panels": "^0.0.42", diff --git a/invokeai/frontend/web/src/app/components/ImageDnd/ImageDndContext.tsx b/invokeai/frontend/web/src/app/components/ImageDnd/ImageDndContext.tsx new file mode 100644 index 0000000000..72487f329c --- /dev/null +++ b/invokeai/frontend/web/src/app/components/ImageDnd/ImageDndContext.tsx @@ -0,0 +1,68 @@ +import { + DndContext, + DragEndEvent, + DragOverlay, + DragStartEvent, + KeyboardSensor, + MouseSensor, + TouchSensor, + pointerWithin, + useSensor, + useSensors, +} from '@dnd-kit/core'; +import { PropsWithChildren, memo, useCallback, useState } from 'react'; +import OverlayDragImage from './OverlayDragImage'; +import { ImageDTO } from 'services/api'; +import { isImageDTO } from 'services/types/guards'; +import { snapCenterToCursor } from '@dnd-kit/modifiers'; + +type ImageDndContextProps = PropsWithChildren; + +const ImageDndContext = (props: ImageDndContextProps) => { + const [draggedImage, setDraggedImage] = useState(null); + + const handleDragStart = useCallback((event: DragStartEvent) => { + const dragData = event.active.data.current; + if (dragData && 'image' in dragData && isImageDTO(dragData.image)) { + setDraggedImage(dragData.image); + } + }, []); + + const handleDragEnd = useCallback( + (event: DragEndEvent) => { + const handleDrop = event.over?.data.current?.handleDrop; + if (handleDrop && typeof handleDrop === 'function' && draggedImage) { + handleDrop(draggedImage); + } + setDraggedImage(null); + }, + [draggedImage] + ); + + const mouseSensor = useSensor(MouseSensor, { + activationConstraint: { distance: 15 }, + }); + + const touchSensor = useSensor(TouchSensor, { + activationConstraint: { distance: 15 }, + }); + const keyboardSensor = useSensor(KeyboardSensor); + + const sensors = useSensors(mouseSensor, touchSensor, keyboardSensor); + + return ( + + {props.children} + + {draggedImage && } + + + ); +}; + +export default memo(ImageDndContext); diff --git a/invokeai/frontend/web/src/app/components/ImageDnd/OverlayDragImage.tsx b/invokeai/frontend/web/src/app/components/ImageDnd/OverlayDragImage.tsx new file mode 100644 index 0000000000..510dadc823 --- /dev/null +++ b/invokeai/frontend/web/src/app/components/ImageDnd/OverlayDragImage.tsx @@ -0,0 +1,36 @@ +import { Box, Image } from '@chakra-ui/react'; +import { memo } from 'react'; +import { ImageDTO } from 'services/api'; + +type OverlayDragImageProps = { + image: ImageDTO; +}; + +const OverlayDragImage = (props: OverlayDragImageProps) => { + return ( + + + + ); +}; + +export default memo(OverlayDragImage); diff --git a/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx b/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx index 69b2756f96..c94f7624b2 100644 --- a/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx +++ b/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx @@ 
-16,6 +16,7 @@ import { PartialAppConfig } from 'app/types/invokeai'; import '../../i18n'; import { socketMiddleware } from 'services/events/middleware'; import { Middleware } from '@reduxjs/toolkit'; +import ImageDndContext from './ImageDnd/ImageDndContext'; const App = lazy(() => import('./App')); const ThemeLocaleProvider = lazy(() => import('./ThemeLocaleProvider')); @@ -69,11 +70,13 @@ const InvokeAIUI = ({ }> - + + + diff --git a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/serialize.ts b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/serialize.ts index 9fb4ceae32..5025ca081a 100644 --- a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/serialize.ts +++ b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/serialize.ts @@ -1,4 +1,5 @@ import { canvasPersistDenylist } from 'features/canvas/store/canvasPersistDenylist'; +import { controlNetDenylist } from 'features/controlNet/store/controlNetDenylist'; import { galleryPersistDenylist } from 'features/gallery/store/galleryPersistDenylist'; import { lightboxPersistDenylist } from 'features/lightbox/store/lightboxPersistDenylist'; import { nodesPersistDenylist } from 'features/nodes/store/nodesPersistDenylist'; @@ -23,6 +24,7 @@ const serializationDenylist: { system: systemPersistDenylist, // config: configPersistDenyList, ui: uiPersistDenylist, + controlNet: controlNetDenylist, // hotkeys: hotkeysPersistDenylist, }; diff --git a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/unserialize.ts b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/unserialize.ts index c6ae4946f2..c6af5f3612 100644 --- a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/unserialize.ts +++ b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/unserialize.ts @@ -1,4 +1,5 @@ import { initialCanvasState } from 'features/canvas/store/canvasSlice'; +import { initialControlNetState } from 'features/controlNet/store/controlNetSlice'; import { initialGalleryState } from 'features/gallery/store/gallerySlice'; import { initialImagesState } from 'features/gallery/store/imagesSlice'; import { initialLightboxState } from 'features/lightbox/store/lightboxSlice'; @@ -28,6 +29,7 @@ const initialStates: { ui: initialUIState, hotkeys: initialHotkeysState, images: initialImagesState, + controlNet: initialControlNetState, }; export const unserialize: UnserializeFunction = (data, key) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index ba16e56371..a9349dc863 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -70,6 +70,8 @@ import { import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSaved'; import { addCommitStagingAreaImageListener } from './listeners/addCommitStagingAreaImageListener'; import { addImageCategoriesChangedListener } from './listeners/imageCategoriesChanged'; +import { addControlNetImageProcessedListener } from './listeners/controlNetImageProcessed'; +import { addControlNetAutoProcessListener } from './listeners/controlNetAutoProcess'; export const listenerMiddleware = createListenerMiddleware(); @@ -173,3 +175,7 @@ addReceivedPageOfImagesRejectedListener(); // Gallery addImageCategoriesChangedListener(); + +// ControlNet +addControlNetImageProcessedListener(); +addControlNetAutoProcessListener(); diff --git 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts new file mode 100644 index 0000000000..9f98b8f25e --- /dev/null +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts @@ -0,0 +1,59 @@ +import { AnyAction } from '@reduxjs/toolkit'; +import { startAppListening } from '..'; +import { log } from 'app/logging/useLogger'; +import { controlNetImageProcessed } from 'features/controlNet/store/actions'; +import { + controlNetImageChanged, + controlNetProcessorParamsChanged, + controlNetProcessorTypeChanged, +} from 'features/controlNet/store/controlNetSlice'; +import { RootState } from 'app/store/store'; + +const moduleLog = log.child({ namespace: 'controlNet' }); + +const predicate = (action: AnyAction, state: RootState) => { + const isActionMatched = + controlNetProcessorParamsChanged.match(action) || + controlNetImageChanged.match(action) || + controlNetProcessorTypeChanged.match(action); + + if (!isActionMatched) { + return false; + } + + const { controlImage, processorType } = + state.controlNet.controlNets[action.payload.controlNetId]; + + const isProcessorSelected = processorType !== 'none'; + + const isBusy = state.system.isProcessing; + + const hasControlImage = Boolean(controlImage); + + return isProcessorSelected && !isBusy && hasControlImage; +}; + +/** + * Listener that automatically processes a ControlNet image when its processor parameters are changed. + * + * The network request is debounced by 1 second. + */ +export const addControlNetAutoProcessListener = () => { + startAppListening({ + predicate, + effect: async ( + action, + { dispatch, getState, cancelActiveListeners, delay } + ) => { + const { controlNetId } = action.payload; + + // Cancel any in-progress instances of this listener + cancelActiveListeners(); + + // Delay before starting actual work + await delay(300); + + dispatch(controlNetImageProcessed({ controlNetId })); + }, + }); +}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts new file mode 100644 index 0000000000..717417792c --- /dev/null +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts @@ -0,0 +1,93 @@ +import { startAppListening } from '..'; +import { imageMetadataReceived } from 'services/thunks/image'; +import { log } from 'app/logging/useLogger'; +import { controlNetImageProcessed } from 'features/controlNet/store/actions'; +import { Graph } from 'services/api'; +import { sessionCreated } from 'services/thunks/session'; +import { sessionReadyToInvoke } from 'features/system/store/actions'; +import { socketInvocationComplete } from 'services/events/actions'; +import { isImageOutput } from 'services/types/guards'; +import { controlNetProcessedImageChanged } from 'features/controlNet/store/controlNetSlice'; +import { pick } from 'lodash-es'; + +const moduleLog = log.child({ namespace: 'controlNet' }); + +export const addControlNetImageProcessedListener = () => { + startAppListening({ + actionCreator: controlNetImageProcessed, + effect: async ( + action, + { dispatch, getState, take, unsubscribe, subscribe } + ) => { + const { controlNetId } = action.payload; + const controlNet = getState().controlNet.controlNets[controlNetId]; 
+ + if (!controlNet.controlImage) { + moduleLog.error('Unable to process ControlNet image'); + return; + } + + // ControlNet one-off procressing graph is just the processor node, no edges. + // Also we need to grab the image. + const graph: Graph = { + nodes: { + [controlNet.processorNode.id]: { + ...controlNet.processorNode, + is_intermediate: true, + image: pick(controlNet.controlImage, [ + 'image_name', + 'image_origin', + ]), + }, + }, + }; + + // Create a session to run the graph & wait til it's ready to invoke + const sessionCreatedAction = dispatch(sessionCreated({ graph })); + const [sessionCreatedFulfilledAction] = await take( + (action): action is ReturnType => + sessionCreated.fulfilled.match(action) && + action.meta.requestId === sessionCreatedAction.requestId + ); + + const sessionId = sessionCreatedFulfilledAction.payload.id; + + // Invoke the session & wait til it's complete + dispatch(sessionReadyToInvoke()); + const [invocationCompleteAction] = await take( + (action): action is ReturnType => + socketInvocationComplete.match(action) && + action.payload.data.graph_execution_state_id === sessionId + ); + + // We still have to check the output type + if (isImageOutput(invocationCompleteAction.payload.data.result)) { + const { image_name } = + invocationCompleteAction.payload.data.result.image; + + // Wait for the ImageDTO to be received + const [imageMetadataReceivedAction] = await take( + ( + action + ): action is ReturnType => + imageMetadataReceived.fulfilled.match(action) && + action.payload.image_name === image_name + ); + const processedControlImage = imageMetadataReceivedAction.payload; + + moduleLog.debug( + { data: { arg: action.payload, processedControlImage } }, + 'ControlNet image processed' + ); + + // Update the processed image in the store + dispatch( + controlNetProcessedImageChanged({ + controlNetId, + processedControlImage, + }) + ); + } + }, + }); +}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/initialImageSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/initialImageSelected.ts index 940cc84c1e..9069e477ac 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/initialImageSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/initialImageSelected.ts @@ -2,12 +2,10 @@ import { initialImageChanged } from 'features/parameters/store/generationSlice'; import { t } from 'i18next'; import { addToast } from 'features/system/store/systemSlice'; import { startAppListening } from '..'; -import { - initialImageSelected, - isImageDTO, -} from 'features/parameters/store/actions'; +import { initialImageSelected } from 'features/parameters/store/actions'; import { makeToast } from 'app/components/Toaster'; import { selectImagesById } from 'features/gallery/store/imagesSlice'; +import { isImageDTO } from 'services/types/guards'; export const addInitialImageSelectedListener = () => { startAppListening({ diff --git a/invokeai/frontend/web/src/app/store/store.ts b/invokeai/frontend/web/src/app/store/store.ts index 521610adcc..f577b73895 100644 --- a/invokeai/frontend/web/src/app/store/store.ts +++ b/invokeai/frontend/web/src/app/store/store.ts @@ -13,6 +13,7 @@ import galleryReducer from 'features/gallery/store/gallerySlice'; import imagesReducer from 'features/gallery/store/imagesSlice'; import lightboxReducer from 'features/lightbox/store/lightboxSlice'; import generationReducer from 
'features/parameters/store/generationSlice'; +import controlNetReducer from 'features/controlNet/store/controlNetSlice'; import postprocessingReducer from 'features/parameters/store/postprocessingSlice'; import systemReducer from 'features/system/store/systemSlice'; // import sessionReducer from 'features/system/store/sessionSlice'; @@ -45,6 +46,7 @@ const allReducers = { ui: uiReducer, hotkeys: hotkeysReducer, images: imagesReducer, + controlNet: controlNetReducer, // session: sessionReducer, }; @@ -62,6 +64,7 @@ const rememberedKeys: (keyof typeof allReducers)[] = [ 'postprocessing', 'system', 'ui', + 'controlNet', // 'hotkeys', // 'config', ]; diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts index 8081ffa491..304b094749 100644 --- a/invokeai/frontend/web/src/app/types/invokeai.ts +++ b/invokeai/frontend/web/src/app/types/invokeai.ts @@ -95,6 +95,7 @@ export type AppFeature = * A disable-able Stable Diffusion feature */ export type SDFeature = + | 'controlNet' | 'noise' | 'variation' | 'symmetry' diff --git a/invokeai/frontend/web/src/common/components/IAICheckbox.tsx b/invokeai/frontend/web/src/common/components/IAICheckbox.tsx deleted file mode 100644 index eb423b2b27..0000000000 --- a/invokeai/frontend/web/src/common/components/IAICheckbox.tsx +++ /dev/null @@ -1,17 +0,0 @@ -import { Checkbox, CheckboxProps } from '@chakra-ui/react'; -import { memo, ReactNode } from 'react'; - -type IAICheckboxProps = CheckboxProps & { - label: string | ReactNode; -}; - -const IAICheckbox = (props: IAICheckboxProps) => { - const { label, ...rest } = props; - return ( - - {label} - - ); -}; - -export default memo(IAICheckbox); diff --git a/invokeai/frontend/web/src/common/components/IAICollapse.tsx b/invokeai/frontend/web/src/common/components/IAICollapse.tsx index 161caca24d..ec23793741 100644 --- a/invokeai/frontend/web/src/common/components/IAICollapse.tsx +++ b/invokeai/frontend/web/src/common/components/IAICollapse.tsx @@ -49,7 +49,7 @@ const IAICollapse = (props: IAIToggleCollapseProps) => { /> )} - + {children} diff --git a/invokeai/frontend/web/src/common/components/IAICustomSelect.tsx b/invokeai/frontend/web/src/common/components/IAICustomSelect.tsx index 6d6cdbadf5..9accceb846 100644 --- a/invokeai/frontend/web/src/common/components/IAICustomSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAICustomSelect.tsx @@ -1,4 +1,4 @@ -import { CheckIcon } from '@chakra-ui/icons'; +import { CheckIcon, ChevronUpIcon } from '@chakra-ui/icons'; import { Box, Flex, @@ -10,7 +10,6 @@ import { GridItem, List, ListItem, - Select, Text, Tooltip, TooltipProps, @@ -19,7 +18,8 @@ import { autoUpdate, offset, shift, useFloating } from '@floating-ui/react-dom'; import { useSelect } from 'downshift'; import { OverlayScrollbarsComponent } from 'overlayscrollbars-react'; -import { memo } from 'react'; +import { memo, useMemo } from 'react'; +import { getInputOutlineStyles } from 'theme/util/getInputOutlineStyles'; export type ItemTooltips = { [key: string]: string }; @@ -34,6 +34,7 @@ type IAICustomSelectProps = { buttonProps?: FlexProps; tooltip?: string; tooltipProps?: Omit; + ellipsisPosition?: 'start' | 'end'; }; const IAICustomSelect = (props: IAICustomSelectProps) => { @@ -48,6 +49,7 @@ const IAICustomSelect = (props: IAICustomSelectProps) => { tooltip, buttonProps, tooltipProps, + ellipsisPosition = 'end', } = props; const { @@ -69,6 +71,14 @@ const IAICustomSelect = (props: IAICustomSelectProps) => { middleware: [offset(4), shift({ 
crossAxis: true, padding: 8 })], }); + const labelTextDirection = useMemo(() => { + if (ellipsisPosition === 'start') { + return document.dir === 'rtl' ? 'ltr' : 'rtl'; + } + + return document.dir; + }, [ellipsisPosition]); + return ( {label && ( @@ -82,20 +92,44 @@ const IAICustomSelect = (props: IAICustomSelectProps) => { )} - + + {isOpen && ( @@ -104,11 +138,10 @@ const IAICustomSelect = (props: IAICustomSelectProps) => { ref={refs.setFloating} sx={{ ...floatingStyles, - width: 'max-content', top: 0, - left: 0, + insetInlineStart: 0, flexDirection: 'column', - zIndex: 1, + zIndex: 2, bg: 'base.800', borderRadius: 'base', border: '1px', @@ -118,61 +151,72 @@ const IAICustomSelect = (props: IAICustomSelectProps) => { px: 0, h: 'fit-content', maxH: 64, + minW: 48, }} > - {items.map((item, index) => ( - - { + const isSelected = selectedItem === item; + const isHighlighted = highlightedIndex === index; + const fontWeight = isSelected ? 700 : 500; + const bg = isHighlighted + ? 'base.700' + : isSelected + ? 'base.750' + : undefined; + return ( + - {withCheckIcon ? ( - - - {selectedItem === item && } - - - - {item} - - - - ) : ( - - {item} - - )} - - - ))} + + {withCheckIcon ? ( + + + {isSelected && } + + + + {item} + + + + ) : ( + + {item} + + )} + + + ); + })} )} diff --git a/invokeai/frontend/web/src/common/components/IAIDndImage.tsx b/invokeai/frontend/web/src/common/components/IAIDndImage.tsx new file mode 100644 index 0000000000..b9b9e56722 --- /dev/null +++ b/invokeai/frontend/web/src/common/components/IAIDndImage.tsx @@ -0,0 +1,258 @@ +import { + Box, + Flex, + Icon, + IconButtonProps, + Image, + Text, +} from '@chakra-ui/react'; +import { useDraggable, useDroppable } from '@dnd-kit/core'; +import { useCombinedRefs } from '@dnd-kit/utilities'; +import IAIIconButton from 'common/components/IAIIconButton'; +import { IAIImageFallback } from 'common/components/IAIImageFallback'; +import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay'; +import { useGetUrl } from 'common/util/getUrl'; +import { AnimatePresence, motion } from 'framer-motion'; +import { ReactElement, SyntheticEvent } from 'react'; +import { memo, useRef } from 'react'; +import { FaImage, FaTimes } from 'react-icons/fa'; +import { ImageDTO } from 'services/api'; +import { v4 as uuidv4 } from 'uuid'; + +type IAIDndImageProps = { + image: ImageDTO | null | undefined; + onDrop: (droppedImage: ImageDTO) => void; + onReset?: () => void; + onError?: (event: SyntheticEvent) => void; + onLoad?: (event: SyntheticEvent) => void; + resetIconSize?: IconButtonProps['size']; + withResetIcon?: boolean; + withMetadataOverlay?: boolean; + isDragDisabled?: boolean; + isDropDisabled?: boolean; + fallback?: ReactElement; + payloadImage?: ImageDTO | null | undefined; + minSize?: number; +}; + +const IAIDndImage = (props: IAIDndImageProps) => { + const { + image, + onDrop, + onReset, + onError, + resetIconSize = 'md', + withResetIcon = false, + withMetadataOverlay = false, + isDropDisabled = false, + isDragDisabled = false, + fallback = , + payloadImage, + minSize = 24, + } = props; + const dndId = useRef(uuidv4()); + const { getUrl } = useGetUrl(); + const { + isOver, + setNodeRef: setDroppableRef, + active, + } = useDroppable({ + id: dndId.current, + disabled: isDropDisabled, + data: { + handleDrop: onDrop, + }, + }); + + const { + attributes, + listeners, + setNodeRef: setDraggableRef, + } = useDraggable({ + id: dndId.current, + data: { + image: payloadImage ? 
payloadImage : image, + }, + disabled: isDragDisabled, + }); + + const setNodeRef = useCombinedRefs(setDroppableRef, setDraggableRef); + + return ( + + {image && ( + + + {withMetadataOverlay && } + {onReset && withResetIcon && ( + + } + onClick={onReset} + /> + + )} + + {active && } + + + )} + {!image && ( + <> + + + + + {active && } + + + )} + + ); +}; + +export default memo(IAIDndImage); + +type DropOverlayProps = { + isOver: boolean; +}; + +const DropOverlay = (props: DropOverlayProps) => { + const { isOver } = props; + return ( + + + + + + + Drop + + + + + ); +}; diff --git a/invokeai/frontend/web/src/common/components/IAIFullCheckbox.tsx b/invokeai/frontend/web/src/common/components/IAIFullCheckbox.tsx new file mode 100644 index 0000000000..97ff24689c --- /dev/null +++ b/invokeai/frontend/web/src/common/components/IAIFullCheckbox.tsx @@ -0,0 +1,25 @@ +import { + Checkbox, + CheckboxProps, + FormControl, + FormControlProps, + FormLabel, +} from '@chakra-ui/react'; +import { memo, ReactNode } from 'react'; + +type IAIFullCheckboxProps = CheckboxProps & { + label: string | ReactNode; + formControlProps?: FormControlProps; +}; + +const IAIFullCheckbox = (props: IAIFullCheckboxProps) => { + const { label, formControlProps, ...rest } = props; + return ( + + {label} + + + ); +}; + +export default memo(IAIFullCheckbox); diff --git a/invokeai/frontend/web/src/common/components/IAIImageFallback.tsx b/invokeai/frontend/web/src/common/components/IAIImageFallback.tsx new file mode 100644 index 0000000000..3d34fbca9e --- /dev/null +++ b/invokeai/frontend/web/src/common/components/IAIImageFallback.tsx @@ -0,0 +1,27 @@ +import { Flex, FlexProps, Spinner, SpinnerProps } from '@chakra-ui/react'; + +type Props = FlexProps & { + spinnerProps?: SpinnerProps; +}; + +export const IAIImageFallback = (props: Props) => { + const { spinnerProps, ...rest } = props; + const { sx, ...restFlexProps } = rest; + return ( + + + + ); +}; diff --git a/invokeai/frontend/web/src/common/components/IAISimpleCheckbox.tsx b/invokeai/frontend/web/src/common/components/IAISimpleCheckbox.tsx new file mode 100644 index 0000000000..2d28b5b72e --- /dev/null +++ b/invokeai/frontend/web/src/common/components/IAISimpleCheckbox.tsx @@ -0,0 +1,19 @@ +import { Checkbox, CheckboxProps, Text } from '@chakra-ui/react'; +import { memo, ReactElement } from 'react'; + +type IAISimpleCheckboxProps = CheckboxProps & { + label: string | ReactElement; +}; + +const IAISimpleCheckbox = (props: IAISimpleCheckboxProps) => { + const { label, ...rest } = props; + return ( + + + {label} + + + ); +}; + +export default memo(IAISimpleCheckbox); diff --git a/invokeai/frontend/web/src/common/components/IAISlider.tsx b/invokeai/frontend/web/src/common/components/IAISlider.tsx index 48080e8970..2777e35967 100644 --- a/invokeai/frontend/web/src/common/components/IAISlider.tsx +++ b/invokeai/frontend/web/src/common/components/IAISlider.tsx @@ -40,7 +40,7 @@ import IAIIconButton, { IAIIconButtonProps } from './IAIIconButton'; import { roundDownToMultiple } from 'common/util/roundDownToMultiple'; export type IAIFullSliderProps = { - label: string; + label?: string; value: number; min?: number; max?: number; @@ -178,9 +178,11 @@ const IAISlider = (props: IAIFullSliderProps) => { isDisabled={isDisabled} {...sliderFormControlProps} > - - {label} - + {label && ( + + {label} + + )} { sx={{ insetInlineStart: '0 !important', insetInlineEnd: 'unset !important', + mt: 1.5, }} {...sliderMarkProps} > @@ -213,6 +216,7 @@ const IAISlider = (props: IAIFullSliderProps) => { sx={{ 
insetInlineStart: 'unset !important', insetInlineEnd: '0 !important', + mt: 1.5, }} {...sliderMarkProps} > diff --git a/invokeai/frontend/web/src/common/components/IAISwitch.tsx b/invokeai/frontend/web/src/common/components/IAISwitch.tsx index e1bddb9f43..33c46c4aeb 100644 --- a/invokeai/frontend/web/src/common/components/IAISwitch.tsx +++ b/invokeai/frontend/web/src/common/components/IAISwitch.tsx @@ -5,6 +5,7 @@ import { FormLabelProps, Switch, SwitchProps, + Tooltip, } from '@chakra-ui/react'; import { memo } from 'react'; @@ -13,6 +14,7 @@ interface Props extends SwitchProps { width?: string | number; formControlProps?: FormControlProps; formLabelProps?: FormLabelProps; + tooltip?: string; } /** @@ -25,22 +27,27 @@ const IAISwitch = (props: Props) => { width = 'auto', formControlProps, formLabelProps, + tooltip, ...rest } = props; return ( - - - {label} - - - + + + {label && ( + + {label} + + )} + + + ); }; diff --git a/invokeai/frontend/web/src/common/components/ImageMetadataOverlay.tsx b/invokeai/frontend/web/src/common/components/ImageMetadataOverlay.tsx index bed0a26831..e3bee9797b 100644 --- a/invokeai/frontend/web/src/common/components/ImageMetadataOverlay.tsx +++ b/invokeai/frontend/web/src/common/components/ImageMetadataOverlay.tsx @@ -1,5 +1,5 @@ import { Badge, Flex } from '@chakra-ui/react'; -import { isNumber, isString } from 'lodash-es'; +import { isString } from 'lodash-es'; import { useMemo } from 'react'; import { ImageDTO } from 'services/api'; @@ -8,14 +8,6 @@ type ImageMetadataOverlayProps = { }; const ImageMetadataOverlay = ({ image }: ImageMetadataOverlayProps) => { - const dimensions = useMemo(() => { - if (!isNumber(image.metadata?.width) || isNumber(!image.metadata?.height)) { - return; - } - - return `${image.metadata?.width} × ${image.metadata?.height}`; - }, [image.metadata]); - const model = useMemo(() => { if (!isString(image.metadata?.model)) { return; @@ -31,17 +23,15 @@ const ImageMetadataOverlay = ({ image }: ImageMetadataOverlayProps) => { flexDirection: 'column', position: 'absolute', top: 0, - right: 0, + insetInlineStart: 0, p: 2, - alignItems: 'flex-end', + alignItems: 'flex-start', gap: 2, }} > - {dimensions && ( - - {dimensions} - - )} + + {image.width} × {image.height} + {model && ( {model} diff --git a/invokeai/frontend/web/src/common/components/InitialImageButtons.tsx b/invokeai/frontend/web/src/common/components/InitialImageButtons.tsx deleted file mode 100644 index 469cd1695c..0000000000 --- a/invokeai/frontend/web/src/common/components/InitialImageButtons.tsx +++ /dev/null @@ -1,42 +0,0 @@ -import { ButtonGroup, Flex, Spacer, Text } from '@chakra-ui/react'; -import IAIIconButton from 'common/components/IAIIconButton'; - -import { useTranslation } from 'react-i18next'; -import { FaUndo, FaUpload } from 'react-icons/fa'; -import { useAppDispatch } from 'app/store/storeHooks'; -import { useCallback } from 'react'; -import { clearInitialImage } from 'features/parameters/store/generationSlice'; -import useImageUploader from 'common/hooks/useImageUploader'; - -const InitialImageButtons = () => { - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - const { openUploader } = useImageUploader(); - - const handleResetInitialImage = useCallback(() => { - dispatch(clearInitialImage()); - }, [dispatch]); - - return ( - - - {t('parameters.initialImage')} - - - - } - aria-label={t('accessibility.reset')} - onClick={handleResetInitialImage} - /> - } - onClick={openUploader} - aria-label={t('common.upload')} - /> - - - ); -}; - -export 
default InitialImageButtons; diff --git a/invokeai/frontend/web/src/app/selectors/readinessSelector.ts b/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts similarity index 89% rename from invokeai/frontend/web/src/app/selectors/readinessSelector.ts rename to invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts index 2b77fe9f47..7204205216 100644 --- a/invokeai/frontend/web/src/app/selectors/readinessSelector.ts +++ b/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts @@ -1,12 +1,12 @@ import { createSelector } from '@reduxjs/toolkit'; +import { useAppSelector } from 'app/store/storeHooks'; import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; import { validateSeedWeights } from 'common/util/seedWeightPairs'; import { generationSelector } from 'features/parameters/store/generationSelectors'; import { systemSelector } from 'features/system/store/systemSelectors'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; -import { isEqual } from 'lodash-es'; -export const readinessSelector = createSelector( +const readinessSelector = createSelector( [generationSelector, systemSelector, activeTabNameSelector], (generation, system, activeTabName) => { const { @@ -60,3 +60,8 @@ export const readinessSelector = createSelector( }, defaultSelectorOptions ); + +export const useIsReadyToInvoke = () => { + const { isReady } = useAppSelector(readinessSelector); + return isReady; +}; diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx index b345f2cda0..2f74e5542a 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx @@ -2,7 +2,7 @@ import { ButtonGroup, Flex } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import IAIButton from 'common/components/IAIButton'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import IAIColorPicker from 'common/components/IAIColorPicker'; import IAIIconButton from 'common/components/IAIIconButton'; import IAIPopover from 'common/components/IAIPopover'; @@ -117,12 +117,12 @@ const IAICanvasMaskOptions = () => { } > - - diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasSettingsButtonPopover.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasSettingsButtonPopover.tsx index 94a990bb4c..638332809c 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasSettingsButtonPopover.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasSettingsButtonPopover.tsx @@ -1,7 +1,7 @@ import { Flex } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import IAIIconButton from 'common/components/IAIIconButton'; import IAIPopover from 'common/components/IAIPopover'; import { canvasSelector } from 'features/canvas/store/canvasSelectors'; @@ -102,50 +102,50 @@ const 
IAICanvasSettingsButtonPopover = () => { } > - dispatch(setShouldShowIntermediates(e.target.checked)) } /> - dispatch(setShouldShowGrid(e.target.checked))} /> - - dispatch(setShouldDarkenOutsideBoundingBox(e.target.checked)) } /> - dispatch(setShouldAutoSave(e.target.checked))} /> - dispatch(setShouldCropToBoundingBoxOnSave(e.target.checked)) } /> - dispatch(setShouldRestrictStrokesToBox(e.target.checked)) } /> - @@ -153,7 +153,7 @@ const IAICanvasSettingsButtonPopover = () => { } /> - dispatch(setShouldAntialias(e.target.checked))} diff --git a/invokeai/frontend/web/src/features/controlNet/components/ControlNet.tsx b/invokeai/frontend/web/src/features/controlNet/components/ControlNet.tsx new file mode 100644 index 0000000000..903d453446 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/ControlNet.tsx @@ -0,0 +1,258 @@ +import { memo, useCallback } from 'react'; +import { + ControlNetConfig, + controlNetAdded, + controlNetRemoved, + controlNetToggled, +} from '../store/controlNetSlice'; +import { useAppDispatch } from 'app/store/storeHooks'; +import ParamControlNetModel from './parameters/ParamControlNetModel'; +import ParamControlNetWeight from './parameters/ParamControlNetWeight'; +import { + Checkbox, + Flex, + FormControl, + FormLabel, + HStack, + TabList, + TabPanels, + Tabs, + Tab, + TabPanel, + Box, +} from '@chakra-ui/react'; +import { FaCopy, FaPlus, FaTrash, FaWrench } from 'react-icons/fa'; + +import ParamControlNetBeginEnd from './parameters/ParamControlNetBeginEnd'; +import ControlNetImagePreview from './ControlNetImagePreview'; +import IAIIconButton from 'common/components/IAIIconButton'; +import { v4 as uuidv4 } from 'uuid'; +import { useToggle } from 'react-use'; +import ParamControlNetProcessorSelect from './parameters/ParamControlNetProcessorSelect'; +import ControlNetProcessorComponent from './ControlNetProcessorComponent'; +import ControlNetPreprocessButton from './ControlNetPreprocessButton'; +import IAIButton from 'common/components/IAIButton'; +import IAISwitch from 'common/components/IAISwitch'; +import { ChevronDownIcon, ChevronUpIcon } from '@chakra-ui/icons'; + +type ControlNetProps = { + controlNet: ControlNetConfig; +}; + +const ControlNet = (props: ControlNetProps) => { + const { + controlNetId, + isEnabled, + model, + weight, + beginStepPct, + endStepPct, + controlImage, + processedControlImage, + processorNode, + processorType, + } = props.controlNet; + const dispatch = useAppDispatch(); + const [shouldShowAdvanced, onToggleAdvanced] = useToggle(false); + + const handleDelete = useCallback(() => { + dispatch(controlNetRemoved({ controlNetId })); + }, [controlNetId, dispatch]); + + const handleDuplicate = useCallback(() => { + dispatch( + controlNetAdded({ controlNetId: uuidv4(), controlNet: props.controlNet }) + ); + }, [dispatch, props.controlNet]); + + const handleToggleIsEnabled = useCallback(() => { + dispatch(controlNetToggled({ controlNetId })); + }, [controlNetId, dispatch]); + + return ( + + + + + + + } + /> + } + /> + + } + /> + + {isEnabled && ( + <> + + + + + + {!shouldShowAdvanced && ( + + + + )} + + {shouldShowAdvanced && ( + <> + + + + + + + )} + + )} + + ); + + return ( + + + + + + + Model Config + + + Preprocess + + + + + + + + + + + + {/* } + onClick={handleReset} + isDisabled={Boolean(!processedControlImage)} + > + Reset Processing + */} + + + + Remove ControlNet + + ); +}; + +export default memo(ControlNet); diff --git a/invokeai/frontend/web/src/features/controlNet/components/ControlNetImagePreview.tsx 
b/invokeai/frontend/web/src/features/controlNet/components/ControlNetImagePreview.tsx new file mode 100644 index 0000000000..632f88b57b --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/ControlNetImagePreview.tsx @@ -0,0 +1,141 @@ +import { memo, useCallback, useRef, useState } from 'react'; +import { ImageDTO } from 'services/api'; +import { + ControlNetConfig, + controlNetImageChanged, + controlNetSelector, +} from '../store/controlNetSlice'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { Box } from '@chakra-ui/react'; +import IAIDndImage from 'common/components/IAIDndImage'; +import { createSelector } from '@reduxjs/toolkit'; +import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; +import { AnimatePresence, motion } from 'framer-motion'; +import { IAIImageFallback } from 'common/components/IAIImageFallback'; +import { useHoverDirty } from 'react-use'; + +const selector = createSelector( + controlNetSelector, + (controlNet) => { + const { isProcessingControlImage } = controlNet; + return { isProcessingControlImage }; + }, + defaultSelectorOptions +); + +type Props = { + controlNet: ControlNetConfig; +}; + +const ControlNetImagePreview = (props: Props) => { + const { controlNetId, controlImage, processedControlImage, processorType } = + props.controlNet; + const dispatch = useAppDispatch(); + const { isProcessingControlImage } = useAppSelector(selector); + const containerRef = useRef(null); + + const isMouseOverImage = useHoverDirty(containerRef); + + const handleDrop = useCallback( + (droppedImage: ImageDTO) => { + if (controlImage?.image_name === droppedImage.image_name) { + return; + } + dispatch( + controlNetImageChanged({ controlNetId, controlImage: droppedImage }) + ); + }, + [controlImage, controlNetId, dispatch] + ); + + const shouldShowProcessedImageBackdrop = + Number(controlImage?.width) > Number(processedControlImage?.width) || + Number(controlImage?.height) > Number(processedControlImage?.height); + + const shouldShowProcessedImage = + controlImage && + processedControlImage && + !isMouseOverImage && + !isProcessingControlImage && + processorType !== 'none'; + + return ( + + + + {shouldShowProcessedImage && ( + + + {shouldShowProcessedImageBackdrop && ( + + )} + + + + + + )} + + {isProcessingControlImage && ( + + + + )} + + ); +}; + +export default memo(ControlNetImagePreview); diff --git a/invokeai/frontend/web/src/features/controlNet/components/ControlNetPreprocessButton.tsx b/invokeai/frontend/web/src/features/controlNet/components/ControlNetPreprocessButton.tsx new file mode 100644 index 0000000000..95a4f968e5 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/ControlNetPreprocessButton.tsx @@ -0,0 +1,36 @@ +import IAIButton from 'common/components/IAIButton'; +import { memo, useCallback } from 'react'; +import { ControlNetConfig } from '../store/controlNetSlice'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { controlNetImageProcessed } from '../store/actions'; +import { useIsReadyToInvoke } from 'common/hooks/useIsReadyToInvoke'; + +type Props = { + controlNet: ControlNetConfig; +}; + +const ControlNetPreprocessButton = (props: Props) => { + const { controlNetId, controlImage } = props.controlNet; + const dispatch = useAppDispatch(); + const isReady = useIsReadyToInvoke(); + + const handleProcess = useCallback(() => { + dispatch( + controlNetImageProcessed({ + controlNetId, + }) + ); + }, [controlNetId, dispatch]); + + return ( + + Preprocess 
+ + ); +}; + +export default memo(ControlNetPreprocessButton); diff --git a/invokeai/frontend/web/src/features/controlNet/components/ControlNetProcessorComponent.tsx b/invokeai/frontend/web/src/features/controlNet/components/ControlNetProcessorComponent.tsx new file mode 100644 index 0000000000..4649f89b35 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/ControlNetProcessorComponent.tsx @@ -0,0 +1,131 @@ +import { memo } from 'react'; +import { RequiredControlNetProcessorNode } from '../store/types'; +import CannyProcessor from './processors/CannyProcessor'; +import HedProcessor from './processors/HedProcessor'; +import LineartProcessor from './processors/LineartProcessor'; +import LineartAnimeProcessor from './processors/LineartAnimeProcessor'; +import ContentShuffleProcessor from './processors/ContentShuffleProcessor'; +import MediapipeFaceProcessor from './processors/MediapipeFaceProcessor'; +import MidasDepthProcessor from './processors/MidasDepthProcessor'; +import MlsdImageProcessor from './processors/MlsdImageProcessor'; +import NormalBaeProcessor from './processors/NormalBaeProcessor'; +import OpenposeProcessor from './processors/OpenposeProcessor'; +import PidiProcessor from './processors/PidiProcessor'; +import ZoeDepthProcessor from './processors/ZoeDepthProcessor'; + +export type ControlNetProcessorProps = { + controlNetId: string; + processorNode: RequiredControlNetProcessorNode; +}; + +const ControlNetProcessorComponent = (props: ControlNetProcessorProps) => { + const { controlNetId, processorNode } = props; + if (processorNode.type === 'canny_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'hed_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'lineart_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'content_shuffle_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'lineart_anime_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'mediapipe_face_processor') { + return ( + + ); + } + + if (processorNode.type === 'midas_depth_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'mlsd_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'normalbae_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'openpose_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'pidi_image_processor') { + return ( + + ); + } + + if (processorNode.type === 'zoe_depth_image_processor') { + return ( + + ); + } + + return null; +}; + +export default memo(ControlNetProcessorComponent); diff --git a/invokeai/frontend/web/src/features/controlNet/components/hooks/useProcessorNodeChanged.ts b/invokeai/frontend/web/src/features/controlNet/components/hooks/useProcessorNodeChanged.ts new file mode 100644 index 0000000000..79a502cb0e --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/hooks/useProcessorNodeChanged.ts @@ -0,0 +1,20 @@ +import { useAppDispatch } from 'app/store/storeHooks'; +import { controlNetProcessorParamsChanged } from 'features/controlNet/store/controlNetSlice'; +import { ControlNetProcessorNode } from 'features/controlNet/store/types'; +import { useCallback } from 'react'; + +export const useProcessorNodeChanged = () => { + const dispatch = useAppDispatch(); + const handleProcessorNodeChanged = useCallback( + (controlNetId: string, changes: Partial) => { + dispatch( + controlNetProcessorParamsChanged({ + controlNetId, + changes, + }) + ); + 
}, + [dispatch] + ); + return handleProcessorNodeChanged; +}; diff --git a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetBeginEnd.tsx b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetBeginEnd.tsx new file mode 100644 index 0000000000..bb2f151193 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetBeginEnd.tsx @@ -0,0 +1,130 @@ +import { + FormControl, + FormLabel, + HStack, + RangeSlider, + RangeSliderFilledTrack, + RangeSliderMark, + RangeSliderThumb, + RangeSliderTrack, + Tooltip, +} from '@chakra-ui/react'; +import { useAppDispatch } from 'app/store/storeHooks'; +import IAIIconButton from 'common/components/IAIIconButton'; +import { + controlNetBeginStepPctChanged, + controlNetEndStepPctChanged, +} from 'features/controlNet/store/controlNetSlice'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { BiReset } from 'react-icons/bi'; + +type Props = { + controlNetId: string; + beginStepPct: number; + endStepPct: number; + mini?: boolean; +}; + +const formatPct = (v: number) => `${Math.round(v * 100)}%`; + +const ParamControlNetBeginEnd = (props: Props) => { + const { controlNetId, beginStepPct, endStepPct, mini = false } = props; + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + + const handleStepPctChanged = useCallback( + (v: number[]) => { + dispatch( + controlNetBeginStepPctChanged({ controlNetId, beginStepPct: v[0] }) + ); + dispatch(controlNetEndStepPctChanged({ controlNetId, endStepPct: v[1] })); + }, + [controlNetId, dispatch] + ); + + const handleStepPctReset = useCallback(() => { + dispatch(controlNetBeginStepPctChanged({ controlNetId, beginStepPct: 0 })); + dispatch(controlNetEndStepPctChanged({ controlNetId, endStepPct: 1 })); + }, [controlNetId, dispatch]); + + return ( + + Begin / End Step Percentage + + + + + + + + + + + + {!mini && ( + <> + + 0% + + + 50% + + + 100% + + + )} + + + {!mini && ( + } + onClick={handleStepPctReset} + /> + )} + + + ); +}; + +export default memo(ParamControlNetBeginEnd); diff --git a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetIsEnabled.tsx b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetIsEnabled.tsx new file mode 100644 index 0000000000..d7f519a7b6 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetIsEnabled.tsx @@ -0,0 +1,28 @@ +import { useAppDispatch } from 'app/store/storeHooks'; +import IAISwitch from 'common/components/IAISwitch'; +import { controlNetToggled } from 'features/controlNet/store/controlNetSlice'; +import { memo, useCallback } from 'react'; + +type ParamControlNetIsEnabledProps = { + controlNetId: string; + isEnabled: boolean; +}; + +const ParamControlNetIsEnabled = (props: ParamControlNetIsEnabledProps) => { + const { controlNetId, isEnabled } = props; + const dispatch = useAppDispatch(); + + const handleIsEnabledChanged = useCallback(() => { + dispatch(controlNetToggled({ controlNetId })); + }, [dispatch, controlNetId]); + + return ( + + ); +}; + +export default memo(ParamControlNetIsEnabled); diff --git a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetIsPreprocessed.tsx b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetIsPreprocessed.tsx new file mode 100644 index 0000000000..6db61a0d15 --- /dev/null +++ 
b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetIsPreprocessed.tsx @@ -0,0 +1,36 @@ +import { useAppDispatch } from 'app/store/storeHooks'; +import IAIFullCheckbox from 'common/components/IAIFullCheckbox'; +import IAISwitch from 'common/components/IAISwitch'; +import { + controlNetToggled, + isControlNetImagePreprocessedToggled, +} from 'features/controlNet/store/controlNetSlice'; +import { memo, useCallback } from 'react'; + +type ParamControlNetIsEnabledProps = { + controlNetId: string; + isControlImageProcessed: boolean; +}; + +const ParamControlNetIsEnabled = (props: ParamControlNetIsEnabledProps) => { + const { controlNetId, isControlImageProcessed } = props; + const dispatch = useAppDispatch(); + + const handleIsControlImageProcessedToggled = useCallback(() => { + dispatch( + isControlNetImagePreprocessedToggled({ + controlNetId, + }) + ); + }, [controlNetId, dispatch]); + + return ( + + ); +}; + +export default memo(ParamControlNetIsEnabled); diff --git a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetModel.tsx b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetModel.tsx new file mode 100644 index 0000000000..113b1148f4 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetModel.tsx @@ -0,0 +1,41 @@ +import { useAppDispatch } from 'app/store/storeHooks'; +import IAICustomSelect from 'common/components/IAICustomSelect'; +import { + CONTROLNET_MODELS, + ControlNetModel, +} from 'features/controlNet/store/constants'; +import { controlNetModelChanged } from 'features/controlNet/store/controlNetSlice'; +import { memo, useCallback } from 'react'; + +type ParamIsControlNetModelProps = { + controlNetId: string; + model: ControlNetModel; +}; + +const ParamIsControlNetModel = (props: ParamIsControlNetModelProps) => { + const { controlNetId, model } = props; + const dispatch = useAppDispatch(); + + const handleModelChanged = useCallback( + (val: string | null | undefined) => { + // TODO: do not cast + const model = val as ControlNetModel; + dispatch(controlNetModelChanged({ controlNetId, model })); + }, + [controlNetId, dispatch] + ); + + return ( + + ); +}; + +export default memo(ParamIsControlNetModel); diff --git a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetProcessorSelect.tsx b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetProcessorSelect.tsx new file mode 100644 index 0000000000..019b5ef849 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetProcessorSelect.tsx @@ -0,0 +1,47 @@ +import IAICustomSelect from 'common/components/IAICustomSelect'; +import { memo, useCallback } from 'react'; +import { + ControlNetProcessorNode, + ControlNetProcessorType, +} from '../../store/types'; +import { controlNetProcessorTypeChanged } from '../../store/controlNetSlice'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { CONTROLNET_PROCESSORS } from '../../store/constants'; + +type ParamControlNetProcessorSelectProps = { + controlNetId: string; + processorNode: ControlNetProcessorNode; +}; + +const CONTROLNET_PROCESSOR_TYPES = Object.keys( + CONTROLNET_PROCESSORS +) as ControlNetProcessorType[]; + +const ParamControlNetProcessorSelect = ( + props: ParamControlNetProcessorSelectProps +) => { + const { controlNetId, processorNode } = props; + const dispatch = useAppDispatch(); + const handleProcessorTypeChanged = 
useCallback( + (v: string | null | undefined) => { + dispatch( + controlNetProcessorTypeChanged({ + controlNetId, + processorType: v as ControlNetProcessorType, + }) + ); + }, + [controlNetId, dispatch] + ); + return ( + + ); +}; + +export default memo(ParamControlNetProcessorSelect); diff --git a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetWeight.tsx b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetWeight.tsx new file mode 100644 index 0000000000..007ef355c3 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetWeight.tsx @@ -0,0 +1,57 @@ +import { useAppDispatch } from 'app/store/storeHooks'; +import IAISlider from 'common/components/IAISlider'; +import { controlNetWeightChanged } from 'features/controlNet/store/controlNetSlice'; +import { memo, useCallback } from 'react'; + +type ParamControlNetWeightProps = { + controlNetId: string; + weight: number; + mini?: boolean; +}; + +const ParamControlNetWeight = (props: ParamControlNetWeightProps) => { + const { controlNetId, weight, mini = false } = props; + const dispatch = useAppDispatch(); + + const handleWeightChanged = useCallback( + (weight: number) => { + dispatch(controlNetWeightChanged({ controlNetId, weight })); + }, + [controlNetId, dispatch] + ); + + const handleWeightReset = () => { + dispatch(controlNetWeightChanged({ controlNetId, weight: 1 })); + }; + + if (mini) { + return ( + + ); + } + + return ( + + ); +}; + +export default memo(ParamControlNetWeight); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/CannyProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/CannyProcessor.tsx new file mode 100644 index 0000000000..6887d1abb0 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/CannyProcessor.tsx @@ -0,0 +1,72 @@ +import IAISlider from 'common/components/IAISlider'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredCannyImageProcessorInvocation } from 'features/controlNet/store/types'; +import { memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.canny_image_processor.default; + +type CannyProcessorProps = { + controlNetId: string; + processorNode: RequiredCannyImageProcessorInvocation; +}; + +const CannyProcessor = (props: CannyProcessorProps) => { + const { controlNetId, processorNode } = props; + const { low_threshold, high_threshold } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleLowThresholdChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { low_threshold: v }); + }, + [controlNetId, processorChanged] + ); + + const handleLowThresholdReset = useCallback(() => { + processorChanged(controlNetId, { + low_threshold: DEFAULTS.low_threshold, + }); + }, [controlNetId, processorChanged]); + + const handleHighThresholdChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { high_threshold: v }); + }, + [controlNetId, processorChanged] + ); + + const handleHighThresholdReset = useCallback(() => { + processorChanged(controlNetId, { + high_threshold: DEFAULTS.high_threshold, + }); + }, [controlNetId, processorChanged]); + + return ( + + + + + ); +}; + +export default memo(CannyProcessor); diff --git 
a/invokeai/frontend/web/src/features/controlNet/components/processors/ContentShuffleProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/ContentShuffleProcessor.tsx new file mode 100644 index 0000000000..7ce6ab2297 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/ContentShuffleProcessor.tsx @@ -0,0 +1,141 @@ +import IAISlider from 'common/components/IAISlider'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredContentShuffleImageProcessorInvocation } from 'features/controlNet/store/types'; +import { memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.content_shuffle_image_processor.default; + +type Props = { + controlNetId: string; + processorNode: RequiredContentShuffleImageProcessorInvocation; +}; + +const ContentShuffleProcessor = (props: Props) => { + const { controlNetId, processorNode } = props; + const { image_resolution, detect_resolution, w, h, f } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleDetectResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { detect_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleDetectResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + detect_resolution: DEFAULTS.detect_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleImageResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { image_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleImageResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + image_resolution: DEFAULTS.image_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleWChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { w: v }); + }, + [controlNetId, processorChanged] + ); + + const handleWReset = useCallback(() => { + processorChanged(controlNetId, { + w: DEFAULTS.w, + }); + }, [controlNetId, processorChanged]); + + const handleHChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { h: v }); + }, + [controlNetId, processorChanged] + ); + + const handleHReset = useCallback(() => { + processorChanged(controlNetId, { + h: DEFAULTS.h, + }); + }, [controlNetId, processorChanged]); + + const handleFChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { f: v }); + }, + [controlNetId, processorChanged] + ); + + const handleFReset = useCallback(() => { + processorChanged(controlNetId, { + f: DEFAULTS.f, + }); + }, [controlNetId, processorChanged]); + + return ( + + + + + + + + ); +}; + +export default memo(ContentShuffleProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/HedProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/HedProcessor.tsx new file mode 100644 index 0000000000..a1aced5a8f --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/HedProcessor.tsx @@ -0,0 +1,88 @@ +import IAISlider from 'common/components/IAISlider'; +import IAISwitch from 'common/components/IAISwitch'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredHedImageProcessorInvocation } from 'features/controlNet/store/types'; +import { 
ChangeEvent, memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.hed_image_processor.default; + +type HedProcessorProps = { + controlNetId: string; + processorNode: RequiredHedImageProcessorInvocation; +}; + +const HedPreprocessor = (props: HedProcessorProps) => { + const { + controlNetId, + processorNode: { detect_resolution, image_resolution, scribble }, + } = props; + + const processorChanged = useProcessorNodeChanged(); + + const handleDetectResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { detect_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleImageResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { image_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleScribbleChanged = useCallback( + (e: ChangeEvent) => { + processorChanged(controlNetId, { scribble: e.target.checked }); + }, + [controlNetId, processorChanged] + ); + + const handleDetectResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + detect_resolution: DEFAULTS.detect_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleImageResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + image_resolution: DEFAULTS.image_resolution, + }); + }, [controlNetId, processorChanged]); + + return ( + + + + + + ); +}; + +export default memo(HedPreprocessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/LineartAnimeProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/LineartAnimeProcessor.tsx new file mode 100644 index 0000000000..17dc9b43df --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/LineartAnimeProcessor.tsx @@ -0,0 +1,72 @@ +import IAISlider from 'common/components/IAISlider'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredLineartAnimeImageProcessorInvocation } from 'features/controlNet/store/types'; +import { memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.lineart_anime_image_processor.default; + +type Props = { + controlNetId: string; + processorNode: RequiredLineartAnimeImageProcessorInvocation; +}; + +const LineartAnimeProcessor = (props: Props) => { + const { controlNetId, processorNode } = props; + const { image_resolution, detect_resolution } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleDetectResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { detect_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleImageResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { image_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleDetectResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + detect_resolution: DEFAULTS.detect_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleImageResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + image_resolution: DEFAULTS.image_resolution, + }); + }, [controlNetId, processorChanged]); + + return ( + + + + + ); +}; + +export 
default memo(LineartAnimeProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/LineartProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/LineartProcessor.tsx new file mode 100644 index 0000000000..99765ff62f --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/LineartProcessor.tsx @@ -0,0 +1,85 @@ +import IAISlider from 'common/components/IAISlider'; +import IAISwitch from 'common/components/IAISwitch'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredLineartImageProcessorInvocation } from 'features/controlNet/store/types'; +import { ChangeEvent, memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.lineart_image_processor.default; + +type LineartProcessorProps = { + controlNetId: string; + processorNode: RequiredLineartImageProcessorInvocation; +}; + +const LineartProcessor = (props: LineartProcessorProps) => { + const { controlNetId, processorNode } = props; + const { image_resolution, detect_resolution, coarse } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleDetectResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { detect_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleImageResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { image_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleDetectResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + detect_resolution: DEFAULTS.detect_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleImageResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + image_resolution: DEFAULTS.image_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleCoarseChanged = useCallback( + (e: ChangeEvent) => { + processorChanged(controlNetId, { coarse: e.target.checked }); + }, + [controlNetId, processorChanged] + ); + + return ( + + + + + + ); +}; + +export default memo(LineartProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/MediapipeFaceProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/MediapipeFaceProcessor.tsx new file mode 100644 index 0000000000..6e1a3959f2 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/MediapipeFaceProcessor.tsx @@ -0,0 +1,69 @@ +import IAISlider from 'common/components/IAISlider'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredMediapipeFaceProcessorInvocation } from 'features/controlNet/store/types'; +import { memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.mediapipe_face_processor.default; + +type Props = { + controlNetId: string; + processorNode: RequiredMediapipeFaceProcessorInvocation; +}; + +const MediapipeFaceProcessor = (props: Props) => { + const { controlNetId, processorNode } = props; + const { max_faces, min_confidence } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleMaxFacesChanged = useCallback( + (v: number) => { + 
processorChanged(controlNetId, { max_faces: v }); + }, + [controlNetId, processorChanged] + ); + + const handleMinConfidenceChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { min_confidence: v }); + }, + [controlNetId, processorChanged] + ); + + const handleMaxFacesReset = useCallback(() => { + processorChanged(controlNetId, { max_faces: DEFAULTS.max_faces }); + }, [controlNetId, processorChanged]); + + const handleMinConfidenceReset = useCallback(() => { + processorChanged(controlNetId, { min_confidence: DEFAULTS.min_confidence }); + }, [controlNetId, processorChanged]); + + return ( + + + + + ); +}; + +export default memo(MediapipeFaceProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/MidasDepthProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/MidasDepthProcessor.tsx new file mode 100644 index 0000000000..a552c90f3a --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/MidasDepthProcessor.tsx @@ -0,0 +1,70 @@ +import IAISlider from 'common/components/IAISlider'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredMidasDepthImageProcessorInvocation } from 'features/controlNet/store/types'; +import { memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.midas_depth_image_processor.default; + +type Props = { + controlNetId: string; + processorNode: RequiredMidasDepthImageProcessorInvocation; +}; + +const MidasDepthProcessor = (props: Props) => { + const { controlNetId, processorNode } = props; + const { a_mult, bg_th } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleAMultChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { a_mult: v }); + }, + [controlNetId, processorChanged] + ); + + const handleBgThChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { bg_th: v }); + }, + [controlNetId, processorChanged] + ); + + const handleAMultReset = useCallback(() => { + processorChanged(controlNetId, { a_mult: DEFAULTS.a_mult }); + }, [controlNetId, processorChanged]); + + const handleBgThReset = useCallback(() => { + processorChanged(controlNetId, { bg_th: DEFAULTS.bg_th }); + }, [controlNetId, processorChanged]); + + return ( + + + + + ); +}; + +export default memo(MidasDepthProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/MlsdImageProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/MlsdImageProcessor.tsx new file mode 100644 index 0000000000..d753d3b266 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/MlsdImageProcessor.tsx @@ -0,0 +1,116 @@ +import IAISlider from 'common/components/IAISlider'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredMlsdImageProcessorInvocation } from 'features/controlNet/store/types'; +import { memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.mlsd_image_processor.default; + +type Props = { + controlNetId: string; + processorNode: RequiredMlsdImageProcessorInvocation; +}; + +const MlsdImageProcessor = (props: Props) => { + const { 
controlNetId, processorNode } = props; + const { image_resolution, detect_resolution, thr_d, thr_v } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleDetectResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { detect_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleImageResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { image_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleThrDChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { thr_d: v }); + }, + [controlNetId, processorChanged] + ); + + const handleThrVChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { thr_v: v }); + }, + [controlNetId, processorChanged] + ); + + const handleDetectResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + detect_resolution: DEFAULTS.detect_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleImageResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + image_resolution: DEFAULTS.image_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleThrDReset = useCallback(() => { + processorChanged(controlNetId, { thr_d: DEFAULTS.thr_d }); + }, [controlNetId, processorChanged]); + + const handleThrVReset = useCallback(() => { + processorChanged(controlNetId, { thr_v: DEFAULTS.thr_v }); + }, [controlNetId, processorChanged]); + + return ( + + + + + + + ); +}; + +export default memo(MlsdImageProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/NormalBaeProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/NormalBaeProcessor.tsx new file mode 100644 index 0000000000..ea3270adb3 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/NormalBaeProcessor.tsx @@ -0,0 +1,72 @@ +import IAISlider from 'common/components/IAISlider'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredNormalbaeImageProcessorInvocation } from 'features/controlNet/store/types'; +import { memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.normalbae_image_processor.default; + +type Props = { + controlNetId: string; + processorNode: RequiredNormalbaeImageProcessorInvocation; +}; + +const NormalBaeProcessor = (props: Props) => { + const { controlNetId, processorNode } = props; + const { image_resolution, detect_resolution } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleDetectResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { detect_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleImageResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { image_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleDetectResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + detect_resolution: DEFAULTS.detect_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleImageResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + image_resolution: DEFAULTS.image_resolution, + }); + }, [controlNetId, processorChanged]); + + return ( + + + + + ); +}; + +export default 
memo(NormalBaeProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/OpenposeProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/OpenposeProcessor.tsx new file mode 100644 index 0000000000..57b45fffa4 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/OpenposeProcessor.tsx @@ -0,0 +1,85 @@ +import IAISlider from 'common/components/IAISlider'; +import IAISwitch from 'common/components/IAISwitch'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredOpenposeImageProcessorInvocation } from 'features/controlNet/store/types'; +import { ChangeEvent, memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.openpose_image_processor.default; + +type Props = { + controlNetId: string; + processorNode: RequiredOpenposeImageProcessorInvocation; +}; + +const OpenposeProcessor = (props: Props) => { + const { controlNetId, processorNode } = props; + const { image_resolution, detect_resolution, hand_and_face } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleDetectResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { detect_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleImageResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { image_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleDetectResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + detect_resolution: DEFAULTS.detect_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleImageResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + image_resolution: DEFAULTS.image_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleHandAndFaceChanged = useCallback( + (e: ChangeEvent) => { + processorChanged(controlNetId, { hand_and_face: e.target.checked }); + }, + [controlNetId, processorChanged] + ); + + return ( + + + + + + ); +}; + +export default memo(OpenposeProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/PidiProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/PidiProcessor.tsx new file mode 100644 index 0000000000..7fb5b92b9c --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/PidiProcessor.tsx @@ -0,0 +1,93 @@ +import IAISlider from 'common/components/IAISlider'; +import IAISwitch from 'common/components/IAISwitch'; +import { CONTROLNET_PROCESSORS } from 'features/controlNet/store/constants'; +import { RequiredPidiImageProcessorInvocation } from 'features/controlNet/store/types'; +import { ChangeEvent, memo, useCallback } from 'react'; +import { useProcessorNodeChanged } from '../hooks/useProcessorNodeChanged'; +import ProcessorWrapper from './common/ProcessorWrapper'; + +const DEFAULTS = CONTROLNET_PROCESSORS.pidi_image_processor.default; + +type Props = { + controlNetId: string; + processorNode: RequiredPidiImageProcessorInvocation; +}; + +const PidiProcessor = (props: Props) => { + const { controlNetId, processorNode } = props; + const { image_resolution, detect_resolution, scribble, safe } = processorNode; + const processorChanged = useProcessorNodeChanged(); + + const handleDetectResolutionChanged = 
useCallback( + (v: number) => { + processorChanged(controlNetId, { detect_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleImageResolutionChanged = useCallback( + (v: number) => { + processorChanged(controlNetId, { image_resolution: v }); + }, + [controlNetId, processorChanged] + ); + + const handleDetectResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + detect_resolution: DEFAULTS.detect_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleImageResolutionReset = useCallback(() => { + processorChanged(controlNetId, { + image_resolution: DEFAULTS.image_resolution, + }); + }, [controlNetId, processorChanged]); + + const handleScribbleChanged = useCallback( + (e: ChangeEvent) => { + processorChanged(controlNetId, { scribble: e.target.checked }); + }, + [controlNetId, processorChanged] + ); + + const handleSafeChanged = useCallback( + (e: ChangeEvent) => { + processorChanged(controlNetId, { safe: e.target.checked }); + }, + [controlNetId, processorChanged] + ); + + return ( + + + + + + + ); +}; + +export default memo(PidiProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/ZoeDepthProcessor.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/ZoeDepthProcessor.tsx new file mode 100644 index 0000000000..d0a34784bf --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/ZoeDepthProcessor.tsx @@ -0,0 +1,14 @@ +import { RequiredZoeDepthImageProcessorInvocation } from 'features/controlNet/store/types'; +import { memo } from 'react'; + +type Props = { + controlNetId: string; + processorNode: RequiredZoeDepthImageProcessorInvocation; +}; + +const ZoeDepthProcessor = (props: Props) => { + // Has no parameters? 
+ return null; +}; + +export default memo(ZoeDepthProcessor); diff --git a/invokeai/frontend/web/src/features/controlNet/components/processors/common/ProcessorWrapper.tsx b/invokeai/frontend/web/src/features/controlNet/components/processors/common/ProcessorWrapper.tsx new file mode 100644 index 0000000000..5dc0a909d5 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/components/processors/common/ProcessorWrapper.tsx @@ -0,0 +1,8 @@ +import { Flex } from '@chakra-ui/react'; +import { PropsWithChildren } from 'react'; + +type Props = PropsWithChildren; + +export default function ProcessorWrapper(props: Props) { + return {props.children}; +} diff --git a/invokeai/frontend/web/src/features/controlNet/store/actions.ts b/invokeai/frontend/web/src/features/controlNet/store/actions.ts new file mode 100644 index 0000000000..3d9f56a36b --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/store/actions.ts @@ -0,0 +1,5 @@ +import { createAction } from '@reduxjs/toolkit'; + +export const controlNetImageProcessed = createAction<{ + controlNetId: string; +}>('controlNet/imageProcessed'); diff --git a/invokeai/frontend/web/src/features/controlNet/store/constants.ts b/invokeai/frontend/web/src/features/controlNet/store/constants.ts new file mode 100644 index 0000000000..c8689badf5 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/store/constants.ts @@ -0,0 +1,212 @@ +import { + ControlNetProcessorType, + RequiredCannyImageProcessorInvocation, + RequiredControlNetProcessorNode, +} from './types'; + +type ControlNetProcessorsDict = Record< + ControlNetProcessorType, + { + type: ControlNetProcessorType; + label: string; + description: string; + default: RequiredControlNetProcessorNode; + } +>; + +/** + * A dict of ControlNet processors, including: + * - type + * - label + * - description + * - default values + * + * TODO: Generate from the OpenAPI schema + */ +export const CONTROLNET_PROCESSORS = { + none: { + type: 'none', + label: 'None', + description: '', + default: { + type: 'none', + }, + }, + canny_image_processor: { + type: 'canny_image_processor', + label: 'Canny', + description: '', + default: { + id: 'canny_image_processor', + type: 'canny_image_processor', + low_threshold: 100, + high_threshold: 200, + }, + }, + content_shuffle_image_processor: { + type: 'content_shuffle_image_processor', + label: 'Content Shuffle', + description: '', + default: { + id: 'content_shuffle_image_processor', + type: 'content_shuffle_image_processor', + detect_resolution: 512, + image_resolution: 512, + h: 512, + w: 512, + f: 256, + }, + }, + hed_image_processor: { + type: 'hed_image_processor', + label: 'HED', + description: '', + default: { + id: 'hed_image_processor', + type: 'hed_image_processor', + detect_resolution: 512, + image_resolution: 512, + scribble: false, + }, + }, + lineart_anime_image_processor: { + type: 'lineart_anime_image_processor', + label: 'Lineart Anime', + description: '', + default: { + id: 'lineart_anime_image_processor', + type: 'lineart_anime_image_processor', + detect_resolution: 512, + image_resolution: 512, + }, + }, + lineart_image_processor: { + type: 'lineart_image_processor', + label: 'Lineart', + description: '', + default: { + id: 'lineart_image_processor', + type: 'lineart_image_processor', + detect_resolution: 512, + image_resolution: 512, + coarse: false, + }, + }, + mediapipe_face_processor: { + type: 'mediapipe_face_processor', + label: 'Mediapipe Face', + description: '', + default: { + id: 'mediapipe_face_processor', + type: 
'mediapipe_face_processor', + max_faces: 1, + min_confidence: 0.5, + }, + }, + midas_depth_image_processor: { + type: 'midas_depth_image_processor', + label: 'Depth (Midas)', + description: '', + default: { + id: 'midas_depth_image_processor', + type: 'midas_depth_image_processor', + a_mult: 2, + bg_th: 0.1, + }, + }, + mlsd_image_processor: { + type: 'mlsd_image_processor', + label: 'MLSD', + description: '', + default: { + id: 'mlsd_image_processor', + type: 'mlsd_image_processor', + detect_resolution: 512, + image_resolution: 512, + thr_d: 0.1, + thr_v: 0.1, + }, + }, + normalbae_image_processor: { + type: 'normalbae_image_processor', + label: 'NormalBae', + description: '', + default: { + id: 'normalbae_image_processor', + type: 'normalbae_image_processor', + detect_resolution: 512, + image_resolution: 512, + }, + }, + openpose_image_processor: { + type: 'openpose_image_processor', + label: 'Openpose', + description: '', + default: { + id: 'openpose_image_processor', + type: 'openpose_image_processor', + detect_resolution: 512, + image_resolution: 512, + hand_and_face: false, + }, + }, + pidi_image_processor: { + type: 'pidi_image_processor', + label: 'PIDI', + description: '', + default: { + id: 'pidi_image_processor', + type: 'pidi_image_processor', + detect_resolution: 512, + image_resolution: 512, + scribble: false, + safe: false, + }, + }, + zoe_depth_image_processor: { + type: 'zoe_depth_image_processor', + label: 'Depth (Zoe)', + description: '', + default: { + id: 'zoe_depth_image_processor', + type: 'zoe_depth_image_processor', + }, + }, +}; + +export const CONTROLNET_MODELS = [ + 'lllyasviel/control_v11p_sd15_canny', + 'lllyasviel/control_v11p_sd15_inpaint', + 'lllyasviel/control_v11p_sd15_mlsd', + 'lllyasviel/control_v11f1p_sd15_depth', + 'lllyasviel/control_v11p_sd15_normalbae', + 'lllyasviel/control_v11p_sd15_seg', + 'lllyasviel/control_v11p_sd15_lineart', + 'lllyasviel/control_v11p_sd15s2_lineart_anime', + 'lllyasviel/control_v11p_sd15_scribble', + 'lllyasviel/control_v11p_sd15_softedge', + 'lllyasviel/control_v11e_sd15_shuffle', + 'lllyasviel/control_v11p_sd15_openpose', + 'lllyasviel/control_v11f1e_sd15_tile', + 'lllyasviel/control_v11e_sd15_ip2p', + 'CrucibleAI/ControlNetMediaPipeFace', +]; + +export type ControlNetModel = (typeof CONTROLNET_MODELS)[number]; + +export const CONTROLNET_MODEL_MAP: Record< + ControlNetModel, + ControlNetProcessorType +> = { + 'lllyasviel/control_v11p_sd15_canny': 'canny_image_processor', + 'lllyasviel/control_v11p_sd15_mlsd': 'mlsd_image_processor', + 'lllyasviel/control_v11f1p_sd15_depth': 'midas_depth_image_processor', + 'lllyasviel/control_v11p_sd15_normalbae': 'normalbae_image_processor', + 'lllyasviel/control_v11p_sd15_lineart': 'lineart_image_processor', + 'lllyasviel/control_v11p_sd15s2_lineart_anime': + 'lineart_anime_image_processor', + 'lllyasviel/control_v11p_sd15_softedge': 'hed_image_processor', + 'lllyasviel/control_v11e_sd15_shuffle': 'content_shuffle_image_processor', + 'lllyasviel/control_v11p_sd15_openpose': 'openpose_image_processor', + 'CrucibleAI/ControlNetMediaPipeFace': 'mediapipe_face_processor', +}; diff --git a/invokeai/frontend/web/src/features/controlNet/store/controlNetDenylist.ts b/invokeai/frontend/web/src/features/controlNet/store/controlNetDenylist.ts new file mode 100644 index 0000000000..07eab8120f --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/store/controlNetDenylist.ts @@ -0,0 +1,8 @@ +import { ControlNetState } from './controlNetSlice'; + +/** + * ControlNet slice persist denylist 
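 * isProcessingControlImage is transient UI state (an in-flight processing flag) and is excluded so it is not persisted and rehydrated.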
+ */ +export const controlNetDenylist: (keyof ControlNetState)[] = [ + 'isProcessingControlImage', +]; diff --git a/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts b/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts new file mode 100644 index 0000000000..1389457aba --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts @@ -0,0 +1,218 @@ +import { PayloadAction } from '@reduxjs/toolkit'; +import { createSlice } from '@reduxjs/toolkit'; +import { RootState } from 'app/store/store'; +import { ImageDTO } from 'services/api'; +import { + ControlNetProcessorType, + RequiredCannyImageProcessorInvocation, + RequiredControlNetProcessorNode, +} from './types'; +import { + CONTROLNET_MODELS, + CONTROLNET_PROCESSORS, + ControlNetModel, +} from './constants'; +import { controlNetImageProcessed } from './actions'; + +export const initialControlNet: Omit = { + isEnabled: true, + model: CONTROLNET_MODELS[0], + weight: 1, + beginStepPct: 0, + endStepPct: 1, + controlImage: null, + processedControlImage: null, + processorType: 'canny_image_processor', + processorNode: CONTROLNET_PROCESSORS.canny_image_processor + .default as RequiredCannyImageProcessorInvocation, +}; + +export type ControlNetConfig = { + controlNetId: string; + isEnabled: boolean; + model: ControlNetModel; + weight: number; + beginStepPct: number; + endStepPct: number; + controlImage: ImageDTO | null; + processedControlImage: ImageDTO | null; + processorType: ControlNetProcessorType; + processorNode: RequiredControlNetProcessorNode; +}; + +export type ControlNetState = { + controlNets: Record; + isEnabled: boolean; + isProcessingControlImage: boolean; +}; + +export const initialControlNetState: ControlNetState = { + controlNets: {}, + isEnabled: false, + isProcessingControlImage: false, +}; + +export const controlNetSlice = createSlice({ + name: 'controlNet', + initialState: initialControlNetState, + reducers: { + isControlNetEnabledToggled: (state) => { + state.isEnabled = !state.isEnabled; + }, + controlNetAdded: ( + state, + action: PayloadAction<{ + controlNetId: string; + controlNet?: ControlNetConfig; + }> + ) => { + const { controlNetId, controlNet } = action.payload; + state.controlNets[controlNetId] = { + ...(controlNet ?? 
initialControlNet), + controlNetId, + }; + }, + controlNetAddedFromImage: ( + state, + action: PayloadAction<{ controlNetId: string; controlImage: ImageDTO }> + ) => { + const { controlNetId, controlImage } = action.payload; + state.controlNets[controlNetId] = { + ...initialControlNet, + controlNetId, + controlImage, + }; + }, + controlNetRemoved: ( + state, + action: PayloadAction<{ controlNetId: string }> + ) => { + const { controlNetId } = action.payload; + delete state.controlNets[controlNetId]; + }, + controlNetToggled: ( + state, + action: PayloadAction<{ controlNetId: string }> + ) => { + const { controlNetId } = action.payload; + state.controlNets[controlNetId].isEnabled = + !state.controlNets[controlNetId].isEnabled; + }, + controlNetImageChanged: ( + state, + action: PayloadAction<{ + controlNetId: string; + controlImage: ImageDTO | null; + }> + ) => { + const { controlNetId, controlImage } = action.payload; + state.controlNets[controlNetId].controlImage = controlImage; + state.controlNets[controlNetId].processedControlImage = null; + if ( + controlImage !== null && + state.controlNets[controlNetId].processorType !== 'none' + ) { + state.isProcessingControlImage = true; + } + }, + controlNetProcessedImageChanged: ( + state, + action: PayloadAction<{ + controlNetId: string; + processedControlImage: ImageDTO | null; + }> + ) => { + const { controlNetId, processedControlImage } = action.payload; + state.controlNets[controlNetId].processedControlImage = + processedControlImage; + state.isProcessingControlImage = false; + }, + controlNetModelChanged: ( + state, + action: PayloadAction<{ controlNetId: string; model: ControlNetModel }> + ) => { + const { controlNetId, model } = action.payload; + state.controlNets[controlNetId].model = model; + }, + controlNetWeightChanged: ( + state, + action: PayloadAction<{ controlNetId: string; weight: number }> + ) => { + const { controlNetId, weight } = action.payload; + state.controlNets[controlNetId].weight = weight; + }, + controlNetBeginStepPctChanged: ( + state, + action: PayloadAction<{ controlNetId: string; beginStepPct: number }> + ) => { + const { controlNetId, beginStepPct } = action.payload; + state.controlNets[controlNetId].beginStepPct = beginStepPct; + }, + controlNetEndStepPctChanged: ( + state, + action: PayloadAction<{ controlNetId: string; endStepPct: number }> + ) => { + const { controlNetId, endStepPct } = action.payload; + state.controlNets[controlNetId].endStepPct = endStepPct; + }, + controlNetProcessorParamsChanged: ( + state, + action: PayloadAction<{ + controlNetId: string; + changes: Omit< + Partial, + 'id' | 'type' | 'is_intermediate' + >; + }> + ) => { + const { controlNetId, changes } = action.payload; + const processorNode = state.controlNets[controlNetId].processorNode; + state.controlNets[controlNetId].processorNode = { + ...processorNode, + ...changes, + }; + }, + controlNetProcessorTypeChanged: ( + state, + action: PayloadAction<{ + controlNetId: string; + processorType: ControlNetProcessorType; + }> + ) => { + const { controlNetId, processorType } = action.payload; + state.controlNets[controlNetId].processorType = processorType; + state.controlNets[controlNetId].processorNode = CONTROLNET_PROCESSORS[ + processorType + ].default as RequiredControlNetProcessorNode; + }, + }, + extraReducers: (builder) => { + builder.addCase(controlNetImageProcessed, (state, action) => { + if ( + state.controlNets[action.payload.controlNetId].controlImage !== null + ) { + state.isProcessingControlImage = true; + } + }); + }, +}); + 
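For orientation, here is a minimal usage sketch of this slice, assuming the `useAppDispatch` hook from `app/store/storeHooks` and the action creators exported below; the hook name `useExampleControlNetActions` is purely illustrative and not part of the codebase:

```ts
import { useCallback } from 'react';
import { v4 as uuidv4 } from 'uuid';
import { useAppDispatch } from 'app/store/storeHooks';
import { ControlNetProcessorType } from 'features/controlNet/store/types';
import {
  controlNetAdded,
  controlNetWeightChanged,
  controlNetProcessorTypeChanged,
} from 'features/controlNet/store/controlNetSlice';

// Illustrative only: wraps a few of the slice's actions for use in a component.
export const useExampleControlNetActions = () => {
  const dispatch = useAppDispatch();

  // Adds a new ControlNet entry seeded from `initialControlNet`.
  const addControlNet = useCallback(() => {
    dispatch(controlNetAdded({ controlNetId: uuidv4() }));
  }, [dispatch]);

  // Adjusts the weight of an existing ControlNet.
  const setWeight = useCallback(
    (controlNetId: string, weight: number) => {
      dispatch(controlNetWeightChanged({ controlNetId, weight }));
    },
    [dispatch]
  );

  // Changing the processor type also resets processorNode to that type's defaults.
  const setProcessorType = useCallback(
    (controlNetId: string, processorType: ControlNetProcessorType) => {
      dispatch(controlNetProcessorTypeChanged({ controlNetId, processorType }));
    },
    [dispatch]
  );

  return { addControlNet, setWeight, setProcessorType };
};
```

Because the state is a `Record` keyed by `controlNetId`, the slice supports any number of ControlNets, which is why every action payload carries the id.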
+export const { + isControlNetEnabledToggled, + controlNetAdded, + controlNetAddedFromImage, + controlNetRemoved, + controlNetImageChanged, + controlNetProcessedImageChanged, + controlNetToggled, + controlNetModelChanged, + controlNetWeightChanged, + controlNetBeginStepPctChanged, + controlNetEndStepPctChanged, + controlNetProcessorParamsChanged, + controlNetProcessorTypeChanged, +} = controlNetSlice.actions; + +export default controlNetSlice.reducer; + +export const controlNetSelector = (state: RootState) => state.controlNet; diff --git a/invokeai/frontend/web/src/features/controlNet/store/types.ts b/invokeai/frontend/web/src/features/controlNet/store/types.ts new file mode 100644 index 0000000000..4ee15b39b9 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlNet/store/types.ts @@ -0,0 +1,329 @@ +import { isObject } from 'lodash-es'; +import { + CannyImageProcessorInvocation, + ContentShuffleImageProcessorInvocation, + HedImageProcessorInvocation, + LineartAnimeImageProcessorInvocation, + LineartImageProcessorInvocation, + MediapipeFaceProcessorInvocation, + MidasDepthImageProcessorInvocation, + MlsdImageProcessorInvocation, + NormalbaeImageProcessorInvocation, + OpenposeImageProcessorInvocation, + PidiImageProcessorInvocation, + ZoeDepthImageProcessorInvocation, +} from 'services/api'; +import { O } from 'ts-toolbelt'; + +/** + * Any ControlNet processor node + */ +export type ControlNetProcessorNode = + | CannyImageProcessorInvocation + | ContentShuffleImageProcessorInvocation + | HedImageProcessorInvocation + | LineartAnimeImageProcessorInvocation + | LineartImageProcessorInvocation + | MediapipeFaceProcessorInvocation + | MidasDepthImageProcessorInvocation + | MlsdImageProcessorInvocation + | NormalbaeImageProcessorInvocation + | OpenposeImageProcessorInvocation + | PidiImageProcessorInvocation + | ZoeDepthImageProcessorInvocation; + +/** + * Any ControlNet processor type + */ +export type ControlNetProcessorType = NonNullable< + ControlNetProcessorNode['type'] | 'none' +>; + +/** + * The Canny processor node, with parameters flagged as required + */ +export type RequiredCannyImageProcessorInvocation = O.Required< + CannyImageProcessorInvocation, + 'type' | 'low_threshold' | 'high_threshold' +>; + +/** + * The ContentShuffle processor node, with parameters flagged as required + */ +export type RequiredContentShuffleImageProcessorInvocation = O.Required< + ContentShuffleImageProcessorInvocation, + 'type' | 'detect_resolution' | 'image_resolution' | 'w' | 'h' | 'f' +>; + +/** + * The HED processor node, with parameters flagged as required + */ +export type RequiredHedImageProcessorInvocation = O.Required< + HedImageProcessorInvocation, + 'type' | 'detect_resolution' | 'image_resolution' | 'scribble' +>; + +/** + * The Lineart Anime processor node, with parameters flagged as required + */ +export type RequiredLineartAnimeImageProcessorInvocation = O.Required< + LineartAnimeImageProcessorInvocation, + 'type' | 'detect_resolution' | 'image_resolution' +>; + +/** + * The Lineart processor node, with parameters flagged as required + */ +export type RequiredLineartImageProcessorInvocation = O.Required< + LineartImageProcessorInvocation, + 'type' | 'detect_resolution' | 'image_resolution' | 'coarse' +>; + +/** + * The MediapipeFace processor node, with parameters flagged as required + */ +export type RequiredMediapipeFaceProcessorInvocation = O.Required< + MediapipeFaceProcessorInvocation, + 'type' | 'max_faces' | 'min_confidence' +>; + +/** + * The MidasDepth processor node, with 
parameters flagged as required + */ +export type RequiredMidasDepthImageProcessorInvocation = O.Required< + MidasDepthImageProcessorInvocation, + 'type' | 'a_mult' | 'bg_th' +>; + +/** + * The MLSD processor node, with parameters flagged as required + */ +export type RequiredMlsdImageProcessorInvocation = O.Required< + MlsdImageProcessorInvocation, + 'type' | 'detect_resolution' | 'image_resolution' | 'thr_v' | 'thr_d' +>; + +/** + * The NormalBae processor node, with parameters flagged as required + */ +export type RequiredNormalbaeImageProcessorInvocation = O.Required< + NormalbaeImageProcessorInvocation, + 'type' | 'detect_resolution' | 'image_resolution' +>; + +/** + * The Openpose processor node, with parameters flagged as required + */ +export type RequiredOpenposeImageProcessorInvocation = O.Required< + OpenposeImageProcessorInvocation, + 'type' | 'detect_resolution' | 'image_resolution' | 'hand_and_face' +>; + +/** + * The Pidi processor node, with parameters flagged as required + */ +export type RequiredPidiImageProcessorInvocation = O.Required< + PidiImageProcessorInvocation, + 'type' | 'detect_resolution' | 'image_resolution' | 'safe' | 'scribble' +>; + +/** + * The ZoeDepth processor node, with parameters flagged as required + */ +export type RequiredZoeDepthImageProcessorInvocation = O.Required< + ZoeDepthImageProcessorInvocation, + 'type' +>; + +/** + * Any ControlNet Processor node, with its parameters flagged as required + */ +export type RequiredControlNetProcessorNode = + | RequiredCannyImageProcessorInvocation + | RequiredContentShuffleImageProcessorInvocation + | RequiredHedImageProcessorInvocation + | RequiredLineartAnimeImageProcessorInvocation + | RequiredLineartImageProcessorInvocation + | RequiredMediapipeFaceProcessorInvocation + | RequiredMidasDepthImageProcessorInvocation + | RequiredMlsdImageProcessorInvocation + | RequiredNormalbaeImageProcessorInvocation + | RequiredOpenposeImageProcessorInvocation + | RequiredPidiImageProcessorInvocation + | RequiredZoeDepthImageProcessorInvocation; + +/** + * Type guard for CannyImageProcessorInvocation + */ +export const isCannyImageProcessorInvocation = ( + obj: unknown +): obj is CannyImageProcessorInvocation => { + if (isObject(obj) && 'type' in obj && obj.type === 'canny_image_processor') { + return true; + } + return false; +}; + +/** + * Type guard for ContentShuffleImageProcessorInvocation + */ +export const isContentShuffleImageProcessorInvocation = ( + obj: unknown +): obj is ContentShuffleImageProcessorInvocation => { + if ( + isObject(obj) && + 'type' in obj && + obj.type === 'content_shuffle_image_processor' + ) { + return true; + } + return false; +}; + +/** + * Type guard for HedImageprocessorInvocation + */ +export const isHedImageprocessorInvocation = ( + obj: unknown +): obj is HedImageProcessorInvocation => { + if (isObject(obj) && 'type' in obj && obj.type === 'hed_image_processor') { + return true; + } + return false; +}; + +/** + * Type guard for LineartAnimeImageProcessorInvocation + */ +export const isLineartAnimeImageProcessorInvocation = ( + obj: unknown +): obj is LineartAnimeImageProcessorInvocation => { + if ( + isObject(obj) && + 'type' in obj && + obj.type === 'lineart_anime_image_processor' + ) { + return true; + } + return false; +}; + +/** + * Type guard for LineartImageProcessorInvocation + */ +export const isLineartImageProcessorInvocation = ( + obj: unknown +): obj is LineartImageProcessorInvocation => { + if ( + isObject(obj) && + 'type' in obj && + obj.type === 
'lineart_image_processor' + ) { + return true; + } + return false; +}; + +/** + * Type guard for MediapipeFaceProcessorInvocation + */ +export const isMediapipeFaceProcessorInvocation = ( + obj: unknown +): obj is MediapipeFaceProcessorInvocation => { + if ( + isObject(obj) && + 'type' in obj && + obj.type === 'mediapipe_face_processor' + ) { + return true; + } + return false; +}; + +/** + * Type guard for MidasDepthImageProcessorInvocation + */ +export const isMidasDepthImageProcessorInvocation = ( + obj: unknown +): obj is MidasDepthImageProcessorInvocation => { + if ( + isObject(obj) && + 'type' in obj && + obj.type === 'midas_depth_image_processor' + ) { + return true; + } + return false; +}; + +/** + * Type guard for MlsdImageProcessorInvocation + */ +export const isMlsdImageProcessorInvocation = ( + obj: unknown +): obj is MlsdImageProcessorInvocation => { + if (isObject(obj) && 'type' in obj && obj.type === 'mlsd_image_processor') { + return true; + } + return false; +}; + +/** + * Type guard for NormalbaeImageProcessorInvocation + */ +export const isNormalbaeImageProcessorInvocation = ( + obj: unknown +): obj is NormalbaeImageProcessorInvocation => { + if ( + isObject(obj) && + 'type' in obj && + obj.type === 'normalbae_image_processor' + ) { + return true; + } + return false; +}; + +/** + * Type guard for OpenposeImageProcessorInvocation + */ +export const isOpenposeImageProcessorInvocation = ( + obj: unknown +): obj is OpenposeImageProcessorInvocation => { + if ( + isObject(obj) && + 'type' in obj && + obj.type === 'openpose_image_processor' + ) { + return true; + } + return false; +}; + +/** + * Type guard for PidiImageProcessorInvocation + */ +export const isPidiImageProcessorInvocation = ( + obj: unknown +): obj is PidiImageProcessorInvocation => { + if (isObject(obj) && 'type' in obj && obj.type === 'pidi_image_processor') { + return true; + } + return false; +}; + +/** + * Type guard for ZoeDepthImageProcessorInvocation + */ +export const isZoeDepthImageProcessorInvocation = ( + obj: unknown +): obj is ZoeDepthImageProcessorInvocation => { + if ( + isObject(obj) && + 'type' in obj && + obj.type === 'zoe_depth_image_processor' + ) { + return true; + } + return false; +}; diff --git a/invokeai/frontend/web/src/features/gallery/components/CurrentImageDisplay.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImageDisplay.tsx index 5810c599c1..e4e50e6c5d 100644 --- a/invokeai/frontend/web/src/features/gallery/components/CurrentImageDisplay.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/CurrentImageDisplay.tsx @@ -1,4 +1,4 @@ -import { Flex, Icon } from '@chakra-ui/react'; +import { Box, Flex } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { useAppSelector } from 'app/store/storeHooks'; import { systemSelector } from 'features/system/store/systemSelectors'; @@ -7,7 +7,7 @@ import { isEqual } from 'lodash-es'; import { gallerySelector } from '../store/gallerySelectors'; import CurrentImageButtons from './CurrentImageButtons'; import CurrentImagePreview from './CurrentImagePreview'; -import { FaImage } from 'react-icons/fa'; +import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; export const currentImageDisplaySelector = createSelector( [systemSelector, gallerySelector], @@ -15,21 +15,20 @@ export const currentImageDisplaySelector = createSelector( const { progressImage } = system; return { - hasAnImageToDisplay: gallery.selectedImage || progressImage, + hasSelectedImage: 
Boolean(gallery.selectedImage), + hasProgressImage: Boolean(progressImage), }; }, - { - memoizeOptions: { - resultEqualityCheck: isEqual, - }, - } + defaultSelectorOptions ); /** * Displays the current image if there is one, plus associated actions. */ const CurrentImageDisplay = () => { - const { hasAnImageToDisplay } = useAppSelector(currentImageDisplaySelector); + const { hasSelectedImage, hasProgressImage } = useAppSelector( + currentImageDisplaySelector + ); return ( { gap: 4, }} > - {hasAnImageToDisplay ? ( - <> - - - - ) : ( - - )} + + {hasSelectedImage && ( + + + + )} ); }; diff --git a/invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx index 280d859b87..12d62ead70 100644 --- a/invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx @@ -1,20 +1,20 @@ import { Box, Flex, Image } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { useGetUrl } from 'common/util/getUrl'; import { uiSelector } from 'features/ui/store/uiSelectors'; import { isEqual } from 'lodash-es'; import { gallerySelector } from '../store/gallerySelectors'; import ImageMetadataViewer from './ImageMetaDataViewer/ImageMetadataViewer'; import NextPrevImageButtons from './NextPrevImageButtons'; -import { DragEvent, memo, useCallback } from 'react'; +import { memo, useCallback } from 'react'; import { systemSelector } from 'features/system/store/systemSelectors'; -import ImageFallbackSpinner from './ImageFallbackSpinner'; -import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay'; import { configSelector } from '../../system/store/configSelectors'; import { useAppToaster } from 'app/components/Toaster'; import { imageSelected } from '../store/gallerySlice'; +import IAIDndImage from 'common/components/IAIDndImage'; +import { ImageDTO } from 'services/api'; +import { IAIImageFallback } from 'common/components/IAIImageFallback'; export const imagesSelector = createSelector( [uiSelector, gallerySelector, systemSelector], @@ -46,27 +46,14 @@ const CurrentImagePreview = () => { const { shouldShowImageDetails, image, - shouldHidePreview, progressImage, shouldShowProgressInViewer, shouldAntialiasProgressImage, } = useAppSelector(imagesSelector); const { shouldFetchImages } = useAppSelector(configSelector); - const { getUrl } = useGetUrl(); const toaster = useAppToaster(); const dispatch = useAppDispatch(); - const handleDragStart = useCallback( - (e: DragEvent) => { - if (!image) { - return; - } - e.dataTransfer.setData('invokeai/imageName', image.image_name); - e.dataTransfer.effectAllowed = 'move'; - }, - [image] - ); - const handleError = useCallback(() => { dispatch(imageSelected()); if (shouldFetchImages) { @@ -78,11 +65,21 @@ const CurrentImagePreview = () => { } }, [dispatch, toaster, shouldFetchImages]); + const handleDrop = useCallback( + (droppedImage: ImageDTO) => { + if (droppedImage.image_name === image?.image_name) { + return; + } + dispatch(imageSelected(droppedImage)); + }, + [dispatch, image?.image_name] + ); + return ( { height={progressImage.height} sx={{ objectFit: 'contain', - maxWidth: '100%', - maxHeight: '100%', + maxWidth: 'full', + maxHeight: 'full', height: 'auto', position: 'absolute', borderRadius: 'base', @@ -104,34 +101,29 @@ const CurrentImagePreview = () => { }} /> ) : 
( - image && ( - <> - } - onDragStart={handleDragStart} - sx={{ - objectFit: 'contain', - maxWidth: '100%', - maxHeight: '100%', - height: 'auto', - position: 'absolute', - borderRadius: 'base', - }} - onError={handleError} - /> - - - ) + + } + /> + )} - {shouldShowImageDetails && image && 'metadata' in image && ( + {shouldShowImageDetails && image && image.metadata && ( { )} - {!shouldShowImageDetails && } + {!shouldShowImageDetails && image && ( + + + + )} ); }; diff --git a/invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx b/invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx index f652cebda2..4dad27d4e8 100644 --- a/invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx @@ -39,6 +39,7 @@ import { } from '../store/actions'; import { useAppToaster } from 'app/components/Toaster'; import { ImageDTO } from 'services/api'; +import { useDraggable } from '@dnd-kit/core'; export const selector = createSelector( [gallerySelector, systemSelector, lightboxSelector, activeTabNameSelector], @@ -117,6 +118,13 @@ const HoverableImage = memo((props: HoverableImageProps) => { const { recallBothPrompts, recallSeed, recallAllParameters } = useRecallParameters(); + const { attributes, listeners, setNodeRef } = useDraggable({ + id: `galleryImage_${image_name}`, + data: { + image, + }, + }); + const handleMouseOver = () => setIsHovered(true); const handleMouseOut = () => setIsHovered(false); @@ -144,14 +152,6 @@ const HoverableImage = memo((props: HoverableImageProps) => { dispatch(imageSelected(image)); }, [image, dispatch]); - const handleDragStart = useCallback( - (e: DragEvent) => { - e.dataTransfer.setData('invokeai/imageName', image.image_name); - e.dataTransfer.effectAllowed = 'move'; - }, - [image] - ); - // Recall parameters handlers const handleRecallPrompt = useCallback(() => { recallBothPrompts( @@ -212,7 +212,12 @@ const HoverableImage = memo((props: HoverableImageProps) => { }; return ( - <> + menuProps={{ size: 'sm', isLazy: true }} renderMenu={() => ( @@ -291,8 +296,8 @@ const HoverableImage = memo((props: HoverableImageProps) => { onMouseOver={handleMouseOver} onMouseOut={handleMouseOut} userSelect="none" - draggable={true} - onDragStart={handleDragStart} + // draggable={true} + // onDragStart={handleDragStart} onClick={handleSelectImage} ref={ref} sx={{ @@ -373,7 +378,7 @@ const HoverableImage = memo((props: HoverableImageProps) => { onClose={onDeleteDialogClose} handleDelete={handleDelete} /> - + ); }, memoEqualityCheck); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageFallbackSpinner.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageFallbackSpinner.tsx index 394ff9db15..fd603d3756 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageFallbackSpinner.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageFallbackSpinner.tsx @@ -14,6 +14,8 @@ const ImageFallbackSpinner = (props: ImageFallbackSpinnerProps) => { justifyContent: 'center', position: 'absolute', color: 'base.400', + minH: 36, + minW: 36, }} > diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx index 77f42a11a6..fe8690e379 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx @@ -10,7 
+10,7 @@ import { } from '@chakra-ui/react'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import IAIButton from 'common/components/IAIButton'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import IAIIconButton from 'common/components/IAIIconButton'; import IAIPopover from 'common/components/IAIPopover'; import IAISlider from 'common/components/IAISlider'; @@ -233,7 +233,7 @@ const ImageGalleryContent = () => { withReset handleReset={() => dispatch(setGalleryImageMinimumWidth(64))} /> - @@ -244,14 +244,14 @@ const ImageGalleryContent = () => { ) } /> - ) => dispatch(setShouldAutoSwitchToNewImages(e.target.checked)) } /> - ) => diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index ab62646c0f..8e5ecf64fa 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -50,7 +50,10 @@ export const gallerySlice = createSlice({ }, extraReducers: (builder) => { builder.addCase(imageUpserted, (state, action) => { - if (state.shouldAutoSwitchToNewImages) { + if ( + state.shouldAutoSwitchToNewImages && + action.payload.image_category === 'general' + ) { state.selectedImage = action.payload; } }); diff --git a/invokeai/frontend/web/src/features/nodes/components/fields/ImageInputFieldComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/fields/ImageInputFieldComponent.tsx index 57cefb0a9c..9ea65911a2 100644 --- a/invokeai/frontend/web/src/features/nodes/components/fields/ImageInputFieldComponent.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/fields/ImageInputFieldComponent.tsx @@ -1,54 +1,67 @@ -import { Box, Image } from '@chakra-ui/react'; import { useAppDispatch } from 'app/store/storeHooks'; -import SelectImagePlaceholder from 'common/components/SelectImagePlaceholder'; -import { useGetUrl } from 'common/util/getUrl'; -import useGetImageByName from 'features/gallery/hooks/useGetImageByName'; import { fieldValueChanged } from 'features/nodes/store/nodesSlice'; import { ImageInputFieldTemplate, ImageInputFieldValue, } from 'features/nodes/types/types'; -import { DragEvent, memo, useCallback, useState } from 'react'; +import { memo, useCallback } from 'react'; import { FieldComponentProps } from './types'; +import IAIDndImage from 'common/components/IAIDndImage'; +import { ImageDTO } from 'services/api'; +import { Flex } from '@chakra-ui/react'; const ImageInputFieldComponent = ( props: FieldComponentProps ) => { const { nodeId, field } = props; - const getImageByName = useGetImageByName(); const dispatch = useAppDispatch(); - const [url, setUrl] = useState(field.value?.image_url); - const { getUrl } = useGetUrl(); const handleDrop = useCallback( - (e: DragEvent) => { - const name = e.dataTransfer.getData('invokeai/imageName'); - const image = getImageByName(name); - - if (!image) { + (droppedImage: ImageDTO) => { + if (field.value?.image_name === droppedImage.image_name) { return; } - setUrl(image.image_url); - dispatch( fieldValueChanged({ nodeId, fieldName: field.name, - value: image, + value: droppedImage, }) ); }, - [getImageByName, dispatch, field.name, nodeId] + [dispatch, field.name, field.value?.image_name, nodeId] ); + const handleReset = useCallback(() => { + dispatch( + fieldValueChanged({ + nodeId, + fieldName: field.name, + value: undefined, + }) + ); + }, [dispatch, 
field.name, nodeId]); + return ( - - } /> - + + + ); }; diff --git a/invokeai/frontend/web/src/features/nodes/components/ui/NodeInvokeButton.tsx b/invokeai/frontend/web/src/features/nodes/components/ui/NodeInvokeButton.tsx index 4b916abd2e..be5e5a943e 100644 --- a/invokeai/frontend/web/src/features/nodes/components/ui/NodeInvokeButton.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/ui/NodeInvokeButton.tsx @@ -1,11 +1,11 @@ import { Box } from '@chakra-ui/react'; -import { readinessSelector } from 'app/selectors/readinessSelector'; import { userInvoked } from 'app/store/actions'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import IAIButton, { IAIButtonProps } from 'common/components/IAIButton'; import IAIIconButton, { IAIIconButtonProps, } from 'common/components/IAIIconButton'; +import { useIsReadyToInvoke } from 'common/hooks/useIsReadyToInvoke'; import ProgressBar from 'features/system/components/ProgressBar'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; import { useCallback } from 'react'; @@ -21,9 +21,8 @@ interface InvokeButton export default function NodeInvokeButton(props: InvokeButton) { const { iconButton = false, ...rest } = props; const dispatch = useAppDispatch(); - const { isReady } = useAppSelector(readinessSelector); const activeTabName = useAppSelector(activeTabNameSelector); - + const isReady = useIsReadyToInvoke(); const handleInvoke = useCallback(() => { dispatch(userInvoked('nodes')); }, [dispatch]); diff --git a/invokeai/frontend/web/src/features/nodes/util/addControlNetToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/addControlNetToLinearGraph.ts new file mode 100644 index 0000000000..9c77681d18 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/addControlNetToLinearGraph.ts @@ -0,0 +1,99 @@ +import { RootState } from 'app/store/store'; +import { forEach, size } from 'lodash-es'; +import { CollectInvocation, ControlNetInvocation } from 'services/api'; +import { NonNullableGraph } from '../types/types'; + +const CONTROL_NET_COLLECT = 'control_net_collect'; + +export const addControlNetToLinearGraph = ( + graph: NonNullableGraph, + baseNodeId: string, + state: RootState +): void => { + const { isEnabled: isControlNetEnabled, controlNets } = state.controlNet; + + // Add ControlNet + if (isControlNetEnabled) { + if (size(controlNets) > 1) { + const controlNetIterateNode: CollectInvocation = { + id: CONTROL_NET_COLLECT, + type: 'collect', + }; + graph.nodes[controlNetIterateNode.id] = controlNetIterateNode; + graph.edges.push({ + source: { node_id: controlNetIterateNode.id, field: 'collection' }, + destination: { + node_id: baseNodeId, + field: 'control', + }, + }); + } + + forEach(controlNets, (controlNet, index) => { + const { + controlNetId, + isEnabled, + controlImage, + processedControlImage, + beginStepPct, + endStepPct, + model, + processorType, + weight, + } = controlNet; + + if (!isEnabled) { + // Skip disabled ControlNets + return; + } + + const controlNetNode: ControlNetInvocation = { + id: `control_net_${controlNetId}`, + type: 'controlnet', + begin_step_percent: beginStepPct, + end_step_percent: endStepPct, + control_model: model as ControlNetInvocation['control_model'], + control_weight: weight, + }; + + if (processedControlImage && processorType !== 'none') { + // We've already processed the image in the app, so we can just use the processed image + const { image_name, image_origin } = processedControlImage; + controlNetNode.image = { + image_name, + 
image_origin, + }; + } else if (controlImage && processorType !== 'none') { + // The control image is preprocessed + const { image_name, image_origin } = controlImage; + controlNetNode.image = { + image_name, + image_origin, + }; + } else { + // Skip ControlNets without an unprocessed image - should never happen if everything is working correctly + return; + } + + graph.nodes[controlNetNode.id] = controlNetNode; + + if (size(controlNets) > 1) { + graph.edges.push({ + source: { node_id: controlNetNode.id, field: 'control' }, + destination: { + node_id: CONTROL_NET_COLLECT, + field: 'item', + }, + }); + } else { + graph.edges.push({ + source: { node_id: controlNetNode.id, field: 'control' }, + destination: { + node_id: baseNodeId, + field: 'control', + }, + }); + } + }); + } +}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildImageToImageGraph.ts index fe4f6c63b5..a1dc5d48ab 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildImageToImageGraph.ts @@ -14,6 +14,7 @@ import { import { NonNullableGraph } from 'features/nodes/types/types'; import { log } from 'app/logging/useLogger'; import { set } from 'lodash-es'; +import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph'; const moduleLog = log.child({ namespace: 'nodes' }); @@ -408,5 +409,7 @@ export const buildImageToImageGraph = (state: RootState): Graph => { }); } + addControlNetToLinearGraph(graph, LATENTS_TO_LATENTS, state); + return graph; }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildTextToImageGraph.ts index 753ccccff8..ae71f569b6 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildTextToImageGraph.ts @@ -10,6 +10,7 @@ import { TextToLatentsInvocation, } from 'services/api'; import { NonNullableGraph } from 'features/nodes/types/types'; +import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph'; const POSITIVE_CONDITIONING = 'positive_conditioning'; const NEGATIVE_CONDITIONING = 'negative_conditioning'; @@ -308,5 +309,8 @@ export const buildTextToImageGraph = (state: RootState): Graph => { }, }); } + + addControlNetToLinearGraph(graph, TEXT_TO_LATENTS, state); + return graph; }; diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse.tsx new file mode 100644 index 0000000000..06c6108dcb --- /dev/null +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse.tsx @@ -0,0 +1,69 @@ +import { Divider, Flex } from '@chakra-ui/react'; +import { useTranslation } from 'react-i18next'; +import IAICollapse from 'common/components/IAICollapse'; +import { Fragment, memo, useCallback } from 'react'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { createSelector } from '@reduxjs/toolkit'; +import { + controlNetAdded, + controlNetSelector, + isControlNetEnabledToggled, +} from 'features/controlNet/store/controlNetSlice'; +import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; 
+import { map } from 'lodash-es'; +import { v4 as uuidv4 } from 'uuid'; +import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; +import IAIButton from 'common/components/IAIButton'; +import ControlNet from 'features/controlNet/components/ControlNet'; + +const selector = createSelector( + controlNetSelector, + (controlNet) => { + const { controlNets, isEnabled } = controlNet; + + return { controlNetsArray: map(controlNets), isEnabled }; + }, + defaultSelectorOptions +); + +const ParamControlNetCollapse = () => { + const { t } = useTranslation(); + const { controlNetsArray, isEnabled } = useAppSelector(selector); + const isControlNetDisabled = useFeatureStatus('controlNet').isFeatureDisabled; + const dispatch = useAppDispatch(); + + const handleClickControlNetToggle = useCallback(() => { + dispatch(isControlNetEnabledToggled()); + }, [dispatch]); + + const handleClickedAddControlNet = useCallback(() => { + dispatch(controlNetAdded({ controlNetId: uuidv4() })); + }, [dispatch]); + + if (isControlNetDisabled) { + return null; + } + + return ( + + + {controlNetsArray.map((c, i) => ( + + {i > 0 && } + + + ))} + + Add ControlNet + + + + ); +}; + +export default memo(ParamControlNetCollapse); diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Core/ParamPositiveConditioning.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Core/ParamPositiveConditioning.tsx index 365bade0aa..0980b84ab3 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Core/ParamPositiveConditioning.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Core/ParamPositiveConditioning.tsx @@ -4,7 +4,6 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { ChangeEvent, KeyboardEvent, useCallback, useRef } from 'react'; import { createSelector } from '@reduxjs/toolkit'; -import { readinessSelector } from 'app/selectors/readinessSelector'; import { GenerationState, clampSymmetrySteps, @@ -17,6 +16,7 @@ import { useHotkeys } from 'react-hotkeys-hook'; import { useTranslation } from 'react-i18next'; import { userInvoked } from 'app/store/actions'; import IAITextarea from 'common/components/IAITextarea'; +import { useIsReadyToInvoke } from 'common/hooks/useIsReadyToInvoke'; const promptInputSelector = createSelector( [(state: RootState) => state.generation, activeTabNameSelector], @@ -39,7 +39,7 @@ const promptInputSelector = createSelector( const ParamPositiveConditioning = () => { const dispatch = useAppDispatch(); const { prompt, activeTabName } = useAppSelector(promptInputSelector); - const { isReady } = useAppSelector(readinessSelector); + const isReady = useIsReadyToInvoke(); const promptRef = useRef(null); diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImageDisplay.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImageDisplay.tsx index f17ebcbdc0..64974f0d35 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImageDisplay.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImageDisplay.tsx @@ -1,6 +1,5 @@ import { Flex } from '@chakra-ui/react'; import InitialImagePreview from './InitialImagePreview'; -import InitialImageButtons from 'common/components/InitialImageButtons'; const InitialImageDisplay = () => { return ( @@ -28,7 +27,6 @@ const InitialImageDisplay = () => { gap: 4, }} > - 
diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImagePreview.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImagePreview.tsx index cfe1513420..a1c4d5acab 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImagePreview.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImagePreview.tsx @@ -1,18 +1,20 @@ -import { Flex, Icon, Image } from '@chakra-ui/react'; +import { Flex } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { useGetUrl } from 'common/util/getUrl'; -import { clearInitialImage } from 'features/parameters/store/generationSlice'; -import { DragEvent, useCallback } from 'react'; +import { + clearInitialImage, + initialImageChanged, +} from 'features/parameters/store/generationSlice'; +import { useCallback } from 'react'; import { useTranslation } from 'react-i18next'; -import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay'; import { generationSelector } from 'features/parameters/store/generationSelectors'; -import { initialImageSelected } from 'features/parameters/store/actions'; import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; -import ImageFallbackSpinner from 'features/gallery/components/ImageFallbackSpinner'; -import { FaImage } from 'react-icons/fa'; import { configSelector } from '../../../../system/store/configSelectors'; import { useAppToaster } from 'app/components/Toaster'; +import IAIDndImage from 'common/components/IAIDndImage'; +import { ImageDTO } from 'services/api'; +import { IAIImageFallback } from 'common/components/IAIImageFallback'; const selector = createSelector( [generationSelector], @@ -52,13 +54,19 @@ const InitialImagePreview = () => { }, [dispatch, t, toaster, shouldFetchImages]); const handleDrop = useCallback( - (e: DragEvent) => { - const name = e.dataTransfer.getData('invokeai/imageName'); - dispatch(initialImageSelected(name)); + (droppedImage: ImageDTO) => { + if (droppedImage.image_name === initialImage?.image_name) { + return; + } + dispatch(initialImageChanged(droppedImage)); }, - [dispatch] + [dispatch, initialImage?.image_name] ); + const handleReset = useCallback(() => { + dispatch(clearInitialImage()); + }, [dispatch]); + return ( { alignItems: 'center', justifyContent: 'center', }} - onDrop={handleDrop} > - {initialImage?.image_url && ( - <> - } - onError={handleError} - sx={{ - objectFit: 'contain', - maxWidth: '100%', - maxHeight: '100%', - height: 'auto', - position: 'absolute', - borderRadius: 'base', - }} - /> - - - )} - {!initialImage?.image_url && ( - - )} + } + /> ); }; diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Seed/ParamSeedFull.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Seed/ParamSeedFull.tsx new file mode 100644 index 0000000000..75a5d189ae --- /dev/null +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Seed/ParamSeedFull.tsx @@ -0,0 +1,17 @@ +import { Flex } from '@chakra-ui/react'; +import { memo } from 'react'; +import ParamSeed from './ParamSeed'; +import ParamSeedShuffle from './ParamSeedShuffle'; +import ParamSeedRandomize from './ParamSeedRandomize'; + +const ParamSeedFull = () => { + return ( + + + + + + ); +}; + +export default memo(ParamSeedFull); diff --git 
a/invokeai/frontend/web/src/features/parameters/components/Parameters/Seed/ParamSeedRandomize.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Seed/ParamSeedRandomize.tsx index 13380f3660..6b1dd46780 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Seed/ParamSeedRandomize.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Seed/ParamSeedRandomize.tsx @@ -2,30 +2,10 @@ import { ChangeEvent, memo } from 'react'; import { RootState } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAISwitch from 'common/components/IAISwitch'; import { setShouldRandomizeSeed } from 'features/parameters/store/generationSlice'; import { useTranslation } from 'react-i18next'; -import { FormControl, FormLabel, Switch } from '@chakra-ui/react'; - -// export default function RandomizeSeed() { -// const dispatch = useAppDispatch(); -// const { t } = useTranslation(); - -// const shouldRandomizeSeed = useAppSelector( -// (state: RootState) => state.generation.shouldRandomizeSeed -// ); - -// const handleChangeShouldRandomizeSeed = (e: ChangeEvent) => -// dispatch(setShouldRandomizeSeed(e.target.checked)); - -// return ( -// -// ); -// } +import { FormControl, FormLabel, Switch, Tooltip } from '@chakra-ui/react'; +import IAISwitch from 'common/components/IAISwitch'; const ParamSeedRandomize = () => { const dispatch = useAppDispatch(); @@ -38,6 +18,14 @@ const ParamSeedRandomize = () => { const handleChangeShouldRandomizeSeed = (e: ChangeEvent) => dispatch(setShouldRandomizeSeed(e.target.checked)); + return ( + + ); + return ( dispatch(setSeed(randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX))); + return ( + } + /> + ); + return ( { diff --git a/invokeai/frontend/web/src/features/parameters/store/actions.ts b/invokeai/frontend/web/src/features/parameters/store/actions.ts index e9b90134e1..eba01248d1 100644 --- a/invokeai/frontend/web/src/features/parameters/store/actions.ts +++ b/invokeai/frontend/web/src/features/parameters/store/actions.ts @@ -7,25 +7,6 @@ export type ImageNameAndOrigin = { image_origin: ResourceOrigin; }; -export const isImageDTO = (image: any): image is ImageDTO => { - return ( - image && - isObject(image) && - 'image_name' in image && - image?.image_name !== undefined && - 'image_origin' in image && - image?.image_origin !== undefined && - 'image_url' in image && - image?.image_url !== undefined && - 'thumbnail_url' in image && - image?.thumbnail_url !== undefined && - 'image_category' in image && - image?.image_category !== undefined && - 'created_at' in image && - image?.created_at !== undefined - ); -}; - export const initialImageSelected = createAction( 'generation/initialImageSelected' ); diff --git a/invokeai/frontend/web/src/features/system/components/ModelManager/AddCheckpointModel.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/AddCheckpointModel.tsx index bb5db0302d..e6bd0b6ffb 100644 --- a/invokeai/frontend/web/src/features/system/components/ModelManager/AddCheckpointModel.tsx +++ b/invokeai/frontend/web/src/features/system/components/ModelManager/AddCheckpointModel.tsx @@ -10,7 +10,7 @@ import { } from '@chakra-ui/react'; import IAIButton from 'common/components/IAIButton'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import IAIInput from 'common/components/IAIInput'; import IAINumberInput from 'common/components/IAINumberInput'; import React from 
'react'; @@ -74,12 +74,12 @@ export default function AddCheckpointModel() { return ( - setAddmanually(!addManually)} /> - setAddmanually(!addManually)} diff --git a/invokeai/frontend/web/src/features/system/components/ModelManager/MergeModels.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/MergeModels.tsx index 6ba148cac4..219d49d4ee 100644 --- a/invokeai/frontend/web/src/features/system/components/ModelManager/MergeModels.tsx +++ b/invokeai/frontend/web/src/features/system/components/ModelManager/MergeModels.tsx @@ -24,7 +24,7 @@ import { useState } from 'react'; import { useTranslation } from 'react-i18next'; import * as InvokeAI from 'app/types/invokeai'; import IAISlider from 'common/components/IAISlider'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; export default function MergeModels() { const dispatch = useAppDispatch(); @@ -286,7 +286,7 @@ export default function MergeModels() { )} - setModelMergeForce(e.target.checked)} diff --git a/invokeai/frontend/web/src/features/system/components/ModelManager/SearchModels.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/SearchModels.tsx index 3a99997ac8..3381cb85d3 100644 --- a/invokeai/frontend/web/src/features/system/components/ModelManager/SearchModels.tsx +++ b/invokeai/frontend/web/src/features/system/components/ModelManager/SearchModels.tsx @@ -1,5 +1,5 @@ import IAIButton from 'common/components/IAIButton'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import IAIIconButton from 'common/components/IAIIconButton'; import React from 'react'; @@ -81,13 +81,13 @@ function SearchModelEntry({ borderRadius={4} > - {model.name}} isChecked={modelsToAdd.includes(model.name)} isDisabled={existingModels.includes(model.location)} onChange={foundModelsChangeHandler} - > + > {existingModels.includes(model.location) && ( {t('modelManager.modelExists')} )} @@ -324,7 +324,7 @@ export default function SearchModels() { > {t('modelManager.deselectAll')} - diff --git a/invokeai/frontend/web/src/features/system/hooks/useIsApplicationReady.ts b/invokeai/frontend/web/src/features/system/hooks/useIsApplicationReady.ts index 6e62c3642b..193420e29c 100644 --- a/invokeai/frontend/web/src/features/system/hooks/useIsApplicationReady.ts +++ b/invokeai/frontend/web/src/features/system/hooks/useIsApplicationReady.ts @@ -19,6 +19,9 @@ const isApplicationReadySelector = createSelector( } ); +/** + * Checks if the application is ready to be used, i.e. if the initial startup process is finished. 
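 * The check is derived from `isApplicationReadySelector` above, i.e. whether the model list has been received and the OpenAPI schema parsed.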
+ */ export const useIsApplicationReady = () => { const { disabledTabs, wereModelsReceived, wasSchemaParsed } = useAppSelector( isApplicationReadySelector diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabCoreParameters.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabCoreParameters.tsx index c4161154bb..cdbec9b55d 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabCoreParameters.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabCoreParameters.tsx @@ -1,5 +1,5 @@ import { memo } from 'react'; -import { Flex } from '@chakra-ui/react'; +import { Box, Flex, useDisclosure } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { uiSelector } from 'features/ui/store/uiSelectors'; import { useAppSelector } from 'app/store/storeHooks'; @@ -13,6 +13,8 @@ import ImageToImageStrength from 'features/parameters/components/Parameters/Imag import ImageToImageFit from 'features/parameters/components/Parameters/ImageToImage/ImageToImageFit'; import { generationSelector } from 'features/parameters/store/generationSelectors'; import ParamSchedulerAndModel from 'features/parameters/components/Parameters/Core/ParamSchedulerAndModel'; +import ParamSeedFull from 'features/parameters/components/Parameters/Seed/ParamSeedFull'; +import IAICollapse from 'common/components/IAICollapse'; const selector = createSelector( [uiSelector, generationSelector], @@ -27,43 +29,47 @@ const selector = createSelector( const ImageToImageTabCoreParameters = () => { const { shouldUseSliders, shouldFitToWidthHeight } = useAppSelector(selector); + const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true }); return ( - - {shouldUseSliders ? ( - - - - - - - - - - - ) : ( - - + + + {shouldUseSliders ? 
( + <> + + + + - - - - - - - - )} - + + + + ) : ( + <> + + + + + + + + + + + + + )} + + + + ); }; diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabParameters.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabParameters.tsx index 3b3daeaa4c..55d2c1def2 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabParameters.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabParameters.tsx @@ -2,12 +2,12 @@ import { memo } from 'react'; import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons'; import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning'; import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning'; -import ParamSeedCollapse from 'features/parameters/components/Parameters/Seed/ParamSeedCollapse'; import ParamVariationCollapse from 'features/parameters/components/Parameters/Variations/ParamVariationCollapse'; import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse'; import ParamSymmetryCollapse from 'features/parameters/components/Parameters/Symmetry/ParamSymmetryCollapse'; import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse'; import ImageToImageTabCoreParameters from './ImageToImageTabCoreParameters'; +import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; const ImageToImageTabParameters = () => { return ( @@ -16,7 +16,7 @@ const ImageToImageTabParameters = () => { - + diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters.tsx index 59512775bc..07297bda31 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters.tsx @@ -3,13 +3,15 @@ import ParamSteps from 'features/parameters/components/Parameters/Core/ParamStep import ParamCFGScale from 'features/parameters/components/Parameters/Core/ParamCFGScale'; import ParamWidth from 'features/parameters/components/Parameters/Core/ParamWidth'; import ParamHeight from 'features/parameters/components/Parameters/Core/ParamHeight'; -import { Flex } from '@chakra-ui/react'; +import { Box, Flex, useDisclosure } from '@chakra-ui/react'; import { useAppSelector } from 'app/store/storeHooks'; import { createSelector } from '@reduxjs/toolkit'; import { uiSelector } from 'features/ui/store/uiSelectors'; import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; import { memo } from 'react'; import ParamSchedulerAndModel from 'features/parameters/components/Parameters/Core/ParamSchedulerAndModel'; +import IAICollapse from 'common/components/IAICollapse'; +import ParamSeedFull from 'features/parameters/components/Parameters/Seed/ParamSeedFull'; const selector = createSelector( uiSelector, @@ -23,39 +25,45 @@ const selector = createSelector( const TextToImageTabCoreParameters = () => { const { shouldUseSliders } = useAppSelector(selector); + const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true }); return ( - - {shouldUseSliders ? 
( - - - - - - - - - ) : ( - - + + + {shouldUseSliders ? ( + <> + + + + - - - - - - )} - + + + + ) : ( + <> + + + + + + + + + + + + + )} + + ); }; diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/TextToImage/TextToImageTabParameters.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/TextToImage/TextToImageTabParameters.tsx index 0976e3eef2..a28fa71407 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/TextToImage/TextToImageTabParameters.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/TextToImage/TextToImageTabParameters.tsx @@ -2,13 +2,13 @@ import ProcessButtons from 'features/parameters/components/ProcessButtons/Proces import { memo } from 'react'; import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning'; import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning'; -import ParamSeedCollapse from 'features/parameters/components/Parameters/Seed/ParamSeedCollapse'; import ParamVariationCollapse from 'features/parameters/components/Parameters/Variations/ParamVariationCollapse'; import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse'; import ParamSymmetryCollapse from 'features/parameters/components/Parameters/Symmetry/ParamSymmetryCollapse'; import ParamHiresCollapse from 'features/parameters/components/Parameters/Hires/ParamHiresCollapse'; import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse'; import TextToImageTabCoreParameters from './TextToImageTabCoreParameters'; +import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; const TextToImageTabParameters = () => { return ( @@ -17,7 +17,7 @@ const TextToImageTabParameters = () => { - + diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx index 042749e792..53e36f62b6 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx @@ -1,6 +1,6 @@ import { RootState } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import { setShouldDarkenOutsideBoundingBox } from 'features/canvas/store/canvasSlice'; import { useTranslation } from 'react-i18next'; @@ -14,7 +14,7 @@ export default function UnifiedCanvasDarkenOutsideSelection() { const { t } = useTranslation(); return ( - diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx index 24f3f45a25..ceb58cb5ca 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx 
+++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx @@ -1,6 +1,6 @@ import { RootState } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import { setIsMaskEnabled } from 'features/canvas/store/canvasSlice'; import { useTranslation } from 'react-i18next'; @@ -16,7 +16,7 @@ export default function UnifiedCanvasEnableMask() { dispatch(setIsMaskEnabled(!isMaskEnabled)); return ( - diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx index 9b4b20e936..fd3396533c 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx @@ -1,6 +1,6 @@ import { RootState } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import { setShouldPreserveMaskedArea } from 'features/canvas/store/canvasSlice'; import { useTranslation } from 'react-i18next'; @@ -13,7 +13,7 @@ export default function UnifiedCanvasPreserveMask() { ); return ( - dispatch(setShouldPreserveMaskedArea(e.target.checked))} diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx index bfaa7cdae8..a173211258 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx @@ -1,7 +1,7 @@ import { Flex } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import IAIIconButton from 'common/components/IAIIconButton'; import IAIPopover from 'common/components/IAIPopover'; import { canvasSelector } from 'features/canvas/store/canvasSelectors'; @@ -73,33 +73,33 @@ const UnifiedCanvasSettings = () => { } > - dispatch(setShouldShowIntermediates(e.target.checked)) } /> - dispatch(setShouldAutoSave(e.target.checked))} /> - dispatch(setShouldCropToBoundingBoxOnSave(e.target.checked)) } /> - dispatch(setShouldShowCanvasDebugInfo(e.target.checked)) } /> - dispatch(setShouldAntialias(e.target.checked))} diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx 
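Each of the canvas tool-setting hunks in this stretch makes the same substitution: IAICheckbox becomes IAISimpleCheckbox while the store wiring stays as it was. The JSX itself is not visible in these hunks, so here is a hedged sketch of the pattern using the show-grid toggle that follows; the selector body, the translation key, and the label/isChecked/onChange prop names are assumptions drawn from the surviving fragments rather than confirmed by the diff.

```tsx
// Sketch of one canvas setting after the rename (prop names assumed).
import { ChangeEvent } from 'react';
import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox';
import { setShouldShowGrid } from 'features/canvas/store/canvasSlice';
import { useTranslation } from 'react-i18next';

export default function UnifiedCanvasShowGrid() {
  // Selector body assumed: the flag lives on the canvas slice.
  const shouldShowGrid = useAppSelector(
    (state: RootState) => state.canvas.shouldShowGrid
  );
  const dispatch = useAppDispatch();
  const { t } = useTranslation();

  return (
    <IAISimpleCheckbox
      label={t('unifiedCanvas.showGrid')} // translation key assumed
      isChecked={shouldShowGrid}
      onChange={(e: ChangeEvent<HTMLInputElement>) =>
        dispatch(setShouldShowGrid(e.target.checked))
      }
    />
  );
}
```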
b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx index e3d8a518ef..e17f74ce41 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx @@ -1,6 +1,6 @@ import { RootState } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import { setShouldShowGrid } from 'features/canvas/store/canvasSlice'; import { useTranslation } from 'react-i18next'; @@ -13,7 +13,7 @@ export default function UnifiedCanvasShowGrid() { const { t } = useTranslation(); return ( - dispatch(setShouldShowGrid(e.target.checked))} diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx index c334bd213b..69e9a4e78b 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx @@ -1,6 +1,6 @@ import { RootState } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAICheckbox from 'common/components/IAICheckbox'; +import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox'; import { setShouldSnapToGrid } from 'features/canvas/store/canvasSlice'; import { ChangeEvent } from 'react'; import { useTranslation } from 'react-i18next'; @@ -17,7 +17,7 @@ export default function UnifiedCanvasSnapToGrid() { dispatch(setShouldSnapToGrid(e.target.checked)); return ( - { const { shouldUseSliders } = useAppSelector(selector); + const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true }); return ( - - {shouldUseSliders ? ( - - - - - - - - - - - ) : ( - - + + + {shouldUseSliders ? 
( + <> + + + + - - - - - - - )} - + + + + ) : ( + <> + + + + + + + + + + + + + )} + + + ); }; diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx index c4501ffc44..19ef7fd6fa 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx @@ -16,7 +16,6 @@ const UnifiedCanvasParameters = () => { - diff --git a/invokeai/frontend/web/src/services/api/index.ts b/invokeai/frontend/web/src/services/api/index.ts index ff083079f9..187752627a 100644 --- a/invokeai/frontend/web/src/services/api/index.ts +++ b/invokeai/frontend/web/src/services/api/index.ts @@ -32,7 +32,7 @@ export type { Graph } from './models/Graph'; export type { GraphExecutionState } from './models/GraphExecutionState'; export type { GraphInvocation } from './models/GraphInvocation'; export type { GraphInvocationOutput } from './models/GraphInvocationOutput'; -export type { HedImageprocessorInvocation } from './models/HedImageprocessorInvocation'; +export type { HedImageProcessorInvocation } from './models/HedImageProcessorInvocation'; export type { HTTPValidationError } from './models/HTTPValidationError'; export type { ImageBlurInvocation } from './models/ImageBlurInvocation'; export type { ImageCategory } from './models/ImageCategory'; diff --git a/invokeai/frontend/web/src/services/api/models/CannyImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/CannyImageProcessorInvocation.ts index 3a8b0b21e7..d5203867ac 100644 --- a/invokeai/frontend/web/src/services/api/models/CannyImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/CannyImageProcessorInvocation.ts @@ -18,15 +18,15 @@ export type CannyImageProcessorInvocation = { is_intermediate?: boolean; type?: 'canny_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * low threshold of Canny pixel gradient + * The low threshold of the Canny pixel gradient (0-255) */ low_threshold?: number; /** - * high threshold of Canny pixel gradient + * The high threshold of the Canny pixel gradient (0-255) */ high_threshold?: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/ContentShuffleImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/ContentShuffleImageProcessorInvocation.ts index d8bc3fe58e..e3f67ec9be 100644 --- a/invokeai/frontend/web/src/services/api/models/ContentShuffleImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/ContentShuffleImageProcessorInvocation.ts @@ -18,27 +18,27 @@ export type ContentShuffleImageProcessorInvocation = { is_intermediate?: boolean; type?: 'content_shuffle_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * pixel resolution for edge detection + * The pixel resolution for detection */ detect_resolution?: number; /** - * pixel resolution for output image + * The pixel resolution for the output image */ image_resolution?: number; /** - * content shuffle h parameter + * Content shuffle `h` parameter */ 'h'?: number; /** - * content shuffle w parameter + * Content shuffle `w` parameter */ 'w'?: number; /** - * cont + * Content shuffle `f` parameter */ 'f'?: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/ControlField.ts 
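Both core-parameter panels (text-to-image earlier and unified canvas here) now pull isOpen/onToggle from useDisclosure({ defaultIsOpen: true }) and render either the slider variants or the numeric-input variants of the core parameters inside a fragment keyed on shouldUseSliders. The wrapper component that actually consumes isOpen/onToggle is not visible in these hunks, so the sketch below uses Chakra's Collapse as a stand-in to show the shape of the pattern rather than the exact component tree.

```tsx
// Generic sketch of the disclosure-driven core-parameters panel.
// Chakra's Collapse stands in for the actual wrapper used in the PR.
import { ReactNode } from 'react';
import { Button, Collapse, Flex, useDisclosure } from '@chakra-ui/react';

type Props = {
  shouldUseSliders: boolean;
  sliders: ReactNode; // e.g. the slider variants of iterations/steps/CFG/size
  numberInputs: ReactNode; // e.g. the numeric-input variants of the same settings
};

const CoreParametersSketch = ({ shouldUseSliders, sliders, numberInputs }: Props) => {
  // Matches the diff: the section starts expanded.
  const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true });

  return (
    <Flex flexDir="column" gap={2}>
      <Button onClick={onToggle}>Core Parameters</Button>
      <Collapse in={isOpen}>
        {shouldUseSliders ? <>{sliders}</> : <>{numberInputs}</>}
      </Collapse>
    </Flex>
  );
};

export default CoreParametersSketch;
```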
b/invokeai/frontend/web/src/services/api/models/ControlField.ts index 4f493d4410..a67655c018 100644 --- a/invokeai/frontend/web/src/services/api/models/ControlField.ts +++ b/invokeai/frontend/web/src/services/api/models/ControlField.ts @@ -6,23 +6,23 @@ import type { ImageField } from './ImageField'; export type ControlField = { /** - * processed image + * The control image */ image: ImageField; /** - * control model used + * The ControlNet model to use */ control_model: string; /** - * weight given to controlnet + * The weight given to the ControlNet */ control_weight: number; /** - * % of total steps at which controlnet is first applied + * When the ControlNet is first applied (% of total steps) */ begin_step_percent: number; /** - * % of total steps at which controlnet is last applied + * When the ControlNet is last applied (% of total steps) */ end_step_percent: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/ControlNetInvocation.ts b/invokeai/frontend/web/src/services/api/models/ControlNetInvocation.ts index fad3af911b..92688d6adc 100644 --- a/invokeai/frontend/web/src/services/api/models/ControlNetInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/ControlNetInvocation.ts @@ -18,23 +18,23 @@ export type ControlNetInvocation = { is_intermediate?: boolean; type?: 'controlnet'; /** - * image to process + * The control image */ image?: ImageField; /** - * control model used + * The ControlNet model to use */ control_model?: 'lllyasviel/sd-controlnet-canny' | 'lllyasviel/sd-controlnet-depth' | 'lllyasviel/sd-controlnet-hed' | 'lllyasviel/sd-controlnet-seg' | 'lllyasviel/sd-controlnet-openpose' | 'lllyasviel/sd-controlnet-scribble' | 'lllyasviel/sd-controlnet-normal' | 'lllyasviel/sd-controlnet-mlsd' | 'lllyasviel/control_v11p_sd15_canny' | 'lllyasviel/control_v11p_sd15_openpose' | 'lllyasviel/control_v11p_sd15_seg' | 'lllyasviel/control_v11f1p_sd15_depth' | 'lllyasviel/control_v11p_sd15_normalbae' | 'lllyasviel/control_v11p_sd15_scribble' | 'lllyasviel/control_v11p_sd15_mlsd' | 'lllyasviel/control_v11p_sd15_softedge' | 'lllyasviel/control_v11p_sd15s2_lineart_anime' | 'lllyasviel/control_v11p_sd15_lineart' | 'lllyasviel/control_v11p_sd15_inpaint' | 'lllyasviel/control_v11e_sd15_shuffle' | 'lllyasviel/control_v11e_sd15_ip2p' | 'lllyasviel/control_v11f1e_sd15_tile' | 'thibaud/controlnet-sd21-openpose-diffusers' | 'thibaud/controlnet-sd21-canny-diffusers' | 'thibaud/controlnet-sd21-depth-diffusers' | 'thibaud/controlnet-sd21-scribble-diffusers' | 'thibaud/controlnet-sd21-hed-diffusers' | 'thibaud/controlnet-sd21-zoedepth-diffusers' | 'thibaud/controlnet-sd21-color-diffusers' | 'thibaud/controlnet-sd21-openposev2-diffusers' | 'thibaud/controlnet-sd21-lineart-diffusers' | 'thibaud/controlnet-sd21-normalbae-diffusers' | 'thibaud/controlnet-sd21-ade20k-diffusers' | 'CrucibleAI/ControlNetMediaPipeFace,diffusion_sd15' | 'CrucibleAI/ControlNetMediaPipeFace'; /** - * weight given to controlnet + * The weight given to the ControlNet */ control_weight?: number; /** - * % of total steps at which controlnet is first applied + * When the ControlNet is first applied (% of total steps) */ begin_step_percent?: number; /** - * % of total steps at which controlnet is last applied + * When the ControlNet is last applied (% of total steps) */ end_step_percent?: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/ControlOutput.ts b/invokeai/frontend/web/src/services/api/models/ControlOutput.ts index 43f1b3341c..8c8b76a32f 100644 --- 
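The ControlField and ControlNetInvocation hunks above tighten the field documentation: image is the control image, control_model is one of the listed checkpoint ids, control_weight scales the ControlNet's influence, and begin_step_percent/end_step_percent bound when it is applied relative to the total step count. Here is a small sketch of a node built against those fields; the literal values are illustrative, and the 0-to-1 scale for the step percentages is an assumption read off the doc strings rather than stated by the diff.

```ts
// Illustrative ControlNet node; values are examples only.
import type { ControlNetInvocation } from 'services/api/models/ControlNetInvocation';

const controlNetNode: ControlNetInvocation = {
  id: 'controlnet_1',
  type: 'controlnet',
  control_model: 'lllyasviel/sd-controlnet-canny', // one of the literal ids in the union above
  control_weight: 1.0, // the weight given to the ControlNet
  begin_step_percent: 0, // first applied at the start of denoising (assumed 0-1 scale)
  end_step_percent: 1, // last applied at the end of denoising (assumed 0-1 scale)
  // `image` (the control image) is optional here and is typically supplied
  // by connecting another node's output through a graph edge.
};
```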
a/invokeai/frontend/web/src/services/api/models/ControlOutput.ts +++ b/invokeai/frontend/web/src/services/api/models/ControlOutput.ts @@ -10,7 +10,7 @@ import type { ControlField } from './ControlField'; export type ControlOutput = { type?: 'control_output'; /** - * The control info dict + * The output control image */ control?: ControlField; }; diff --git a/invokeai/frontend/web/src/services/api/models/Graph.ts b/invokeai/frontend/web/src/services/api/models/Graph.ts index e89e815ab2..2c7efbb423 100644 --- a/invokeai/frontend/web/src/services/api/models/Graph.ts +++ b/invokeai/frontend/web/src/services/api/models/Graph.ts @@ -12,7 +12,7 @@ import type { CvInpaintInvocation } from './CvInpaintInvocation'; import type { DivideInvocation } from './DivideInvocation'; import type { Edge } from './Edge'; import type { GraphInvocation } from './GraphInvocation'; -import type { HedImageprocessorInvocation } from './HedImageprocessorInvocation'; +import type { HedImageProcessorInvocation } from './HedImageProcessorInvocation'; import type { ImageBlurInvocation } from './ImageBlurInvocation'; import type { ImageChannelInvocation } from './ImageChannelInvocation'; import type { ImageConvertInvocation } from './ImageConvertInvocation'; @@ -69,7 +69,7 @@ export type Graph = { /** * The nodes in this graph */ - nodes?: Record; + nodes?: Record; /** * The connections between nodes and their fields in this graph */ diff --git a/invokeai/frontend/web/src/services/api/models/HedImageprocessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/HedImageProcessorInvocation.ts similarity index 73% rename from invokeai/frontend/web/src/services/api/models/HedImageprocessorInvocation.ts rename to invokeai/frontend/web/src/services/api/models/HedImageProcessorInvocation.ts index f975f18968..1132012c5a 100644 --- a/invokeai/frontend/web/src/services/api/models/HedImageprocessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/HedImageProcessorInvocation.ts @@ -7,7 +7,7 @@ import type { ImageField } from './ImageField'; /** * Applies HED edge detection to image */ -export type HedImageprocessorInvocation = { +export type HedImageProcessorInvocation = { /** * The id of this node. Must be unique among all nodes. 
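The rename from HedImageprocessorInvocation to HedImageProcessorInvocation shows up in three places: the model file itself, the re-export in services/api, and the Graph node map. The sketch below shows how the renamed type is consumed when assembling a graph; the field values are illustrative, and the optional image input is left to be wired up through an edge.

```ts
// Building a HED edge-detection node with the renamed type.
import type { Graph, HedImageProcessorInvocation } from 'services/api';

const hedNode: HedImageProcessorInvocation = {
  id: 'hed_1', // must be unique among all nodes
  type: 'hed_image_processor',
  detect_resolution: 512, // the pixel resolution for detection
  image_resolution: 512, // the pixel resolution for the output image
  scribble: false, // whether to use scribble mode
};

// After the rename the node fits the Graph node map without casts.
const graph: Graph = {
  nodes: { hed_1: hedNode },
  edges: [], // the image to process would normally arrive via an edge from an image node
};
```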
*/ @@ -18,19 +18,19 @@ export type HedImageprocessorInvocation = { is_intermediate?: boolean; type?: 'hed_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * pixel resolution for edge detection + * The pixel resolution for detection */ detect_resolution?: number; /** - * pixel resolution for output image + * The pixel resolution for the output image */ image_resolution?: number; /** - * whether to use scribble mode + * Whether to use scribble mode */ scribble?: boolean; }; diff --git a/invokeai/frontend/web/src/services/api/models/ImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/ImageProcessorInvocation.ts index f972582e2f..0d995c4e68 100644 --- a/invokeai/frontend/web/src/services/api/models/ImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/ImageProcessorInvocation.ts @@ -18,7 +18,7 @@ export type ImageProcessorInvocation = { is_intermediate?: boolean; type?: 'image_processor'; /** - * image to process + * The image to process */ image?: ImageField; }; diff --git a/invokeai/frontend/web/src/services/api/models/LineartAnimeImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/LineartAnimeImageProcessorInvocation.ts index 4796d2a049..5d239536d5 100644 --- a/invokeai/frontend/web/src/services/api/models/LineartAnimeImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/LineartAnimeImageProcessorInvocation.ts @@ -18,15 +18,15 @@ export type LineartAnimeImageProcessorInvocation = { is_intermediate?: boolean; type?: 'lineart_anime_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * pixel resolution for edge detection + * The pixel resolution for detection */ detect_resolution?: number; /** - * pixel resolution for output image + * The pixel resolution for the output image */ image_resolution?: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/LineartImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/LineartImageProcessorInvocation.ts index 8328849b50..17720e689b 100644 --- a/invokeai/frontend/web/src/services/api/models/LineartImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/LineartImageProcessorInvocation.ts @@ -18,19 +18,19 @@ export type LineartImageProcessorInvocation = { is_intermediate?: boolean; type?: 'lineart_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * pixel resolution for edge detection + * The pixel resolution for detection */ detect_resolution?: number; /** - * pixel resolution for output image + * The pixel resolution for the output image */ image_resolution?: number; /** - * whether to use coarse mode + * Whether to use coarse mode */ coarse?: boolean; }; diff --git a/invokeai/frontend/web/src/services/api/models/MediapipeFaceProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/MediapipeFaceProcessorInvocation.ts index bd223eed7d..aa7b966b4b 100644 --- a/invokeai/frontend/web/src/services/api/models/MediapipeFaceProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/MediapipeFaceProcessorInvocation.ts @@ -18,15 +18,15 @@ export type MediapipeFaceProcessorInvocation = { is_intermediate?: boolean; type?: 'mediapipe_face_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * maximum number of faces to detect + * Maximum number of faces to detect */ max_faces?: number; /** - * minimum 
confidence for face detection + * Minimum confidence for face detection */ min_confidence?: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/MidasDepthImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/MidasDepthImageProcessorInvocation.ts index 11023086a2..bd274228db 100644 --- a/invokeai/frontend/web/src/services/api/models/MidasDepthImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/MidasDepthImageProcessorInvocation.ts @@ -18,15 +18,15 @@ export type MidasDepthImageProcessorInvocation = { is_intermediate?: boolean; type?: 'midas_depth_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * Midas parameter a = amult * PI + * Midas parameter `a_mult` (a = a_mult * PI) */ a_mult?: number; /** - * Midas parameter bg_th + * Midas parameter `bg_th` */ bg_th?: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/MlsdImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/MlsdImageProcessorInvocation.ts index c2d4a61b9a..0e81c9a4b8 100644 --- a/invokeai/frontend/web/src/services/api/models/MlsdImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/MlsdImageProcessorInvocation.ts @@ -18,23 +18,23 @@ export type MlsdImageProcessorInvocation = { is_intermediate?: boolean; type?: 'mlsd_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * pixel resolution for edge detection + * The pixel resolution for detection */ detect_resolution?: number; /** - * pixel resolution for output image + * The pixel resolution for the output image */ image_resolution?: number; /** - * MLSD parameter thr_v + * MLSD parameter `thr_v` */ thr_v?: number; /** - * MLSD parameter thr_d + * MLSD parameter `thr_d` */ thr_d?: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/NormalbaeImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/NormalbaeImageProcessorInvocation.ts index ecfb50a09f..400068171e 100644 --- a/invokeai/frontend/web/src/services/api/models/NormalbaeImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/NormalbaeImageProcessorInvocation.ts @@ -18,15 +18,15 @@ export type NormalbaeImageProcessorInvocation = { is_intermediate?: boolean; type?: 'normalbae_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * pixel resolution for edge detection + * The pixel resolution for detection */ detect_resolution?: number; /** - * pixel resolution for output image + * The pixel resolution for the output image */ image_resolution?: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/OpenposeImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/OpenposeImageProcessorInvocation.ts index 5af21d542e..982ce8ade7 100644 --- a/invokeai/frontend/web/src/services/api/models/OpenposeImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/OpenposeImageProcessorInvocation.ts @@ -18,19 +18,19 @@ export type OpenposeImageProcessorInvocation = { is_intermediate?: boolean; type?: 'openpose_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * whether to use hands and face mode + * Whether to use hands and face mode */ hand_and_face?: boolean; /** - * pixel resolution for edge detection + * The pixel resolution for detection */ detect_resolution?: number; /** - * pixel resolution for output image + * 
The pixel resolution for the output image */ image_resolution?: number; }; diff --git a/invokeai/frontend/web/src/services/api/models/PidiImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/PidiImageProcessorInvocation.ts index a08bf6a920..91c9dc0ce5 100644 --- a/invokeai/frontend/web/src/services/api/models/PidiImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/PidiImageProcessorInvocation.ts @@ -18,23 +18,23 @@ export type PidiImageProcessorInvocation = { is_intermediate?: boolean; type?: 'pidi_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; /** - * pixel resolution for edge detection + * The pixel resolution for detection */ detect_resolution?: number; /** - * pixel resolution for output image + * The pixel resolution for the output image */ image_resolution?: number; /** - * whether to use safe mode + * Whether to use safe mode */ safe?: boolean; /** - * whether to use scribble mode + * Whether to use scribble mode */ scribble?: boolean; }; diff --git a/invokeai/frontend/web/src/services/api/models/ZoeDepthImageProcessorInvocation.ts b/invokeai/frontend/web/src/services/api/models/ZoeDepthImageProcessorInvocation.ts index 55d05f3167..6caded8f04 100644 --- a/invokeai/frontend/web/src/services/api/models/ZoeDepthImageProcessorInvocation.ts +++ b/invokeai/frontend/web/src/services/api/models/ZoeDepthImageProcessorInvocation.ts @@ -18,7 +18,7 @@ export type ZoeDepthImageProcessorInvocation = { is_intermediate?: boolean; type?: 'zoe_depth_image_processor'; /** - * image to process + * The image to process */ image?: ImageField; }; diff --git a/invokeai/frontend/web/src/services/api/services/SessionsService.ts b/invokeai/frontend/web/src/services/api/services/SessionsService.ts index 6ae6783313..977c03e6fb 100644 --- a/invokeai/frontend/web/src/services/api/services/SessionsService.ts +++ b/invokeai/frontend/web/src/services/api/services/SessionsService.ts @@ -13,7 +13,7 @@ import type { Edge } from '../models/Edge'; import type { Graph } from '../models/Graph'; import type { GraphExecutionState } from '../models/GraphExecutionState'; import type { GraphInvocation } from '../models/GraphInvocation'; -import type { HedImageprocessorInvocation } from '../models/HedImageprocessorInvocation'; +import type { HedImageProcessorInvocation } from '../models/HedImageProcessorInvocation'; import type { ImageBlurInvocation } from '../models/ImageBlurInvocation'; import type { ImageChannelInvocation } from '../models/ImageChannelInvocation'; import type { ImageConvertInvocation } from '../models/ImageConvertInvocation'; @@ -171,7 +171,7 @@ export class SessionsService { * The id of the session */ sessionId: string, - requestBody: (LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | CompelInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | CvInpaintInvocation | RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | UpscaleInvocation | 
RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageprocessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation), + requestBody: (LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | CompelInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | CvInpaintInvocation | RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation), }): CancelablePromise { return __request(OpenAPI, { method: 'POST', @@ -208,7 +208,7 @@ export class SessionsService { * The path to the node in the graph */ nodePath: string, - requestBody: (LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | CompelInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | CvInpaintInvocation | RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageprocessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | 
MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation), + requestBody: (LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | CompelInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | CvInpaintInvocation | RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation), }): CancelablePromise { return __request(OpenAPI, { method: 'PUT', diff --git a/invokeai/frontend/web/src/services/types/guards.ts b/invokeai/frontend/web/src/services/types/guards.ts index 4d33cfa246..334c04e6ed 100644 --- a/invokeai/frontend/web/src/services/types/guards.ts +++ b/invokeai/frontend/web/src/services/types/guards.ts @@ -10,8 +10,25 @@ import { ImageField, LatentsOutput, ResourceOrigin, + ImageDTO, } from 'services/api'; +export const isImageDTO = (obj: unknown): obj is ImageDTO => { + return ( + isObject(obj) && + 'image_name' in obj && + isString(obj?.image_name) && + 'thumbnail_url' in obj && + isString(obj?.thumbnail_url) && + 'image_url' in obj && + isString(obj?.image_url) && + 'image_origin' in obj && + isString(obj?.image_origin) && + 'created_at' in obj && + isString(obj?.created_at) + ); +}; + export const isImageOutput = ( output: GraphExecutionState['results'][string] ): output is ImageOutput => output.type === 'image_output'; diff --git a/invokeai/frontend/web/src/theme/colors/greenTea.ts b/invokeai/frontend/web/src/theme/colors/greenTea.ts index 06476c0513..ffecbf2ffa 100644 --- a/invokeai/frontend/web/src/theme/colors/greenTea.ts +++ b/invokeai/frontend/web/src/theme/colors/greenTea.ts @@ -3,10 +3,16 @@ import { generateColorPalette } from '../util/generateColorPalette'; export const greenTeaThemeColors: InvokeAIThemeColors = { base: generateColorPalette(223, 10), + baseAlpha: generateColorPalette(223, 10, false, true), accent: generateColorPalette(155, 80), + accentAlpha: generateColorPalette(155, 80, false, true), working: generateColorPalette(47, 68), + workingAlpha: generateColorPalette(47, 68, false, true), warning: generateColorPalette(28, 75), + warningAlpha: generateColorPalette(28, 75, false, true), ok: generateColorPalette(122, 49), + okAlpha: generateColorPalette(122, 
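The new isImageDTO guard in services/types/guards.ts checks for the string fields image_name, thumbnail_url, image_url, image_origin and created_at before narrowing an unknown value to ImageDTO. A short usage sketch follows; the handleIncoming wrapper is hypothetical, and only the guard and the ImageDTO export come from the PR.

```ts
import { isImageDTO } from 'services/types/guards';
import type { ImageDTO } from 'services/api';

// Hypothetical consumer: narrow an untyped payload (socket event, drag payload, etc.)
// before treating it as an image DTO.
const handleIncoming = (payload: unknown): ImageDTO | undefined => {
  if (isImageDTO(payload)) {
    // Narrowed: image_name, image_url, thumbnail_url and friends are now typed.
    console.log(`received image ${payload.image_name} (${payload.image_url})`);
    return payload;
  }
  return undefined;
};
```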
49, false, true), error: generateColorPalette(0, 50), + errorAlpha: generateColorPalette(0, 50, false, true), gridLineColor: 'rgba(255, 255, 255, 0.2)', }; diff --git a/invokeai/frontend/web/src/theme/colors/invokeAI.ts b/invokeai/frontend/web/src/theme/colors/invokeAI.ts index a523ae38c8..c39b3bed81 100644 --- a/invokeai/frontend/web/src/theme/colors/invokeAI.ts +++ b/invokeai/frontend/web/src/theme/colors/invokeAI.ts @@ -3,10 +3,16 @@ import { generateColorPalette } from 'theme/util/generateColorPalette'; export const invokeAIThemeColors: InvokeAIThemeColors = { base: generateColorPalette(225, 15), + baseAlpha: generateColorPalette(225, 15, false, true), accent: generateColorPalette(250, 50), + accentAlpha: generateColorPalette(250, 50, false, true), working: generateColorPalette(47, 67), + workingAlpha: generateColorPalette(47, 67, false, true), warning: generateColorPalette(28, 75), + warningAlpha: generateColorPalette(28, 75, false, true), ok: generateColorPalette(113, 70), + okAlpha: generateColorPalette(113, 70, false, true), error: generateColorPalette(0, 76), + errorAlpha: generateColorPalette(0, 76, false, true), gridLineColor: 'rgba(255, 255, 255, 0.2)', }; diff --git a/invokeai/frontend/web/src/theme/colors/lightTheme.ts b/invokeai/frontend/web/src/theme/colors/lightTheme.ts index 8fdf199bb8..2a7a05bbd2 100644 --- a/invokeai/frontend/web/src/theme/colors/lightTheme.ts +++ b/invokeai/frontend/web/src/theme/colors/lightTheme.ts @@ -3,10 +3,16 @@ import { generateColorPalette } from '../util/generateColorPalette'; export const lightThemeColors: InvokeAIThemeColors = { base: generateColorPalette(223, 10, true), + baseAlpha: generateColorPalette(223, 10, true, true), accent: generateColorPalette(40, 80, true), + accentAlpha: generateColorPalette(40, 80, true, true), working: generateColorPalette(47, 68, true), + workingAlpha: generateColorPalette(47, 68, true, true), warning: generateColorPalette(28, 75, true), + warningAlpha: generateColorPalette(28, 75, true, true), ok: generateColorPalette(122, 49, true), + okAlpha: generateColorPalette(122, 49, true, true), error: generateColorPalette(0, 50, true), + errorAlpha: generateColorPalette(0, 50, true, true), gridLineColor: 'rgba(0, 0, 0, 0.2)', }; diff --git a/invokeai/frontend/web/src/theme/colors/oceanBlue.ts b/invokeai/frontend/web/src/theme/colors/oceanBlue.ts index 3462459c1c..adfb8ab288 100644 --- a/invokeai/frontend/web/src/theme/colors/oceanBlue.ts +++ b/invokeai/frontend/web/src/theme/colors/oceanBlue.ts @@ -3,10 +3,16 @@ import { generateColorPalette } from '../util/generateColorPalette'; export const oceanBlueColors: InvokeAIThemeColors = { base: generateColorPalette(220, 30), + baseAlpha: generateColorPalette(220, 30, false, true), accent: generateColorPalette(210, 80), + accentAlpha: generateColorPalette(210, 80, false, true), working: generateColorPalette(47, 68), + workingAlpha: generateColorPalette(47, 68, false, true), warning: generateColorPalette(28, 75), + warningAlpha: generateColorPalette(28, 75, false, true), ok: generateColorPalette(122, 49), + okAlpha: generateColorPalette(122, 49, false, true), error: generateColorPalette(0, 100), + errorAlpha: generateColorPalette(0, 100, false, true), gridLineColor: 'rgba(136, 148, 184, 0.2)', }; diff --git a/invokeai/frontend/web/src/theme/components/tabs.ts b/invokeai/frontend/web/src/theme/components/tabs.ts index 5eb1a36013..daf6e18cab 100644 --- a/invokeai/frontend/web/src/theme/components/tabs.ts +++ b/invokeai/frontend/web/src/theme/components/tabs.ts @@ -26,6 +26,7 
@@ const invokeAITablist = defineStyle((_props) => ({ padding: 2, borderRadius: 'base', _selected: { + borderBottomColor: 'base.800', bg: 'accent.700', color: 'accent.100', _hover: { diff --git a/invokeai/frontend/web/src/theme/css/overlayscrollbars.css b/invokeai/frontend/web/src/theme/css/overlayscrollbars.css index b5acaca75d..8f6f267095 100644 --- a/invokeai/frontend/web/src/theme/css/overlayscrollbars.css +++ b/invokeai/frontend/web/src/theme/css/overlayscrollbars.css @@ -8,11 +8,11 @@ /* The border radius of the scrollbar track */ /* --os-track-border-radius: 0; */ /* The background of the scrollbar track */ - --os-track-bg: rgba(0, 0, 0, 0.3); + /* --os-track-bg: rgba(0, 0, 0, 0.3); */ /* The :hover background of the scrollbar track */ - --os-track-bg-hover: rgba(0, 0, 0, 0.3); + /* --os-track-bg-hover: rgba(0, 0, 0, 0.3); */ /* The :active background of the scrollbar track */ - --os-track-bg-active: rgba(0, 0, 0, 0.3); + /* --os-track-bg-active: rgba(0, 0, 0, 0.3); */ /* The border of the scrollbar track */ /* --os-track-border: none; */ /* The :hover background of the scrollbar track */ @@ -22,11 +22,11 @@ /* The border radius of the scrollbar handle */ /* --os-handle-border-radius: 0; */ /* The background of the scrollbar handle */ - --os-handle-bg: var(--invokeai-colors-accent-500); + --os-handle-bg: var(--invokeai-colors-accentAlpha-500); /* The :hover background of the scrollbar handle */ - --os-handle-bg-hover: var(--invokeai-colors-accent-450); + --os-handle-bg-hover: var(--invokeai-colors-accentAlpha-700); /* The :active background of the scrollbar handle */ - --os-handle-bg-active: var(--invokeai-colors-accent-400); + --os-handle-bg-active: var(--invokeai-colors-accentAlpha-800); /* The border of the scrollbar handle */ /* --os-handle-border: none; */ /* The :hover border of the scrollbar handle */ diff --git a/invokeai/frontend/web/src/theme/themeTypes.d.ts b/invokeai/frontend/web/src/theme/themeTypes.d.ts index dce386168d..46144f39ab 100644 --- a/invokeai/frontend/web/src/theme/themeTypes.d.ts +++ b/invokeai/frontend/web/src/theme/themeTypes.d.ts @@ -1,10 +1,16 @@ export type InvokeAIThemeColors = { base: Partial; + baseAlpha: Partial; accent: Partial; + accentAlpha: Partial; working: Partial; + workingAlpha: Partial; warning: Partial; + warningAlpha: Partial; ok: Partial; + okAlpha: Partial; error: Partial; + errorAlpha: Partial; gridLineColor: string; }; diff --git a/invokeai/frontend/web/src/theme/util/generateColorPalette.ts b/invokeai/frontend/web/src/theme/util/generateColorPalette.ts index ed346c684a..4cb5fbd57d 100644 --- a/invokeai/frontend/web/src/theme/util/generateColorPalette.ts +++ b/invokeai/frontend/web/src/theme/util/generateColorPalette.ts @@ -9,49 +9,38 @@ import { InvokeAIPaletteSteps } from 'theme/themeTypes'; export function generateColorPalette( hue: string | number, saturation: string | number, - light = false + light = false, + alpha = false ) { hue = String(hue); saturation = String(saturation); const colorSteps = Array.from({ length: 21 }, (_, i) => i * 50); const lightnessSteps = [ - '0', - '5', - '10', - '15', - '20', - '25', - '30', - '35', - '40', - '45', - '50', - '55', - '59', - '64', - '68', - '73', - '77', - '82', - '86', - '95', - '100', + 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 59, 64, 68, 73, 77, 82, 86, + 95, 100, ]; const darkPalette: Partial = {}; const lightPalette: Partial = {}; colorSteps.forEach((colorStep, index) => { + const A = alpha ? 
lightnessSteps[index] / 100 : 1; + + // Lightness should be 50% for alpha colors + const darkPaletteLightness = alpha + ? 50 + : lightnessSteps[colorSteps.length - 1 - index]; + darkPalette[ colorStep as keyof typeof darkPalette - ] = `hsl(${hue}, ${saturation}%, ${ - lightnessSteps[colorSteps.length - 1 - index] - }%)`; + ] = `hsl(${hue} ${saturation}% ${darkPaletteLightness}% / ${A})`; + + const lightPaletteLightness = alpha ? 50 : lightnessSteps[index]; lightPalette[ colorStep as keyof typeof lightPalette - ] = `hsl(${hue}, ${saturation}%, ${lightnessSteps[index]}%)`; + ] = `hsl(${hue} ${saturation}% ${lightPaletteLightness}% / ${A})`; }); return light ? lightPalette : darkPalette; diff --git a/invokeai/frontend/web/src/theme/util/getInputOutlineStyles.ts b/invokeai/frontend/web/src/theme/util/getInputOutlineStyles.ts index 85e9d109c5..469bf47be4 100644 --- a/invokeai/frontend/web/src/theme/util/getInputOutlineStyles.ts +++ b/invokeai/frontend/web/src/theme/util/getInputOutlineStyles.ts @@ -1,6 +1,6 @@ import { StyleFunctionProps } from '@chakra-ui/theme-tools'; -export const getInputOutlineStyles = (_props: StyleFunctionProps) => ({ +export const getInputOutlineStyles = (_props?: StyleFunctionProps) => ({ outline: 'none', borderWidth: 2, borderStyle: 'solid', diff --git a/invokeai/frontend/web/yarn.lock b/invokeai/frontend/web/yarn.lock index 356f7466fe..b14d83ae2b 100644 --- a/invokeai/frontend/web/yarn.lock +++ b/invokeai/frontend/web/yarn.lock @@ -937,6 +937,37 @@ gonzales-pe "^4.3.0" node-source-walk "^5.0.1" +"@dnd-kit/accessibility@^3.0.0": + version "3.0.1" + resolved "https://registry.yarnpkg.com/@dnd-kit/accessibility/-/accessibility-3.0.1.tgz#3ccbefdfca595b0a23a5dc57d3de96bc6935641c" + integrity sha512-HXRrwS9YUYQO9lFRc/49uO/VICbM+O+ZRpFDe9Pd1rwVv2PCNkRiTZRdxrDgng/UkvdC3Re9r2vwPpXXrWeFzg== + dependencies: + tslib "^2.0.0" + +"@dnd-kit/core@^6.0.8": + version "6.0.8" + resolved "https://registry.yarnpkg.com/@dnd-kit/core/-/core-6.0.8.tgz#040ae13fea9787ee078e5f0361f3b49b07f3f005" + integrity sha512-lYaoP8yHTQSLlZe6Rr9qogouGUz9oRUj4AHhDQGQzq/hqaJRpFo65X+JKsdHf8oUFBzx5A+SJPUvxAwTF2OabA== + dependencies: + "@dnd-kit/accessibility" "^3.0.0" + "@dnd-kit/utilities" "^3.2.1" + tslib "^2.0.0" + +"@dnd-kit/modifiers@^6.0.1": + version "6.0.1" + resolved "https://registry.yarnpkg.com/@dnd-kit/modifiers/-/modifiers-6.0.1.tgz#9e39b25fd6e323659604cc74488fe044d33188c8" + integrity sha512-rbxcsg3HhzlcMHVHWDuh9LCjpOVAgqbV78wLGI8tziXY3+qcMQ61qVXIvNKQFuhj75dSfD+o+PYZQ/NUk2A23A== + dependencies: + "@dnd-kit/utilities" "^3.2.1" + tslib "^2.0.0" + +"@dnd-kit/utilities@^3.2.1": + version "3.2.1" + resolved "https://registry.yarnpkg.com/@dnd-kit/utilities/-/utilities-3.2.1.tgz#53f9e2016fd2506ec49e404c289392cfff30332a" + integrity sha512-OOXqISfvBw/1REtkSK2N3Fi2EQiLMlWUlqnOK/UpOISqBZPWpE6TqL+jcPtMOkE8TqYGiURvRdPSI9hltNUjEA== + dependencies: + tslib "^2.0.0" + "@emotion/babel-plugin@^11.10.8": version "11.10.8" resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.10.8.tgz#bae325c902937665d00684038fd5294223ef9e1d" @@ -5509,10 +5540,10 @@ react-i18next@^12.2.2: "@babel/runtime" "^7.20.6" html-parse-stringify "^3.0.1" -react-icons@^4.7.1: - version "4.8.0" - resolved "https://registry.yarnpkg.com/react-icons/-/react-icons-4.8.0.tgz#621e900caa23b912f737e41be57f27f6b2bff445" - integrity sha512-N6+kOLcihDiAnj5Czu637waJqSnwlMNROzVZMhfX68V/9bu9qHaMIJC4UdozWoOk57gahFCNHwVvWzm0MTzRjg== +react-icons@^4.9.0: + version "4.9.0" + resolved 
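generateColorPalette gains an alpha flag: when it is set, the lightness that previously varied per step is held at 50% and the step value is reused as the alpha channel instead, which is what lets the scrollbar CSS above switch from accent-* to accentAlpha-* variables. The standalone sketch below reproduces that formula for a single swatch so the effect is easy to see; it mirrors the lightPalette branch, and the darkPalette branch walks the same steps in reverse order.

```ts
// Step values copied from the diff; hue/saturation are passed as numbers here for brevity.
const lightnessSteps = [
  0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 59, 64, 68, 73, 77, 82, 86, 95, 100,
];

const swatch = (
  hue: number,
  saturation: number,
  index: number,
  alpha: boolean
): string => {
  // For alpha palettes the step drives opacity and lightness is pinned at 50%.
  const a = alpha ? lightnessSteps[index] / 100 : 1;
  const lightness = alpha ? 50 : lightnessSteps[index];
  return `hsl(${hue} ${saturation}% ${lightness}% / ${a})`;
};

console.log(swatch(250, 50, 18, false)); // "hsl(250 50% 86% / 1)"
console.log(swatch(250, 50, 18, true)); // "hsl(250 50% 50% / 0.86)"
```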
"https://registry.yarnpkg.com/react-icons/-/react-icons-4.9.0.tgz#ba44f436a053393adb1bdcafbc5c158b7b70d2a3" + integrity sha512-ijUnFr//ycebOqujtqtV9PFS7JjhWg0QU6ykURVHuL4cbofvRCf3f6GMn9+fBktEFQOIVZnuAYLZdiyadRQRFg== react-is@^16.13.1, react-is@^16.7.0: version "16.13.1" diff --git a/pyproject.toml b/pyproject.toml index 38aa71bd0e..38f4b7673f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,7 +38,7 @@ dependencies = [ "albumentations", "click", "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", - "compel~=1.1.5", + "compel>=1.2.1", "controlnet-aux>=0.0.4", "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 "datasets", diff --git a/scripts/invoke.py b/scripts/invoke.py deleted file mode 100755 index 9cd4b5a0a6..0000000000 --- a/scripts/invoke.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env python - -from invokeai.frontend.CLI import invokeai_command_line_interface as main -main() - diff --git a/scripts/invoke-new.py b/scripts/invokeai-cli.py similarity index 64% rename from scripts/invoke-new.py rename to scripts/invokeai-cli.py index faf83a9993..aefe08e956 100755 --- a/scripts/invoke-new.py +++ b/scripts/invokeai-cli.py @@ -12,13 +12,9 @@ def main(): # Change working directory to the repo root os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) - if '--web' in sys.argv: - from invokeai.app.api_app import invoke_api - invoke_api() - else: - # TODO: Parse some top-level args here. - from invokeai.app.cli_app import invoke_cli - invoke_cli() + # TODO: Parse some top-level args here. + from invokeai.app.cli_app import invoke_cli + invoke_cli() if __name__ == '__main__': diff --git a/scripts/invokeai-web.py b/scripts/invokeai-web.py new file mode 100755 index 0000000000..9ac7ee5cb9 --- /dev/null +++ b/scripts/invokeai-web.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) + +import logging +logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) + +import os +import sys + +def main(): + # Change working directory to the repo root + os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + + from invokeai.app.api_app import invoke_api + invoke_api() + + +if __name__ == '__main__': + main() diff --git a/tests/test_config.py b/tests/test_config.py index 2c883d63f5..9317a794c5 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,13 +1,16 @@ import os import pytest +import sys from omegaconf import OmegaConf from pathlib import Path os.environ['INVOKEAI_ROOT']='/tmp' + from invokeai.app.services.config import InvokeAIAppConfig from invokeai.app.invocations.generate import TextToImageInvocation + init1 = OmegaConf.create( ''' InvokeAI: