From 399ebe443ec9dcb59ba0f3889ea022d29c733761 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Mon, 25 Sep 2023 18:28:10 -0400 Subject: [PATCH 01/37] Fix IP-Adapter calculation of memory footprint. --- invokeai/backend/ip_adapter/ip_adapter.py | 16 ++++++++++++++++ .../model_management/models/ip_adapter.py | 8 ++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/invokeai/backend/ip_adapter/ip_adapter.py b/invokeai/backend/ip_adapter/ip_adapter.py index 7f320a2c35..94f3202ba0 100644 --- a/invokeai/backend/ip_adapter/ip_adapter.py +++ b/invokeai/backend/ip_adapter/ip_adapter.py @@ -9,6 +9,8 @@ from diffusers.models import UNet2DConditionModel from PIL import Image from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection +from invokeai.backend.model_management.models.base import calc_model_size_by_data + from .attention_processor import AttnProcessor2_0, IPAttnProcessor2_0 from .resampler import Resampler @@ -87,6 +89,20 @@ class IPAdapter: if self._attn_processors is not None: torch.nn.ModuleList(self._attn_processors.values()).to(device=self.device, dtype=self.dtype) + def calc_size(self): + if self._state_dict is not None: + image_proj_size = sum( + [tensor.nelement() * tensor.element_size() for tensor in self._state_dict["image_proj"].values()] + ) + ip_adapter_size = sum( + [tensor.nelement() * tensor.element_size() for tensor in self._state_dict["ip_adapter"].values()] + ) + return image_proj_size + ip_adapter_size + else: + return calc_model_size_by_data(self._image_proj_model) + calc_model_size_by_data( + torch.nn.ModuleList(self._attn_processors.values()) + ) + def _init_image_proj_model(self, state_dict): return ImageProjModel.from_state_dict(state_dict, self._num_tokens).to(self.device, dtype=self.dtype) diff --git a/invokeai/backend/model_management/models/ip_adapter.py b/invokeai/backend/model_management/models/ip_adapter.py index 8e1e97c9e0..63694af0c8 100644 --- a/invokeai/backend/model_management/models/ip_adapter.py 
+++ b/invokeai/backend/model_management/models/ip_adapter.py @@ -13,6 +13,7 @@ from invokeai.backend.model_management.models.base import ( ModelConfigBase, ModelType, SubModelType, + calc_model_size_by_fs, classproperty, ) @@ -30,7 +31,7 @@ class IPAdapterModel(ModelBase): assert model_type == ModelType.IPAdapter super().__init__(model_path, base_model, model_type) - self.model_size = os.path.getsize(self.model_path) + self.model_size = calc_model_size_by_fs(self.model_path) @classmethod def detect_format(cls, path: str) -> str: @@ -63,10 +64,13 @@ class IPAdapterModel(ModelBase): if child_type is not None: raise ValueError("There are no child models in an IP-Adapter model.") - return build_ip_adapter( + model = build_ip_adapter( ip_adapter_ckpt_path=os.path.join(self.model_path, "ip_adapter.bin"), device="cpu", dtype=torch_dtype ) + self.model_size = model.calc_size() + return model + @classmethod def convert_if_required( cls, From c8b109f52e20b41bb3096bacb1ecfb4527ca7115 Mon Sep 17 00:00:00 2001 From: DekitaRPG Date: Tue, 26 Sep 2023 06:57:44 +0100 Subject: [PATCH 02/37] Add 'Random Float' node <3 (#4581) * Add 'Random Float' node <3 does what it says on the tin :) * Add random float + random seeded float nodes altered my random float node as requested by Millu, kept the seeded version as an alternate variant for those that would like to control the randomization seed :) * Update math.py * Update math.py * feat(nodes): standardize fields to match other nodes --------- Co-authored-by: Millun Atluri Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com> --- invokeai/app/invocations/baseinvocation.py | 3 +++ invokeai/app/invocations/math.py | 18 ++++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index af7a343274..3285de3d5a 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ 
b/invokeai/app/invocations/baseinvocation.py @@ -91,6 +91,9 @@ class FieldDescriptions: board = "The board to save the image to" image = "The image to process" tile_size = "Tile size" + inclusive_low = "The inclusive low value" + exclusive_high = "The exclusive high value" + decimal_places = "The number of decimal places to round to" class Input(str, Enum): diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index 3cdd43fb59..b52cbb28bf 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -65,13 +65,27 @@ class DivideInvocation(BaseInvocation): class RandomIntInvocation(BaseInvocation): """Outputs a single random integer.""" - low: int = InputField(default=0, description="The inclusive low value") - high: int = InputField(default=np.iinfo(np.int32).max, description="The exclusive high value") + low: int = InputField(default=0, description=FieldDescriptions.inclusive_low) + high: int = InputField(default=np.iinfo(np.int32).max, description=FieldDescriptions.exclusive_high) def invoke(self, context: InvocationContext) -> IntegerOutput: return IntegerOutput(value=np.random.randint(self.low, self.high)) +@invocation("rand_float", title="Random Float", tags=["math", "float", "random"], category="math", version="1.0.0") +class RandomFloatInvocation(BaseInvocation): + """Outputs a single random float""" + + low: float = InputField(default=0.0, description=FieldDescriptions.inclusive_low) + high: float = InputField(default=1.0, description=FieldDescriptions.exclusive_high) + decimals: int = InputField(default=2, description=FieldDescriptions.decimal_places) + + def invoke(self, context: InvocationContext) -> FloatOutput: + random_float = np.random.uniform(self.low, self.high) + rounded_float = round(random_float, self.decimals) + return FloatOutput(value=rounded_float) + + @invocation( "float_to_int", title="Float To Integer", From 4530bd46dcd4e426b688589e8138f9d4f1787d0b Mon Sep 17 00:00:00 2001 From: Millun 
Atluri Date: Tue, 26 Sep 2023 17:30:34 +1000 Subject: [PATCH 03/37] Added IP-Adapter --- docs/features/CONTROLNET.md | 38 ++++++++++++++++++++++++++++++------- mkdocs.yml | 6 +++--- 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md index 42ed43146e..a77e58a10a 100644 --- a/docs/features/CONTROLNET.md +++ b/docs/features/CONTROLNET.md @@ -1,13 +1,11 @@ --- -title: ControlNet +title: Control Adapters --- -# :material-loupe: ControlNet +# :material-loupe: Control Adapters ## ControlNet -ControlNet - ControlNet is a powerful set of features developed by the open-source community (notably, Stanford researcher [**@ilyasviel**](https://github.com/lllyasviel)) that allows you to @@ -20,7 +18,7 @@ towards generating images that better fit your desired style or outcome. -### How it works +#### How it works ControlNet works by analyzing an input image, pre-processing that image to identify relevant information that can be interpreted by each @@ -30,7 +28,7 @@ composition, or other aspects of the image to better achieve a specific result. -### Models +#### Models InvokeAI provides access to a series of ControlNet models that provide different effects or styles in your generated images. Currently @@ -96,6 +94,8 @@ A model that generates normal maps from input images, allowing for more realisti **Image Segmentation**: A model that divides input images into segments or regions, each of which corresponds to a different object or part of the image. (More details coming soon) +**QR Code Monster**: +A model that helps generate creative QR codes that still scan. Can also be used to create images with text, logos or shapes within them. **Openpose**: The OpenPose control model allows for the identification of the general pose of a character by pre-processing an existing image with a clear human structure. With advanced options, Openpose can also detect the face or hands in the image. 
@@ -120,7 +120,7 @@ With Pix2Pix, you can input an image into the controlnet, and then "instruct" th Each of these models can be adjusted and combined with other ControlNet models to achieve different results, giving you even more control over your image generation process. -## Using ControlNet +### Using ControlNet To use ControlNet, you can simply select the desired model and adjust both the ControlNet and Pre-processor settings to achieve the desired result. You can also use multiple ControlNet models at the same time, allowing you to achieve even more complex effects or styles in your generated images. @@ -132,3 +132,27 @@ Weight - Strength of the Controlnet model applied to the generation for the sect Start/End - 0 represents the start of the generation, 1 represents the end. The Start/end setting controls what steps during the generation process have the ControlNet applied. Additionally, each ControlNet section can be expanded in order to manipulate settings for the image pre-processor that adjusts your uploaded image before using it in when you Invoke. + + +## IP-Adapter + +[IP-Adapter](https://ip-adapter.github.io) is a tooling that allows for image prompt capabilities with text-to-image diffusion models. IP-Adapter works by analyzing the given image prompt to extract features, then passing those features to the UNet along with any other conditioning provided. + +#### Installation +There are several ways to instal IP-Adapter models with an existing InvokeAI installation: + +1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [5] to download models +2. Install IP-Adapter models through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. +3. 
Manually downloading the models files and placed in the `models/ip-adapter` folder of the Invoke root directory. *Note:* The image_encoder folder is necessary for IP-Adapter to function. + +#### Using IP-Adapter + +IP-Adapter can be used by navigating to the *Control Adapters* options and enabling IP-Adapter. + +IP-Adapter requires an image to be used as the Image Prompt. It can also be used in conjunction with text prompts, Image-to-Image, ControlNets and LoRAs. + + +Each IP-Adapter has two settings that are applied to the IP-Adapter: + +* Weight - Strength of the IP-Adapter model applied to the generation for the section, defined by start/end +* Start/End - 0 represents the start of the generation, 1 represents the end. The Start/end setting controls what steps during the generation process have the IP-Adapter applied. \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index f4e0688878..f95d83ac8f 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -127,12 +127,12 @@ nav: - Manual Installation on Windows: 'installation/deprecated_documentation/INSTALL_WINDOWS.md' - Installing Invoke with pip: 'installation/deprecated_documentation/INSTALL_PCP.md' - Source Installer: 'installation/deprecated_documentation/INSTALL_SOURCE.md' - - Nodes: + - Workflows & Nodes: - Community Nodes: 'nodes/communityNodes.md' - Example Workflows: 'nodes/exampleWorkflows.md' - Nodes Overview: 'nodes/overview.md' - List of Default Nodes: 'nodes/defaultNodes.md' - - Node Editor Usage: 'nodes/NODES.md' + - Workflow Editor Usage: 'nodes/NODES.md' - ComfyUI to InvokeAI: 'nodes/comfyToInvoke.md' - Contributing Nodes: 'nodes/contributingNodes.md' - Features: @@ -140,7 +140,7 @@ nav: - New to InvokeAI?: 'help/gettingStartedWithAI.md' - Concepts: 'features/CONCEPTS.md' - Configuration: 'features/CONFIGURATION.md' - - ControlNet: 'features/CONTROLNET.md' + - Control Adapters: 'features/CONTROLNET.md' - Image-to-Image: 'features/IMG2IMG.md' - Controlling Logging: 'features/LOGGING.md' - Model 
Merging: 'features/MODEL_MERGING.md' From 727cc0dafe166091326eee00707d53aaf557d4ad Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Tue, 26 Sep 2023 17:51:08 +1000 Subject: [PATCH 04/37] add pics --- docs/features/CONTROLNET.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md index a77e58a10a..a9dc382759 100644 --- a/docs/features/CONTROLNET.md +++ b/docs/features/CONTROLNET.md @@ -138,18 +138,21 @@ Additionally, each ControlNet section can be expanded in order to manipulate set [IP-Adapter](https://ip-adapter.github.io) is a tooling that allows for image prompt capabilities with text-to-image diffusion models. IP-Adapter works by analyzing the given image prompt to extract features, then passing those features to the UNet along with any other conditioning provided. +![IP-Adapter + T2I](https://github.com/tencent-ailab/IP-Adapter/raw/main/assets/demo/ip_adpter_plus_multi.jpg) +![IP-Adapter + T2I](https://github.com/tencent-ailab/IP-Adapter/blob/main/assets/demo/image-to-image.jpg) + #### Installation -There are several ways to instal IP-Adapter models with an existing InvokeAI installation: +There are several ways to install IP-Adapter models with an existing InvokeAI installation: 1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [5] to download models -2. Install IP-Adapter models through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. -3. Manually downloading the models files and placed in the `models/ip-adapter` folder of the Invoke root directory. *Note:* The image_encoder folder is necessary for IP-Adapter to function. +2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](www.models.invoke.ai). 
To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. +3. Manually downloading the models files and placed in the `models/ip-adapter` folder of the Invoke root directory. **Note:** The image_encoder folder is necessary for IP-Adapter to function. #### Using IP-Adapter IP-Adapter can be used by navigating to the *Control Adapters* options and enabling IP-Adapter. -IP-Adapter requires an image to be used as the Image Prompt. It can also be used in conjunction with text prompts, Image-to-Image, ControlNets and LoRAs. +IP-Adapter requires an image to be used as the Image Prompt. It can also be used in conjunction with text prompts, Image-to-Image, Inpainting, Outpainting, ControlNets and LoRAs. Each IP-Adapter has two settings that are applied to the IP-Adapter: From edd2c54b9ef3d274280cb3608ca77bc0cae1e25e Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Tue, 26 Sep 2023 18:28:52 +1000 Subject: [PATCH 05/37] add cache --- docs/nodes/NODES.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/nodes/NODES.md b/docs/nodes/NODES.md index 1abd0b1ac4..fdb522eca1 100644 --- a/docs/nodes/NODES.md +++ b/docs/nodes/NODES.md @@ -4,7 +4,7 @@ The workflow editor is a blank canvas allowing for the use of individual functio If you're not familiar with Diffusion, take a look at our [Diffusion Overview.](../help/diffusion.md) Understanding how diffusion works will enable you to more easily use the Workflow Editor and build workflows to suit your needs. -## UI Features +## Features ### Linear View The Workflow Editor allows you to create a UI for your workflow, to make it easier to iterate on your generations. @@ -25,6 +25,10 @@ Any node or input field can be renamed in the workflow editor. If the input fiel * Backspace/Delete to delete a node * Shift+Click to drag and select multiple nodes +### Node Caching + +Nodes have a "Use Cache" option in their footer. 
This allows for performance improvements by using the previously cached values during the workflow processing. + ## Important Concepts From c8b306d9f8b06a829c262042f97fea392edeb72d Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Tue, 26 Sep 2023 19:20:03 +1000 Subject: [PATCH 06/37] Update CONTROLNET.md --- docs/features/CONTROLNET.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md index a9dc382759..f079e0fc9d 100644 --- a/docs/features/CONTROLNET.md +++ b/docs/features/CONTROLNET.md @@ -139,7 +139,7 @@ Additionally, each ControlNet section can be expanded in order to manipulate set [IP-Adapter](https://ip-adapter.github.io) is a tooling that allows for image prompt capabilities with text-to-image diffusion models. IP-Adapter works by analyzing the given image prompt to extract features, then passing those features to the UNet along with any other conditioning provided. ![IP-Adapter + T2I](https://github.com/tencent-ailab/IP-Adapter/raw/main/assets/demo/ip_adpter_plus_multi.jpg) -![IP-Adapter + T2I](https://github.com/tencent-ailab/IP-Adapter/blob/main/assets/demo/image-to-image.jpg) +![IP-Adapter + IMG2IMG](https://github.com/tencent-ailab/IP-Adapter/blob/main/assets/demo/image-to-image.jpg) #### Installation There are several ways to install IP-Adapter models with an existing InvokeAI installation: @@ -158,4 +158,4 @@ IP-Adapter requires an image to be used as the Image Prompt. It can also be used Each IP-Adapter has two settings that are applied to the IP-Adapter: * Weight - Strength of the IP-Adapter model applied to the generation for the section, defined by start/end -* Start/End - 0 represents the start of the generation, 1 represents the end. The Start/end setting controls what steps during the generation process have the IP-Adapter applied. \ No newline at end of file +* Start/End - 0 represents the start of the generation, 1 represents the end. 
The Start/end setting controls what steps during the generation process have the IP-Adapter applied. From 0c97a1e7e7a9ad896edc4f51a3bf72c816f8aaac Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 24 Sep 2023 15:31:25 -0400 Subject: [PATCH 07/37] give user option to disable the configure TUI during installation --- installer/lib/installer.py | 11 ++++-- installer/lib/messages.py | 37 +++++++++++++++++-- .../app/services/config/invokeai_config.py | 2 +- 3 files changed, 42 insertions(+), 8 deletions(-) diff --git a/installer/lib/installer.py b/installer/lib/installer.py index aaf5779801..811a9d7b16 100644 --- a/installer/lib/installer.py +++ b/installer/lib/installer.py @@ -332,6 +332,7 @@ class InvokeAiInstance: Configure the InvokeAI runtime directory """ + auto_install = False # set sys.argv to a consistent state new_argv = [sys.argv[0]] for i in range(1, len(sys.argv)): @@ -340,13 +341,17 @@ class InvokeAiInstance: new_argv.append(el) new_argv.append(sys.argv[i + 1]) elif el in ["-y", "--yes", "--yes-to-all"]: - new_argv.append(el) + auto_install = True sys.argv = new_argv import requests # to catch download exceptions - from messages import introduction + import messages - introduction() + auto_install = auto_install or messages.user_wants_auto_configuration() + if auto_install: + sys.argv.append('--yes') + else: + messages.introduction() from invokeai.frontend.install.invokeai_configure import invokeai_configure diff --git a/installer/lib/messages.py b/installer/lib/messages.py index c5a39dc91c..4d6a06d2e0 100644 --- a/installer/lib/messages.py +++ b/installer/lib/messages.py @@ -7,7 +7,7 @@ import os import platform from pathlib import Path -from prompt_toolkit import prompt +from prompt_toolkit import prompt, HTML from prompt_toolkit.completion import PathCompleter from prompt_toolkit.validation import Validator from rich import box, print @@ -65,17 +65,46 @@ def confirm_install(dest: Path) -> bool: if dest.exists(): print(f":exclamation: Directory 
{dest} already exists :exclamation:") dest_confirmed = Confirm.ask( - ":stop_sign: Are you sure you want to (re)install in this location?", + ":stop_sign: (re)install in this location?", default=False, ) else: print(f"InvokeAI will be installed in {dest}") - dest_confirmed = not Confirm.ask("Would you like to pick a different location?", default=False) + dest_confirmed = Confirm.ask("Use this location?", default=True) console.line() return dest_confirmed +def user_wants_auto_configuration() -> bool: + """Prompt the user to choose between manual and auto configuration.""" + console.rule("InvokeAI Configuration Section") + console.print( + Panel( + Group( + "\n".join( + [ + "Libraries are installed and InvokeAI will now set up its root directory and configuration. Choose between:", + "", + " * AUTOMATIC configuration: install reasonable defaults and a minimal set of starter models.", + " * MANUAL configuration: manually inspect and adjust configuration options and pick from a larger set of starter models.", + "", + "Later you can fine tune your configuration by selecting option [6] 'Change InvokeAI startup options' from the invoke.bat/invoke.sh launcher script.", + ] + ), + ), + box=box.MINIMAL, + padding=(1, 1), + ) + ) + choice = prompt(HTML("Choose <a>utomatic or <m>anual configuration [a/m] (a): "), + validator=Validator.from_callable( + lambda n: n=='' or n.startswith(('a', 'A', 'm', 'M')), + error_message="Please select 'a' or 'm'" + ), + ) or 'a' + return choice.lower().startswith('a') + def dest_path(dest=None) -> Path: """ Prompt the user for the destination path and create the path @@ -180,7 +209,7 @@ def graphical_accelerator(): "cpu", ) idk = ( - "I'm not sure what to choose", + "I'm not sure what to choose", "idk", ) diff --git a/invokeai/app/services/config/invokeai_config.py b/invokeai/app/services/config/invokeai_config.py index 51ccf45704..8ea703f39a 100644 --- a/invokeai/app/services/config/invokeai_config.py +++ 
b/invokeai/app/services/config/invokeai_config.py @@ -241,7 +241,7 @@ class InvokeAIAppConfig(InvokeAISettings): version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other") # CACHE - ram : Union[float, Literal["auto"]] = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", ) + ram : Union[float, Literal["auto"]] = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", ) vram : Union[float, Literal["auto"]] = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number or 'auto')", category="Model Cache", ) lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", category="Model Cache", ) From d59e534cad5657fb93901b805e5b9fbd0063365e Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 25 Sep 2023 19:18:58 -0400 Subject: [PATCH 08/37] use heuristic to select RAM cache size during headless install; blackified --- installer/lib/installer.py | 4 ++-- installer/lib/messages.py | 22 +++++++++++-------- invokeai/app/invocations/math.py | 4 ++-- .../backend/install/invokeai_configure.py | 14 ++++++++++-- invokeai/backend/ip_adapter/resampler.py | 4 ++-- .../diffusion/cross_attention_control.py | 2 +- .../diffusion/shared_invokeai_diffusion.py | 20 ++++------------- .../training/textual_inversion_training.py | 5 +---- invokeai/backend/util/mps_fixes.py | 2 +- invokeai/backend/util/util.py | 2 +- 10 files changed, 39 insertions(+), 40 deletions(-) diff --git a/installer/lib/installer.py b/installer/lib/installer.py index 811a9d7b16..70ed4d4331 100644 --- a/installer/lib/installer.py +++ b/installer/lib/installer.py @@ -344,12 +344,12 @@ class InvokeAiInstance: auto_install = True sys.argv = new_argv - import requests # to catch 
download exceptions import messages + import requests # to catch download exceptions auto_install = auto_install or messages.user_wants_auto_configuration() if auto_install: - sys.argv.append('--yes') + sys.argv.append("--yes") else: messages.introduction() diff --git a/installer/lib/messages.py b/installer/lib/messages.py index 4d6a06d2e0..e4c03bbfd2 100644 --- a/installer/lib/messages.py +++ b/installer/lib/messages.py @@ -7,7 +7,7 @@ import os import platform from pathlib import Path -from prompt_toolkit import prompt, HTML +from prompt_toolkit import HTML, prompt from prompt_toolkit.completion import PathCompleter from prompt_toolkit.validation import Validator from rich import box, print @@ -97,13 +97,17 @@ def user_wants_auto_configuration() -> bool: padding=(1, 1), ) ) - choice = prompt(HTML("Choose <a>utomatic or <m>anual configuration [a/m] (a): "), - validator=Validator.from_callable( - lambda n: n=='' or n.startswith(('a', 'A', 'm', 'M')), - error_message="Please select 'a' or 'm'" - ), - ) or 'a' - return choice.lower().startswith('a') + choice = ( + prompt( + HTML("Choose <a>utomatic or <m>anual configuration [a/m] (a): "), + validator=Validator.from_callable( + lambda n: n == "" or n.startswith(("a", "A", "m", "M")), error_message="Please select 'a' or 'm'" + ), + ) + or "a" + ) + return choice.lower().startswith("a") + def dest_path(dest=None) -> Path: """ @@ -209,7 +213,7 @@ def graphical_accelerator(): "cpu", ) idk = ( - "I'm not sure what to choose", + "I'm not sure what to choose", "idk", ) diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index b52cbb28bf..90ab44913f 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -199,7 +199,7 @@ class IntegerMathInvocation(BaseInvocation): elif self.operation == "DIV": return IntegerOutput(value=int(self.a / self.b)) elif self.operation == "EXP": - return IntegerOutput(value=self.a**self.b) + return IntegerOutput(value=self.a ** self.b) 
elif self.operation == "MOD": return IntegerOutput(value=self.a % self.b) elif self.operation == "ABS": @@ -273,7 +273,7 @@ class FloatMathInvocation(BaseInvocation): elif self.operation == "DIV": return FloatOutput(value=self.a / self.b) elif self.operation == "EXP": - return FloatOutput(value=self.a**self.b) + return FloatOutput(value=self.a ** self.b) elif self.operation == "SQRT": return FloatOutput(value=np.sqrt(self.a)) elif self.operation == "ABS": diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index ec2221e12d..5afbdfb5a3 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -70,7 +70,6 @@ def get_literal_fields(field) -> list[Any]: config = InvokeAIAppConfig.get_config() Model_dir = "models" - Default_config_file = config.model_conf_path SD_Configs = config.legacy_conf_path @@ -458,7 +457,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle. ) self.add_widget_intelligent( npyscreen.TitleFixedText, - name="Model RAM cache size (GB). Make this at least large enough to hold a single full model.", + name="Model RAM cache size (GB). Make this at least large enough to hold a single full model (2GB for SD-1, 6GB for SDXL).", begin_entry_at=0, editable=False, color="CONTROL", @@ -651,8 +650,19 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam return editApp.new_opts() +def default_ramcache() -> float: + """Run a heuristic for the default RAM cache based on installed RAM.""" + + # Note that on my 64 GB machine, psutil.virtual_memory().total gives 62 GB, + # So we adjust everthing down a bit. 
+ return ( + 15.0 if MAX_RAM >= 60 else 7.5 if MAX_RAM >= 30 else 4 if MAX_RAM >= 14 else 2.1 + ) # 2.1 is just large enough for sd 1.5 ;-) + + def default_startup_options(init_file: Path) -> Namespace: opts = InvokeAIAppConfig.get_config() + opts.ram = default_ramcache() return opts diff --git a/invokeai/backend/ip_adapter/resampler.py b/invokeai/backend/ip_adapter/resampler.py index 84224fd359..0c07a7fa1b 100644 --- a/invokeai/backend/ip_adapter/resampler.py +++ b/invokeai/backend/ip_adapter/resampler.py @@ -33,7 +33,7 @@ def reshape_tensor(x, heads): class PerceiverAttention(nn.Module): def __init__(self, *, dim, dim_head=64, heads=8): super().__init__() - self.scale = dim_head**-0.5 + self.scale = dim_head ** -0.5 self.dim_head = dim_head self.heads = heads inner_dim = dim_head * heads @@ -91,7 +91,7 @@ class Resampler(nn.Module): ): super().__init__() - self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5) + self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim ** 0.5) self.proj_in = nn.Linear(embedding_dim, dim) diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py index 3cb1862004..4931ab7b50 100644 --- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py +++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py @@ -261,7 +261,7 @@ class InvokeAICrossAttentionMixin: if q.shape[1] <= 4096: # (512x512) max q.shape[1]: 4096 return self.einsum_lowest_level(q, k, v, None, None, None) else: - slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1])) + slice_size = math.floor(2 ** 30 / (q.shape[0] * q.shape[1])) return self.einsum_op_slice_dim1(q, k, v, slice_size) def einsum_op_mps_v2(self, q, k, v): diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 125e62a2e8..2089ff9fa6 
100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -175,10 +175,7 @@ class InvokeAIDiffuserComponent: dim=0, ), } - ( - encoder_hidden_states, - encoder_attention_mask, - ) = self._concat_conditionings_for_batch( + (encoder_hidden_states, encoder_attention_mask,) = self._concat_conditionings_for_batch( conditioning_data.unconditioned_embeddings.embeds, conditioning_data.text_embeddings.embeds, ) @@ -240,10 +237,7 @@ class InvokeAIDiffuserComponent: wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0 if wants_cross_attention_control: - ( - unconditioned_next_x, - conditioned_next_x, - ) = self._apply_cross_attention_controlled_conditioning( + (unconditioned_next_x, conditioned_next_x,) = self._apply_cross_attention_controlled_conditioning( sample, timestep, conditioning_data, @@ -251,10 +245,7 @@ class InvokeAIDiffuserComponent: **kwargs, ) elif self.sequential_guidance: - ( - unconditioned_next_x, - conditioned_next_x, - ) = self._apply_standard_conditioning_sequentially( + (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning_sequentially( sample, timestep, conditioning_data, @@ -262,10 +253,7 @@ class InvokeAIDiffuserComponent: ) else: - ( - unconditioned_next_x, - conditioned_next_x, - ) = self._apply_standard_conditioning( + (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning( sample, timestep, conditioning_data, diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py index 153bd0fcc4..6f419a61de 100644 --- a/invokeai/backend/training/textual_inversion_training.py +++ b/invokeai/backend/training/textual_inversion_training.py @@ -470,10 +470,7 @@ class TextualInversionDataset(Dataset): if self.center_crop: crop = min(img.shape[0], img.shape[1]) - ( - h, - w, - ) = ( + (h, w,) = ( img.shape[0], 
img.shape[1], ) diff --git a/invokeai/backend/util/mps_fixes.py b/invokeai/backend/util/mps_fixes.py index ce21d33b88..dc428e9d51 100644 --- a/invokeai/backend/util/mps_fixes.py +++ b/invokeai/backend/util/mps_fixes.py @@ -203,7 +203,7 @@ class ChunkedSlicedAttnProcessor: if attn.upcast_attention: out_item_size = 4 - chunk_size = 2**29 + chunk_size = 2 ** 29 out_size = query.shape[1] * key.shape[1] * out_item_size chunks_count = min(query.shape[1], math.ceil((out_size - 1) / chunk_size)) diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py index 0796f1a8cd..fb85425581 100644 --- a/invokeai/backend/util/util.py +++ b/invokeai/backend/util/util.py @@ -207,7 +207,7 @@ def parallel_data_prefetch( return gather_res -def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3): +def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3): delta = (res[0] / shape[0], res[1] / shape[1]) d = (shape[0] // res[0], shape[1] // res[1]) From 3fd27b1aa9467389bfefba7579b40560ee1a6110 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 25 Sep 2023 19:27:22 -0400 Subject: [PATCH 09/37] run correct version of black --- invokeai/app/invocations/math.py | 4 ++-- invokeai/backend/ip_adapter/resampler.py | 4 ++-- .../diffusion/cross_attention_control.py | 2 +- .../diffusion/shared_invokeai_diffusion.py | 20 +++++++++++++++---- .../training/textual_inversion_training.py | 5 ++++- invokeai/backend/util/mps_fixes.py | 2 +- invokeai/backend/util/util.py | 2 +- 7 files changed, 27 insertions(+), 12 deletions(-) diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index 90ab44913f..b52cbb28bf 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -199,7 +199,7 @@ class IntegerMathInvocation(BaseInvocation): elif self.operation == "DIV": return IntegerOutput(value=int(self.a / self.b)) elif self.operation == "EXP": - return 
IntegerOutput(value=self.a ** self.b) + return IntegerOutput(value=self.a**self.b) elif self.operation == "MOD": return IntegerOutput(value=self.a % self.b) elif self.operation == "ABS": @@ -273,7 +273,7 @@ class FloatMathInvocation(BaseInvocation): elif self.operation == "DIV": return FloatOutput(value=self.a / self.b) elif self.operation == "EXP": - return FloatOutput(value=self.a ** self.b) + return FloatOutput(value=self.a**self.b) elif self.operation == "SQRT": return FloatOutput(value=np.sqrt(self.a)) elif self.operation == "ABS": diff --git a/invokeai/backend/ip_adapter/resampler.py b/invokeai/backend/ip_adapter/resampler.py index 0c07a7fa1b..84224fd359 100644 --- a/invokeai/backend/ip_adapter/resampler.py +++ b/invokeai/backend/ip_adapter/resampler.py @@ -33,7 +33,7 @@ def reshape_tensor(x, heads): class PerceiverAttention(nn.Module): def __init__(self, *, dim, dim_head=64, heads=8): super().__init__() - self.scale = dim_head ** -0.5 + self.scale = dim_head**-0.5 self.dim_head = dim_head self.heads = heads inner_dim = dim_head * heads @@ -91,7 +91,7 @@ class Resampler(nn.Module): ): super().__init__() - self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim ** 0.5) + self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5) self.proj_in = nn.Linear(embedding_dim, dim) diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py index 4931ab7b50..3cb1862004 100644 --- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py +++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py @@ -261,7 +261,7 @@ class InvokeAICrossAttentionMixin: if q.shape[1] <= 4096: # (512x512) max q.shape[1]: 4096 return self.einsum_lowest_level(q, k, v, None, None, None) else: - slice_size = math.floor(2 ** 30 / (q.shape[0] * q.shape[1])) + slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1])) return 
self.einsum_op_slice_dim1(q, k, v, slice_size) def einsum_op_mps_v2(self, q, k, v): diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 2089ff9fa6..125e62a2e8 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -175,7 +175,10 @@ class InvokeAIDiffuserComponent: dim=0, ), } - (encoder_hidden_states, encoder_attention_mask,) = self._concat_conditionings_for_batch( + ( + encoder_hidden_states, + encoder_attention_mask, + ) = self._concat_conditionings_for_batch( conditioning_data.unconditioned_embeddings.embeds, conditioning_data.text_embeddings.embeds, ) @@ -237,7 +240,10 @@ class InvokeAIDiffuserComponent: wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0 if wants_cross_attention_control: - (unconditioned_next_x, conditioned_next_x,) = self._apply_cross_attention_controlled_conditioning( + ( + unconditioned_next_x, + conditioned_next_x, + ) = self._apply_cross_attention_controlled_conditioning( sample, timestep, conditioning_data, @@ -245,7 +251,10 @@ class InvokeAIDiffuserComponent: **kwargs, ) elif self.sequential_guidance: - (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning_sequentially( + ( + unconditioned_next_x, + conditioned_next_x, + ) = self._apply_standard_conditioning_sequentially( sample, timestep, conditioning_data, @@ -253,7 +262,10 @@ class InvokeAIDiffuserComponent: ) else: - (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning( + ( + unconditioned_next_x, + conditioned_next_x, + ) = self._apply_standard_conditioning( sample, timestep, conditioning_data, diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py index 6f419a61de..153bd0fcc4 100644 --- 
a/invokeai/backend/training/textual_inversion_training.py +++ b/invokeai/backend/training/textual_inversion_training.py @@ -470,7 +470,10 @@ class TextualInversionDataset(Dataset): if self.center_crop: crop = min(img.shape[0], img.shape[1]) - (h, w,) = ( + ( + h, + w, + ) = ( img.shape[0], img.shape[1], ) diff --git a/invokeai/backend/util/mps_fixes.py b/invokeai/backend/util/mps_fixes.py index dc428e9d51..ce21d33b88 100644 --- a/invokeai/backend/util/mps_fixes.py +++ b/invokeai/backend/util/mps_fixes.py @@ -203,7 +203,7 @@ class ChunkedSlicedAttnProcessor: if attn.upcast_attention: out_item_size = 4 - chunk_size = 2 ** 29 + chunk_size = 2**29 out_size = query.shape[1] * key.shape[1] * out_item_size chunks_count = min(query.shape[1], math.ceil((out_size - 1) / chunk_size)) diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py index fb85425581..0796f1a8cd 100644 --- a/invokeai/backend/util/util.py +++ b/invokeai/backend/util/util.py @@ -207,7 +207,7 @@ def parallel_data_prefetch( return gather_res -def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3): +def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3): delta = (res[0] / shape[0], res[1] / shape[1]) d = (shape[0] // res[0], shape[1] // res[1]) From d45c47db819450421fdd004b667f94268e461324 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 00:03:48 +1000 Subject: [PATCH 10/37] fix(backend): remove extra cache arg (#4698) --- .../app/services/invocation_cache/invocation_cache_memory.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/invokeai/app/services/invocation_cache/invocation_cache_memory.py b/invokeai/app/services/invocation_cache/invocation_cache_memory.py index b40243f285..817dbb958e 100644 --- a/invokeai/app/services/invocation_cache/invocation_cache_memory.py +++ 
b/invokeai/app/services/invocation_cache/invocation_cache_memory.py @@ -1,7 +1,6 @@ from collections import OrderedDict from dataclasses import dataclass, field from threading import Lock -from time import time from typing import Optional, Union from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput @@ -59,7 +58,7 @@ class MemoryInvocationCache(InvocationCacheBase): # If the cache is full, we need to remove the least used number_to_delete = len(self._cache) + 1 - self._max_cache_size self._delete_oldest_access(number_to_delete) - self._cache[key] = CachedItem(time(), invocation_output, invocation_output.json()) + self._cache[key] = CachedItem(invocation_output, invocation_output.json()) def _delete_oldest_access(self, number_to_delete: int) -> None: number_to_delete = min(number_to_delete, len(self._cache)) From 34c563060fffbd01eb9d96de0cc187998817c528 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 00:06:39 +1000 Subject: [PATCH 11/37] feat(ui): store active tab as name, not index (#4697) This fixes an issue with tab changing when some tabs are disabled. 
--- .../src/features/ui/components/InvokeTabs.tsx | 16 +++++----- .../src/features/ui/store/extraReducers.ts | 13 -------- .../web/src/features/ui/store/uiSelectors.ts | 30 ++++++++----------- .../web/src/features/ui/store/uiSlice.ts | 7 ++--- .../web/src/features/ui/store/uiTypes.ts | 3 +- 5 files changed, 26 insertions(+), 43 deletions(-) delete mode 100644 invokeai/frontend/web/src/features/ui/store/extraReducers.ts diff --git a/invokeai/frontend/web/src/features/ui/components/InvokeTabs.tsx b/invokeai/frontend/web/src/features/ui/components/InvokeTabs.tsx index fb5756b121..ac7b8aa1c4 100644 --- a/invokeai/frontend/web/src/features/ui/components/InvokeTabs.tsx +++ b/invokeai/frontend/web/src/features/ui/components/InvokeTabs.tsx @@ -14,7 +14,7 @@ import { stateSelector } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import ImageGalleryContent from 'features/gallery/components/ImageGalleryContent'; import NodeEditorPanelGroup from 'features/nodes/components/sidePanel/NodeEditorPanelGroup'; -import { InvokeTabName, tabMap } from 'features/ui/store/tabMap'; +import { InvokeTabName } from 'features/ui/store/tabMap'; import { setActiveTab } from 'features/ui/store/uiSlice'; import { ResourceKey } from 'i18next'; import { isEqual } from 'lodash-es'; @@ -110,7 +110,7 @@ export const NO_GALLERY_TABS: InvokeTabName[] = ['modelManager', 'queue']; export const NO_SIDE_PANEL_TABS: InvokeTabName[] = ['modelManager', 'queue']; const InvokeTabs = () => { - const activeTab = useAppSelector(activeTabIndexSelector); + const activeTabIndex = useAppSelector(activeTabIndexSelector); const activeTabName = useAppSelector(activeTabNameSelector); const enabledTabs = useAppSelector(enabledTabsSelector); const { t } = useTranslation(); @@ -150,13 +150,13 @@ const InvokeTabs = () => { const handleTabChange = useCallback( (index: number) => { - const activeTabName = tabMap[index]; - if (!activeTabName) { + const tab = enabledTabs[index]; + if 
(!tab) { return; } - dispatch(setActiveTab(activeTabName)); + dispatch(setActiveTab(tab.id)); }, - [dispatch] + [dispatch, enabledTabs] ); const { @@ -216,8 +216,8 @@ const InvokeTabs = () => { return ( { - if (typeof newActiveTab === 'number') { - state.activeTab = newActiveTab; - } else { - state.activeTab = tabMap.indexOf(newActiveTab); - } -}; diff --git a/invokeai/frontend/web/src/features/ui/store/uiSelectors.ts b/invokeai/frontend/web/src/features/ui/store/uiSelectors.ts index 5427fa9d3b..99ee8d80f7 100644 --- a/invokeai/frontend/web/src/features/ui/store/uiSelectors.ts +++ b/invokeai/frontend/web/src/features/ui/store/uiSelectors.ts @@ -1,27 +1,23 @@ import { createSelector } from '@reduxjs/toolkit'; import { RootState } from 'app/store/store'; -import { isEqual } from 'lodash-es'; - -import { InvokeTabName, tabMap } from './tabMap'; -import { UIState } from './uiTypes'; +import { isEqual, isString } from 'lodash-es'; +import { tabMap } from './tabMap'; export const activeTabNameSelector = createSelector( - (state: RootState) => state.ui, - (ui: UIState) => tabMap[ui.activeTab] as InvokeTabName, - { - memoizeOptions: { - equalityCheck: isEqual, - }, - } + (state: RootState) => state, + /** + * Previously `activeTab` was an integer, but now it's a string. + * Default to first tab in case user has integer. + */ + ({ ui }) => (isString(ui.activeTab) ? ui.activeTab : 'txt2img') ); export const activeTabIndexSelector = createSelector( - (state: RootState) => state.ui, - (ui: UIState) => ui.activeTab, - { - memoizeOptions: { - equalityCheck: isEqual, - }, + (state: RootState) => state, + ({ ui, config }) => { + const tabs = tabMap.filter((t) => !config.disabledTabs.includes(t)); + const idx = tabs.indexOf(ui.activeTab); + return idx === -1 ? 
0 : idx; } ); diff --git a/invokeai/frontend/web/src/features/ui/store/uiSlice.ts b/invokeai/frontend/web/src/features/ui/store/uiSlice.ts index 82c9ef4e77..9782d0bfac 100644 --- a/invokeai/frontend/web/src/features/ui/store/uiSlice.ts +++ b/invokeai/frontend/web/src/features/ui/store/uiSlice.ts @@ -2,12 +2,11 @@ import type { PayloadAction } from '@reduxjs/toolkit'; import { createSlice } from '@reduxjs/toolkit'; import { initialImageChanged } from 'features/parameters/store/generationSlice'; import { SchedulerParam } from 'features/parameters/types/parameterSchemas'; -import { setActiveTabReducer } from './extraReducers'; import { InvokeTabName } from './tabMap'; import { UIState } from './uiTypes'; export const initialUIState: UIState = { - activeTab: 0, + activeTab: 'txt2img', shouldShowImageDetails: false, shouldUseCanvasBetaLayout: false, shouldShowExistingModelsInSearch: false, @@ -26,7 +25,7 @@ export const uiSlice = createSlice({ initialState: initialUIState, reducers: { setActiveTab: (state, action: PayloadAction) => { - setActiveTabReducer(state, action.payload); + state.activeTab = action.payload; }, setShouldShowImageDetails: (state, action: PayloadAction) => { state.shouldShowImageDetails = action.payload; @@ -73,7 +72,7 @@ export const uiSlice = createSlice({ }, extraReducers(builder) { builder.addCase(initialImageChanged, (state) => { - setActiveTabReducer(state, 'img2img'); + state.activeTab = 'img2img'; }); }, }); diff --git a/invokeai/frontend/web/src/features/ui/store/uiTypes.ts b/invokeai/frontend/web/src/features/ui/store/uiTypes.ts index 41a359a651..1b9fee6989 100644 --- a/invokeai/frontend/web/src/features/ui/store/uiTypes.ts +++ b/invokeai/frontend/web/src/features/ui/store/uiTypes.ts @@ -1,4 +1,5 @@ import { SchedulerParam } from 'features/parameters/types/parameterSchemas'; +import { InvokeTabName } from './tabMap'; export type Coordinates = { x: number; @@ -13,7 +14,7 @@ export type Dimensions = { export type Rect = Coordinates & 
Dimensions; export interface UIState { - activeTab: number; + activeTab: InvokeTabName; shouldShowImageDetails: boolean; shouldUseCanvasBetaLayout: boolean; shouldShowExistingModelsInSearch: boolean; From 105a4234b04edb8e87f6da43e365325314805576 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 00:11:12 +1000 Subject: [PATCH 12/37] fix(ui): fix color picker on canvas (#4706) Resolves #4667 Co-authored-by: Mary Hipp Rogers --- .../src/common/components/IAIColorPicker.tsx | 33 +++++++------------ .../IAICanvasToolbar/IAICanvasMaskOptions.tsx | 13 ++++---- .../IAICanvasToolChooserOptions.tsx | 13 +++++--- .../UnifiedCanvasColorPicker.tsx | 22 ++++++++----- 4 files changed, 40 insertions(+), 41 deletions(-) diff --git a/invokeai/frontend/web/src/common/components/IAIColorPicker.tsx b/invokeai/frontend/web/src/common/components/IAIColorPicker.tsx index f6a05c86b1..5854f7503f 100644 --- a/invokeai/frontend/web/src/common/components/IAIColorPicker.tsx +++ b/invokeai/frontend/web/src/common/components/IAIColorPicker.tsx @@ -1,18 +1,9 @@ -import { chakra, ChakraProps } from '@chakra-ui/react'; +import { Box, ChakraProps } from '@chakra-ui/react'; import { memo } from 'react'; import { RgbaColorPicker } from 'react-colorful'; import { ColorPickerBaseProps, RgbaColor } from 'react-colorful/dist/types'; -type IAIColorPickerProps = Omit, 'color'> & - ChakraProps & { - pickerColor: RgbaColor; - styleClass?: string; - }; - -const ChakraRgbaColorPicker = chakra(RgbaColorPicker, { - baseStyle: { paddingInline: 4 }, - shouldForwardProp: (prop) => !['pickerColor'].includes(prop), -}); +type IAIColorPickerProps = ColorPickerBaseProps; const colorPickerStyles: NonNullable = { width: 6, @@ -20,19 +11,17 @@ const colorPickerStyles: NonNullable = { borderColor: 'base.100', }; -const IAIColorPicker = (props: IAIColorPickerProps) => { - const { styleClass = '', ...rest } = props; +const sx = { + 
'.react-colorful__hue-pointer': colorPickerStyles, + '.react-colorful__saturation-pointer': colorPickerStyles, + '.react-colorful__alpha-pointer': colorPickerStyles, +}; +const IAIColorPicker = (props: IAIColorPickerProps) => { return ( - + + + ); }; diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx index 76211a2e95..43e8febd66 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx @@ -1,4 +1,4 @@ -import { ButtonGroup, Flex } from '@chakra-ui/react'; +import { Box, ButtonGroup, Flex } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import IAIButton from 'common/components/IAIButton'; @@ -135,11 +135,12 @@ const IAICanvasMaskOptions = () => { dispatch(setShouldPreserveMaskedArea(e.target.checked)) } /> - dispatch(setMaskColor(newColor))} - /> + + dispatch(setMaskColor(newColor))} + /> + } onClick={handleSaveMask}> Save Mask diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolChooserOptions.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolChooserOptions.tsx index 6a7db0e5f2..b5770fdda6 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolChooserOptions.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolChooserOptions.tsx @@ -1,4 +1,4 @@ -import { ButtonGroup, Flex } from '@chakra-ui/react'; +import { ButtonGroup, Flex, Box } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { stateSelector } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 
'app/store/storeHooks'; @@ -237,15 +237,18 @@ const IAICanvasToolChooserOptions = () => { sliderNumberInputProps={{ max: 500 }} /> - dispatch(setBrushColor(newColor))} - /> + > + dispatch(setBrushColor(newColor))} + /> + diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx index 0b8366b06d..e57d87915f 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx @@ -95,26 +95,32 @@ export default function UnifiedCanvasColorPicker() { > {layer === 'base' && ( - dispatch(setBrushColor(newColor))} - /> + > + dispatch(setBrushColor(newColor))} + /> + )} {layer === 'mask' && ( - dispatch(setMaskColor(newColor))} - /> + > + dispatch(setMaskColor(newColor))} + /> + )} From a4cdaa245e63bff246a47571140310abae014201 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 01:24:53 +1000 Subject: [PATCH 13/37] feat(ui): improve error handling (#4699) * feat(ui): add error handling for enqueueBatch route, remove sessions This re-implements the handling for the session create/invoke errors, but for batches. Also remove all references to the old sessions routes in the UI. 
* feat(ui): improve canvas image error UI * make canvas error state gray instead of red --------- Co-authored-by: Mary Hipp --- invokeai/frontend/web/public/locales/en.json | 1 + .../app/components/ThemeLocaleProvider.tsx | 8 +- .../middleware/listenerMiddleware/index.ts | 32 +-- .../listeners/batchEnqueued.ts | 96 +++++++++ .../listeners/enqueueRequestedCanvas.ts | 26 +-- .../listeners/enqueueRequestedLinear.ts | 43 +--- .../listeners/enqueueRequestedNodes.ts | 44 +---- .../listeners/sessionCanceled.ts | 44 ----- .../listeners/sessionCreated.ts | 45 ----- .../listeners/sessionInvoked.ts | 44 ----- .../listeners/util/enqueueBatch.ts | 54 ----- .../canvas/components/IAICanvasImage.tsx | 15 +- .../IAICanvasImageErrorFallback.tsx | 44 +++++ .../src/features/canvas/store/canvasSlice.ts | 6 - .../controlNet/store/controlNetSlice.ts | 5 - .../src/features/system/store/systemSlice.ts | 50 +---- .../src/features/system/store/zodSchemas.ts | 2 +- .../web/src/services/api/thunks/session.ts | 184 ------------------ invokeai/frontend/web/src/theme/theme.ts | 7 +- 19 files changed, 178 insertions(+), 572 deletions(-) create mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/batchEnqueued.ts delete mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionCanceled.ts delete mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionCreated.ts delete mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionInvoked.ts delete mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/util/enqueueBatch.ts create mode 100644 invokeai/frontend/web/src/features/canvas/components/IAICanvasImageErrorFallback.tsx delete mode 100644 invokeai/frontend/web/src/services/api/thunks/session.ts diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 
285da58e3c..e9261692a2 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -58,6 +58,7 @@ "githubLabel": "Github", "hotkeysLabel": "Hotkeys", "imagePrompt": "Image Prompt", + "imageFailedToLoad": "Unable to Load Image", "img2img": "Image To Image", "langArabic": "العربية", "langBrPortuguese": "Português do Brasil", diff --git a/invokeai/frontend/web/src/app/components/ThemeLocaleProvider.tsx b/invokeai/frontend/web/src/app/components/ThemeLocaleProvider.tsx index 9bcc7c831b..a9d56a7f16 100644 --- a/invokeai/frontend/web/src/app/components/ThemeLocaleProvider.tsx +++ b/invokeai/frontend/web/src/app/components/ThemeLocaleProvider.tsx @@ -5,7 +5,7 @@ import { } from '@chakra-ui/react'; import { ReactNode, memo, useEffect, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { theme as invokeAITheme } from 'theme/theme'; +import { TOAST_OPTIONS, theme as invokeAITheme } from 'theme/theme'; import '@fontsource-variable/inter'; import { MantineProvider } from '@mantine/core'; @@ -39,7 +39,11 @@ function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) { return ( - + {children} diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index ead6e1cd42..677b0fd20c 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -54,21 +54,6 @@ import { addModelSelectedListener } from './listeners/modelSelected'; import { addModelsLoadedListener } from './listeners/modelsLoaded'; import { addDynamicPromptsListener } from './listeners/promptChanged'; import { addReceivedOpenAPISchemaListener } from './listeners/receivedOpenAPISchema'; -import { - addSessionCanceledFulfilledListener, - addSessionCanceledPendingListener, - addSessionCanceledRejectedListener, -} from 
'./listeners/sessionCanceled'; -import { - addSessionCreatedFulfilledListener, - addSessionCreatedPendingListener, - addSessionCreatedRejectedListener, -} from './listeners/sessionCreated'; -import { - addSessionInvokedFulfilledListener, - addSessionInvokedPendingListener, - addSessionInvokedRejectedListener, -} from './listeners/sessionInvoked'; import { addSocketConnectedEventListener as addSocketConnectedListener } from './listeners/socketio/socketConnected'; import { addSocketDisconnectedEventListener as addSocketDisconnectedListener } from './listeners/socketio/socketDisconnected'; import { addGeneratorProgressEventListener as addGeneratorProgressListener } from './listeners/socketio/socketGeneratorProgress'; @@ -86,6 +71,7 @@ import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSa import { addTabChangedListener } from './listeners/tabChanged'; import { addUpscaleRequestedListener } from './listeners/upscaleRequested'; import { addWorkflowLoadedListener } from './listeners/workflowLoaded'; +import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; export const listenerMiddleware = createListenerMiddleware(); @@ -136,6 +122,7 @@ addEnqueueRequestedCanvasListener(); addEnqueueRequestedNodes(); addEnqueueRequestedLinear(); addAnyEnqueuedListener(); +addBatchEnqueuedListener(); // Canvas actions addCanvasSavedToGalleryListener(); @@ -175,21 +162,6 @@ addSessionRetrievalErrorEventListener(); addInvocationRetrievalErrorEventListener(); addSocketQueueItemStatusChangedEventListener(); -// Session Created -addSessionCreatedPendingListener(); -addSessionCreatedFulfilledListener(); -addSessionCreatedRejectedListener(); - -// Session Invoked -addSessionInvokedPendingListener(); -addSessionInvokedFulfilledListener(); -addSessionInvokedRejectedListener(); - -// Session Canceled -addSessionCanceledPendingListener(); -addSessionCanceledFulfilledListener(); -addSessionCanceledRejectedListener(); - // ControlNet 
addControlNetImageProcessedListener(); addControlNetAutoProcessListener(); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/batchEnqueued.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/batchEnqueued.ts new file mode 100644 index 0000000000..fe351f3be6 --- /dev/null +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/batchEnqueued.ts @@ -0,0 +1,96 @@ +import { createStandaloneToast } from '@chakra-ui/react'; +import { logger } from 'app/logging/logger'; +import { parseify } from 'common/util/serialize'; +import { zPydanticValidationError } from 'features/system/store/zodSchemas'; +import { t } from 'i18next'; +import { get, truncate, upperFirst } from 'lodash-es'; +import { queueApi } from 'services/api/endpoints/queue'; +import { TOAST_OPTIONS, theme } from 'theme/theme'; +import { startAppListening } from '..'; + +const { toast } = createStandaloneToast({ + theme: theme, + defaultOptions: TOAST_OPTIONS.defaultOptions, +}); + +export const addBatchEnqueuedListener = () => { + // success + startAppListening({ + matcher: queueApi.endpoints.enqueueBatch.matchFulfilled, + effect: async (action) => { + const response = action.payload; + const arg = action.meta.arg.originalArgs; + logger('queue').debug( + { enqueueResult: parseify(response) }, + 'Batch enqueued' + ); + + if (!toast.isActive('batch-queued')) { + toast({ + id: 'batch-queued', + title: t('queue.batchQueued'), + description: t('queue.batchQueuedDesc', { + item_count: response.enqueued, + direction: arg.prepend ? 
t('queue.front') : t('queue.back'), + }), + duration: 1000, + status: 'success', + }); + } + }, + }); + + // error + startAppListening({ + matcher: queueApi.endpoints.enqueueBatch.matchRejected, + effect: async (action) => { + const response = action.payload; + const arg = action.meta.arg.originalArgs; + + if (!response) { + toast({ + title: t('queue.batchFailedToQueue'), + status: 'error', + description: 'Unknown Error', + }); + logger('queue').error( + { batchConfig: parseify(arg), error: parseify(response) }, + t('queue.batchFailedToQueue') + ); + return; + } + + const result = zPydanticValidationError.safeParse(response); + if (result.success) { + result.data.data.detail.map((e) => { + toast({ + id: 'batch-failed-to-queue', + title: truncate(upperFirst(e.msg), { length: 128 }), + status: 'error', + description: truncate( + `Path: + ${e.loc.join('.')}`, + { length: 128 } + ), + }); + }); + } else { + let detail = 'Unknown Error'; + if (response.status === 403 && 'body' in response) { + detail = get(response, 'body.detail', 'Unknown Error'); + } else if (response.status === 403 && 'error' in response) { + detail = get(response, 'error.detail', 'Unknown Error'); + } + toast({ + title: t('queue.batchFailedToQueue'), + status: 'error', + description: detail, + }); + } + logger('queue').error( + { batchConfig: parseify(arg), error: parseify(response) }, + t('queue.batchFailedToQueue') + ); + }, + }); +}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas.ts index c1511bd0e8..8c283ce64e 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas.ts @@ -12,8 +12,6 @@ import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGeneratio 
import { canvasGraphBuilt } from 'features/nodes/store/actions'; import { buildCanvasGraph } from 'features/nodes/util/graphBuilders/buildCanvasGraph'; import { prepareLinearUIBatch } from 'features/nodes/util/graphBuilders/buildLinearBatchConfig'; -import { addToast } from 'features/system/store/systemSlice'; -import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; import { queueApi } from 'services/api/endpoints/queue'; import { ImageDTO } from 'services/api/types'; @@ -140,8 +138,6 @@ export const addEnqueueRequestedCanvasListener = () => { const enqueueResult = await req.unwrap(); req.reset(); - log.debug({ enqueueResult: parseify(enqueueResult) }, 'Batch enqueued'); - const batchId = enqueueResult.batch.batch_id as string; // we know the is a string, backend provides it // Prep the canvas staging area if it is not yet initialized @@ -158,28 +154,8 @@ export const addEnqueueRequestedCanvasListener = () => { // Associate the session with the canvas session ID dispatch(canvasBatchIdAdded(batchId)); - - dispatch( - addToast({ - title: t('queue.batchQueued'), - description: t('queue.batchQueuedDesc', { - item_count: enqueueResult.enqueued, - direction: prepend ? 
t('queue.front') : t('queue.back'), - }), - status: 'success', - }) - ); } catch { - log.error( - { batchConfig: parseify(batchConfig) }, - t('queue.batchFailedToQueue') - ); - dispatch( - addToast({ - title: t('queue.batchFailedToQueue'), - status: 'error', - }) - ); + // no-op } }, }); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts index e36c6f2ebe..bb89d18b91 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts @@ -1,13 +1,9 @@ -import { logger } from 'app/logging/logger'; import { enqueueRequested } from 'app/store/actions'; -import { parseify } from 'common/util/serialize'; import { prepareLinearUIBatch } from 'features/nodes/util/graphBuilders/buildLinearBatchConfig'; import { buildLinearImageToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearImageToImageGraph'; import { buildLinearSDXLImageToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph'; import { buildLinearSDXLTextToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph'; import { buildLinearTextToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearTextToImageGraph'; -import { addToast } from 'features/system/store/systemSlice'; -import { t } from 'i18next'; import { queueApi } from 'services/api/endpoints/queue'; import { startAppListening } from '..'; @@ -18,7 +14,6 @@ export const addEnqueueRequestedLinear = () => { (action.payload.tabName === 'txt2img' || action.payload.tabName === 'img2img'), effect: async (action, { getState, dispatch }) => { - const log = logger('queue'); const state = getState(); const model = state.generation.model; const { prepend } = 
action.payload; @@ -41,38 +36,12 @@ export const addEnqueueRequestedLinear = () => { const batchConfig = prepareLinearUIBatch(state, graph, prepend); - try { - const req = dispatch( - queueApi.endpoints.enqueueBatch.initiate(batchConfig, { - fixedCacheKey: 'enqueueBatch', - }) - ); - const enqueueResult = await req.unwrap(); - req.reset(); - - log.debug({ enqueueResult: parseify(enqueueResult) }, 'Batch enqueued'); - dispatch( - addToast({ - title: t('queue.batchQueued'), - description: t('queue.batchQueuedDesc', { - item_count: enqueueResult.enqueued, - direction: prepend ? t('queue.front') : t('queue.back'), - }), - status: 'success', - }) - ); - } catch { - log.error( - { batchConfig: parseify(batchConfig) }, - t('queue.batchFailedToQueue') - ); - dispatch( - addToast({ - title: t('queue.batchFailedToQueue'), - status: 'error', - }) - ); - } + const req = dispatch( + queueApi.endpoints.enqueueBatch.initiate(batchConfig, { + fixedCacheKey: 'enqueueBatch', + }) + ); + req.reset(); }, }); }; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes.ts index 31281678d4..b87e443a4e 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes.ts @@ -1,9 +1,5 @@ -import { logger } from 'app/logging/logger'; import { enqueueRequested } from 'app/store/actions'; -import { parseify } from 'common/util/serialize'; import { buildNodesGraph } from 'features/nodes/util/graphBuilders/buildNodesGraph'; -import { addToast } from 'features/system/store/systemSlice'; -import { t } from 'i18next'; import { queueApi } from 'services/api/endpoints/queue'; import { BatchConfig } from 'services/api/types'; import { startAppListening } from '..'; @@ -13,9 +9,7 @@ export const 
addEnqueueRequestedNodes = () => { predicate: (action): action is ReturnType => enqueueRequested.match(action) && action.payload.tabName === 'nodes', effect: async (action, { getState, dispatch }) => { - const log = logger('queue'); const state = getState(); - const { prepend } = action.payload; const graph = buildNodesGraph(state.nodes); const batchConfig: BatchConfig = { batch: { @@ -25,38 +19,12 @@ export const addEnqueueRequestedNodes = () => { prepend: action.payload.prepend, }; - try { - const req = dispatch( - queueApi.endpoints.enqueueBatch.initiate(batchConfig, { - fixedCacheKey: 'enqueueBatch', - }) - ); - const enqueueResult = await req.unwrap(); - req.reset(); - - log.debug({ enqueueResult: parseify(enqueueResult) }, 'Batch enqueued'); - dispatch( - addToast({ - title: t('queue.batchQueued'), - description: t('queue.batchQueuedDesc', { - item_count: enqueueResult.enqueued, - direction: prepend ? t('queue.front') : t('queue.back'), - }), - status: 'success', - }) - ); - } catch { - log.error( - { batchConfig: parseify(batchConfig) }, - 'Failed to enqueue batch' - ); - dispatch( - addToast({ - title: t('queue.batchFailedToQueue'), - status: 'error', - }) - ); - } + const req = dispatch( + queueApi.endpoints.enqueueBatch.initiate(batchConfig, { + fixedCacheKey: 'enqueueBatch', + }) + ); + req.reset(); }, }); }; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionCanceled.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionCanceled.ts deleted file mode 100644 index 2592437348..0000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionCanceled.ts +++ /dev/null @@ -1,44 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { serializeError } from 'serialize-error'; -import { sessionCanceled } from 'services/api/thunks/session'; -import { startAppListening } from '..'; - -export const addSessionCanceledPendingListener = () => { - 
startAppListening({ - actionCreator: sessionCanceled.pending, - effect: () => { - // - }, - }); -}; - -export const addSessionCanceledFulfilledListener = () => { - startAppListening({ - actionCreator: sessionCanceled.fulfilled, - effect: (action) => { - const log = logger('session'); - const { session_id } = action.meta.arg; - log.debug({ session_id }, `Session canceled (${session_id})`); - }, - }); -}; - -export const addSessionCanceledRejectedListener = () => { - startAppListening({ - actionCreator: sessionCanceled.rejected, - effect: (action) => { - const log = logger('session'); - const { session_id } = action.meta.arg; - if (action.payload) { - const { error } = action.payload; - log.error( - { - session_id, - error: serializeError(error), - }, - `Problem canceling session` - ); - } - }, - }); -}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionCreated.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionCreated.ts deleted file mode 100644 index e89acb7542..0000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionCreated.ts +++ /dev/null @@ -1,45 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { parseify } from 'common/util/serialize'; -import { serializeError } from 'serialize-error'; -import { sessionCreated } from 'services/api/thunks/session'; -import { startAppListening } from '..'; - -export const addSessionCreatedPendingListener = () => { - startAppListening({ - actionCreator: sessionCreated.pending, - effect: () => { - // - }, - }); -}; - -export const addSessionCreatedFulfilledListener = () => { - startAppListening({ - actionCreator: sessionCreated.fulfilled, - effect: (action) => { - const log = logger('session'); - const session = action.payload; - log.debug( - { session: parseify(session) }, - `Session created (${session.id})` - ); - }, - }); -}; - -export const addSessionCreatedRejectedListener = () => { - 
startAppListening({ - actionCreator: sessionCreated.rejected, - effect: (action) => { - const log = logger('session'); - if (action.payload) { - const { error, status } = action.payload; - const graph = parseify(action.meta.arg); - log.error( - { graph, status, error: serializeError(error) }, - `Problem creating session` - ); - } - }, - }); -}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionInvoked.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionInvoked.ts deleted file mode 100644 index a62f75d957..0000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/sessionInvoked.ts +++ /dev/null @@ -1,44 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { serializeError } from 'serialize-error'; -import { sessionInvoked } from 'services/api/thunks/session'; -import { startAppListening } from '..'; - -export const addSessionInvokedPendingListener = () => { - startAppListening({ - actionCreator: sessionInvoked.pending, - effect: () => { - // - }, - }); -}; - -export const addSessionInvokedFulfilledListener = () => { - startAppListening({ - actionCreator: sessionInvoked.fulfilled, - effect: (action) => { - const log = logger('session'); - const { session_id } = action.meta.arg; - log.debug({ session_id }, `Session invoked (${session_id})`); - }, - }); -}; - -export const addSessionInvokedRejectedListener = () => { - startAppListening({ - actionCreator: sessionInvoked.rejected, - effect: (action) => { - const log = logger('session'); - const { session_id } = action.meta.arg; - if (action.payload) { - const { error } = action.payload; - log.error( - { - session_id, - error: serializeError(error), - }, - `Problem invoking session` - ); - } - }, - }); -}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/util/enqueueBatch.ts 
b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/util/enqueueBatch.ts deleted file mode 100644 index 1d5a1232c8..0000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/util/enqueueBatch.ts +++ /dev/null @@ -1,54 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { AppThunkDispatch } from 'app/store/store'; -import { parseify } from 'common/util/serialize'; -import { addToast } from 'features/system/store/systemSlice'; -import { t } from 'i18next'; -import { queueApi } from 'services/api/endpoints/queue'; -import { BatchConfig } from 'services/api/types'; - -export const enqueueBatch = async ( - batchConfig: BatchConfig, - dispatch: AppThunkDispatch -) => { - const log = logger('session'); - const { prepend } = batchConfig; - - try { - const req = dispatch( - queueApi.endpoints.enqueueBatch.initiate(batchConfig, { - fixedCacheKey: 'enqueueBatch', - }) - ); - const enqueueResult = await req.unwrap(); - req.reset(); - - dispatch( - queueApi.endpoints.resumeProcessor.initiate(undefined, { - fixedCacheKey: 'resumeProcessor', - }) - ); - - log.debug({ enqueueResult: parseify(enqueueResult) }, 'Batch enqueued'); - dispatch( - addToast({ - title: t('queue.batchQueued'), - description: t('queue.batchQueuedDesc', { - item_count: enqueueResult.enqueued, - direction: prepend ? 
t('queue.front') : t('queue.back'), - }), - status: 'success', - }) - ); - } catch { - log.error( - { batchConfig: parseify(batchConfig) }, - t('queue.batchFailedToQueue') - ); - dispatch( - addToast({ - title: t('queue.batchFailedToQueue'), - status: 'error', - }) - ); - } -}; diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasImage.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasImage.tsx index 9f8829c280..d87d912a1e 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasImage.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasImage.tsx @@ -1,26 +1,27 @@ import { skipToken } from '@reduxjs/toolkit/dist/query'; -import { Image, Rect } from 'react-konva'; +import { memo } from 'react'; +import { Image } from 'react-konva'; +import { $authToken } from 'services/api/client'; import { useGetImageDTOQuery } from 'services/api/endpoints/images'; import useImage from 'use-image'; import { CanvasImage } from '../store/canvasTypes'; -import { $authToken } from 'services/api/client'; -import { memo } from 'react'; +import IAICanvasImageErrorFallback from './IAICanvasImageErrorFallback'; type IAICanvasImageProps = { canvasImage: CanvasImage; }; const IAICanvasImage = (props: IAICanvasImageProps) => { - const { width, height, x, y, imageName } = props.canvasImage; + const { x, y, imageName } = props.canvasImage; const { currentData: imageDTO, isError } = useGetImageDTOQuery( imageName ?? skipToken ); - const [image] = useImage( + const [image, status] = useImage( imageDTO?.image_url ?? '', $authToken.get() ? 
'use-credentials' : 'anonymous' ); - if (isError) { - return ; + if (isError || status === 'failed') { + return ; } return ; diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasImageErrorFallback.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasImageErrorFallback.tsx new file mode 100644 index 0000000000..b61cf547cc --- /dev/null +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasImageErrorFallback.tsx @@ -0,0 +1,44 @@ +import { useColorModeValue, useToken } from '@chakra-ui/react'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Group, Rect, Text } from 'react-konva'; +import { CanvasImage } from '../store/canvasTypes'; + +type IAICanvasImageErrorFallbackProps = { + canvasImage: CanvasImage; +}; +const IAICanvasImageErrorFallback = ({ + canvasImage, +}: IAICanvasImageErrorFallbackProps) => { + const [errorColorLight, errorColorDark, fontColorLight, fontColorDark] = + useToken('colors', ['gray.400', 'gray.500', 'base.700', 'base.900']); + const errorColor = useColorModeValue(errorColorLight, errorColorDark); + const fontColor = useColorModeValue(fontColorLight, fontColorDark); + const { t } = useTranslation(); + return ( + + + + + ); +}; + +export default memo(IAICanvasImageErrorFallback); diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts index b726e757f6..754bca2dbc 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts @@ -8,7 +8,6 @@ import { setAspectRatio } from 'features/parameters/store/generationSlice'; import { IRect, Vector2d } from 'konva/lib/types'; import { clamp, cloneDeep } from 'lodash-es'; import { RgbaColor } from 'react-colorful'; -import { sessionCanceled } from 'services/api/thunks/session'; import { ImageDTO } from 'services/api/types'; import calculateCoordinates 
from '../util/calculateCoordinates'; import calculateScale from '../util/calculateScale'; @@ -786,11 +785,6 @@ export const canvasSlice = createSlice({ }, }, extraReducers: (builder) => { - builder.addCase(sessionCanceled.pending, (state) => { - if (!state.layerState.stagingArea.images.length) { - state.layerState.stagingArea = initialLayerState.stagingArea; - } - }); builder.addCase(setAspectRatio, (state, action) => { const ratio = action.payload; if (ratio) { diff --git a/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts b/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts index 70c459f0a4..ae3bdd7112 100644 --- a/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts +++ b/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts @@ -6,7 +6,6 @@ import { import { cloneDeep, forEach } from 'lodash-es'; import { imagesApi } from 'services/api/endpoints/images'; import { components } from 'services/api/schema'; -import { isAnySessionRejected } from 'services/api/thunks/session'; import { ImageDTO } from 'services/api/types'; import { appSocketInvocationError } from 'services/events/actions'; import { controlNetImageProcessed } from './actions'; @@ -418,10 +417,6 @@ export const controlNetSlice = createSlice({ state.pendingControlImages = []; }); - builder.addMatcher(isAnySessionRejected, (state) => { - state.pendingControlImages = []; - }); - builder.addMatcher( imagesApi.endpoints.deleteImage.matchFulfilled, (state, action) => { diff --git a/invokeai/frontend/web/src/features/system/store/systemSlice.ts b/invokeai/frontend/web/src/features/system/store/systemSlice.ts index 7d31838afd..9a110f5f23 100644 --- a/invokeai/frontend/web/src/features/system/store/systemSlice.ts +++ b/invokeai/frontend/web/src/features/system/store/systemSlice.ts @@ -1,9 +1,8 @@ import { UseToastOptions } from '@chakra-ui/react'; import { PayloadAction, createSlice, isAnyOf } from '@reduxjs/toolkit'; import { t } from 
'i18next'; -import { get, startCase, truncate, upperFirst } from 'lodash-es'; +import { startCase } from 'lodash-es'; import { LogLevelName } from 'roarr'; -import { isAnySessionRejected } from 'services/api/thunks/session'; import { appSocketConnected, appSocketDisconnected, @@ -20,8 +19,7 @@ import { } from 'services/events/actions'; import { calculateStepPercentage } from '../util/calculateStepPercentage'; import { makeToast } from '../util/makeToast'; -import { SystemState, LANGUAGES } from './types'; -import { zPydanticValidationError } from './zodSchemas'; +import { LANGUAGES, SystemState } from './types'; export const initialSystemState: SystemState = { isInitialized: false, @@ -175,50 +173,6 @@ export const systemSlice = createSlice({ // *** Matchers - must be after all cases *** - /** - * Session Invoked - REJECTED - * Session Created - REJECTED - */ - builder.addMatcher(isAnySessionRejected, (state, action) => { - let errorDescription = undefined; - const duration = 5000; - - if (action.payload?.status === 422) { - const result = zPydanticValidationError.safeParse(action.payload); - if (result.success) { - result.data.error.detail.map((e) => { - state.toastQueue.push( - makeToast({ - title: truncate(upperFirst(e.msg), { length: 128 }), - status: 'error', - description: truncate( - `Path: - ${e.loc.join('.')}`, - { length: 128 } - ), - duration, - }) - ); - }); - return; - } - } else if (action.payload?.error) { - errorDescription = action.payload?.error; - } - - state.toastQueue.push( - makeToast({ - title: t('toast.serverError'), - status: 'error', - description: truncate( - get(errorDescription, 'detail', 'Unknown Error'), - { length: 128 } - ), - duration, - }) - ); - }); - /** * Any server error */ diff --git a/invokeai/frontend/web/src/features/system/store/zodSchemas.ts b/invokeai/frontend/web/src/features/system/store/zodSchemas.ts index 3a3b950019..9d66f5ae88 100644 --- a/invokeai/frontend/web/src/features/system/store/zodSchemas.ts +++ 
b/invokeai/frontend/web/src/features/system/store/zodSchemas.ts @@ -2,7 +2,7 @@ import { z } from 'zod'; export const zPydanticValidationError = z.object({ status: z.literal(422), - error: z.object({ + data: z.object({ detail: z.array( z.object({ loc: z.array(z.string()), diff --git a/invokeai/frontend/web/src/services/api/thunks/session.ts b/invokeai/frontend/web/src/services/api/thunks/session.ts deleted file mode 100644 index 837fd7a28e..0000000000 --- a/invokeai/frontend/web/src/services/api/thunks/session.ts +++ /dev/null @@ -1,184 +0,0 @@ -import { createAsyncThunk, isAnyOf } from '@reduxjs/toolkit'; -import { $queueId } from 'features/queue/store/queueNanoStore'; -import { isObject } from 'lodash-es'; -import { $client } from 'services/api/client'; -import { paths } from 'services/api/schema'; -import { O } from 'ts-toolbelt'; - -type CreateSessionArg = { - graph: NonNullable< - paths['/api/v1/sessions/']['post']['requestBody'] - >['content']['application/json']; -}; - -type CreateSessionResponse = O.Required< - NonNullable< - paths['/api/v1/sessions/']['post']['requestBody'] - >['content']['application/json'], - 'id' ->; - -type CreateSessionThunkConfig = { - rejectValue: { arg: CreateSessionArg; status: number; error: unknown }; -}; - -/** - * `SessionsService.createSession()` thunk - */ -export const sessionCreated = createAsyncThunk< - CreateSessionResponse, - CreateSessionArg, - CreateSessionThunkConfig ->('api/sessionCreated', async (arg, { rejectWithValue }) => { - const { graph } = arg; - const { POST } = $client.get(); - const { data, error, response } = await POST('/api/v1/sessions/', { - body: graph, - params: { query: { queue_id: $queueId.get() } }, - }); - - if (error) { - return rejectWithValue({ arg, status: response.status, error }); - } - - return data; -}); - -type InvokedSessionArg = { - session_id: paths['/api/v1/sessions/{session_id}/invoke']['put']['parameters']['path']['session_id']; -}; - -type InvokedSessionResponse = - 
paths['/api/v1/sessions/{session_id}/invoke']['put']['responses']['200']['content']['application/json']; - -type InvokedSessionThunkConfig = { - rejectValue: { - arg: InvokedSessionArg; - error: unknown; - status: number; - }; -}; - -const isErrorWithStatus = (error: unknown): error is { status: number } => - isObject(error) && 'status' in error; - -const isErrorWithDetail = (error: unknown): error is { detail: string } => - isObject(error) && 'detail' in error; - -/** - * `SessionsService.invokeSession()` thunk - */ -export const sessionInvoked = createAsyncThunk< - InvokedSessionResponse, - InvokedSessionArg, - InvokedSessionThunkConfig ->('api/sessionInvoked', async (arg, { rejectWithValue }) => { - const { session_id } = arg; - const { PUT } = $client.get(); - const { error, response } = await PUT( - '/api/v1/sessions/{session_id}/invoke', - { - params: { - query: { queue_id: $queueId.get(), all: true }, - path: { session_id }, - }, - } - ); - - if (error) { - if (isErrorWithStatus(error) && error.status === 403) { - return rejectWithValue({ - arg, - status: response.status, - // eslint-disable-next-line @typescript-eslint/no-explicit-any - error: (error as any).body.detail, - }); - } - if (isErrorWithDetail(error) && response.status === 403) { - return rejectWithValue({ - arg, - status: response.status, - error: error.detail, - }); - } - if (error) { - return rejectWithValue({ arg, status: response.status, error }); - } - } -}); - -type CancelSessionArg = - paths['/api/v1/sessions/{session_id}/invoke']['delete']['parameters']['path']; - -type CancelSessionResponse = - paths['/api/v1/sessions/{session_id}/invoke']['delete']['responses']['200']['content']['application/json']; - -type CancelSessionThunkConfig = { - rejectValue: { - arg: CancelSessionArg; - error: unknown; - }; -}; - -/** - * `SessionsService.cancelSession()` thunk - */ -export const sessionCanceled = createAsyncThunk< - CancelSessionResponse, - CancelSessionArg, - CancelSessionThunkConfig 
->('api/sessionCanceled', async (arg, { rejectWithValue }) => { - const { session_id } = arg; - const { DELETE } = $client.get(); - const { data, error } = await DELETE('/api/v1/sessions/{session_id}/invoke', { - params: { - path: { session_id }, - }, - }); - - if (error) { - return rejectWithValue({ arg, error }); - } - - return data; -}); - -type ListSessionsArg = { - params: paths['/api/v1/sessions/']['get']['parameters']; -}; - -type ListSessionsResponse = - paths['/api/v1/sessions/']['get']['responses']['200']['content']['application/json']; - -type ListSessionsThunkConfig = { - rejectValue: { - arg: ListSessionsArg; - error: unknown; - }; -}; - -/** - * `SessionsService.listSessions()` thunk - */ -export const listedSessions = createAsyncThunk< - ListSessionsResponse, - ListSessionsArg, - ListSessionsThunkConfig ->('api/listSessions', async (arg, { rejectWithValue }) => { - const { params } = arg; - const { GET } = $client.get(); - const { data, error } = await GET('/api/v1/sessions/', { - params, - }); - - if (error) { - return rejectWithValue({ arg, error }); - } - - return data; -}); - -export const isAnySessionRejected = isAnyOf( - sessionCreated.rejected, - sessionInvoked.rejected -); diff --git a/invokeai/frontend/web/src/theme/theme.ts b/invokeai/frontend/web/src/theme/theme.ts index 3b83ea2393..ae38aefca0 100644 --- a/invokeai/frontend/web/src/theme/theme.ts +++ b/invokeai/frontend/web/src/theme/theme.ts @@ -1,5 +1,4 @@ -import { ThemeOverride } from '@chakra-ui/react'; - +import { ThemeOverride, ToastProviderProps } from '@chakra-ui/react'; import { InvokeAIColors } from './colors/colors'; import { accordionTheme } from './components/accordion'; import { buttonTheme } from './components/button'; @@ -149,3 +148,7 @@ export const theme: ThemeOverride = { Tooltip: tooltipTheme, }, }; + +export const TOAST_OPTIONS: ToastProviderProps = { + defaultOptions: { isClosable: true }, +}; From a953944894d177987c148fbab70f5b8cb8154398 Mon Sep 17 00:00:00 2001 
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 01:54:35 +1000 Subject: [PATCH 14/37] feat(ui): updatable edges in workflow editor (#4701) - Drag the end of an edge away from its handle to disconnect it - Drop in empty space to delete the edge - Drop on valid handle to reconnect it - Update connection logic slightly to allow edge updates --- invokeai/frontend/web/public/locales/en.json | 1 + .../features/nodes/components/flow/Flow.tsx | 64 ++++++++++ .../nodes/hooks/useIsValidConnection.ts | 13 +- .../src/features/nodes/store/nodesSlice.ts | 113 ++++++++++-------- .../util/makeIsConnectionValidSelector.ts | 22 +++- 5 files changed, 158 insertions(+), 55 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index e9261692a2..6e783b0567 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -717,6 +717,7 @@ "cannotConnectInputToInput": "Cannot connect input to input", "cannotConnectOutputToOutput": "Cannot connect output to output", "cannotConnectToSelf": "Cannot connect to self", + "cannotDuplicateConnection": "Cannot create duplicate connections", "clipField": "Clip", "clipFieldDescription": "Tokenizer and text_encoder submodels.", "collection": "Collection", diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx index 57e5825fb9..e2ff7c5bb0 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx @@ -12,6 +12,7 @@ import { OnConnect, OnConnectEnd, OnConnectStart, + OnEdgeUpdateFunc, OnEdgesChange, OnEdgesDelete, OnInit, @@ -21,6 +22,7 @@ import { OnSelectionChangeFunc, ProOptions, ReactFlow, + ReactFlowProps, XYPosition, } from 'reactflow'; import { useIsValidConnection } from '../../hooks/useIsValidConnection'; @@ 
-28,6 +30,8 @@ import { connectionEnded, connectionMade, connectionStarted, + edgeAdded, + edgeDeleted, edgesChanged, edgesDeleted, nodesChanged, @@ -167,6 +171,63 @@ export const Flow = () => { } }, []); + // #region Updatable Edges + + /** + * Adapted from https://reactflow.dev/docs/examples/edges/updatable-edge/ + * and https://reactflow.dev/docs/examples/edges/delete-edge-on-drop/ + * + * - Edges can be dragged from one handle to another. + * - If the user drags the edge away from the node and drops it, delete the edge. + * - Do not delete the edge if the cursor didn't move (resolves annoying behaviour + * where the edge is deleted if you click it accidentally). + */ + + // We have a ref for cursor position, but it is the *projected* cursor position. + // Easiest to just keep track of the last mouse event for this particular feature + const edgeUpdateMouseEvent = useRef(); + + const onEdgeUpdateStart: NonNullable = + useCallback( + (e, edge, _handleType) => { + // update mouse event + edgeUpdateMouseEvent.current = e; + // always delete the edge when starting an updated + dispatch(edgeDeleted(edge.id)); + }, + [dispatch] + ); + + const onEdgeUpdate: OnEdgeUpdateFunc = useCallback( + (_oldEdge, newConnection) => { + // instead of updating the edge (we deleted it earlier), we instead create + // a new one. 
+ dispatch(connectionMade(newConnection)); + }, + [dispatch] + ); + + const onEdgeUpdateEnd: NonNullable = + useCallback( + (e, edge, _handleType) => { + // Handle the case where user begins a drag but didn't move the cursor - + // bc we deleted the edge, we need to add it back + if ( + // ignore touch events + !('touches' in e) && + edgeUpdateMouseEvent.current?.clientX === e.clientX && + edgeUpdateMouseEvent.current?.clientY === e.clientY + ) { + dispatch(edgeAdded(edge)); + } + // reset mouse event + edgeUpdateMouseEvent.current = undefined; + }, + [dispatch] + ); + + // #endregion + useHotkeys(['Ctrl+c', 'Meta+c'], (e) => { e.preventDefault(); dispatch(selectionCopied()); @@ -196,6 +257,9 @@ export const Flow = () => { onNodesChange={onNodesChange} onEdgesChange={onEdgesChange} onEdgesDelete={onEdgesDelete} + onEdgeUpdate={onEdgeUpdate} + onEdgeUpdateStart={onEdgeUpdateStart} + onEdgeUpdateEnd={onEdgeUpdateEnd} onNodesDelete={onNodesDelete} onConnectStart={onConnectStart} onConnect={onConnect} diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useIsValidConnection.ts b/invokeai/frontend/web/src/features/nodes/hooks/useIsValidConnection.ts index 0439445c24..a57787556c 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useIsValidConnection.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useIsValidConnection.ts @@ -53,13 +53,12 @@ export const useIsValidConnection = () => { } if ( - edges - .filter((edge) => { - return edge.target === target && edge.targetHandle === targetHandle; - }) - .find((edge) => { - edge.source === source && edge.sourceHandle === sourceHandle; - }) + edges.find((edge) => { + return edge.target === target && + edge.targetHandle === targetHandle && + edge.source === source && + edge.sourceHandle === sourceHandle; + }) ) { // We already have a connection from this source to this target return false; diff --git a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts 
b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts index 01de3de883..1b3a5ca929 100644 --- a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts +++ b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts @@ -15,6 +15,7 @@ import { NodeChange, OnConnectStartParams, SelectionMode, + updateEdge, Viewport, XYPosition, } from 'reactflow'; @@ -182,6 +183,16 @@ const nodesSlice = createSlice({ edgesChanged: (state, action: PayloadAction) => { state.edges = applyEdgeChanges(action.payload, state.edges); }, + edgeAdded: (state, action: PayloadAction) => { + state.edges = addEdge(action.payload, state.edges); + }, + edgeUpdated: ( + state, + action: PayloadAction<{ oldEdge: Edge; newConnection: Connection }> + ) => { + const { oldEdge, newConnection } = action.payload; + state.edges = updateEdge(oldEdge, newConnection, state.edges); + }, connectionStarted: (state, action: PayloadAction) => { state.connectionStartParams = action.payload; const { nodeId, handleId, handleType } = action.payload; @@ -366,6 +377,7 @@ const nodesSlice = createSlice({ target: edge.target, type: 'collapsed', data: { count: 1 }, + updatable: false, }); } } @@ -388,6 +400,7 @@ const nodesSlice = createSlice({ target: edge.target, type: 'collapsed', data: { count: 1 }, + updatable: false, }); } } @@ -400,6 +413,9 @@ const nodesSlice = createSlice({ } } }, + edgeDeleted: (state, action: PayloadAction) => { + state.edges = state.edges.filter((e) => e.id !== action.payload); + }, edgesDeleted: (state, action: PayloadAction) => { const edges = action.payload; const collapsedEdges = edges.filter((e) => e.type === 'collapsed'); @@ -890,69 +906,72 @@ const nodesSlice = createSlice({ }); export const { - nodesChanged, - edgesChanged, - nodeAdded, - nodesDeleted, + addNodePopoverClosed, + addNodePopoverOpened, + addNodePopoverToggled, + connectionEnded, connectionMade, connectionStarted, - connectionEnded, - shouldShowFieldTypeLegendChanged, - shouldShowMinimapPanelChanged, - 
nodeTemplatesBuilt, - nodeEditorReset, - imageCollectionFieldValueChanged, - fieldStringValueChanged, - fieldNumberValueChanged, + edgeDeleted, + edgesChanged, + edgesDeleted, + edgeUpdated, fieldBoardValueChanged, fieldBooleanValueChanged, - fieldImageValueChanged, fieldColorValueChanged, - fieldMainModelValueChanged, - fieldVaeModelValueChanged, - fieldLoRAModelValueChanged, - fieldEnumModelValueChanged, fieldControlNetModelValueChanged, + fieldEnumModelValueChanged, + fieldImageValueChanged, fieldIPAdapterModelValueChanged, + fieldLabelChanged, + fieldLoRAModelValueChanged, + fieldMainModelValueChanged, + fieldNumberValueChanged, fieldRefinerModelValueChanged, fieldSchedulerValueChanged, + fieldStringValueChanged, + fieldVaeModelValueChanged, + imageCollectionFieldValueChanged, + mouseOverFieldChanged, + mouseOverNodeChanged, + nodeAdded, + nodeEditorReset, + nodeEmbedWorkflowChanged, + nodeExclusivelySelected, + nodeIsIntermediateChanged, nodeIsOpenChanged, nodeLabelChanged, nodeNotesChanged, - edgesDeleted, - shouldValidateGraphChanged, - shouldAnimateEdgesChanged, nodeOpacityChanged, - shouldSnapToGridChanged, - shouldColorEdgesChanged, - selectedNodesChanged, - selectedEdgesChanged, - workflowNameChanged, - workflowDescriptionChanged, - workflowTagsChanged, - workflowAuthorChanged, - workflowNotesChanged, - workflowVersionChanged, - workflowContactChanged, - workflowLoaded, + nodesChanged, + nodesDeleted, + nodeTemplatesBuilt, + nodeUseCacheChanged, notesNodeValueChanged, + selectedAll, + selectedEdgesChanged, + selectedNodesChanged, + selectionCopied, + selectionModeChanged, + selectionPasted, + shouldAnimateEdgesChanged, + shouldColorEdgesChanged, + shouldShowFieldTypeLegendChanged, + shouldShowMinimapPanelChanged, + shouldSnapToGridChanged, + shouldValidateGraphChanged, + viewportChanged, + workflowAuthorChanged, + workflowContactChanged, + workflowDescriptionChanged, workflowExposedFieldAdded, workflowExposedFieldRemoved, - fieldLabelChanged, - 
viewportChanged, - mouseOverFieldChanged, - selectionCopied, - selectionPasted, - selectedAll, - addNodePopoverOpened, - addNodePopoverClosed, - addNodePopoverToggled, - selectionModeChanged, - nodeEmbedWorkflowChanged, - nodeIsIntermediateChanged, - mouseOverNodeChanged, - nodeExclusivelySelected, - nodeUseCacheChanged, + workflowLoaded, + workflowNameChanged, + workflowNotesChanged, + workflowTagsChanged, + workflowVersionChanged, + edgeAdded, } = nodesSlice.actions; export default nodesSlice.reducer; diff --git a/invokeai/frontend/web/src/features/nodes/store/util/makeIsConnectionValidSelector.ts b/invokeai/frontend/web/src/features/nodes/store/util/makeIsConnectionValidSelector.ts index 1be2d579d8..6343240a88 100644 --- a/invokeai/frontend/web/src/features/nodes/store/util/makeIsConnectionValidSelector.ts +++ b/invokeai/frontend/web/src/features/nodes/store/util/makeIsConnectionValidSelector.ts @@ -55,9 +55,29 @@ export const makeConnectionErrorSelector = ( return i18n.t('nodes.cannotConnectInputToInput'); } + // we have to figure out which is the target and which is the source + const target = handleType === 'target' ? nodeId : connectionNodeId; + const targetHandle = + handleType === 'target' ? fieldName : connectionFieldName; + const source = handleType === 'source' ? nodeId : connectionNodeId; + const sourceHandle = + handleType === 'source' ? 
fieldName : connectionFieldName; + if ( edges.find((edge) => { - return edge.target === nodeId && edge.targetHandle === fieldName; + edge.target === target && + edge.targetHandle === targetHandle && + edge.source === source && + edge.sourceHandle === sourceHandle; + }) + ) { + // We already have a connection from this source to this target + return i18n.t('nodes.cannotDuplicateConnection'); + } + + if ( + edges.find((edge) => { + return edge.target === target && edge.targetHandle === targetHandle; }) && // except CollectionItem inputs can have multiples targetType !== 'CollectionItem' From 1419977e8943ce56341f7ee5799253da06732048 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 26 Sep 2023 18:39:54 +1000 Subject: [PATCH 15/37] feat(ui): update cache status on queue event It was polling every 5s before. No need - just invalidate the tag when we have a queue item status change event. --- .../socketio/socketQueueItemStatusChanged.ts | 1 + .../queue/components/InvocationCacheStatus.tsx | 13 +------------ 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged.ts index b0377e950b..4af35dbe9c 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged.ts @@ -35,6 +35,7 @@ export const addSocketQueueItemStatusChangedEventListener = () => { queueApi.util.invalidateTags([ 'CurrentSessionQueueItem', 'NextSessionQueueItem', + 'InvocationCacheStatus', { type: 'SessionQueueItem', id: item_id }, { type: 'SessionQueueItemDTO', id: item_id }, { type: 'BatchStatus', id: queue_batch_id }, diff --git 
a/invokeai/frontend/web/src/features/queue/components/InvocationCacheStatus.tsx b/invokeai/frontend/web/src/features/queue/components/InvocationCacheStatus.tsx index 423ab09376..1720f81285 100644 --- a/invokeai/frontend/web/src/features/queue/components/InvocationCacheStatus.tsx +++ b/invokeai/frontend/web/src/features/queue/components/InvocationCacheStatus.tsx @@ -1,9 +1,7 @@ import { ButtonGroup } from '@chakra-ui/react'; -import { useAppSelector } from 'app/store/storeHooks'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; import { useGetInvocationCacheStatusQuery } from 'services/api/endpoints/appInfo'; -import { useGetQueueStatusQuery } from 'services/api/endpoints/queue'; import ClearInvocationCacheButton from './ClearInvocationCacheButton'; import ToggleInvocationCacheButton from './ToggleInvocationCacheButton'; import StatusStatGroup from './common/StatusStatGroup'; @@ -11,16 +9,7 @@ import StatusStatItem from './common/StatusStatItem'; const InvocationCacheStatus = () => { const { t } = useTranslation(); - const isConnected = useAppSelector((state) => state.system.isConnected); - const { data: queueStatus } = useGetQueueStatusQuery(undefined); - const { data: cacheStatus } = useGetInvocationCacheStatusQuery(undefined, { - pollingInterval: - isConnected && - queueStatus?.processor.is_started && - queueStatus?.queue.pending > 0 - ? 
5000 - : 0, - }); + const { data: cacheStatus } = useGetInvocationCacheStatusQuery(undefined); return ( From 407bca50630b1ab83aa0eb4f1282c4c2b6d06e13 Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Wed, 27 Sep 2023 10:10:09 +1000 Subject: [PATCH 16/37] fix merges --- docs/nodes/NODES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/nodes/NODES.md b/docs/nodes/NODES.md index fdb522eca1..f6496c09bb 100644 --- a/docs/nodes/NODES.md +++ b/docs/nodes/NODES.md @@ -9,7 +9,7 @@ If you're not familiar with Diffusion, take a look at our [Diffusion Overview.]( ### Linear View The Workflow Editor allows you to create a UI for your workflow, to make it easier to iterate on your generations. -To add an input to the Linear UI, right click on the input and select "Add to Linear View". +To add an input to the Linear UI, right click on the input label and select "Add to Linear View". The Linear UI View will also be part of the saved workflow, allowing you share workflows and enable other to use them, regardless of complexity. From 0cfc1c5f86573b9af1ff62d8750a8bed6dd496bc Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 25 Sep 2023 23:15:09 +1000 Subject: [PATCH 17/37] fix(ui): save cache setting to workflow Do not strip out unknown values. Quick fix, probably not the best way to handle this. 
--- .../web/src/features/nodes/types/types.ts | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index fc8fe10ccc..22e5d78634 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -1210,20 +1210,22 @@ export const zParsedSemver = zSemVer.transform((val) => { export type SemVer = z.infer; -export const zInvocationNodeData = z.object({ - id: z.string().trim().min(1), - // no easy way to build this dynamically, and we don't want to anyways, because this will be used - // to validate incoming workflows, and we want to allow community nodes. - type: z.string().trim().min(1), - inputs: z.record(zInputFieldValue), - outputs: z.record(zOutputFieldValue), - label: z.string(), - isOpen: z.boolean(), - notes: z.string(), - embedWorkflow: z.boolean(), - isIntermediate: z.boolean(), - version: zSemVer.optional(), -}); +export const zInvocationNodeData = z + .object({ + id: z.string().trim().min(1), + // no easy way to build this dynamically, and we don't want to anyways, because this will be used + // to validate incoming workflows, and we want to allow community nodes. 
+ type: z.string().trim().min(1), + inputs: z.record(zInputFieldValue), + outputs: z.record(zOutputFieldValue), + label: z.string(), + isOpen: z.boolean(), + notes: z.string(), + embedWorkflow: z.boolean(), + isIntermediate: z.boolean(), + version: zSemVer.optional(), + }) + .passthrough(); export const zInvocationNodeDataV2 = z.preprocess( (arg) => { From 50d254fdb78c4ae1c1a24429ce5fc1e9b0a040ff Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 26 Sep 2023 10:29:54 +1000 Subject: [PATCH 18/37] fix(ui): fix types for cache setting --- .../web/src/features/nodes/types/types.ts | 31 +++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index 22e5d78634..0033e462cb 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -1210,22 +1210,21 @@ export const zParsedSemver = zSemVer.transform((val) => { export type SemVer = z.infer; -export const zInvocationNodeData = z - .object({ - id: z.string().trim().min(1), - // no easy way to build this dynamically, and we don't want to anyways, because this will be used - // to validate incoming workflows, and we want to allow community nodes. - type: z.string().trim().min(1), - inputs: z.record(zInputFieldValue), - outputs: z.record(zOutputFieldValue), - label: z.string(), - isOpen: z.boolean(), - notes: z.string(), - embedWorkflow: z.boolean(), - isIntermediate: z.boolean(), - version: zSemVer.optional(), - }) - .passthrough(); +export const zInvocationNodeData = z.object({ + id: z.string().trim().min(1), + // no easy way to build this dynamically, and we don't want to anyways, because this will be used + // to validate incoming workflows, and we want to allow community nodes. 
+ type: z.string().trim().min(1), + inputs: z.record(zInputFieldValue), + outputs: z.record(zOutputFieldValue), + label: z.string(), + isOpen: z.boolean(), + notes: z.string(), + embedWorkflow: z.boolean(), + isIntermediate: z.boolean(), + useCache: z.boolean().optional(), + version: zSemVer.optional(), +}); export const zInvocationNodeDataV2 = z.preprocess( (arg) => { From fc9a7320eb96bc26aa3447a490a76038651bc2b2 Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Wed, 27 Sep 2023 12:21:20 +1000 Subject: [PATCH 19/37] Update to be more accurate --- docs/features/CONTROLNET.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md index f079e0fc9d..b4575653bd 100644 --- a/docs/features/CONTROLNET.md +++ b/docs/features/CONTROLNET.md @@ -139,14 +139,15 @@ Additionally, each ControlNet section can be expanded in order to manipulate set [IP-Adapter](https://ip-adapter.github.io) is a tooling that allows for image prompt capabilities with text-to-image diffusion models. IP-Adapter works by analyzing the given image prompt to extract features, then passing those features to the UNet along with any other conditioning provided. ![IP-Adapter + T2I](https://github.com/tencent-ailab/IP-Adapter/raw/main/assets/demo/ip_adpter_plus_multi.jpg) + ![IP-Adapter + IMG2IMG](https://github.com/tencent-ailab/IP-Adapter/blob/main/assets/demo/image-to-image.jpg) #### Installation There are several ways to install IP-Adapter models with an existing InvokeAI installation: -1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [5] to download models -2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. -3. 
Manually downloading the models files and placed in the `models/ip-adapter` folder of the Invoke root directory. **Note:** The image_encoder folder is necessary for IP-Adapter to function. +1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [5] to download models. +2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models. +3. Manually downloading the IP-Adapter and Image Encoder files and placing them in the `ip-adapter` folder of relevant base model folder of Invoke root directory. For example, for the SDXL IP-Adapter, files should be added to the `model/sdxl/ip_adapter/` folder. 
#### Using IP-Adapter From acee4bd2828e0c5248541cca28bd5d9b02bf008a Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Wed, 27 Sep 2023 04:03:14 +0530 Subject: [PATCH 20/37] fix: Always use bbox bounds for Controlnet Image (canvas) --- .../listeners/canvasImageToControlNet.ts | 4 ++-- .../features/canvas/util/getBaseLayerBlob.ts | 24 +++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts index 835b8246f1..7c312b78a4 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts @@ -3,9 +3,9 @@ import { canvasImageToControlNet } from 'features/canvas/store/actions'; import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob'; import { controlNetImageChanged } from 'features/controlNet/store/controlNetSlice'; import { addToast } from 'features/system/store/systemSlice'; +import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; import { startAppListening } from '..'; -import { t } from 'i18next'; export const addCanvasImageToControlNetListener = () => { startAppListening({ @@ -16,7 +16,7 @@ export const addCanvasImageToControlNetListener = () => { let blob; try { - blob = await getBaseLayerBlob(state); + blob = await getBaseLayerBlob(state, true); } catch (err) { log.error(String(err)); dispatch( diff --git a/invokeai/frontend/web/src/features/canvas/util/getBaseLayerBlob.ts b/invokeai/frontend/web/src/features/canvas/util/getBaseLayerBlob.ts index 3667acc79b..b67789e07e 100644 --- a/invokeai/frontend/web/src/features/canvas/util/getBaseLayerBlob.ts +++ 
b/invokeai/frontend/web/src/features/canvas/util/getBaseLayerBlob.ts @@ -1,11 +1,14 @@ -import { getCanvasBaseLayer } from './konvaInstanceProvider'; import { RootState } from 'app/store/store'; +import { getCanvasBaseLayer } from './konvaInstanceProvider'; import { konvaNodeToBlob } from './konvaNodeToBlob'; /** * Get the canvas base layer blob, with or without bounding box according to `shouldCropToBoundingBoxOnSave` */ -export const getBaseLayerBlob = async (state: RootState) => { +export const getBaseLayerBlob = async ( + state: RootState, + alwaysUseBoundingBox: boolean = false +) => { const canvasBaseLayer = getCanvasBaseLayer(); if (!canvasBaseLayer) { @@ -24,14 +27,15 @@ export const getBaseLayerBlob = async (state: RootState) => { const absPos = clonedBaseLayer.getAbsolutePosition(); - const boundingBox = shouldCropToBoundingBoxOnSave - ? { - x: boundingBoxCoordinates.x + absPos.x, - y: boundingBoxCoordinates.y + absPos.y, - width: boundingBoxDimensions.width, - height: boundingBoxDimensions.height, - } - : clonedBaseLayer.getClientRect(); + const boundingBox = + shouldCropToBoundingBoxOnSave || alwaysUseBoundingBox + ? 
{ + x: boundingBoxCoordinates.x + absPos.x, + y: boundingBoxCoordinates.y + absPos.y, + width: boundingBoxDimensions.width, + height: boundingBoxDimensions.height, + } + : clonedBaseLayer.getClientRect(); return konvaNodeToBlob(clonedBaseLayer, boundingBox); }; From b3f4f28d763c16186d90d8a2d655e989f0652459 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Wed, 27 Sep 2023 05:50:43 +0530 Subject: [PATCH 21/37] fix: Canvas pull getting cropped for Control Images --- .../listenerMiddleware/listeners/canvasImageToControlNet.ts | 4 ++-- .../listenerMiddleware/listeners/canvasMaskToControlNet.ts | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts index 7c312b78a4..9389b0f373 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts @@ -36,10 +36,10 @@ export const addCanvasImageToControlNetListener = () => { file: new File([blob], 'savedCanvas.png', { type: 'image/png', }), - image_category: 'mask', + image_category: 'control', is_intermediate: false, board_id: autoAddBoardId === 'none' ? 
undefined : autoAddBoardId, - crop_visible: true, + crop_visible: false, postUploadAction: { type: 'TOAST', toastOptions: { title: t('toast.canvasSentControlnetAssets') }, diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts index 671c7f63e4..2c5c26e830 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts @@ -3,9 +3,9 @@ import { canvasMaskToControlNet } from 'features/canvas/store/actions'; import { getCanvasData } from 'features/canvas/util/getCanvasData'; import { controlNetImageChanged } from 'features/controlNet/store/controlNetSlice'; import { addToast } from 'features/system/store/systemSlice'; +import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; import { startAppListening } from '..'; -import { t } from 'i18next'; export const addCanvasMaskToControlNetListener = () => { startAppListening({ @@ -50,7 +50,7 @@ export const addCanvasMaskToControlNetListener = () => { image_category: 'mask', is_intermediate: false, board_id: autoAddBoardId === 'none' ? 
undefined : autoAddBoardId, - crop_visible: true, + crop_visible: false, postUploadAction: { type: 'TOAST', toastOptions: { title: t('toast.maskSentControlnetAssets') }, From 98a076260b1f1721a82d2fa16b84591aaa1d9e0f Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 12:45:23 +1000 Subject: [PATCH 22/37] fix(ui): only disable cancel item button if value is null/undefined 0 is falsy and the `item_id` is an integer --- .../web/src/features/queue/hooks/useCancelCurrentQueueItem.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/queue/hooks/useCancelCurrentQueueItem.ts b/invokeai/frontend/web/src/features/queue/hooks/useCancelCurrentQueueItem.ts index 308695dd67..1b07221a74 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useCancelCurrentQueueItem.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useCancelCurrentQueueItem.ts @@ -1,5 +1,6 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { addToast } from 'features/system/store/systemSlice'; +import { isNil } from 'lodash-es'; import { useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { @@ -40,7 +41,7 @@ export const useCancelCurrentQueueItem = () => { }, [currentQueueItemId, dispatch, t, trigger]); const isDisabled = useMemo( - () => !isConnected || !currentQueueItemId, + () => !isConnected || isNil(currentQueueItemId), [isConnected, currentQueueItemId] ); From 8b969053e78ff64892a071759656fa2fc4893afa Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Wed, 27 Sep 2023 04:14:32 +0530 Subject: [PATCH 23/37] fix: SDXL Refiner using the incorrect node during inpainting --- .../graphBuilders/addSDXLRefinerToGraph.ts | 52 ++++++++++++++----- .../buildCanvasSDXLInpaintGraph.ts | 3 +- 2 files changed, 40 insertions(+), 15 deletions(-) diff --git 
a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index 6bd44db197..a6ee6a091d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -32,7 +32,8 @@ export const addSDXLRefinerToGraph = ( graph: NonNullableGraph, baseNodeId: string, modelLoaderNodeId?: string, - canvasInitImage?: ImageDTO + canvasInitImage?: ImageDTO, + canvasMaskImage?: ImageDTO ): void => { const { refinerModel, @@ -257,8 +258,30 @@ export const addSDXLRefinerToGraph = ( }; } - graph.edges.push( - { + if (graph.id === SDXL_CANVAS_INPAINT_GRAPH) { + if (isUsingScaledDimensions) { + graph.edges.push({ + source: { + node_id: MASK_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: SDXL_REFINER_INPAINT_CREATE_MASK, + field: 'mask', + }, + }); + } else { + graph.nodes[SDXL_REFINER_INPAINT_CREATE_MASK] = { + ...(graph.nodes[ + SDXL_REFINER_INPAINT_CREATE_MASK + ] as CreateDenoiseMaskInvocation), + mask: canvasMaskImage, + }; + } + } + + if (graph.id === SDXL_CANVAS_OUTPAINT_GRAPH) { + graph.edges.push({ source: { node_id: isUsingScaledDimensions ? 
MASK_RESIZE_UP : MASK_COMBINE, field: 'image', @@ -267,18 +290,19 @@ export const addSDXLRefinerToGraph = ( node_id: SDXL_REFINER_INPAINT_CREATE_MASK, field: 'mask', }, + }); + } + + graph.edges.push({ + source: { + node_id: SDXL_REFINER_INPAINT_CREATE_MASK, + field: 'denoise_mask', }, - { - source: { - node_id: SDXL_REFINER_INPAINT_CREATE_MASK, - field: 'denoise_mask', - }, - destination: { - node_id: SDXL_REFINER_DENOISE_LATENTS, - field: 'denoise_mask', - }, - } - ); + destination: { + node_id: SDXL_REFINER_DENOISE_LATENTS, + field: 'denoise_mask', + }, + }); } if ( diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 389d510ac7..a245953c8e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -663,7 +663,8 @@ export const buildCanvasSDXLInpaintGraph = ( graph, CANVAS_COHERENCE_DENOISE_LATENTS, modelLoaderNodeId, - canvasInitImage + canvasInitImage, + canvasMaskImage ); if (seamlessXAxis || seamlessYAxis) { modelLoaderNodeId = SDXL_REFINER_SEAMLESS; From 53eb23b8b66a891cdabec402e9214245c5497701 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 17:07:37 +1000 Subject: [PATCH 24/37] fix(ui): fix canvas staging images offset from bounding box The staging area used the stage bbox, not the staging area bbox. 
--- .../canvas/components/IAICanvasStagingArea.tsx | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingArea.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingArea.tsx index fa73f020da..4585ab76af 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingArea.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingArea.tsx @@ -3,10 +3,9 @@ import { useAppSelector } from 'app/store/storeHooks'; import { canvasSelector } from 'features/canvas/store/canvasSelectors'; import { GroupConfig } from 'konva/lib/Group'; import { isEqual } from 'lodash-es'; - +import { memo } from 'react'; import { Group, Rect } from 'react-konva'; import IAICanvasImage from './IAICanvasImage'; -import { memo } from 'react'; const selector = createSelector( [canvasSelector], @@ -15,11 +14,11 @@ const selector = createSelector( layerState, shouldShowStagingImage, shouldShowStagingOutline, - boundingBoxCoordinates: { x, y }, - boundingBoxDimensions: { width, height }, + boundingBoxCoordinates: stageBoundingBoxCoordinates, + boundingBoxDimensions: stageBoundingBoxDimensions, } = canvas; - const { selectedImageIndex, images } = layerState.stagingArea; + const { selectedImageIndex, images, boundingBox } = layerState.stagingArea; return { currentStagingAreaImage: @@ -30,10 +29,10 @@ const selector = createSelector( isOnLastImage: selectedImageIndex === images.length - 1, shouldShowStagingImage, shouldShowStagingOutline, - x, - y, - width, - height, + x: boundingBox?.x ?? stageBoundingBoxCoordinates.x, + y: boundingBox?.y ?? stageBoundingBoxCoordinates.y, + width: boundingBox?.width ?? stageBoundingBoxDimensions.width, + height: boundingBox?.height ?? 
stageBoundingBoxDimensions.height, }; }, { From ba4aaea45b438019f810d36b58d31a756e4c592f Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 17:08:14 +1000 Subject: [PATCH 25/37] fix(ui): memoize event handlers on bounding box --- .../IAICanvasToolbar/IAICanvasBoundingBox.tsx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasBoundingBox.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasBoundingBox.tsx index 0f94b1c57a..8f86605726 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasBoundingBox.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasBoundingBox.tsx @@ -213,45 +213,45 @@ const IAICanvasBoundingBox = (props: IAICanvasBoundingBoxPreviewProps) => { [scaledStep] ); - const handleStartedTransforming = () => { + const handleStartedTransforming = useCallback(() => { dispatch(setIsTransformingBoundingBox(true)); - }; + }, [dispatch]); - const handleEndedTransforming = () => { + const handleEndedTransforming = useCallback(() => { dispatch(setIsTransformingBoundingBox(false)); dispatch(setIsMovingBoundingBox(false)); dispatch(setIsMouseOverBoundingBox(false)); setIsMouseOverBoundingBoxOutline(false); - }; + }, [dispatch]); - const handleStartedMoving = () => { + const handleStartedMoving = useCallback(() => { dispatch(setIsMovingBoundingBox(true)); - }; + }, [dispatch]); - const handleEndedModifying = () => { + const handleEndedModifying = useCallback(() => { dispatch(setIsTransformingBoundingBox(false)); dispatch(setIsMovingBoundingBox(false)); dispatch(setIsMouseOverBoundingBox(false)); setIsMouseOverBoundingBoxOutline(false); - }; + }, [dispatch]); - const handleMouseOver = () => { + const handleMouseOver = useCallback(() => { setIsMouseOverBoundingBoxOutline(true); - 
}; + }, []); - const handleMouseOut = () => { + const handleMouseOut = useCallback(() => { !isTransformingBoundingBox && !isMovingBoundingBox && setIsMouseOverBoundingBoxOutline(false); - }; + }, [isMovingBoundingBox, isTransformingBoundingBox]); - const handleMouseEnterBoundingBox = () => { + const handleMouseEnterBoundingBox = useCallback(() => { dispatch(setIsMouseOverBoundingBox(true)); - }; + }, [dispatch]); - const handleMouseLeaveBoundingBox = () => { + const handleMouseLeaveBoundingBox = useCallback(() => { dispatch(setIsMouseOverBoundingBox(false)); - }; + }, [dispatch]); return ( From d467e138a4e04732d7409b6e36a3f449307922c7 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 17:08:41 +1000 Subject: [PATCH 26/37] fix(ui): canvas is staging if is listening for batch ids --- .../frontend/web/src/features/canvas/store/canvasSelectors.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts index 46bf7db3d0..8f1e246aaa 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts @@ -6,7 +6,7 @@ export const canvasSelector = (state: RootState): CanvasState => state.canvas; export const isStagingSelector = createSelector( [stateSelector], - ({ canvas }) => canvas.layerState.stagingArea.images.length > 0 + ({ canvas }) => canvas.batchIds.length > 0 ); export const initialCanvasImageSelector = ( From 03e463dc897ba46385bd46b5b75fd28d222fba93 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 17:11:07 +1000 Subject: [PATCH 27/37] fix(ui): reset canvas batchIds on staging area init/discard/commit This prevents the bbox from being used inadvertantly during canvas generation --- 
.../src/features/canvas/store/canvasSlice.ts | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts index 754bca2dbc..0016ae6599 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts @@ -186,7 +186,7 @@ export const canvasSlice = createSlice({ state.pastLayerStates.push(cloneDeep(state.layerState)); state.layerState = { - ...initialLayerState, + ...cloneDeep(initialLayerState), objects: [ { kind: 'image', @@ -200,6 +200,7 @@ export const canvasSlice = createSlice({ ], }; state.futureLayerStates = []; + state.batchIds = []; const newScale = calculateScale( stageDimensions.width, @@ -349,11 +350,14 @@ export const canvasSlice = createSlice({ state.pastLayerStates.shift(); } - state.layerState.stagingArea = { ...initialLayerState.stagingArea }; + state.layerState.stagingArea = cloneDeep( + cloneDeep(initialLayerState) + ).stagingArea; state.futureLayerStates = []; state.shouldShowStagingOutline = true; - state.shouldShowStagingOutline = true; + state.shouldShowStagingImage = true; + state.batchIds = []; }, addFillRect: (state) => { const { boundingBoxCoordinates, boundingBoxDimensions, brushColor } = @@ -490,8 +494,9 @@ export const canvasSlice = createSlice({ resetCanvas: (state) => { state.pastLayerStates.push(cloneDeep(state.layerState)); - state.layerState = initialLayerState; + state.layerState = cloneDeep(initialLayerState); state.futureLayerStates = []; + state.batchIds = []; }, canvasResized: ( state, @@ -656,13 +661,12 @@ export const canvasSlice = createSlice({ ...imageToCommit, }); } - state.layerState.stagingArea = { - ...initialLayerState.stagingArea, - }; + state.layerState.stagingArea = cloneDeep(initialLayerState).stagingArea; state.futureLayerStates = []; state.shouldShowStagingOutline = true; 
state.shouldShowStagingImage = true; + state.batchIds = []; }, fitBoundingBoxToStage: (state) => { const { From a35087ee6e9a418a46db4bf4a5851203b0fd509c Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 17:11:58 +1000 Subject: [PATCH 28/37] feat(ui): hide mask when staging Now you can compare inpainted area with new image data --- .../web/src/features/canvas/components/IAICanvas.tsx | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx index e2f20f99a2..9de50e686c 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx @@ -223,7 +223,11 @@ const IAICanvas = () => { > - + From fbccce7573d3d44c9e4c0e102495588b5b6808ce Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 17:12:57 +1000 Subject: [PATCH 29/37] feat(ui): staging area toolbar enhancements - Current image number & total are displayed - Left/right wrap around instead of stopping on first/last image - Disable the left/right/number buttons when showing base layer - improved translations --- invokeai/frontend/web/public/locales/en.json | 2 + .../IAICanvasStagingAreaToolbar.tsx | 124 +++++++++--------- .../src/features/canvas/store/canvasSlice.ts | 19 ++- 3 files changed, 74 insertions(+), 71 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 6e783b0567..fc9dd0cc5f 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -1444,6 +1444,8 @@ "showCanvasDebugInfo": "Show Additional Canvas Info", "showGrid": "Show Grid", "showHide": "Show/Hide", + "showResultsOn": "Show Results (On)", + "showResultsOff": 
"Show Results (Off)", "showIntermediates": "Show Intermediates", "snapToGrid": "Snap to Grid", "undo": "Undo" diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx index 3e617f8767..17e76f84b4 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx @@ -14,6 +14,7 @@ import { import { skipToken } from '@reduxjs/toolkit/dist/query'; import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; +import IAIButton from 'common/components/IAIButton'; import { memo, useCallback } from 'react'; import { useHotkeys } from 'react-hotkeys-hook'; import { useTranslation } from 'react-i18next'; @@ -23,8 +24,8 @@ import { FaCheck, FaEye, FaEyeSlash, - FaPlus, FaSave, + FaTimes, } from 'react-icons/fa'; import { useGetImageDTOQuery } from 'services/api/endpoints/images'; import { stagingAreaImageSaved } from '../store/actions'; @@ -41,10 +42,10 @@ const selector = createSelector( } = canvas; return { + currentIndex: selectedImageIndex, + total: images.length, currentStagingAreaImage: images.length > 0 ? 
images[selectedImageIndex] : undefined, - isOnFirstImage: selectedImageIndex === 0, - isOnLastImage: selectedImageIndex === images.length - 1, shouldShowStagingImage, shouldShowStagingOutline, }; @@ -55,10 +56,10 @@ const selector = createSelector( const IAICanvasStagingAreaToolbar = () => { const dispatch = useAppDispatch(); const { - isOnFirstImage, - isOnLastImage, currentStagingAreaImage, shouldShowStagingImage, + currentIndex, + total, } = useAppSelector(selector); const { t } = useTranslation(); @@ -71,39 +72,6 @@ const IAICanvasStagingAreaToolbar = () => { dispatch(setShouldShowStagingOutline(false)); }, [dispatch]); - useHotkeys( - ['left'], - () => { - handlePrevImage(); - }, - { - enabled: () => true, - preventDefault: true, - } - ); - - useHotkeys( - ['right'], - () => { - handleNextImage(); - }, - { - enabled: () => true, - preventDefault: true, - } - ); - - useHotkeys( - ['enter'], - () => { - handleAccept(); - }, - { - enabled: () => true, - preventDefault: true, - } - ); - const handlePrevImage = useCallback( () => dispatch(prevStagingAreaImage()), [dispatch] @@ -119,10 +87,45 @@ const IAICanvasStagingAreaToolbar = () => { [dispatch] ); + useHotkeys(['left'], handlePrevImage, { + enabled: () => true, + preventDefault: true, + }); + + useHotkeys(['right'], handleNextImage, { + enabled: () => true, + preventDefault: true, + }); + + useHotkeys(['enter'], () => handleAccept, { + enabled: () => true, + preventDefault: true, + }); + const { data: imageDTO } = useGetImageDTOQuery( currentStagingAreaImage?.imageName ?? 
skipToken ); + const handleToggleShouldShowStagingImage = useCallback(() => { + dispatch(setShouldShowStagingImage(!shouldShowStagingImage)); + }, [dispatch, shouldShowStagingImage]); + + const handleSaveToGallery = useCallback(() => { + if (!imageDTO) { + return; + } + + dispatch( + stagingAreaImageSaved({ + imageDTO, + }) + ); + }, [dispatch, imageDTO]); + + const handleDiscardStagingArea = useCallback(() => { + dispatch(discardStagedImages()); + }, [dispatch]); + if (!currentStagingAreaImage) { return null; } @@ -134,8 +137,8 @@ const IAICanvasStagingAreaToolbar = () => { w="100%" align="center" justify="center" - onMouseOver={handleMouseOver} - onMouseOut={handleMouseOut} + onMouseEnter={handleMouseOver} + onMouseLeave={handleMouseOut} > { icon={} onClick={handlePrevImage} colorScheme="accent" - isDisabled={isOnFirstImage} + isDisabled={!shouldShowStagingImage} /> + {`${currentIndex + 1}/${total}`} } onClick={handleNextImage} colorScheme="accent" - isDisabled={isOnLastImage} + isDisabled={!shouldShowStagingImage} /> { colorScheme="accent" /> : } - onClick={() => - dispatch(setShouldShowStagingImage(!shouldShowStagingImage)) - } + onClick={handleToggleShouldShowStagingImage} colorScheme="accent" /> { aria-label={t('unifiedCanvas.saveToGallery')} isDisabled={!imageDTO || !imageDTO.is_intermediate} icon={} - onClick={() => { - if (!imageDTO) { - return; - } - - dispatch( - stagingAreaImageSaved({ - imageDTO, - }) - ); - }} + onClick={handleSaveToGallery} colorScheme="accent" /> } - onClick={() => dispatch(discardStagedImages())} + icon={} + onClick={handleDiscardStagingArea} colorScheme="error" fontSize={20} /> diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts index 0016ae6599..df601e9e67 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts @@ -621,25 +621,22 @@ export const 
canvasSlice = createSlice({ return; } - const currentIndex = state.layerState.stagingArea.selectedImageIndex; - const length = state.layerState.stagingArea.images.length; + const nextIndex = state.layerState.stagingArea.selectedImageIndex + 1; + const lastIndex = state.layerState.stagingArea.images.length - 1; - state.layerState.stagingArea.selectedImageIndex = Math.min( - currentIndex + 1, - length - 1 - ); + state.layerState.stagingArea.selectedImageIndex = + nextIndex > lastIndex ? 0 : nextIndex; }, prevStagingAreaImage: (state) => { if (!state.layerState.stagingArea.images.length) { return; } - const currentIndex = state.layerState.stagingArea.selectedImageIndex; + const prevIndex = state.layerState.stagingArea.selectedImageIndex - 1; + const lastIndex = state.layerState.stagingArea.images.length - 1; - state.layerState.stagingArea.selectedImageIndex = Math.max( - currentIndex - 1, - 0 - ); + state.layerState.stagingArea.selectedImageIndex = + prevIndex < 0 ? lastIndex : prevIndex; }, commitStagingAreaImage: (state) => { if (!state.layerState.stagingArea.images.length) { From aa2f68f608f28380de2430a67a2137f9ded3236a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 17:17:59 +1000 Subject: [PATCH 30/37] fix(ui): use theme colors for canvas error fallback --- .../features/canvas/components/IAICanvasImageErrorFallback.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasImageErrorFallback.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasImageErrorFallback.tsx index b61cf547cc..38322daafa 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasImageErrorFallback.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasImageErrorFallback.tsx @@ -11,7 +11,7 @@ const IAICanvasImageErrorFallback = ({ canvasImage, }: IAICanvasImageErrorFallbackProps) => { const 
[errorColorLight, errorColorDark, fontColorLight, fontColorDark] = - useToken('colors', ['gray.400', 'gray.500', 'base.700', 'base.900']); + useToken('colors', ['base.400', 'base.500', 'base.700', 'base.900']); const errorColor = useColorModeValue(errorColorLight, errorColorDark); const fontColor = useColorModeValue(fontColorLight, fontColorDark); const { t } = useTranslation(); From bb48617101fc8824b76328d03972f93ca96d583e Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 17:19:05 +1000 Subject: [PATCH 31/37] fix(ui): memoize canvas context menu callback --- .../web/src/features/canvas/components/IAICanvas.tsx | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx index 9de50e686c..360d764a6e 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx @@ -139,6 +139,11 @@ const IAICanvas = () => { const { handleDragStart, handleDragMove, handleDragEnd } = useCanvasDragMove(); + const handleContextMenu = useCallback( + (e: KonvaEventObject) => e.evt.preventDefault(), + [] + ); + useEffect(() => { if (!containerRef.current) { return; @@ -205,9 +210,7 @@ const IAICanvas = () => { onDragStart={handleDragStart} onDragMove={handleDragMove} onDragEnd={handleDragEnd} - onContextMenu={(e: KonvaEventObject) => - e.evt.preventDefault() - } + onContextMenu={handleContextMenu} onWheel={handleWheel} draggable={(tool === 'move' || isStaging) && !isModifyingBoundingBox} > From 05a43c41f9d75f0176a887985b50ae92d2d37eac Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Wed, 27 Sep 2023 13:07:12 +0530 Subject: [PATCH 32/37] feat: Improve Staging Toolbar Styling --- .../canvas/components/IAICanvasStagingAreaToolbar.tsx 
| 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx index 17e76f84b4..8bb45840d0 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx @@ -134,6 +134,7 @@ const IAICanvasStagingAreaToolbar = () => { { colorScheme="accent" pointerEvents="none" isDisabled={!shouldShowStagingImage} + sx={{ + background: 'base.600', + _dark: { + background: 'base.800', + }, + }} >{`${currentIndex + 1}/${total}`} { colorScheme="accent" isDisabled={!shouldShowStagingImage} /> + + Date: Wed, 27 Sep 2023 03:51:37 -0400 Subject: [PATCH 33/37] fix auto-switch alongside starred images (#4708) * add skeleton loading state for queue lit * add optional selectedImage when switching a board * unstage --------- Co-authored-by: Mary Hipp Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com> --- .../listeners/boardIdSelected.ts | 9 ++++-- .../socketio/socketInvocationComplete.ts | 29 +++++++++++++++++-- .../Boards/BoardsList/GalleryBoard.tsx | 2 +- .../Boards/BoardsList/NoBoardBoard.tsx | 2 +- .../Boards/BoardsList/SystemBoardButton.tsx | 2 +- .../features/gallery/store/gallerySlice.ts | 7 +++-- 6 files changed, 41 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts index 1b13181911..a8e1a04fc1 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts @@ -25,7 +25,7 @@ export const addBoardIdSelectedListener = () => { const state = 
getState(); const board_id = boardIdSelected.match(action) - ? action.payload + ? action.payload.boardId : state.gallery.selectedBoardId; const galleryView = galleryViewChanged.match(action) @@ -55,7 +55,12 @@ export const addBoardIdSelectedListener = () => { if (boardImagesData) { const firstImage = imagesSelectors.selectAll(boardImagesData)[0]; - dispatch(imageSelected(firstImage ?? null)); + const selectedImage = imagesSelectors.selectById( + boardImagesData, + action.payload.selectedImageName + ); + + dispatch(imageSelected(selectedImage || firstImage || null)); } else { // board has no images - deselect dispatch(imageSelected(null)); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts index 7e918410a7..beaa4835b3 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts @@ -81,9 +81,32 @@ export const addInvocationCompleteEventListener = () => { // If auto-switch is enabled, select the new image if (shouldAutoSwitch) { - // if auto-add is enabled, switch the board as the image comes in - dispatch(galleryViewChanged('images')); - dispatch(boardIdSelected(imageDTO.board_id ?? 
'none')); + // if auto-add is enabled, switch the gallery view and board if needed as the image comes in + if (gallery.galleryView !== 'images') { + dispatch(galleryViewChanged('images')); + } + + if ( + imageDTO.board_id && + imageDTO.board_id !== gallery.selectedBoardId + ) { + dispatch( + boardIdSelected({ + boardId: imageDTO.board_id, + selectedImageName: imageDTO.image_name, + }) + ); + } + + if (!imageDTO.board_id && gallery.selectedBoardId !== 'none') { + dispatch( + boardIdSelected({ + boardId: 'none', + selectedImageName: imageDTO.image_name, + }) + ); + } + dispatch(imageSelected(imageDTO)); } } diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx index 1bb6816bd9..104512a9c6 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx @@ -93,7 +93,7 @@ const GalleryBoard = ({ const [localBoardName, setLocalBoardName] = useState(board_name); const handleSelectBoard = useCallback(() => { - dispatch(boardIdSelected(board_id)); + dispatch(boardIdSelected({ boardId: board_id })); if (autoAssignBoardOnClick) { dispatch(autoAddBoardIdChanged(board_id)); } diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx index 55034decf0..6cea7d3eac 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx @@ -34,7 +34,7 @@ const NoBoardBoard = memo(({ isSelected }: Props) => { const { autoAddBoardId, autoAssignBoardOnClick } = useAppSelector(selector); const boardName = useBoardName('none'); const handleSelectBoard = 
useCallback(() => { - dispatch(boardIdSelected('none')); + dispatch(boardIdSelected({ boardId: 'none' })); if (autoAssignBoardOnClick) { dispatch(autoAddBoardIdChanged('none')); } diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/SystemBoardButton.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/SystemBoardButton.tsx index b538eee9d1..462aa4b5e6 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/SystemBoardButton.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/SystemBoardButton.tsx @@ -32,7 +32,7 @@ const SystemBoardButton = ({ board_id }: Props) => { const boardName = useBoardName(board_id); const handleClick = useCallback(() => { - dispatch(boardIdSelected(board_id)); + dispatch(boardIdSelected({ boardId: board_id })); }, [board_id, dispatch]); return ( diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index a4e4b02937..c78b22dd78 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -35,8 +35,11 @@ export const gallerySlice = createSlice({ autoAssignBoardOnClickChanged: (state, action: PayloadAction) => { state.autoAssignBoardOnClick = action.payload; }, - boardIdSelected: (state, action: PayloadAction) => { - state.selectedBoardId = action.payload; + boardIdSelected: ( + state, + action: PayloadAction<{ boardId: BoardId; selectedImageName?: string }> + ) => { + state.selectedBoardId = action.payload.boardId; state.galleryView = 'images'; }, autoAddBoardIdChanged: (state, action: PayloadAction) => { From 4a0a1c30dbadec290be67f6c7104af401a4d94ac Mon Sep 17 00:00:00 2001 From: chainchompa Date: Wed, 27 Sep 2023 05:30:50 -0400 Subject: [PATCH 34/37] use controlnet from metadata if available (#4658) * add control net to useRecallParams * got 
recall controlnets working * fix metadata viewer controlnet * fix type errors * fix controlnet metadata viewer * set control image and use correct processor type and node * clean up logs * recall processor using substring * feat(ui): enable controlNet when recalling one --------- Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com> --- .../controlNet/store/controlNetSlice.ts | 11 ++ .../ImageMetadataActions.tsx | 37 ++++- .../web/src/features/nodes/types/types.ts | 4 + .../parameters/hooks/useRecallParameters.ts | 148 +++++++++++++++++- 4 files changed, 196 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts b/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts index ae3bdd7112..f0745eae2b 100644 --- a/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts +++ b/invokeai/frontend/web/src/features/controlNet/store/controlNetSlice.ts @@ -98,6 +98,9 @@ export const controlNetSlice = createSlice({ isControlNetEnabledToggled: (state) => { state.isEnabled = !state.isEnabled; }, + controlNetEnabled: (state) => { + state.isEnabled = true; + }, controlNetAdded: ( state, action: PayloadAction<{ @@ -111,6 +114,12 @@ export const controlNetSlice = createSlice({ controlNetId, }; }, + controlNetRecalled: (state, action: PayloadAction) => { + const controlNet = action.payload; + state.controlNets[controlNet.controlNetId] = { + ...controlNet, + }; + }, controlNetDuplicated: ( state, action: PayloadAction<{ @@ -439,7 +448,9 @@ export const controlNetSlice = createSlice({ export const { isControlNetEnabledToggled, + controlNetEnabled, controlNetAdded, + controlNetRecalled, controlNetDuplicated, controlNetAddedFromImage, controlNetRemoved, diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx index 
955e8a5a3a..25d8e1e5ac 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx @@ -1,8 +1,15 @@ -import { CoreMetadata, LoRAMetadataItem } from 'features/nodes/types/types'; +import { + ControlNetMetadataItem, + CoreMetadata, + LoRAMetadataItem, +} from 'features/nodes/types/types'; import { useRecallParameters } from 'features/parameters/hooks/useRecallParameters'; -import { memo, useCallback } from 'react'; +import { memo, useMemo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; -import { isValidLoRAModel } from '../../../parameters/types/parameterSchemas'; +import { + isValidControlNetModel, + isValidLoRAModel, +} from '../../../parameters/types/parameterSchemas'; import ImageMetadataItem from './ImageMetadataItem'; type Props = { @@ -26,6 +33,7 @@ const ImageMetadataActions = (props: Props) => { recallHeight, recallStrength, recallLoRA, + recallControlNet, } = useRecallParameters(); const handleRecallPositivePrompt = useCallback(() => { @@ -75,6 +83,21 @@ const ImageMetadataActions = (props: Props) => { [recallLoRA] ); + const handleRecallControlNet = useCallback( + (controlnet: ControlNetMetadataItem) => { + recallControlNet(controlnet); + }, + [recallControlNet] + ); + + const validControlNets: ControlNetMetadataItem[] = useMemo(() => { + return metadata?.controlnets + ? 
metadata.controlnets.filter((controlnet) => + isValidControlNetModel(controlnet.control_model) + ) + : []; + }, [metadata?.controlnets]); + if (!metadata || Object.keys(metadata).length === 0) { return null; } @@ -180,6 +203,14 @@ const ImageMetadataActions = (props: Props) => { ); } })} + {validControlNets.map((controlnet, index) => ( + handleRecallControlNet(controlnet)} + /> + ))} ); }; diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index 0033e462cb..eb8baf513e 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -1141,6 +1141,10 @@ const zLoRAMetadataItem = z.object({ export type LoRAMetadataItem = z.infer; +const zControlNetMetadataItem = zControlField.deepPartial(); + +export type ControlNetMetadataItem = z.infer; + export const zCoreMetadata = z .object({ app_version: z.string().nullish().catch(null), diff --git a/invokeai/frontend/web/src/features/parameters/hooks/useRecallParameters.ts b/invokeai/frontend/web/src/features/parameters/hooks/useRecallParameters.ts index 4fb9a0ce2c..d8561ab122 100644 --- a/invokeai/frontend/web/src/features/parameters/hooks/useRecallParameters.ts +++ b/invokeai/frontend/web/src/features/parameters/hooks/useRecallParameters.ts @@ -2,7 +2,11 @@ import { createSelector } from '@reduxjs/toolkit'; import { useAppToaster } from 'app/components/Toaster'; import { stateSelector } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { CoreMetadata, LoRAMetadataItem } from 'features/nodes/types/types'; +import { + CoreMetadata, + LoRAMetadataItem, + ControlNetMetadataItem, +} from 'features/nodes/types/types'; import { refinerModelChanged, setNegativeStylePromptSDXL, @@ -18,9 +22,18 @@ import { useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { ImageDTO } from 'services/api/types'; import { + 
controlNetModelsAdapter, loraModelsAdapter, + useGetControlNetModelsQuery, useGetLoRAModelsQuery, } from '../../../services/api/endpoints/models'; +import { + ControlNetConfig, + controlNetEnabled, + controlNetRecalled, + controlNetReset, + initialControlNet, +} from '../../controlNet/store/controlNetSlice'; import { loraRecalled, lorasCleared } from '../../lora/store/loraSlice'; import { initialImageSelected, modelSelected } from '../store/actions'; import { @@ -38,6 +51,7 @@ import { isValidCfgScale, isValidHeight, isValidLoRAModel, + isValidControlNetModel, isValidMainModel, isValidNegativePrompt, isValidPositivePrompt, @@ -53,6 +67,11 @@ import { isValidStrength, isValidWidth, } from '../types/parameterSchemas'; +import { v4 as uuidv4 } from 'uuid'; +import { + CONTROLNET_PROCESSORS, + CONTROLNET_MODEL_DEFAULT_PROCESSORS, +} from 'features/controlNet/store/constants'; const selector = createSelector(stateSelector, ({ generation }) => { const { model } = generation; @@ -390,6 +409,121 @@ export const useRecallParameters = () => { [prepareLoRAMetadataItem, dispatch, parameterSetToast, parameterNotSetToast] ); + /** + * Recall ControlNet with toast + */ + + const { controlnets } = useGetControlNetModelsQuery(undefined, { + selectFromResult: (result) => ({ + controlnets: result.data + ? 
controlNetModelsAdapter.getSelectors().selectAll(result.data) + : [], + }), + }); + + const prepareControlNetMetadataItem = useCallback( + (controlnetMetadataItem: ControlNetMetadataItem) => { + if (!isValidControlNetModel(controlnetMetadataItem.control_model)) { + return { controlnet: null, error: 'Invalid ControlNet model' }; + } + + const { + image, + control_model, + control_weight, + begin_step_percent, + end_step_percent, + control_mode, + resize_mode, + } = controlnetMetadataItem; + + const matchingControlNetModel = controlnets.find( + (c) => + c.base_model === control_model.base_model && + c.model_name === control_model.model_name + ); + + if (!matchingControlNetModel) { + return { controlnet: null, error: 'ControlNet model is not installed' }; + } + + const isCompatibleBaseModel = + matchingControlNetModel?.base_model === model?.base_model; + + if (!isCompatibleBaseModel) { + return { + controlnet: null, + error: 'ControlNet incompatible with currently-selected model', + }; + } + + const controlNetId = uuidv4(); + + let processorType = initialControlNet.processorType; + for (const modelSubstring in CONTROLNET_MODEL_DEFAULT_PROCESSORS) { + if (matchingControlNetModel.model_name.includes(modelSubstring)) { + processorType = + CONTROLNET_MODEL_DEFAULT_PROCESSORS[modelSubstring] || + initialControlNet.processorType; + break; + } + } + const processorNode = CONTROLNET_PROCESSORS[processorType].default; + + const controlnet: ControlNetConfig = { + isEnabled: true, + model: matchingControlNetModel, + weight: + typeof control_weight === 'number' + ? 
control_weight + : initialControlNet.weight, + beginStepPct: begin_step_percent || initialControlNet.beginStepPct, + endStepPct: end_step_percent || initialControlNet.endStepPct, + controlMode: control_mode || initialControlNet.controlMode, + resizeMode: resize_mode || initialControlNet.resizeMode, + controlImage: image?.image_name || null, + processedControlImage: image?.image_name || null, + processorType, + processorNode: + processorNode.type !== 'none' + ? processorNode + : initialControlNet.processorNode, + shouldAutoConfig: true, + controlNetId, + }; + + return { controlnet, error: null }; + }, + [controlnets, model?.base_model] + ); + + const recallControlNet = useCallback( + (controlnetMetadataItem: ControlNetMetadataItem) => { + const result = prepareControlNetMetadataItem(controlnetMetadataItem); + + if (!result.controlnet) { + parameterNotSetToast(result.error); + return; + } + + dispatch( + controlNetRecalled({ + ...result.controlnet, + }) + ); + + dispatch(controlNetEnabled()); + + parameterSetToast(); + }, + [ + prepareControlNetMetadataItem, + dispatch, + parameterSetToast, + parameterNotSetToast, + ] + ); + /* * Sets image as initial image with toast */ @@ -428,6 +562,7 @@ export const useRecallParameters = () => { refiner_negative_aesthetic_score, refiner_start, loras, + controlnets, } = metadata; if (isValidCfgScale(cfg_scale)) { @@ -517,6 +652,15 @@ export const useRecallParameters = () => { } }); + dispatch(controlNetReset()); + dispatch(controlNetEnabled()); + controlnets?.forEach((controlnet) => { + const result = prepareControlNetMetadataItem(controlnet); + if (result.controlnet) { + dispatch(controlNetRecalled(result.controlnet)); + } + }); + allParameterSetToast(); }, [ @@ -524,6 +668,7 @@ export const useRecallParameters = () => { allParameterSetToast, dispatch, prepareLoRAMetadataItem, + prepareControlNetMetadataItem, ] ); @@ -542,6 +687,7 @@ export const useRecallParameters = () => { recallHeight, recallStrength, recallLoRA, + 
recallControlNet, recallAllParameters, sendToImageToImage, }; From 062df07de2230667b1ac0abf5e820808439ad1d1 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Thu, 28 Sep 2023 00:54:20 +1000 Subject: [PATCH 35/37] fix(ui): fix loading queue item translation --- invokeai/frontend/web/public/locales/en.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index fc9dd0cc5f..90445ba5d4 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -80,7 +80,7 @@ "lightMode": "Light Mode", "linear": "Linear", "load": "Load", - "loading": "Loading $t({{noun}})...", + "loading": "Loading", "loadingInvokeAI": "Loading Invoke AI", "learnMore": "Learn More", "modelManager": "Model Manager", From 81581246794f1f613eb82a15a76f4afe18347d6a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 27 Sep 2023 21:07:45 +1000 Subject: [PATCH 36/37] fix(ui): usePreselectedImage causing re-renders This hook was rerendering any time anything changed. Moved it to a logical component, put its useEffects inside the component. This reduces the effect of the rerenders to just that tiny always-null component. 
--- .../frontend/web/src/app/components/App.tsx | 28 ++++-------------- .../src/app/components/PreselectedImage.tsx | 16 ++++++++++ .../parameters/hooks/usePreselectedImage.ts | 29 ++++++++++++++++--- 3 files changed, 46 insertions(+), 27 deletions(-) create mode 100644 invokeai/frontend/web/src/app/components/PreselectedImage.tsx diff --git a/invokeai/frontend/web/src/app/components/App.tsx b/invokeai/frontend/web/src/app/components/App.tsx index b3ffeee333..2265999acd 100644 --- a/invokeai/frontend/web/src/app/components/App.tsx +++ b/invokeai/frontend/web/src/app/components/App.tsx @@ -1,6 +1,8 @@ import { Flex, Grid } from '@chakra-ui/react'; +import { useStore } from '@nanostores/react'; import { useLogger } from 'app/logging/useLogger'; import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted'; +import { $headerComponent } from 'app/store/nanostores/headerComponent'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { PartialAppConfig } from 'app/types/invokeai'; import ImageUploader from 'common/components/ImageUploader'; @@ -14,12 +16,10 @@ import i18n from 'i18n'; import { size } from 'lodash-es'; import { memo, useCallback, useEffect } from 'react'; import { ErrorBoundary } from 'react-error-boundary'; -import { usePreselectedImage } from '../../features/parameters/hooks/usePreselectedImage'; import AppErrorBoundaryFallback from './AppErrorBoundaryFallback'; import GlobalHotkeys from './GlobalHotkeys'; +import PreselectedImage from './PreselectedImage'; import Toaster from './Toaster'; -import { useStore } from '@nanostores/react'; -import { $headerComponent } from 'app/store/nanostores/headerComponent'; const DEFAULT_CONFIG = {}; @@ -36,8 +36,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => { const logger = useLogger('system'); const dispatch = useAppDispatch(); - const { handleSendToCanvas, handleSendToImg2Img, handleUseAllMetadata } = - 
usePreselectedImage(selectedImage?.imageName); + const handleReset = useCallback(() => { localStorage.clear(); location.reload(); @@ -59,24 +58,6 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => { dispatch(appStarted()); }, [dispatch]); - useEffect(() => { - if (selectedImage && selectedImage.action === 'sendToCanvas') { - handleSendToCanvas(); - } - }, [selectedImage, handleSendToCanvas]); - - useEffect(() => { - if (selectedImage && selectedImage.action === 'sendToImg2Img') { - handleSendToImg2Img(); - } - }, [selectedImage, handleSendToImg2Img]); - - useEffect(() => { - if (selectedImage && selectedImage.action === 'useAllParameters') { - handleUseAllMetadata(); - } - }, [selectedImage, handleUseAllMetadata]); - const headerComponent = useStore($headerComponent); return ( @@ -112,6 +93,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => { + ); }; diff --git a/invokeai/frontend/web/src/app/components/PreselectedImage.tsx b/invokeai/frontend/web/src/app/components/PreselectedImage.tsx new file mode 100644 index 0000000000..8fa4fd2ffd --- /dev/null +++ b/invokeai/frontend/web/src/app/components/PreselectedImage.tsx @@ -0,0 +1,16 @@ +import { usePreselectedImage } from 'features/parameters/hooks/usePreselectedImage'; +import { memo } from 'react'; + +type Props = { + selectedImage?: { + imageName: string; + action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters'; + }; +}; + +const PreselectedImage = (props: Props) => { + usePreselectedImage(props.selectedImage); + return null; +}; + +export default memo(PreselectedImage); diff --git a/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts b/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts index a14430a55d..4ea4f93bac 100644 --- a/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts +++ b/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts @@ -1,7 +1,7 @@ import { skipToken } from 
'@reduxjs/toolkit/dist/query'; import { CoreMetadata } from 'features/nodes/types/types'; import { t } from 'i18next'; -import { useCallback } from 'react'; +import { useCallback, useEffect } from 'react'; import { useAppToaster } from '../../../app/components/Toaster'; import { useAppDispatch } from '../../../app/store/storeHooks'; import { @@ -13,18 +13,21 @@ import { setActiveTab } from '../../ui/store/uiSlice'; import { initialImageSelected } from '../store/actions'; import { useRecallParameters } from './useRecallParameters'; -export const usePreselectedImage = (imageName?: string) => { +export const usePreselectedImage = (selectedImage?: { + imageName: string; + action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters'; +}) => { const dispatch = useAppDispatch(); const { recallAllParameters } = useRecallParameters(); const toaster = useAppToaster(); const { currentData: selectedImageDto } = useGetImageDTOQuery( - imageName ?? skipToken + selectedImage?.imageName ?? skipToken ); const { currentData: selectedImageMetadata } = useGetImageMetadataQuery( - imageName ?? skipToken + selectedImage?.imageName ?? 
skipToken ); const handleSendToCanvas = useCallback(() => { @@ -54,5 +57,23 @@ export const usePreselectedImage = (imageName?: string) => { // eslint-disable-next-line react-hooks/exhaustive-deps }, [selectedImageMetadata]); + useEffect(() => { + if (selectedImage && selectedImage.action === 'sendToCanvas') { + handleSendToCanvas(); + } + }, [selectedImage, handleSendToCanvas]); + + useEffect(() => { + if (selectedImage && selectedImage.action === 'sendToImg2Img') { + handleSendToImg2Img(); + } + }, [selectedImage, handleSendToImg2Img]); + + useEffect(() => { + if (selectedImage && selectedImage.action === 'useAllParameters') { + handleUseAllMetadata(); + } + }, [selectedImage, handleUseAllMetadata]); + return { handleSendToCanvas, handleSendToImg2Img, handleUseAllMetadata }; }; From a263a4f4cc2e8c799d27fe5c4b049d3a5ada0613 Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> Date: Wed, 27 Sep 2023 20:51:02 -0400 Subject: [PATCH 37/37] Update CONTROLNET.md --- docs/features/CONTROLNET.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md index b4575653bd..8284ddf75d 100644 --- a/docs/features/CONTROLNET.md +++ b/docs/features/CONTROLNET.md @@ -147,7 +147,7 @@ There are several ways to install IP-Adapter models with an existing InvokeAI in 1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [5] to download models. 2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. 
For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models. -3. Manually downloading the IP-Adapter and Image Encoder files and placing them in the `ip-adapter` folder of relevant base model folder of Invoke root directory. For example, for the SDXL IP-Adapter, files should be added to the `model/sdxl/ip_adapter/` folder. +3. **Advanced -- Not recommended** Manually downloading the IP-Adapter and Image Encoder files - Image Encoder folders should be placed in the `models\any\clip_vision` folders. IP Adapter Model folders should be placed in the `ip-adapter` folder of the relevant base model folder of the Invoke root directory. For example, for the SDXL IP-Adapter, files should be added to the `model/sdxl/ip_adapter/` folder. #### Using IP-Adapter