diff --git a/README.md b/README.md
index ff06db8d21..f540e7be75 100644
--- a/README.md
+++ b/README.md
@@ -2,21 +2,102 @@
 
 ![project hero](https://github.com/invoke-ai/InvokeAI/assets/31807370/6e3728c7-e90e-4711-905c-3b55844ff5be)
 
-# Invoke - Professional Creative AI Tools for Visual Media
-## To learn more about Invoke, or implement our Business solutions, visit [invoke.com](https://www.invoke.com/about)
-
+# Invoke - Professional Creative AI Tools for Visual Media
+#### To learn more about Invoke, or implement our Business solutions, visit [invoke.com]
 
-[![discord badge]][discord link]
+[![discord badge]][discord link] [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link] [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
 
-[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
+
 
-[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
+Invoke is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. Invoke offers an industry-leading web-based UI, and serves as the foundation for multiple commercial products.
 
-[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
+[Installation][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs]
 
+
+
+![Highlighted Features - Canvas and Workflows](https://github.com/invoke-ai/InvokeAI/assets/31807370/708f7a82-084f-4860-bfbe-e2588c53548d)
+
+
+
+## Quick Start
+
+1. Download and unzip the installer from the bottom of the [latest release][latest release link].
+2. Run the installer script.
+
+   - **Windows**: Double-click on the `install.bat` script.
+   - **macOS**: Open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press enter.
+   - **Linux**: Run `install.sh`.
+
+3. When prompted, enter a location for the install and select your GPU type.
+4. Once the install finishes, find the directory you selected during install. The default location is `C:\Users\Username\invokeai` for Windows or `~/invokeai` for Linux/macOS.
+5. Run the launcher script (`invoke.bat` for Windows, `invoke.sh` for macOS and Linux) the same way you ran the installer script in step 2.
+6. Select option 1 to start the application. Once it starts up, open your browser and go to http://localhost:9090.
+7. Open the model manager tab to install a starter model and then you'll be ready to generate.
+
+More details, including hardware requirements and manual install instructions, are available in the [installation documentation][installation docs].
+
+## Troubleshooting, FAQ and Support
+
+Please review our [FAQ][faq] for solutions to common installation problems and other issues.
+
+For more help, please join our [Discord][discord link].
+
+## Features
+
+Full details on features can be found in [our documentation][features docs].
+
+### Web Server & UI
+
+Invoke runs a locally hosted web server & React UI with an industry-leading user experience.
+
+### Unified Canvas
+
+The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/out-painting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
+
+### Workflows & Nodes
+
+Invoke offers a fully featured workflow management solution, enabling users to combine the power of node-based workflows with the ease of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases.
+
+### Board & Gallery Management
+
+Invoke features an organized gallery system for easily storing, accessing, and remixing your content in the Invoke workspace. Images can be dragged/dropped onto any image-based UI element in the application, and rich metadata within the image allows for easy recall of key prompts or settings used in your workflow.
+
+### Other features
+
+- Support for both ckpt and diffusers models
+- SD1.5, SD2.0, and SDXL support
+- Upscaling Tools
+- Embedding Manager & Support
+- Model Manager & Support
+- Workflow creation & management
+- Node-Based Architecture
+
+## Contributing
+
+Anyone who wishes to contribute to this project - whether documentation, features, bug fixes, code cleanup, testing, or code reviews - is very much encouraged to do so.
+
+Get started with contributing by reading our [contribution documentation][contributing docs], joining the [#dev-chat] or the GitHub discussion board.
+
+We hope you enjoy using Invoke as much as we enjoy creating it, and we hope you will elect to become part of our community.
+
+## Thanks
+
+Invoke is a combined effort of [passionate and talented people from across the world][contributors]. We thank them for their time, hard work and effort.
+
+Original portions of the software are Copyright © 2024 by respective contributors.
+ +[features docs]: https://invoke-ai.github.io/InvokeAI/features/ +[faq]: https://invoke-ai.github.io/InvokeAI/help/FAQ/ +[contributors]: https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/ +[invoke.com]: https://www.invoke.com/about +[github issues]: https://github.com/invoke-ai/InvokeAI/issues +[docs home]: https://invoke-ai.github.io/InvokeAI +[installation docs]: https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/ +[#dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939 +[contributing docs]: https://invoke-ai.github.io/InvokeAI/contributing/CONTRIBUTING/ [CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github -[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain +[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain [discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord [discord link]: https://discord.gg/ZmtBAhwWhy [github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github @@ -30,402 +111,6 @@ [latest commit to main badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/main?icon=github&color=yellow&label=last%20dev%20commit&cache=900 [latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main [latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github -[latest release link]: https://github.com/invoke-ai/InvokeAI/releases +[latest release link]: https://github.com/invoke-ai/InvokeAI/releases/latest [translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg [translation status link]: https://hosted.weblate.org/engage/invokeai/ - - - -InvokeAI is a leading creative engine built to empower professionals -and enthusiasts alike. Generate and create stunning visual media using -the latest AI-driven technologies. InvokeAI offers an industry leading -Web Interface, interactive Command Line Interface, and also serves as -the foundation for multiple commercial products. - -**Quick links**: [[How to - Install](https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/)] [Discord Server] [Documentation and - Tutorials] - [Bug Reports] - [Discussion, - Ideas & Q&A] - [Contributing] - -
- - -![Highlighted Features - Canvas and Workflows](https://github.com/invoke-ai/InvokeAI/assets/31807370/708f7a82-084f-4860-bfbe-e2588c53548d) - - -
- -## Table of Contents - -Table of Contents 📝 - -**Getting Started** -1. 🏁 [Quick Start](#quick-start) -3. 🖥️ [Hardware Requirements](#hardware-requirements) - -**More About Invoke** -1. 🌟 [Features](#features) -2. 📣 [Latest Changes](#latest-changes) -3. 🛠️ [Troubleshooting](#troubleshooting) - -**Supporting the Project** -1. 🤝 [Contributing](#contributing) -2. 👥 [Contributors](#contributors) -3. 💕 [Support](#support) - -## Quick Start - -For full installation and upgrade instructions, please see: -[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/) - -If upgrading from version 2.3, please read [Migrating a 2.3 root -directory to 3.0](#migrating-to-3) first. - -### Automatic Installer (suggested for 1st time users) - -1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest) - -2. Download the .zip file for your OS (Windows/macOS/Linux). - -3. Unzip the file. - -4. **Windows:** double-click on the `install.bat` script. **macOS:** Open a Terminal window, drag the file `install.sh` from Finder -into the Terminal, and press return. **Linux:** run `install.sh`. - -5. You'll be asked to confirm the location of the folder in which -to install InvokeAI and its image generation model files. Pick a -location with at least 15 GB of free memory. More if you plan on -installing lots of models. - -6. Wait while the installer does its thing. After installing the software, -the installer will launch a script that lets you configure InvokeAI and -select a set of starting image generation models. - -7. Find the folder that InvokeAI was installed into (it is not the -same as the unpacked zip file directory!) The default location of this -folder (if you didn't change it in step 5) is `~/invokeai` on -Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`. - -8. On Windows systems, double-click on the `invoke.bat` file. On -macOS, open a Terminal window, drag `invoke.sh` from the folder into -the Terminal, and press return. On Linux, run `invoke.sh` - -9. Press 2 to open the "browser-based UI", press enter/return, wait a -minute or two for Stable Diffusion to start up, then open your browser -and go to http://localhost:9090. - -10. Type `banana sushi` in the box on the top left and click `Invoke` - -### Command-Line Installation (for developers and users familiar with Terminals) - -You must have Python 3.10 through 3.11 installed on your machine. Earlier or -later versions are not supported. -Node.js also needs to be installed along with `pnpm` (can be installed with -the command `npm install -g pnpm` if needed) - -1. Open a command-line window on your machine. The PowerShell is recommended for Windows. -2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space: - - ```terminal - mkdir invokeai - ```` - -3. Create a virtual environment named `.venv` inside this directory and activate it: - - ```terminal - cd invokeai - python -m venv .venv --prompt InvokeAI - ``` - -4. Activate the virtual environment (do it every time you run InvokeAI) - - _For Linux/Mac users:_ - - ```sh - source .venv/bin/activate - ``` - - _For Windows users:_ - - ```ps - .venv\Scripts\activate - ``` - -5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU. 
- - _For Windows/Linux with an NVIDIA GPU:_ - - ```terminal - pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121 - ``` - - _For Linux with an AMD GPU:_ - - ```sh - pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6 - ``` - - _For non-GPU systems:_ - ```terminal - pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu - ``` - - _For Macintoshes, either Intel or M1/M2/M3:_ - - ```sh - pip install InvokeAI --use-pep517 - ``` - -6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once): - - ```terminal - invokeai-configure --root . - ``` - Don't miss the dot at the end! - -7. Launch the web server (do it every time you run InvokeAI): - - ```terminal - invokeai-web - ``` - -8. Point your browser to http://localhost:9090 to bring up the web interface. - -9. Type `banana sushi` in the box on the top left and click `Invoke`. - -Be sure to activate the virtual environment each time before re-launching InvokeAI, -using `source .venv/bin/activate` or `.venv\Scripts\activate`. - -## Detailed Installation Instructions - -This fork is supported across Linux, Windows and Macintosh. Linux -users can use either an Nvidia-based card (with CUDA support) or an -AMD card (using the ROCm driver). For full installation and upgrade -instructions, please see: -[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/) - - -### Migrating a v2.3 InvokeAI root directory - -The InvokeAI root directory is where the InvokeAI startup file, -installed models, and generated images are stored. It is ordinarily -named `invokeai` and located in your home directory. The contents and -layout of this directory has changed between versions 2.3 and 3.0 and -cannot be used directly. - -We currently recommend that you use the installer to create a new root -directory named differently from the 2.3 one, e.g. `invokeai-3` and -then use a migration script to copy your 2.3 models into the new -location. However, if you choose, you can upgrade this directory in -place. This section gives both recipes. - -#### Creating a new root directory and migrating old models - -This is the safer recipe because it leaves your old root directory in -place to fall back on. - -1. Follow the instructions above to create and install InvokeAI in a -directory that has a different name from the 2.3 invokeai directory. -In this example, we will use "invokeai-3" - -2. When you are prompted to select models to install, select a minimal -set of models, such as stable-diffusion-v1.5 only. - -3. After installation is complete launch `invokeai.sh` (Linux/Mac) or -`invokeai.bat` and select option 8 "Open the developers console". This -will take you to the command line. - -4. Issue the command `invokeai-migrate3 --from /path/to/v2.3-root --to -/path/to/invokeai-3-root`. Provide the correct `--from` and `--to` -paths for your v2.3 and v3.0 root directories respectively. - -This will copy and convert your old models from 2.3 format to 3.0 -format and create a new `models` directory in the 3.0 directory. The -old models directory (which contains the models selected at install -time) will be renamed `models.orig` and can be deleted once you have -confirmed that the migration was successful. - - If you wish, you can pass the 2.3 root directory to both `--from` and -`--to` in order to update in place. 
Warning: this directory will no -longer be usable with InvokeAI 2.3. - -#### Migrating in place - -For the adventurous, you may do an in-place upgrade from 2.3 to 3.0 -without touching the command line. ***This recipe does not work on -Windows platforms due to a bug in the Windows version of the 2.3 -upgrade script.** See the next section for a Windows recipe. - -##### For Mac and Linux Users: - -1. Launch the InvokeAI launcher script in your current v2.3 root directory. - -2. Select option [9] "Update InvokeAI" to bring up the updater dialog. - -3. Select option [1] to upgrade to the latest release. - -4. Once the upgrade is finished you will be returned to the launcher -menu. Select option [6] "Re-run the configure script to fix a broken -install or to complete a major upgrade". - -This will run the configure script against the v2.3 directory and -update it to the 3.0 format. The following files will be replaced: - - - The invokeai.init file, replaced by invokeai.yaml - - The models directory - - The configs/models.yaml model index - -The original versions of these files will be saved with the suffix -".orig" appended to the end. Once you have confirmed that the upgrade -worked, you can safely remove these files. Alternatively you can -restore a working v2.3 directory by removing the new files and -restoring the ".orig" files' original names. - -##### For Windows Users: - -Windows Users can upgrade with the - -1. Enter the 2.3 root directory you wish to upgrade -2. Launch `invoke.sh` or `invoke.bat` -3. Select the "Developer's console" option [8] -4. Type the following commands - -``` -pip install "invokeai @ https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v3.0.0" --use-pep517 --upgrade -invokeai-configure --root . -``` -(Replace `v3.0.0` with the current release number if this document is out of date). - -The first command will install and upgrade new software to run -InvokeAI. The second will prepare the 2.3 directory for use with 3.0. -You may now launch the WebUI in the usual way, by selecting option [1] -from the launcher script - -#### Migrating Images - -The migration script will migrate your invokeai settings and models, -including textual inversion models, LoRAs and merges that you may have -installed previously. However it does **not** migrate the generated -images stored in your 2.3-format outputs directory. To do this, you -need to run an additional step: - -1. From a working InvokeAI 3.0 root directory, start the launcher and -enter menu option [8] to open the "developer's console". - -2. At the developer's console command line, type the command: - -```bash -invokeai-import-images -``` - -3. This will lead you through the process of confirming the desired - source and destination for the imported images. The images will - appear in the gallery board of your choice, and contain the - original prompt, model name, and other parameters used to generate - the image. - -(Many kudos to **techjedi** for contributing this script.) - -## Hardware Requirements - -InvokeAI is supported across Linux, Windows and macOS. Linux -users can use either an Nvidia-based card (with CUDA support) or an -AMD card (using the ROCm driver). - -### System - -You will need one of the following: - -- An NVIDIA-based graphics card with 4 GB or more VRAM memory. 6-8 GB - of VRAM is highly recommended for rendering using the Stable - Diffusion XL models -- An Apple computer with an M1 chip. -- An AMD-based graphics card with 4GB or more VRAM memory (Linux - only), 6-8 GB for XL rendering. 
- -We do not recommend the GTX 1650 or 1660 series video cards. They are -unable to run in half-precision mode and do not have sufficient VRAM -to render 512x512 images. - -**Memory** - At least 12 GB Main Memory RAM. - -**Disk** - At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies. - -## Features - -Feature documentation can be reviewed by navigating to [the InvokeAI Documentation page](https://invoke-ai.github.io/InvokeAI/features/) - -### *Web Server & UI* - -InvokeAI offers a locally hosted Web Server & React Frontend, with an industry leading user experience. The Web-based UI allows for simple and intuitive workflows, and is responsive for use on mobile devices and tablets accessing the web server. - -### *Unified Canvas* - -The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more. - -### *Workflows & Nodes* - -InvokeAI offers a fully featured workflow management solution, enabling users to combine the power of nodes based workflows with the easy of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases. - -### *Board & Gallery Management* - -Invoke AI provides an organized gallery system for easily storing, accessing, and remixing your content in the Invoke workspace. Images can be dragged/dropped onto any Image-base UI element in the application, and rich metadata within the Image allows for easy recall of key prompts or settings used in your workflow. - -### Other features - -- *Support for both ckpt and diffusers models* -- *SD 2.0, 2.1, XL support* -- *Upscaling Tools* -- *Embedding Manager & Support* -- *Model Manager & Support* -- *Workflow creation & management* -- *Node-Based Architecture* - - -### Latest Changes - -For our latest changes, view our [Release -Notes](https://github.com/invoke-ai/InvokeAI/releases) and the -[CHANGELOG](docs/CHANGELOG.md). - -### Troubleshooting / FAQ - -Please check out our **[FAQ](https://invoke-ai.github.io/InvokeAI/help/FAQ/)** to get solutions for common installation -problems and other issues. For more help, please join our [Discord][discord link] - -## Contributing - -Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code -cleanup, testing, or code reviews, is very much encouraged to do so. - -Get started with contributing by reading our [Contribution documentation](https://invoke-ai.github.io/InvokeAI/contributing/CONTRIBUTING/), joining the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) or the GitHub discussion board. - -If you are unfamiliar with how -to contribute to GitHub projects, we have a new contributor checklist you can follow to get started contributing: -[New Contributor Checklist](https://invoke-ai.github.io/InvokeAI/contributing/contribution_guides/newContributorChecklist/). - -We hope you enjoy using our software as much as we enjoy creating it, -and we hope that some of those of you who are reading this will elect -to become part of our community. - -Welcome to InvokeAI! - -### Contributors - -This fork is a combined effort of various people from across the world. 
-[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
-their time, hard work and effort.
-
-### Support
-
-For support, please use this repository's GitHub Issues tracking service, or join the [Discord][discord link].
-
-Original portions of the software are Copyright (c) 2023 by respective contributors.
-
diff --git a/docs/features/CONFIGURATION.md b/docs/features/CONFIGURATION.md
index 41f7a3ced3..d6bfe44901 100644
--- a/docs/features/CONFIGURATION.md
+++ b/docs/features/CONFIGURATION.md
@@ -51,13 +51,11 @@ The settings in this file will override the defaults. You only need to change
 this file if the default for a particular setting doesn't work for you.
 
+You'll find an example file next to `invokeai.yaml` that shows the default values.
+
 Some settings, like [Model Marketplace API Keys], require the YAML
 to be formatted correctly. Here is a [basic guide to YAML files].
 
-You can fix a broken `invokeai.yaml` by deleting it and running the
-configuration script again -- option [6] in the launcher, "Re-run the
-configure script".
-
 #### Custom Config File Location
 
 You can use any config file with the `--config` CLI arg. Pass in the path to the `invokeai.yaml` file you want to use.
diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py
index a49c910eeb..6510d2f74a 100644
--- a/invokeai/app/invocations/controlnet_image_processors.py
+++ b/invokeai/app/invocations/controlnet_image_processors.py
@@ -35,22 +35,16 @@ from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
 from invokeai.backend.image_util.canny import get_canny_edges
 from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
 from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
 from invokeai.backend.image_util.hed import HEDProcessor
 from invokeai.backend.image_util.lineart import LineartProcessor
 from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
+from invokeai.backend.image_util.util import np_to_pil, pil_to_np
 
-from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
-
-CONTROLNET_MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"]
-CONTROLNET_RESIZE_VALUES = Literal[
-    "just_resize",
-    "crop_resize",
-    "fill_resize",
-    "just_resize_simple",
-]
+from .baseinvocation import BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output
 
 
 class ControlField(BaseModel):
@@ -641,3 +635,27 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
             resolution=self.image_resolution,
         )
         return processed_image
+
+
+@invocation(
+    "heuristic_resize",
+    title="Heuristic Resize",
+    tags=["image", "controlnet"],
+    category="image",
+    version="1.0.0",
+    classification=Classification.Prototype,
+)
+class HeuristicResizeInvocation(BaseInvocation):
+    """Resize an image using a heuristic method. Preserves edge maps."""
+
+    image: ImageField = InputField(description="The image to resize")
+    width: int = InputField(default=512, gt=0, description="The width to resize to (px)")
+    height: int = InputField(default=512, gt=0, description="The height to resize to (px)")
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        image = context.images.get_pil(self.image.image_name, "RGB")
+        np_img = pil_to_np(image)
+        np_resized = heuristic_resize(np_img, (self.width, self.height))
+        resized = np_to_pil(np_resized)
+        image_dto = context.images.save(image=resized)
+        return ImageOutput.build(image_dto)
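Editor's note: the new `HeuristicResizeInvocation` is a thin wrapper around `heuristic_resize`, now exported from `invokeai.app.util.controlnet_utils`. A minimal sketch of composing the same helpers directly; the file name and target size here are placeholders, not part of the diff:

```python
# Sketch: resize a ControlNet edge map outside the graph, using the helpers
# the new invocation wraps. "canny_edges.png" is a hypothetical input.
from PIL import Image

from invokeai.app.util.controlnet_utils import heuristic_resize
from invokeai.backend.image_util.util import np_to_pil, pil_to_np

edge_map = Image.open("canny_edges.png").convert("RGB")
np_img = pil_to_np(edge_map)
np_resized = heuristic_resize(np_img, (768, 768))  # size is (width, height)
np_to_pil(np_resized).save("canny_edges_768.png")
```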
diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index 4534df89c1..4ad63f4f89 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -51,6 +51,7 @@ from invokeai.app.util.controlnet_utils import prepare_control_image
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
 from invokeai.backend.lora import LoRAModelRaw
 from invokeai.backend.model_manager import BaseModelType, LoadedModel
+from invokeai.backend.model_manager.config import MainConfigBase, ModelVariantType
 from invokeai.backend.model_patcher import ModelPatcher
 from invokeai.backend.stable_diffusion import PipelineIntermediateState, set_seamless
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
@@ -185,7 +186,7 @@ class GradientMaskOutput(BaseInvocationOutput):
     title="Create Gradient Mask",
     tags=["mask", "denoise"],
     category="latents",
-    version="1.0.0",
+    version="1.1.0",
 )
 class CreateGradientMaskInvocation(BaseInvocation):
     """Creates mask for denoising model run."""
@@ -198,6 +199,32 @@ class CreateGradientMaskInvocation(BaseInvocation):
     minimum_denoise: float = InputField(
         default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4
     )
+    image: Optional[ImageField] = InputField(
+        default=None,
+        description="OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE",
+        title="[OPTIONAL] Image",
+        ui_order=6,
+    )
+    unet: Optional[UNetField] = InputField(
+        description="OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE",
+        default=None,
+        input=Input.Connection,
+        title="[OPTIONAL] UNet",
+        ui_order=5,
+    )
+    vae: Optional[VAEField] = InputField(
+        default=None,
+        description="OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE",
+        title="[OPTIONAL] VAE",
+        input=Input.Connection,
+        ui_order=7,
+    )
+    tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=8)
+    fp32: bool = InputField(
+        default=DEFAULT_PRECISION == "float32",
+        description=FieldDescriptions.fp32,
+        ui_order=9,
+    )
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> GradientMaskOutput:
@@ -233,8 +260,27 @@ class CreateGradientMaskInvocation(BaseInvocation):
         expanded_mask_image = Image.fromarray((expanded_mask.squeeze(0).numpy() * 255).astype(np.uint8), mode="L")
         expanded_image_dto = context.images.save(expanded_mask_image)
 
+        masked_latents_name = None
+        if self.unet is not None and self.vae is not None and self.image is not None:
+            # all three fields must be present at the same time
+            main_model_config = context.models.get_config(self.unet.unet.key)
+            assert isinstance(main_model_config, MainConfigBase)
+            if main_model_config.variant is ModelVariantType.Inpaint:
+                mask = blur_tensor
+                vae_info: LoadedModel = context.models.load(self.vae.vae)
+                image = context.images.get_pil(self.image.image_name)
+                image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
+                if image_tensor.dim() == 3:
+                    image_tensor = image_tensor.unsqueeze(0)
+                img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
+                masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
+                masked_latents = ImageToLatentsInvocation.vae_encode(
+                    vae_info, self.fp32, self.tiled, masked_image.clone()
+                )
+                masked_latents_name = context.tensors.save(tensor=masked_latents)
+
         return GradientMaskOutput(
-            denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=None, gradient=True),
+            denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=True),
             expanded_mask_area=ImageField(image_name=expanded_image_dto.image_name),
         )
 
@@ -295,7 +341,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
     )
     steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps)
     cfg_scale: Union[float, List[float]] = InputField(
-        default=7.5, ge=1, description=FieldDescriptions.cfg_scale, title="CFG Scale"
+        default=7.5, description=FieldDescriptions.cfg_scale, title="CFG Scale"
    )
     denoising_start: float = InputField(
         default=0.0,
@@ -517,6 +563,11 @@ class DenoiseLatentsInvocation(BaseInvocation):
             dtype=unet.dtype,
         )
 
+        if isinstance(self.cfg_scale, list):
+            assert (
+                len(self.cfg_scale) == self.steps
+            ), "cfg_scale (list) must have the same length as the number of steps"
+
         conditioning_data = TextConditioningData(
             uncond_text=uncond_text_embedding,
             cond_text=cond_text_embedding,
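Editor's note: with the `ge=1` constraint dropped, `cfg_scale` now accepts values below 1, and list inputs are validated at runtime instead. A small sketch of a per-step schedule that satisfies the new assertion; the values are illustrative:

```python
# Sketch: a list-valued cfg_scale must now match the step count exactly.
steps = 10
cfg_scale = [7.5] * 5 + [5.0] * 5  # one guidance value per step; len == steps

# Mirrors the assertion added to DenoiseLatentsInvocation above.
assert len(cfg_scale) == steps, "cfg_scale (list) must have the same length as the number of steps"
```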
diff --git a/invokeai/app/invocations/mask.py b/invokeai/app/invocations/mask.py
index a7f3207764..6f54660847 100644
--- a/invokeai/app/invocations/mask.py
+++ b/invokeai/app/invocations/mask.py
@@ -1,7 +1,8 @@
+import numpy as np
 import torch
 
-from invokeai.app.invocations.baseinvocation import BaseInvocation, InvocationContext, invocation
-from invokeai.app.invocations.fields import InputField, TensorField, WithMetadata
+from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, InvocationContext, invocation
+from invokeai.app.invocations.fields import ImageField, InputField, TensorField, WithMetadata
 from invokeai.app.invocations.primitives import MaskOutput
 
 
@@ -34,3 +35,86 @@ class RectangleMaskInvocation(BaseInvocation, WithMetadata):
             width=self.width,
             height=self.height,
         )
+
+
+@invocation(
+    "alpha_mask_to_tensor",
+    title="Alpha Mask to Tensor",
+    tags=["conditioning"],
+    category="conditioning",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
+class AlphaMaskToTensorInvocation(BaseInvocation):
+    """Convert a mask image to a tensor. Opaque regions are 1 and transparent regions are 0."""
+
+    image: ImageField = InputField(description="The mask image to convert.")
+    invert: bool = InputField(default=False, description="Whether to invert the mask.")
+
+    def invoke(self, context: InvocationContext) -> MaskOutput:
+        image = context.images.get_pil(self.image.image_name)
+        mask = torch.zeros((1, image.height, image.width), dtype=torch.bool)
+        if self.invert:
+            mask[0] = torch.tensor(np.array(image)[:, :, 3] == 0, dtype=torch.bool)
+        else:
+            mask[0] = torch.tensor(np.array(image)[:, :, 3] > 0, dtype=torch.bool)
+
+        return MaskOutput(
+            mask=TensorField(tensor_name=context.tensors.save(mask)),
+            height=mask.shape[1],
+            width=mask.shape[2],
+        )
+
+
+@invocation(
+    "invert_tensor_mask",
+    title="Invert Tensor Mask",
+    tags=["conditioning"],
+    category="conditioning",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
+class InvertTensorMaskInvocation(BaseInvocation):
+    """Inverts a tensor mask."""
+
+    mask: TensorField = InputField(description="The tensor mask to convert.")
+
+    def invoke(self, context: InvocationContext) -> MaskOutput:
+        mask = context.tensors.load(self.mask.tensor_name)
+        inverted = ~mask
+
+        return MaskOutput(
+            mask=TensorField(tensor_name=context.tensors.save(inverted)),
+            height=inverted.shape[1],
+            width=inverted.shape[2],
+        )
+
+
+@invocation(
+    "image_mask_to_tensor",
+    title="Image Mask to Tensor",
+    tags=["conditioning"],
+    category="conditioning",
+    version="1.0.0",
+)
+class ImageMaskToTensorInvocation(BaseInvocation, WithMetadata):
+    """Convert a mask image to a tensor. Converts the image to grayscale and uses thresholding at the specified value."""
+
+    image: ImageField = InputField(description="The mask image to convert.")
+    cutoff: int = InputField(ge=0, le=255, description="Cutoff (<)", default=128)
+    invert: bool = InputField(default=False, description="Whether to invert the mask.")
+
+    def invoke(self, context: InvocationContext) -> MaskOutput:
+        image = context.images.get_pil(self.image.image_name, mode="L")
+
+        mask = torch.zeros((1, image.height, image.width), dtype=torch.bool)
+        if self.invert:
+            mask[0] = torch.tensor(np.array(image)[:, :] >= self.cutoff, dtype=torch.bool)
+        else:
+            mask[0] = torch.tensor(np.array(image)[:, :] < self.cutoff, dtype=torch.bool)
+
+        return MaskOutput(
+            mask=TensorField(tensor_name=context.tensors.save(mask)),
+            height=mask.shape[1],
+            width=mask.shape[2],
+        )
diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py
index a02d0a57ef..9c7264a9bb 100644
--- a/invokeai/app/invocations/metadata.py
+++ b/invokeai/app/invocations/metadata.py
@@ -3,7 +3,6 @@ from typing import Any, Literal, Optional, Union
 from pydantic import BaseModel, ConfigDict, Field
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
-from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     ImageField,
@@ -14,6 +13,7 @@ from invokeai.app.invocations.fields import (
 )
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES
 
 from ...version import __version__
 
diff --git a/invokeai/app/invocations/t2i_adapter.py b/invokeai/app/invocations/t2i_adapter.py
index e550a7b313..b22a089d3f 100644
--- a/invokeai/app/invocations/t2i_adapter.py
+++ b/invokeai/app/invocations/t2i_adapter.py
@@ -8,11 +8,11 @@ from invokeai.app.invocations.baseinvocation import (
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
 from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField, UIType
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES
 
 
 class T2IAdapterField(BaseModel):
diff --git a/invokeai/app/services/download/download_default.py b/invokeai/app/services/download/download_default.py
index f393a18dcb..7d8229fba1 100644
--- a/invokeai/app/services/download/download_default.py
+++ b/invokeai/app/services/download/download_default.py
@@ -318,10 +318,8 @@ class DownloadQueueService(DownloadQueueServiceBase):
             in_progress_path.rename(job.download_path)
 
     def _validate_filename(self, directory: str, filename: str) -> bool:
-        pc_name_max = os.pathconf(directory, "PC_NAME_MAX") if hasattr(os, "pathconf") else 260  # hardcoded for windows
-        pc_path_max = (
-            os.pathconf(directory, "PC_PATH_MAX") if hasattr(os, "pathconf") else 32767
-        )  # hardcoded for windows with long names enabled
+        pc_name_max = get_pc_name_max(directory)
+        pc_path_max = get_pc_path_max(directory)
         if "/" in filename:
             return False
         if filename.startswith(".."):
@@ -419,6 +417,26 @@ class DownloadQueueService(DownloadQueueServiceBase):
                     self._logger.warning(excp)
 
 
+def get_pc_name_max(directory: str) -> int:
+    if hasattr(os, "pathconf"):
+        try:
+            return os.pathconf(directory, "PC_NAME_MAX")
+        except OSError:
+            # macOS w/ external drives raise OSError
+            pass
+    return 260  # hardcoded for windows
+
+
+def get_pc_path_max(directory: str) -> int:
+    if hasattr(os, "pathconf"):
+        try:
+            return os.pathconf(directory, "PC_PATH_MAX")
+        except OSError:
+            # some platforms may not have this value
+            pass
+    return 32767  # hardcoded for windows with long names enabled
+
+
 # Example on_progress event handler to display a TQDM status bar
 # Activate with:
 #   download_service.download(DownloadJob('http://foo.bar/baz', '/tmp', on_progress=TqdmProgress().update))
diff --git a/invokeai/app/services/model_install/model_install_default.py b/invokeai/app/services/model_install/model_install_default.py
index 6a3117bcb8..6eb9549ef0 100644
--- a/invokeai/app/services/model_install/model_install_default.py
+++ b/invokeai/app/services/model_install/model_install_default.py
@@ -3,7 +3,6 @@
 import locale
 import os
 import re
-import signal
 import threading
 import time
 from hashlib import sha256
@@ -43,6 +42,7 @@ from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMet
 from invokeai.backend.model_manager.probe import ModelProbe
 from invokeai.backend.model_manager.search import ModelSearch
 from invokeai.backend.util import InvokeAILogger
+from invokeai.backend.util.catch_sigint import catch_sigint
 from invokeai.backend.util.devices import TorchDevice
 
 from .model_install_base import (
@@ -112,17 +112,6 @@ class ModelInstallService(ModelInstallServiceBase):
 
     def start(self, invoker: Optional[Invoker] = None) -> None:
         """Start the installer thread."""
-
-        # Yes, this is weird. When the installer thread is running, the
-        # thread masks the ^C signal. When we receive a
-        # sigINT, we stop the thread, reset sigINT, and send a new
-        # sigINT to the parent process.
-        def sigint_handler(signum, frame):
-            self.stop()
-            signal.signal(signal.SIGINT, signal.SIG_DFL)
-            signal.raise_signal(signal.SIGINT)
-
-        signal.signal(signal.SIGINT, sigint_handler)
-
         with self._lock:
             if self._running:
                 raise Exception("Attempt to start the installer service twice")
@@ -132,7 +121,8 @@ class ModelInstallService(ModelInstallServiceBase):
             # In normal use, we do not want to scan the models directory - it should never have orphaned models.
             # We should only do the scan when the flag is set (which should only be set when testing).
             if self.app_config.scan_models_on_startup:
-                self._register_orphaned_models()
+                with catch_sigint():
+                    self._register_orphaned_models()
 
             # Check all models' paths and confirm they exist. A model could be missing if it was installed on a volume
             # that isn't currently mounted. In this case, we don't want to delete the model from the database, but we do
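Editor's note: the inline SIGINT juggling is replaced by `catch_sigint` from `invokeai.backend.util.catch_sigint`, whose implementation is not shown in this diff. A plausible minimal sketch of such a context manager, assuming it only needs to mask SIGINT while the block runs; the real helper may behave differently:

```python
# Hypothetical sketch of a catch_sigint context manager; illustrative only.
import signal
from contextlib import contextmanager
from typing import Generator


@contextmanager
def catch_sigint() -> Generator[None, None, None]:
    """Temporarily ignore SIGINT, restoring the previous handler on exit."""
    original_handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, signal.SIG_IGN)  # mask ^C while the block runs
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, original_handler)
```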
diff --git a/invokeai/app/services/object_serializer/object_serializer_disk.py b/invokeai/app/services/object_serializer/object_serializer_disk.py
index 354a9b0c04..d3171f8530 100644
--- a/invokeai/app/services/object_serializer/object_serializer_disk.py
+++ b/invokeai/app/services/object_serializer/object_serializer_disk.py
@@ -1,7 +1,7 @@
+import shutil
 import tempfile
 import threading
 import typing
-from dataclasses import dataclass
 from pathlib import Path
 from typing import TYPE_CHECKING, Optional, TypeVar
 
@@ -18,12 +18,6 @@ if TYPE_CHECKING:
 T = TypeVar("T")
 
 
-@dataclass
-class DeleteAllResult:
-    deleted_count: int
-    freed_space_bytes: float
-
-
 class ObjectSerializerDisk(ObjectSerializerBase[T]):
     """Disk-backed storage for arbitrary python objects. Serialization is handled by `torch.save` and `torch.load`.
 
@@ -36,6 +30,12 @@ class ObjectSerializerDisk(ObjectSerializerBase[T]):
         self._ephemeral = ephemeral
         self._base_output_dir = output_dir
         self._base_output_dir.mkdir(parents=True, exist_ok=True)
+
+        if self._ephemeral:
+            # Remove dangling tempdirs that might have been left over from an earlier unplanned shutdown.
+ for temp_dir in filter(Path.is_dir, self._base_output_dir.glob("tmp*")): + shutil.rmtree(temp_dir) + # Must specify `ignore_cleanup_errors` to avoid fatal errors during cleanup on Windows self._tempdir = ( tempfile.TemporaryDirectory(dir=self._base_output_dir, ignore_cleanup_errors=True) if ephemeral else None diff --git a/invokeai/app/util/controlnet_utils.py b/invokeai/app/util/controlnet_utils.py index b3e2560211..fde8d52ee6 100644 --- a/invokeai/app/util/controlnet_utils.py +++ b/invokeai/app/util/controlnet_utils.py @@ -1,13 +1,21 @@ -from typing import Union +from typing import Any, Literal, Union import cv2 import numpy as np import torch -from controlnet_aux.util import HWC3 -from diffusers.utils import PIL_INTERPOLATION from einops import rearrange from PIL import Image +from invokeai.backend.image_util.util import nms, normalize_image_channel_count + +CONTROLNET_RESIZE_VALUES = Literal[ + "just_resize", + "crop_resize", + "fill_resize", + "just_resize_simple", +] +CONTROLNET_MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"] + ################################################################### # Copy of scripts/lvminthin.py from Mikubill/sd-webui-controlnet ################################################################### @@ -68,17 +76,6 @@ def lvmin_thin(x, prunings=True): return y -def nake_nms(x): - f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8) - f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8) - f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8) - f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8) - y = np.zeros_like(x) - for f in [f1, f2, f3, f4]: - np.putmask(y, cv2.dilate(x, kernel=f) == x, x) - return y - - ################################################################################ # copied from Mikubill/sd-webui-controlnet external_code.py and modified for InvokeAI ################################################################################ @@ -134,98 +131,122 @@ def pixel_perfect_resolution( return int(np.round(estimation)) +def clone_contiguous(x: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]: + """Get a memory-contiguous clone of the given numpy array, as a safety measure and to improve computation efficiency.""" + return np.ascontiguousarray(x).copy() + + +def np_img_to_torch(np_img: np.ndarray[Any, Any], device: torch.device) -> torch.Tensor: + """Convert a numpy image to a PyTorch tensor. The image is normalized to 0-1, rearranged to BCHW format and sent to + the specified device.""" + + torch_img = torch.from_numpy(np_img) + normalized = torch_img.float() / 255.0 + bchw = rearrange(normalized, "h w c -> 1 c h w") + on_device = bchw.to(device) + return on_device.clone() + + +def heuristic_resize(np_img: np.ndarray[Any, Any], size: tuple[int, int]) -> np.ndarray[Any, Any]: + """Resizes an image using a heuristic to choose the best resizing strategy. + + - If the image appears to be an edge map, special handling will be applied to ensure the edges are not distorted. + - Single-pixel edge maps use NMS and thinning to keep the edges as single-pixel lines. + - Low-color-count images are resized with nearest-neighbor to preserve color information (for e.g. segmentation maps). + - The alpha channel is handled separately to ensure it is resized correctly. + + Args: + np_img (np.ndarray): The input image. + size (tuple[int, int]): The target size for the image. + + Returns: + np.ndarray: The resized image. + + Adapted from https://github.com/Mikubill/sd-webui-controlnet. 
+ """ + + # Return early if the image is already at the requested size + if np_img.shape[0] == size[1] and np_img.shape[1] == size[0]: + return np_img + + # If the image has an alpha channel, separate it for special handling later. + inpaint_mask = None + if np_img.ndim == 3 and np_img.shape[2] == 4: + inpaint_mask = np_img[:, :, 3] + np_img = np_img[:, :, 0:3] + + new_size_is_smaller = (size[0] * size[1]) < (np_img.shape[0] * np_img.shape[1]) + new_size_is_bigger = (size[0] * size[1]) > (np_img.shape[0] * np_img.shape[1]) + unique_color_count = np.unique(np_img.reshape(-1, np_img.shape[2]), axis=0).shape[0] + is_one_pixel_edge = False + is_binary = False + + if unique_color_count == 2: + # If the image has only two colors, it is likely binary. Check if the image has one-pixel edges. + is_binary = np.min(np_img) < 16 and np.max(np_img) > 240 + if is_binary: + eroded = cv2.erode(np_img, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1) + dilated = cv2.dilate(eroded, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1) + one_pixel_edge_count = np.where(dilated < np_img)[0].shape[0] + all_edge_count = np.where(np_img > 127)[0].shape[0] + is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count + + if 2 < unique_color_count < 200: + # With a low color count, we assume this is a map where exact colors are important. Near-neighbor preserves + # the colors as needed. + interpolation = cv2.INTER_NEAREST + elif new_size_is_smaller: + # This works best for downscaling + interpolation = cv2.INTER_AREA + else: + # Fall back for other cases + interpolation = cv2.INTER_CUBIC # Must be CUBIC because we now use nms. NEVER CHANGE THIS + + # This may be further transformed depending on the binary nature of the image. + resized = cv2.resize(np_img, size, interpolation=interpolation) + + if inpaint_mask is not None: + # Resize the inpaint mask to match the resized image using the same interpolation method. + inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation) + + # If the image is binary, we will perform some additional processing to ensure the edges are preserved. + if is_binary: + resized = np.mean(resized.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8) + if is_one_pixel_edge: + # Use NMS and thinning to keep the edges as single-pixel lines. + resized = nms(resized) + _, resized = cv2.threshold(resized, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + resized = lvmin_thin(resized, prunings=new_size_is_bigger) + else: + _, resized = cv2.threshold(resized, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + resized = np.stack([resized] * 3, axis=2) + + # Restore the alpha channel if it was present. 
+ if inpaint_mask is not None: + inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0 + inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8) + resized = np.concatenate([resized, inpaint_mask], axis=2) + + return resized + + ########################################################################### # Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet # modified for InvokeAI ########################################################################### -# def detectmap_proc(detected_map, module, resize_mode, h, w): -def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device: torch.device = torch.device("cpu")): - # if 'inpaint' in module: - # np_img = np_img.astype(np.float32) - # else: - # np_img = HWC3(np_img) - np_img = HWC3(np_img) +def np_img_resize( + np_img: np.ndarray, + resize_mode: CONTROLNET_RESIZE_VALUES, + h: int, + w: int, + device: torch.device = torch.device("cpu"), +) -> tuple[torch.Tensor, np.ndarray[Any, Any]]: + np_img = normalize_image_channel_count(np_img) - def safe_numpy(x): - # A very safe method to make sure that Apple/Mac works - y = x - - # below is very boring but do not change these. If you change these Apple or Mac may fail. - y = y.copy() - y = np.ascontiguousarray(y) - y = y.copy() - return y - - def get_pytorch_control(x): - # A very safe method to make sure that Apple/Mac works - y = x - - # below is very boring but do not change these. If you change these Apple or Mac may fail. - y = torch.from_numpy(y) - y = y.float() / 255.0 - y = rearrange(y, "h w c -> 1 c h w") - y = y.clone() - # y = y.to(devices.get_device_for("controlnet")) - y = y.to(device) - y = y.clone() - return y - - def high_quality_resize(x: np.ndarray, size): - # Written by lvmin - # Super high-quality control map up-scaling, considering binary, seg, and one-pixel edges - inpaint_mask = None - if x.ndim == 3 and x.shape[2] == 4: - inpaint_mask = x[:, :, 3] - x = x[:, :, 0:3] - - new_size_is_smaller = (size[0] * size[1]) < (x.shape[0] * x.shape[1]) - new_size_is_bigger = (size[0] * size[1]) > (x.shape[0] * x.shape[1]) - unique_color_count = np.unique(x.reshape(-1, x.shape[2]), axis=0).shape[0] - is_one_pixel_edge = False - is_binary = False - if unique_color_count == 2: - is_binary = np.min(x) < 16 and np.max(x) > 240 - if is_binary: - xc = x - xc = cv2.erode(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1) - xc = cv2.dilate(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1) - one_pixel_edge_count = np.where(xc < x)[0].shape[0] - all_edge_count = np.where(x > 127)[0].shape[0] - is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count - - if 2 < unique_color_count < 200: - interpolation = cv2.INTER_NEAREST - elif new_size_is_smaller: - interpolation = cv2.INTER_AREA - else: - interpolation = cv2.INTER_CUBIC # Must be CUBIC because we now use nms. 
NEVER CHANGE THIS - - y = cv2.resize(x, size, interpolation=interpolation) - if inpaint_mask is not None: - inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation) - - if is_binary: - y = np.mean(y.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8) - if is_one_pixel_edge: - y = nake_nms(y) - _, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) - y = lvmin_thin(y, prunings=new_size_is_bigger) - else: - _, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) - y = np.stack([y] * 3, axis=2) - - if inpaint_mask is not None: - inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0 - inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8) - y = np.concatenate([y, inpaint_mask], axis=2) - - return y - - # if resize_mode == external_code.ResizeMode.RESIZE: if resize_mode == "just_resize": # RESIZE - np_img = high_quality_resize(np_img, (w, h)) - np_img = safe_numpy(np_img) - return get_pytorch_control(np_img), np_img + np_img = heuristic_resize(np_img, (w, h)) + np_img = clone_contiguous(np_img) + return np_img_to_torch(np_img, device), np_img old_h, old_w, _ = np_img.shape old_w = float(old_w) @@ -236,7 +257,6 @@ def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device: def safeint(x: Union[int, float]) -> int: return int(np.round(x)) - # if resize_mode == external_code.ResizeMode.OUTER_FIT: if resize_mode == "fill_resize": # OUTER_FIT k = min(k0, k1) borders = np.concatenate([np_img[0, :, :], np_img[-1, :, :], np_img[:, 0, :], np_img[:, -1, :]], axis=0) @@ -245,23 +265,23 @@ def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device: # Inpaint hijack high_quality_border_color[3] = 255 high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1]) - np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k))) + np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k))) new_h, new_w, _ = np_img.shape pad_h = max(0, (h - new_h) // 2) pad_w = max(0, (w - new_w) // 2) high_quality_background[pad_h : pad_h + new_h, pad_w : pad_w + new_w] = np_img np_img = high_quality_background - np_img = safe_numpy(np_img) - return get_pytorch_control(np_img), np_img + np_img = clone_contiguous(np_img) + return np_img_to_torch(np_img, device), np_img else: # resize_mode == "crop_resize" (INNER_FIT) k = max(k0, k1) - np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k))) + np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k))) new_h, new_w, _ = np_img.shape pad_h = max(0, (new_h - h) // 2) pad_w = max(0, (new_w - w) // 2) np_img = np_img[pad_h : pad_h + h, pad_w : pad_w + w] - np_img = safe_numpy(np_img) - return get_pytorch_control(np_img), np_img + np_img = clone_contiguous(np_img) + return np_img_to_torch(np_img, device), np_img def prepare_control_image( @@ -269,12 +289,12 @@ def prepare_control_image( width: int, height: int, num_channels: int = 3, - device="cuda", - dtype=torch.float16, - do_classifier_free_guidance=True, - control_mode="balanced", - resize_mode="just_resize_simple", -): + device: str = "cuda", + dtype: torch.dtype = torch.float16, + control_mode: CONTROLNET_MODE_VALUES = "balanced", + resize_mode: CONTROLNET_RESIZE_VALUES = "just_resize_simple", + do_classifier_free_guidance: bool = True, +) -> torch.Tensor: """Pre-process images for ControlNets or T2I-Adapters. Args: @@ -292,26 +312,15 @@ def prepare_control_image( resize_mode (str, optional): Defaults to "just_resize_simple". 
     Raises:
-        NotImplementedError: If resize_mode == "crop_resize_simple".
-        NotImplementedError: If resize_mode == "fill_resize_simple".
         ValueError: If `resize_mode` is not recognized.
         ValueError: If `num_channels` is out of range.
 
     Returns:
         torch.Tensor: The pre-processed input tensor.
     """
-    if (
-        resize_mode == "just_resize_simple"
-        or resize_mode == "crop_resize_simple"
-        or resize_mode == "fill_resize_simple"
-    ):
+    if resize_mode == "just_resize_simple":
         image = image.convert("RGB")
-        if resize_mode == "just_resize_simple":
-            image = image.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
-        elif resize_mode == "crop_resize_simple":
-            raise NotImplementedError(f"prepare_control_image is not implemented for resize_mode='{resize_mode}'.")
-        elif resize_mode == "fill_resize_simple":
-            raise NotImplementedError(f"prepare_control_image is not implemented for resize_mode='{resize_mode}'.")
+        image = image.resize((width, height), resample=Image.LANCZOS)
         nimage = np.array(image)
         nimage = nimage[None, :]
         nimage = np.concatenate([nimage], axis=0)
@@ -328,8 +337,7 @@ def prepare_control_image(
             resize_mode=resize_mode,
             h=height,
             w=width,
-            # device=torch.device('cpu')
-            device=device,
+            device=torch.device(device),
         )
     else:
         raise ValueError(f"Unsupported resize_mode: '{resize_mode}'.")
diff --git a/invokeai/backend/image_util/hed.py b/invokeai/backend/image_util/hed.py
index 378e3b96e9..97706df8b9 100644
--- a/invokeai/backend/image_util/hed.py
+++ b/invokeai/backend/image_util/hed.py
@@ -8,7 +8,7 @@ from huggingface_hub import hf_hub_download
 from PIL import Image
 
 from invokeai.backend.image_util.util import (
-    non_maximum_suppression,
+    nms,
     normalize_image_channel_count,
     np_to_pil,
     pil_to_np,
@@ -134,7 +134,7 @@ class HEDProcessor:
             detected_map = cv2.resize(detected_map, (width, height), interpolation=cv2.INTER_LINEAR)
 
         if scribble:
-            detected_map = non_maximum_suppression(detected_map, 127, 3.0)
+            detected_map = nms(detected_map, 127, 3.0)
             detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
             detected_map[detected_map > 4] = 255
             detected_map[detected_map < 255] = 0
diff --git a/invokeai/backend/image_util/util.py b/invokeai/backend/image_util/util.py
index 7cfe0ad1a5..5b2116975f 100644
--- a/invokeai/backend/image_util/util.py
+++ b/invokeai/backend/image_util/util.py
@@ -1,4 +1,5 @@
 from math import ceil, floor, sqrt
+from typing import Optional
 
 import cv2
 import numpy as np
@@ -143,20 +144,21 @@ def resize_image_to_resolution(input_image: np.ndarray, resolution: int) -> np.n
     h = float(input_image.shape[0])
     w = float(input_image.shape[1])
     scaling_factor = float(resolution) / min(h, w)
-    h *= scaling_factor
-    w *= scaling_factor
-    h = int(np.round(h / 64.0)) * 64
-    w = int(np.round(w / 64.0)) * 64
+    h = int(h * scaling_factor)
+    w = int(w * scaling_factor)
     if scaling_factor > 1:
         return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_LANCZOS4)
     else:
         return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_AREA)
 
 
-def non_maximum_suppression(image: np.ndarray, threshold: int, sigma: float):
+def nms(np_img: np.ndarray, threshold: Optional[int] = None, sigma: Optional[float] = None) -> np.ndarray:
     """
     Apply non-maximum suppression to an image.
 
+    If both threshold and sigma are provided, the image will be blurred before the suppression and thresholded afterwards,
+    resulting in a binary output image.
+
     This function is adapted from https://github.com/lllyasviel/ControlNet.
diff --git a/invokeai/backend/image_util/hed.py b/invokeai/backend/image_util/hed.py
index 378e3b96e9..97706df8b9 100644
--- a/invokeai/backend/image_util/hed.py
+++ b/invokeai/backend/image_util/hed.py
@@ -8,7 +8,7 @@ from huggingface_hub import hf_hub_download
 from PIL import Image
 
 from invokeai.backend.image_util.util import (
-    non_maximum_suppression,
+    nms,
     normalize_image_channel_count,
     np_to_pil,
     pil_to_np,
@@ -134,7 +134,7 @@ class HEDProcessor:
         detected_map = cv2.resize(detected_map, (width, height), interpolation=cv2.INTER_LINEAR)
 
         if scribble:
-            detected_map = non_maximum_suppression(detected_map, 127, 3.0)
+            detected_map = nms(detected_map, 127, 3.0)
             detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
             detected_map[detected_map > 4] = 255
             detected_map[detected_map < 255] = 0
diff --git a/invokeai/backend/image_util/util.py b/invokeai/backend/image_util/util.py
index 7cfe0ad1a5..5b2116975f 100644
--- a/invokeai/backend/image_util/util.py
+++ b/invokeai/backend/image_util/util.py
@@ -1,4 +1,5 @@
 from math import ceil, floor, sqrt
+from typing import Optional
 
 import cv2
 import numpy as np
@@ -143,20 +144,21 @@ def resize_image_to_resolution(input_image: np.ndarray, resolution: int) -> np.n
     h = float(input_image.shape[0])
     w = float(input_image.shape[1])
     scaling_factor = float(resolution) / min(h, w)
-    h *= scaling_factor
-    w *= scaling_factor
-    h = int(np.round(h / 64.0)) * 64
-    w = int(np.round(w / 64.0)) * 64
+    h = int(h * scaling_factor)
+    w = int(w * scaling_factor)
     if scaling_factor > 1:
         return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_LANCZOS4)
     else:
         return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_AREA)
 
 
-def non_maximum_suppression(image: np.ndarray, threshold: int, sigma: float):
+def nms(np_img: np.ndarray, threshold: Optional[int] = None, sigma: Optional[float] = None) -> np.ndarray:
     """
     Apply non-maximum suppression to an image.
 
+    If both threshold and sigma are provided, the image will be blurred before the suppression and thresholded
+    afterwards, resulting in a binary output image.
+
     This function is adapted from https://github.com/lllyasviel/ControlNet.
 
     Args:
@@ -166,23 +168,36 @@ def non_maximum_suppression(image: np.ndarray, threshold: int, sigma: float):
     Returns:
         The image after non-maximum suppression.
+
+    Raises:
+        ValueError: If only one of threshold and sigma is provided.
     """
-    image = cv2.GaussianBlur(image.astype(np.float32), (0, 0), sigma)
+    # Raise a value error if only one of threshold and sigma is provided
+    if (threshold is None) != (sigma is None):
+        raise ValueError("Both threshold and sigma must be provided if one is provided.")
+
+    if sigma is not None and threshold is not None:
+        # Blurring the image can help to thin out features
+        np_img = cv2.GaussianBlur(np_img.astype(np.float32), (0, 0), sigma)
 
     filter_1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
     filter_2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
     filter_3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
     filter_4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
 
-    y = np.zeros_like(image)
+    nms_img = np.zeros_like(np_img)
 
     for f in [filter_1, filter_2, filter_3, filter_4]:
-        np.putmask(y, cv2.dilate(image, kernel=f) == image, image)
+        np.putmask(nms_img, cv2.dilate(np_img, kernel=f) == np_img, np_img)
 
-    z = np.zeros_like(y, dtype=np.uint8)
-    z[y > threshold] = 255
-    return z
+    if sigma is not None and threshold is not None:
+        # We blurred - now threshold to get a binary image
+        thresholded = np.zeros_like(nms_img, dtype=np.uint8)
+        thresholded[nms_img > threshold] = 255
+        return thresholded
+
+    return nms_img
 
 
 def safe_step(x: np.ndarray, step: int = 2) -> np.ndarray:
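The new `nms` signature makes the blur-and-threshold steps opt-in while keeping a single call-site contract: pass neither `threshold` nor `sigma` for raw suppression, or pass both for a binary map. A minimal usage sketch (`edges.png` is a placeholder input, not a file from this repository):

```python
import cv2

from invokeai.backend.image_util.util import nms

# Placeholder path; any single-channel edge map will do.
edges = cv2.imread("edges.png", cv2.IMREAD_GRAYSCALE)
assert edges is not None

thin = nms(edges)  # suppression only: thinned ridges, no thresholding

# Blur, suppress, then threshold to a binary 0/255 map (the HED scribble path above).
scribble = nms(edges, threshold=127, sigma=3.0)

# Supplying only one of the two arguments is rejected.
try:
    nms(edges, threshold=127)
except ValueError as err:
    print(err)  # Both threshold and sigma must be provided if one is provided.
```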
diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py
index 82f88c0e81..b19501843c 100644
--- a/invokeai/backend/model_manager/config.py
+++ b/invokeai/backend/model_manager/config.py
@@ -301,12 +301,12 @@ class MainConfigBase(ModelConfigBase):
     default_settings: Optional[MainModelDefaultSettings] = Field(
         description="Default settings for this model", default=None
     )
+    variant: ModelVariantType = ModelVariantType.Normal
 
 
 class MainCheckpointConfig(CheckpointConfigBase, MainConfigBase):
     """Model config for main checkpoint models."""
 
-    variant: ModelVariantType = ModelVariantType.Normal
     prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon
     upcast_attention: bool = False
 
diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py
index bf21a7fe7b..8f33e4b49f 100644
--- a/invokeai/backend/model_manager/probe.py
+++ b/invokeai/backend/model_manager/probe.py
@@ -51,6 +51,7 @@ LEGACY_CONFIGS: Dict[BaseModelType, Dict[ModelVariantType, Union[str, Dict[Sched
     },
     BaseModelType.StableDiffusionXL: {
         ModelVariantType.Normal: "sd_xl_base.yaml",
+        ModelVariantType.Inpaint: "sd_xl_inpaint.yaml",
     },
     BaseModelType.StableDiffusionXLRefiner: {
         ModelVariantType.Normal: "sd_xl_refiner.yaml",
diff --git a/invokeai/backend/model_manager/starter_models.py b/invokeai/backend/model_manager/starter_models.py
index f92f5e08d5..31b16d9c8a 100644
--- a/invokeai/backend/model_manager/starter_models.py
+++ b/invokeai/backend/model_manager/starter_models.py
@@ -155,7 +155,7 @@ STARTER_MODELS: list[StarterModel] = [
     StarterModel(
         name="IP Adapter",
         base=BaseModelType.StableDiffusion1,
-        source="InvokeAI/ip_adapter_sd15",
+        source="https://huggingface.co/InvokeAI/ip_adapter_sd15/resolve/main/ip-adapter_sd15.safetensors",
         description="IP-Adapter for SD 1.5 models",
         type=ModelType.IPAdapter,
         dependencies=[ip_adapter_sd_image_encoder],
@@ -163,7 +163,7 @@ STARTER_MODELS: list[StarterModel] = [
     StarterModel(
         name="IP Adapter Plus",
         base=BaseModelType.StableDiffusion1,
-        source="InvokeAI/ip_adapter_plus_sd15",
+        source="https://huggingface.co/InvokeAI/ip_adapter_plus_sd15/resolve/main/ip-adapter-plus_sd15.safetensors",
         description="Refined IP-Adapter for SD 1.5 models",
         type=ModelType.IPAdapter,
         dependencies=[ip_adapter_sd_image_encoder],
@@ -171,7 +171,7 @@ STARTER_MODELS: list[StarterModel] = [
     StarterModel(
         name="IP Adapter Plus Face",
         base=BaseModelType.StableDiffusion1,
-        source="InvokeAI/ip_adapter_plus_face_sd15",
+        source="https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15/resolve/main/ip-adapter-plus-face_sd15.safetensors",
         description="Refined IP-Adapter for SD 1.5 models, adapted for faces",
         type=ModelType.IPAdapter,
         dependencies=[ip_adapter_sd_image_encoder],
@@ -179,7 +179,7 @@ STARTER_MODELS: list[StarterModel] = [
     StarterModel(
         name="IP Adapter SDXL",
         base=BaseModelType.StableDiffusionXL,
-        source="InvokeAI/ip_adapter_sdxl",
+        source="https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h/resolve/main/ip-adapter_sdxl_vit-h.safetensors",
         description="IP-Adapter for SDXL models",
         type=ModelType.IPAdapter,
         dependencies=[ip_adapter_sdxl_image_encoder],
diff --git a/invokeai/backend/util/catch_sigint.py b/invokeai/backend/util/catch_sigint.py
new file mode 100644
index 0000000000..b9735d94f9
--- /dev/null
+++ b/invokeai/backend/util/catch_sigint.py
@@ -0,0 +1,29 @@
+"""
+This module defines a context manager `catch_sigint()` which temporarily replaces
+the SIGINT handler defined by the ASGI server in order to allow the user to ^C the application
+and shut it down immediately. This was implemented in order to allow the user to interrupt
+slow model hashing during startup.
+
+Use like this:
+
+    from invokeai.backend.util.catch_sigint import catch_sigint
+    with catch_sigint():
+        run_some_hard_to_interrupt_process()
+"""
+
+import signal
+from contextlib import contextmanager
+from typing import Generator
+
+
+def sigint_handler(signum, frame):  # type: ignore
+    signal.signal(signal.SIGINT, signal.SIG_DFL)
+    signal.raise_signal(signal.SIGINT)
+
+
+@contextmanager
+def catch_sigint() -> Generator[None, None, None]:
+    original_handler = signal.getsignal(signal.SIGINT)
+    signal.signal(signal.SIGINT, sigint_handler)
+    yield
+    signal.signal(signal.SIGINT, original_handler)
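The handler works by restoring the default disposition and immediately re-raising, so the re-delivered SIGINT terminates the process the ordinary way instead of going through the server's graceful-shutdown handler. A hedged sketch of wrapping a slow startup step; `slow_startup_step` is illustrative, not an API from this PR:

```python
import time

from invokeai.backend.util.catch_sigint import catch_sigint


def slow_startup_step() -> None:
    # Stand-in for model hashing or any other hard-to-interrupt loop.
    for _ in range(30):
        time.sleep(1)


# Inside the block, ^C falls through to the default handler and kills the
# process immediately; on exit, the previous (ASGI server) handler is restored.
with catch_sigint():
    slow_startup_step()
```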
diff --git a/invokeai/configs/stable-diffusion/sd_xl_inpaint.yaml b/invokeai/configs/stable-diffusion/sd_xl_inpaint.yaml
new file mode 100644
index 0000000000..eea5c15a49
--- /dev/null
+++ b/invokeai/configs/stable-diffusion/sd_xl_inpaint.yaml
@@ -0,0 +1,98 @@
+model:
+  target: sgm.models.diffusion.DiffusionEngine
+  params:
+    scale_factor: 0.13025
+    disable_first_stage_autocast: True
+
+    denoiser_config:
+      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
+      params:
+        num_idx: 1000
+
+        weighting_config:
+          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
+        scaling_config:
+          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
+        discretization_config:
+          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
+
+    network_config:
+      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        adm_in_channels: 2816
+        num_classes: sequential
+        use_checkpoint: True
+        in_channels: 9
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [4, 2]
+        num_res_blocks: 2
+        channel_mult: [1, 2, 4]
+        num_head_channels: 64
+        use_spatial_transformer: True
+        use_linear_in_transformer: True
+        transformer_depth: [1, 2, 10]  # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
+        context_dim: 2048
+        spatial_transformer_attn_type: softmax-xformers
+        legacy: False
+
+    conditioner_config:
+      target: sgm.modules.GeneralConditioner
+      params:
+        emb_models:
+          # crossattn cond
+          - is_trainable: False
+            input_key: txt
+            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
+            params:
+              layer: hidden
+              layer_idx: 11
+          # crossattn and vector cond
+          - is_trainable: False
+            input_key: txt
+            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
+            params:
+              arch: ViT-bigG-14
+              version: laion2b_s39b_b160k
+              freeze: True
+              layer: penultimate
+              always_return_pooled: True
+              legacy: False
+          # vector cond
+          - is_trainable: False
+            input_key: original_size_as_tuple
+            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
+            params:
+              outdim: 256  # multiplied by two
+          # vector cond
+          - is_trainable: False
+            input_key: crop_coords_top_left
+            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
+            params:
+              outdim: 256  # multiplied by two
+          # vector cond
+          - is_trainable: False
+            input_key: target_size_as_tuple
+            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
+            params:
+              outdim: 256  # multiplied by two
+
+    first_stage_config:
+      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          attn_type: vanilla-xformers
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult: [1, 2, 4, 4]
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
\ No newline at end of file
diff --git a/invokeai/frontend/web/.storybook/preview.tsx b/invokeai/frontend/web/.storybook/preview.tsx
index 791a48ab9e..8b21b48230 100644
--- a/invokeai/frontend/web/.storybook/preview.tsx
+++ b/invokeai/frontend/web/.storybook/preview.tsx
@@ -11,6 +11,7 @@ import { createStore } from '../src/app/store/store';
 // @ts-ignore
 import translationEN from '../public/locales/en.json';
 import { ReduxInit } from './ReduxInit';
+import { $store } from 'app/store/nanostores/store';
 
 i18n.use(initReactI18next).init({
   lng: 'en',
@@ -25,6 +26,7 @@
 });
 
 const store = createStore(undefined, false);
+$store.set(store);
 $baseUrl.set('http://localhost:9090');
 
 const preview: Preview = {
diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json
index aabaa17c73..9e661e0737 100644
--- a/invokeai/frontend/web/package.json
+++ b/invokeai/frontend/web/package.json
@@ -25,7 +25,7 @@
     "typegen": "node scripts/typegen.js",
     "preview": "vite preview",
     "lint:knip": "knip",
-    "lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:1 src/main.tsx",
+    "lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:0 src/main.tsx",
     "lint:eslint": "eslint --max-warnings=0 .",
     "lint:prettier": "prettier --check .",
     "lint:tsc": "tsc --noEmit",
@@ -95,11 +95,13 @@
     "reactflow": "^11.10.4",
     "redux-dynamic-middlewares": "^2.2.0",
     "redux-remember": "^5.1.0",
+    "redux-undo": "^1.1.0",
     "rfdc": "^1.3.1",
     "roarr": "^7.21.1",
     "serialize-error": "^11.0.3",
     "socket.io-client": "^4.7.5",
     "use-debounce": "^10.0.0",
+    "use-device-pixel-ratio": "^1.1.2",
     "use-image": "^1.1.1",
     "uuid": "^9.0.1",
     "zod": "^3.22.4",
diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml
index bf423c3d46..9910e32391 100644
--- a/invokeai/frontend/web/pnpm-lock.yaml
+++ b/invokeai/frontend/web/pnpm-lock.yaml
@@ -140,6 +140,9 @@
dependencies: redux-remember: specifier: ^5.1.0 version: 5.1.0(redux@5.0.1) + redux-undo: + specifier: ^1.1.0 + version: 1.1.0 rfdc: specifier: ^1.3.1 version: 1.3.1 @@ -155,6 +158,9 @@ dependencies: use-debounce: specifier: ^10.0.0 version: 10.0.0(react@18.2.0) + use-device-pixel-ratio: + specifier: ^1.1.2 + version: 1.1.2(react@18.2.0) use-image: specifier: ^1.1.1 version: 1.1.1(react-dom@18.2.0)(react@18.2.0) @@ -11962,6 +11968,10 @@ packages: redux: 5.0.1 dev: false + /redux-undo@1.1.0: + resolution: {integrity: sha512-zzLFh2qeF0MTIlzDhDLm9NtkfBqCllQJ3OCuIl5RKlG/ayHw6GUdIFdMhzMS9NnrnWdBX5u//ExMOHpfudGGOg==} + dev: false + /redux@5.0.1: resolution: {integrity: sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w==} dev: false @@ -13317,6 +13327,14 @@ packages: react: 18.2.0 dev: false + /use-device-pixel-ratio@1.1.2(react@18.2.0): + resolution: {integrity: sha512-nFxV0HwLdRUt20kvIgqHYZe6PK/v4mU1X8/eLsT1ti5ck0l2ob0HDRziaJPx+YWzBo6dMm4cTac3mcyk68Gh+A==} + peerDependencies: + react: '>=16.8.0' + dependencies: + react: 18.2.0 + dev: false + /use-image@1.1.1(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-n4YO2k8AJG/BcDtxmBx8Aa+47kxY5m335dJiCQA5tTeVU4XdhrhqR6wT0WISRXwdMEOv5CSjqekDZkEMiiWaYQ==} peerDependencies: diff --git a/invokeai/frontend/web/public/assets/images/transparent_bg.png b/invokeai/frontend/web/public/assets/images/transparent_bg.png new file mode 100644 index 0000000000..e1a3c339ce Binary files /dev/null and b/invokeai/frontend/web/public/assets/images/transparent_bg.png differ diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 033dffdc44..0a104c083b 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -85,7 +85,8 @@ "loadMore": "Mehr laden", "noImagesInGallery": "Keine Bilder in der Galerie", "loading": "Lade", - "deleteImage": "Lösche Bild", + "deleteImage_one": "Lösche Bild", + "deleteImage_other": "", "copy": "Kopieren", "download": "Runterladen", "setCurrentImage": "Setze aktuelle Bild", diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index f69f09552a..885a937de3 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -69,6 +69,7 @@ "auto": "Auto", "back": "Back", "batch": "Batch Manager", + "beta": "Beta", "cancel": "Cancel", "copy": "Copy", "copyError": "$t(gallery.copy) Error", @@ -83,6 +84,8 @@ "direction": "Direction", "ipAdapter": "IP Adapter", "t2iAdapter": "T2I Adapter", + "positivePrompt": "Positive Prompt", + "negativePrompt": "Negative Prompt", "discordLabel": "Discord", "dontAskMeAgain": "Don't ask me again", "error": "Error", @@ -135,7 +138,9 @@ "red": "Red", "green": "Green", "blue": "Blue", - "alpha": "Alpha" + "alpha": "Alpha", + "selected": "Selected", + "viewer": "Viewer" }, "controlnet": { "controlAdapter_one": "Control Adapter", @@ -151,6 +156,7 @@ "balanced": "Balanced", "base": "Base", "beginEndStepPercent": "Begin / End Step Percentage", + "beginEndStepPercentShort": "Begin/End %", "bgth": "bg_th", "canny": "Canny", "cannyDescription": "Canny edge detection", @@ -222,7 +228,8 @@ "scribble": "scribble", "selectModel": "Select a model", "selectCLIPVisionModel": "Select a CLIP Vision model", - "setControlImageDimensions": "Set Control Image Dimensions To W/H", + "setControlImageDimensions": "Copy size to W/H (optimize for model)", + "setControlImageDimensionsForce": 
"Copy size to W/H (ignore model)", "showAdvanced": "Show Advanced", "small": "Small", "toggleControlNet": "Toggle this ControlNet", @@ -892,6 +899,7 @@ "denoisingStrength": "Denoising Strength", "downloadImage": "Download Image", "general": "General", + "globalSettings": "Global Settings", "height": "Height", "imageFit": "Fit Initial Image To Output Size", "images": "Images", @@ -1182,6 +1190,10 @@ "heading": "Resize Mode", "paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."] }, + "ipAdapterMethod": { + "heading": "Method", + "paragraphs": ["Method by which to apply the current IP Adapter."] + }, "controlNetWeight": { "heading": "Weight", "paragraphs": [ @@ -1500,5 +1512,36 @@ }, "app": { "storeNotInitialized": "Store is not initialized" + }, + "controlLayers": { + "deleteAll": "Delete All", + "addLayer": "Add Layer", + "moveToFront": "Move to Front", + "moveToBack": "Move to Back", + "moveForward": "Move Forward", + "moveBackward": "Move Backward", + "brushSize": "Brush Size", + "controlLayers": "Control Layers (BETA)", + "globalMaskOpacity": "Global Mask Opacity", + "autoNegative": "Auto Negative", + "toggleVisibility": "Toggle Layer Visibility", + "deletePrompt": "Delete Prompt", + "resetRegion": "Reset Region", + "debugLayers": "Debug Layers", + "rectangle": "Rectangle", + "maskPreviewColor": "Mask Preview Color", + "addPositivePrompt": "Add $t(common.positivePrompt)", + "addNegativePrompt": "Add $t(common.negativePrompt)", + "addIPAdapter": "Add $t(common.ipAdapter)", + "regionalGuidance": "Regional Guidance", + "regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)", + "controlNetLayer": "$t(common.controlNet) $t(unifiedCanvas.layer)", + "ipAdapterLayer": "$t(common.ipAdapter) $t(unifiedCanvas.layer)", + "opacity": "Opacity", + "globalControlAdapter": "Global $t(controlnet.controlAdapter_one)", + "globalControlAdapterLayer": "Global $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)", + "globalIPAdapter": "Global $t(common.ipAdapter)", + "globalIPAdapterLayer": "Global $t(common.ipAdapter) $t(unifiedCanvas.layer)", + "opacityFilter": "Opacity Filter" } } diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 3037045db5..6b410cd0bf 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -33,7 +33,9 @@ "autoSwitchNewImages": "Auto seleccionar Imágenes nuevas", "loadMore": "Cargar más", "noImagesInGallery": "No hay imágenes para mostrar", - "deleteImage": "Eliminar Imagen", + "deleteImage_one": "Eliminar Imagen", + "deleteImage_many": "", + "deleteImage_other": "", "deleteImageBin": "Las imágenes eliminadas se enviarán a la papelera de tu sistema operativo.", "deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.", "assets": "Activos", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index db01e0af4b..491b31907b 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -82,7 +82,9 @@ "autoSwitchNewImages": "Passaggio automatico a nuove immagini", "loadMore": "Carica altro", "noImagesInGallery": "Nessuna immagine da visualizzare", - "deleteImage": "Elimina l'immagine", + "deleteImage_one": "Elimina l'immagine", + "deleteImage_many": "Elimina {{count}} immagini", + "deleteImage_other": "Elimina {{count}} immagini", "deleteImagePermanent": "Le immagini eliminate non possono 
essere ripristinate.", "deleteImageBin": "Le immagini eliminate verranno spostate nel cestino del tuo sistema operativo.", "assets": "Risorse", diff --git a/invokeai/frontend/web/public/locales/ja.json b/invokeai/frontend/web/public/locales/ja.json index d13b1e4cb0..264593153a 100644 --- a/invokeai/frontend/web/public/locales/ja.json +++ b/invokeai/frontend/web/public/locales/ja.json @@ -90,7 +90,7 @@ "problemDeletingImages": "画像の削除中に問題が発生", "drop": "ドロップ", "dropOrUpload": "$t(gallery.drop) またはアップロード", - "deleteImage": "画像を削除", + "deleteImage_other": "画像を削除", "deleteImageBin": "削除された画像はOSのゴミ箱に送られます。", "deleteImagePermanent": "削除された画像は復元できません。", "download": "ダウンロード", diff --git a/invokeai/frontend/web/public/locales/ko.json b/invokeai/frontend/web/public/locales/ko.json index 44f0f5eac6..1c02d86105 100644 --- a/invokeai/frontend/web/public/locales/ko.json +++ b/invokeai/frontend/web/public/locales/ko.json @@ -82,7 +82,7 @@ "drop": "드랍", "problemDeletingImages": "이미지 삭제 중 발생한 문제", "downloadSelection": "선택 항목 다운로드", - "deleteImage": "이미지 삭제", + "deleteImage_other": "이미지 삭제", "currentlyInUse": "이 이미지는 현재 다음 기능에서 사용되고 있습니다:", "dropOrUpload": "$t(gallery.drop) 또는 업로드", "copy": "복사", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 70adbb371d..29ceb3227b 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -42,7 +42,8 @@ "autoSwitchNewImages": "Wissel autom. naar nieuwe afbeeldingen", "loadMore": "Laad meer", "noImagesInGallery": "Geen afbeeldingen om te tonen", - "deleteImage": "Verwijder afbeelding", + "deleteImage_one": "Verwijder afbeelding", + "deleteImage_other": "", "deleteImageBin": "Verwijderde afbeeldingen worden naar de prullenbak van je besturingssysteem gestuurd.", "deleteImagePermanent": "Verwijderde afbeeldingen kunnen niet worden hersteld.", "assets": "Eigen onderdelen", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 8ac36ef2de..f254b7faa5 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -86,7 +86,9 @@ "noImagesInGallery": "Изображений нет", "deleteImagePermanent": "Удаленные изображения невозможно восстановить.", "deleteImageBin": "Удаленные изображения будут отправлены в корзину вашей операционной системы.", - "deleteImage": "Удалить изображение", + "deleteImage_one": "Удалить изображение", + "deleteImage_few": "", + "deleteImage_many": "", "assets": "Ресурсы", "autoAssignBoardOnClick": "Авто-назначение доски по клику", "deleteSelection": "Удалить выделенное", diff --git a/invokeai/frontend/web/public/locales/tr.json b/invokeai/frontend/web/public/locales/tr.json index 2a666a128c..415bd2d744 100644 --- a/invokeai/frontend/web/public/locales/tr.json +++ b/invokeai/frontend/web/public/locales/tr.json @@ -298,7 +298,8 @@ "noImagesInGallery": "Gösterilecek Görsel Yok", "autoSwitchNewImages": "Yeni Görseli Biter Bitmez Gör", "currentlyInUse": "Bu görsel şurada kullanımda:", - "deleteImage": "Görseli Sil", + "deleteImage_one": "Görseli Sil", + "deleteImage_other": "", "loadMore": "Daha Getir", "setCurrentImage": "Çalışma Görseli Yap", "unableToLoad": "Galeri Yüklenemedi", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index e2cb35af74..8aff73d2a1 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -78,7 +78,7 @@ 
"autoSwitchNewImages": "自动切换到新图像", "loadMore": "加载更多", "noImagesInGallery": "无图像可用于显示", - "deleteImage": "删除图片", + "deleteImage_other": "删除图片", "deleteImageBin": "被删除的图片会发送到你操作系统的回收站。", "deleteImagePermanent": "删除的图片无法被恢复。", "assets": "素材", diff --git a/invokeai/frontend/web/src/app/logging/logger.ts b/invokeai/frontend/web/src/app/logging/logger.ts index d0e6340625..ca7a24201a 100644 --- a/invokeai/frontend/web/src/app/logging/logger.ts +++ b/invokeai/frontend/web/src/app/logging/logger.ts @@ -27,7 +27,8 @@ export type LoggerNamespace = | 'socketio' | 'session' | 'queue' - | 'dnd'; + | 'dnd' + | 'controlLayers'; export const logger = (namespace: LoggerNamespace) => $logger.get().child({ namespace }); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index cd0c1290e9..ac039c2df6 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -16,6 +16,7 @@ import { addCanvasMaskSavedToGalleryListener } from 'app/store/middleware/listen import { addCanvasMaskToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet'; import { addCanvasMergedListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMerged'; import { addCanvasSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery'; +import { addControlLayersToControlAdapterBridge } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; import { addControlNetAutoProcessListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess'; import { addControlNetImageProcessedListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed'; import { addEnqueueRequestedCanvasListener } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas'; @@ -157,3 +158,5 @@ addUpscaleRequestedListener(startAppListening); addDynamicPromptsListener(startAppListening); addSetDefaultSettingsListener(startAppListening); + +addControlLayersToControlAdapterBridge(startAppListening); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts index 55392ebff4..b1b19b35dc 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts @@ -48,12 +48,10 @@ export const addCanvasImageToControlNetListener = (startAppListening: AppStartLi }) ).unwrap(); - const { image_name } = imageDTO; - dispatch( controlAdapterImageChanged({ id, - controlImage: image_name, + controlImage: imageDTO, }) ); }, diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts index 569b4badc7..b3014277f1 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts @@ -58,12 +58,10 @@ export const addCanvasMaskToControlNetListener = 
(startAppListening: AppStartLis
         })
       ).unwrap();
 
-      const { image_name } = imageDTO;
-
       dispatch(
         controlAdapterImageChanged({
           id,
-          controlImage: image_name,
+          controlImage: imageDTO,
         })
       );
     },
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge.ts
new file mode 100644
index 0000000000..bc14277f88
--- /dev/null
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge.ts
@@ -0,0 +1,144 @@
+import { createAction } from '@reduxjs/toolkit';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
+import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
+import { controlAdapterAdded, controlAdapterRemoved } from 'features/controlAdapters/store/controlAdaptersSlice';
+import type { ControlNetConfig, IPAdapterConfig } from 'features/controlAdapters/store/types';
+import { isControlAdapterProcessorType } from 'features/controlAdapters/store/types';
+import {
+  controlAdapterLayerAdded,
+  ipAdapterLayerAdded,
+  layerDeleted,
+  maskLayerIPAdapterAdded,
+  maskLayerIPAdapterDeleted,
+  regionalGuidanceLayerAdded,
+} from 'features/controlLayers/store/controlLayersSlice';
+import type { Layer } from 'features/controlLayers/store/types';
+import { modelConfigsAdapterSelectors, modelsApi } from 'services/api/endpoints/models';
+import { isControlNetModelConfig, isIPAdapterModelConfig } from 'services/api/types';
+import { assert } from 'tsafe';
+import { v4 as uuidv4 } from 'uuid';
+
+export const guidanceLayerAdded = createAction<Layer['type']>('controlLayers/guidanceLayerAdded');
+export const guidanceLayerDeleted = createAction<string>('controlLayers/guidanceLayerDeleted');
+export const allLayersDeleted = createAction('controlLayers/allLayersDeleted');
+export const guidanceLayerIPAdapterAdded = createAction<string>('controlLayers/guidanceLayerIPAdapterAdded');
+export const guidanceLayerIPAdapterDeleted = createAction<{ layerId: string; ipAdapterId: string }>(
+  'controlLayers/guidanceLayerIPAdapterDeleted'
+);
+
+export const addControlLayersToControlAdapterBridge = (startAppListening: AppStartListening) => {
+  startAppListening({
+    actionCreator: guidanceLayerAdded,
+    effect: (action, { dispatch, getState }) => {
+      const type = action.payload;
+      const layerId = uuidv4();
+      if (type === 'regional_guidance_layer') {
+        dispatch(regionalGuidanceLayerAdded({ layerId }));
+        return;
+      }
+
+      const state = getState();
+      const baseModel = state.generation.model?.base;
+      const modelConfigs = modelsApi.endpoints.getModelConfigs.select(undefined)(state).data;
+
+      if (type === 'ip_adapter_layer') {
+        const ipAdapterId = uuidv4();
+        const overrides: Partial<IPAdapterConfig> = {
+          id: ipAdapterId,
+        };
+
+        // Find and select the first matching model
+        if (modelConfigs) {
+          const models = modelConfigsAdapterSelectors.selectAll(modelConfigs).filter(isIPAdapterModelConfig);
+          overrides.model = models.find((m) => m.base === baseModel) ?? null;
+        }
+        dispatch(controlAdapterAdded({ type: 'ip_adapter', overrides }));
+        dispatch(ipAdapterLayerAdded({ layerId, ipAdapterId }));
+        return;
+      }
+
+      if (type === 'control_adapter_layer') {
+        const controlNetId = uuidv4();
+        const overrides: Partial<ControlNetConfig> = {
+          id: controlNetId,
+        };
+
+        // Find and select the first matching model
+        if (modelConfigs) {
+          const models = modelConfigsAdapterSelectors.selectAll(modelConfigs).filter(isControlNetModelConfig);
+          const model = models.find((m) => m.base === baseModel) ?? null;
+          overrides.model = model;
+          const defaultPreprocessor = model?.default_settings?.preprocessor;
+          overrides.processorType = isControlAdapterProcessorType(defaultPreprocessor) ? defaultPreprocessor : 'none';
+          overrides.processorNode = CONTROLNET_PROCESSORS[overrides.processorType].buildDefaults(baseModel);
+        }
+        dispatch(controlAdapterAdded({ type: 'controlnet', overrides }));
+        dispatch(controlAdapterLayerAdded({ layerId, controlNetId }));
+        return;
+      }
+    },
+  });
+
+  startAppListening({
+    actionCreator: guidanceLayerDeleted,
+    effect: (action, { getState, dispatch }) => {
+      const layerId = action.payload;
+      const state = getState();
+      const layer = state.controlLayers.present.layers.find((l) => l.id === layerId);
+      assert(layer, `Layer ${layerId} not found`);
+
+      if (layer.type === 'ip_adapter_layer') {
+        dispatch(controlAdapterRemoved({ id: layer.ipAdapterId }));
+      } else if (layer.type === 'control_adapter_layer') {
+        dispatch(controlAdapterRemoved({ id: layer.controlNetId }));
+      } else if (layer.type === 'regional_guidance_layer') {
+        for (const ipAdapterId of layer.ipAdapterIds) {
+          dispatch(controlAdapterRemoved({ id: ipAdapterId }));
+        }
+      }
+      dispatch(layerDeleted(layerId));
+    },
+  });
+
+  startAppListening({
+    actionCreator: allLayersDeleted,
+    effect: (action, { dispatch, getOriginalState }) => {
+      const state = getOriginalState();
+      for (const layer of state.controlLayers.present.layers) {
+        dispatch(guidanceLayerDeleted(layer.id));
+      }
+    },
+  });
+
+  startAppListening({
+    actionCreator: guidanceLayerIPAdapterAdded,
+    effect: (action, { dispatch, getState }) => {
+      const layerId = action.payload;
+      const ipAdapterId = uuidv4();
+      const overrides: Partial<IPAdapterConfig> = {
+        id: ipAdapterId,
+      };
+
+      // Find and select the first matching model
+      const state = getState();
+      const baseModel = state.generation.model?.base;
+      const modelConfigs = modelsApi.endpoints.getModelConfigs.select(undefined)(state).data;
+      if (modelConfigs) {
+        const models = modelConfigsAdapterSelectors.selectAll(modelConfigs).filter(isIPAdapterModelConfig);
+        overrides.model = models.find((m) => m.base === baseModel) ?? 
null; + } + + dispatch(controlAdapterAdded({ type: 'ip_adapter', overrides })); + dispatch(maskLayerIPAdapterAdded({ layerId, ipAdapterId })); + }, + }); + + startAppListening({ + actionCreator: guidanceLayerIPAdapterDeleted, + effect: (action, { dispatch }) => { + const { layerId, ipAdapterId } = action.payload; + dispatch(controlAdapterRemoved({ id: ipAdapterId })); + dispatch(maskLayerIPAdapterDeleted({ layerId, ipAdapterId })); + }, + }); +}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts index e52df30681..14af0246a2 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts @@ -12,6 +12,7 @@ import { selectControlAdapterById, } from 'features/controlAdapters/store/controlAdaptersSlice'; import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types'; +import { isEqual } from 'lodash-es'; type AnyControlAdapterParamChangeAction = | ReturnType @@ -52,6 +53,11 @@ const predicate: AnyListenerPredicate = (action, state, prevState) => return false; } + if (prevCA.controlImage === ca.controlImage && isEqual(prevCA.processorNode, ca.processorNode)) { + // Don't re-process if the processor hasn't changed + return false; + } + const isProcessorSelected = processorType !== 'none'; const hasControlImage = Boolean(controlImage); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts index 0055866aa7..08afc98836 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts @@ -91,7 +91,7 @@ export const addControlNetImageProcessedListener = (startAppListening: AppStartL dispatch( controlAdapterProcessedImageChanged({ id, - processedControlImage: processedControlImage.image_name, + processedControlImage, }) ); } diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts index 5c1f321b64..307e3487dd 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts @@ -71,7 +71,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) => dispatch( controlAdapterImageChanged({ id, - controlImage: activeData.payload.imageDTO.image_name, + controlImage: activeData.payload.imageDTO, }) ); dispatch( diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts index 2cebf0aef8..a2ca4baeb1 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts @@ -96,7 +96,7 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis dispatch( 
controlAdapterImageChanged({ id, - controlImage: imageDTO.image_name, + controlImage: imageDTO, }) ); dispatch( diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts index bc049cf498..b69e56e84a 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts @@ -1,7 +1,7 @@ import { logger } from 'app/logging/logger'; import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { - controlAdapterIsEnabledChanged, + controlAdapterModelChanged, selectControlAdapterAll, } from 'features/controlAdapters/store/controlAdaptersSlice'; import { loraRemoved } from 'features/lora/store/loraSlice'; @@ -54,7 +54,7 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) = // handle incompatible controlnets selectControlAdapterAll(state.controlAdapters).forEach((ca) => { if (ca.model?.base !== newBaseModel) { - dispatch(controlAdapterIsEnabledChanged({ id: ca.id, isEnabled: false })); + dispatch(controlAdapterModelChanged({ id: ca.id, modelConfig: null })); modelsCleared += 1; } }); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts index 2ba9aa3cbf..eb86f54c84 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts @@ -6,9 +6,10 @@ import { controlAdapterModelCleared, selectControlAdapterAll, } from 'features/controlAdapters/store/controlAdaptersSlice'; +import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice'; import { loraRemoved } from 'features/lora/store/loraSlice'; import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize'; -import { heightChanged, modelChanged, vaeSelected, widthChanged } from 'features/parameters/store/generationSlice'; +import { modelChanged, vaeSelected } from 'features/parameters/store/generationSlice'; import { zParameterModel, zParameterVAEModel } from 'features/parameters/types/parameterSchemas'; import { getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension'; import { refinerModelChanged } from 'features/sdxl/store/sdxlSlice'; @@ -69,16 +70,22 @@ const handleMainModels: ModelHandler = (models, state, dispatch, log) => { dispatch(modelChanged(defaultModelInList, currentModel)); const optimalDimension = getOptimalDimension(defaultModelInList); - if (getIsSizeOptimal(state.generation.width, state.generation.height, optimalDimension)) { + if ( + getIsSizeOptimal( + state.controlLayers.present.size.width, + state.controlLayers.present.size.height, + optimalDimension + ) + ) { return; } const { width, height } = calculateNewSize( - state.generation.aspectRatio.value, + state.controlLayers.present.size.aspectRatio.value, optimalDimension * optimalDimension ); - dispatch(widthChanged(width)); - dispatch(heightChanged(height)); + dispatch(widthChanged({ width })); + dispatch(heightChanged({ height })); return; } } diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/promptChanged.ts 
b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/promptChanged.ts index b78ddc3f69..4633eb45a5 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/promptChanged.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/promptChanged.ts @@ -1,5 +1,6 @@ import { isAnyOf } from '@reduxjs/toolkit'; import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; +import { positivePromptChanged } from 'features/controlLayers/store/controlLayersSlice'; import { combinatorialToggled, isErrorChanged, @@ -10,11 +11,16 @@ import { promptsChanged, } from 'features/dynamicPrompts/store/dynamicPromptsSlice'; import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt'; -import { setPositivePrompt } from 'features/parameters/store/generationSlice'; import { utilitiesApi } from 'services/api/endpoints/utilities'; import { socketConnected } from 'services/events/actions'; -const matcher = isAnyOf(setPositivePrompt, combinatorialToggled, maxPromptsChanged, maxPromptsReset, socketConnected); +const matcher = isAnyOf( + positivePromptChanged, + combinatorialToggled, + maxPromptsChanged, + maxPromptsReset, + socketConnected +); export const addDynamicPromptsListener = (startAppListening: AppStartListening) => { startAppListening({ @@ -22,7 +28,7 @@ export const addDynamicPromptsListener = (startAppListening: AppStartListening) effect: async (action, { dispatch, getState, cancelActiveListeners, delay }) => { cancelActiveListeners(); const state = getState(); - const { positivePrompt } = state.generation; + const { positivePrompt } = state.controlLayers.present; const { maxPrompts } = state.dynamicPrompts; if (state.config.disabledFeatures.includes('dynamicPrompting')) { @@ -32,7 +38,7 @@ export const addDynamicPromptsListener = (startAppListening: AppStartListening) const cachedPrompts = utilitiesApi.endpoints.dynamicPrompts.select({ prompt: positivePrompt, max_prompts: maxPrompts, - })(getState()).data; + })(state).data; if (cachedPrompts) { dispatch(promptsChanged(cachedPrompts.prompts)); @@ -40,8 +46,8 @@ export const addDynamicPromptsListener = (startAppListening: AppStartListening) return; } - if (!getShouldProcessPrompt(state.generation.positivePrompt)) { - dispatch(promptsChanged([state.generation.positivePrompt])); + if (!getShouldProcessPrompt(positivePrompt)) { + dispatch(promptsChanged([positivePrompt])); dispatch(parsingErrorChanged(undefined)); dispatch(isErrorChanged(false)); return; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/setDefaultSettings.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/setDefaultSettings.ts index 7fbb55845f..6f3aa9756a 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/setDefaultSettings.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/setDefaultSettings.ts @@ -1,14 +1,13 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; +import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice'; import { setDefaultSettings } from 'features/parameters/store/actions'; import { - heightRecalled, setCfgRescaleMultiplier, setCfgScale, setScheduler, setSteps, vaePrecisionChanged, vaeSelected, - widthRecalled, } from 'features/parameters/store/generationSlice'; import { isParameterCFGRescaleMultiplier, @@ -100,13 +99,13 @@ export const 
addSetDefaultSettingsListener = (startAppListening: AppStartListeni if (width) { if (isParameterWidth(width)) { - dispatch(widthRecalled(width)); + dispatch(widthChanged({ width, updateAspectRatio: true })); } } if (height) { if (isParameterHeight(height)) { - dispatch(heightRecalled(height)); + dispatch(heightChanged({ height, updateAspectRatio: true })); } } diff --git a/invokeai/frontend/web/src/app/store/store.ts b/invokeai/frontend/web/src/app/store/store.ts index b538a3eaeb..9661f57f99 100644 --- a/invokeai/frontend/web/src/app/store/store.ts +++ b/invokeai/frontend/web/src/app/store/store.ts @@ -10,6 +10,11 @@ import { controlAdaptersPersistConfig, controlAdaptersSlice, } from 'features/controlAdapters/store/controlAdaptersSlice'; +import { + controlLayersPersistConfig, + controlLayersSlice, + controlLayersUndoableConfig, +} from 'features/controlLayers/store/controlLayersSlice'; import { deleteImageModalSlice } from 'features/deleteImageModal/store/slice'; import { dynamicPromptsPersistConfig, dynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice'; import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/gallerySlice'; @@ -30,6 +35,7 @@ import { defaultsDeep, keys, omit, pick } from 'lodash-es'; import dynamicMiddlewares from 'redux-dynamic-middlewares'; import type { SerializeFunction, UnserializeFunction } from 'redux-remember'; import { rememberEnhancer, rememberReducer } from 'redux-remember'; +import undoable from 'redux-undo'; import { serializeError } from 'serialize-error'; import { api } from 'services/api'; import { authToastMiddleware } from 'services/api/authToastMiddleware'; @@ -59,6 +65,7 @@ const allReducers = { [queueSlice.name]: queueSlice.reducer, [workflowSlice.name]: workflowSlice.reducer, [hrfSlice.name]: hrfSlice.reducer, + [controlLayersSlice.name]: undoable(controlLayersSlice.reducer, controlLayersUndoableConfig), [api.reducerPath]: api.reducer, }; @@ -103,6 +110,7 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = { [loraPersistConfig.name]: loraPersistConfig, [modelManagerV2PersistConfig.name]: modelManagerV2PersistConfig, [hrfPersistConfig.name]: hrfPersistConfig, + [controlLayersPersistConfig.name]: controlLayersPersistConfig, }; const unserialize: UnserializeFunction = (data, key) => { @@ -114,6 +122,7 @@ const unserialize: UnserializeFunction = (data, key) => { try { const { initialState, migrate } = persistConfig; const parsed = JSON.parse(data); + // strip out old keys const stripped = pick(parsed, keys(initialState)); // run (additive) migrations @@ -141,7 +150,9 @@ const serialize: SerializeFunction = (data, key) => { if (!persistConfig) { throw new Error(`No persist config for slice "${key}"`); } - const result = omit(data, persistConfig.persistDenylist); + // Heuristic to determine if the slice is undoable - could just hardcode it in the persistConfig + const isUndoable = 'present' in data && 'past' in data && 'future' in data && '_latestUnfiltered' in data; + const result = omit(isUndoable ? 
data.present : data, persistConfig.persistDenylist); return JSON.stringify(result); }; diff --git a/invokeai/frontend/web/src/common/components/IAIColorPicker.tsx b/invokeai/frontend/web/src/common/components/IAIColorPicker.tsx index 68ffa5369e..25b129f678 100644 --- a/invokeai/frontend/web/src/common/components/IAIColorPicker.tsx +++ b/invokeai/frontend/web/src/common/components/IAIColorPicker.tsx @@ -26,7 +26,7 @@ const sx: ChakraProps['sx'] = { const colorPickerStyles: CSSProperties = { width: '100%' }; -const numberInputWidth: ChakraProps['w'] = '4.2rem'; +const numberInputWidth: ChakraProps['w'] = '3.5rem'; const IAIColorPicker = (props: IAIColorPickerProps) => { const { color, onChange, withNumberInput, ...rest } = props; @@ -41,7 +41,7 @@ const IAIColorPicker = (props: IAIColorPickerProps) => { {withNumberInput && ( - {t('common.red')} + {t('common.red')[0]} { /> - {t('common.green')} + {t('common.green')[0]} { /> - {t('common.blue')} + {t('common.blue')[0]} { /> - {t('common.alpha')} + {t('common.alpha')[0]} & { + withNumberInput?: boolean; +}; + +const colorPickerPointerStyles: NonNullable = { + width: 6, + height: 6, + borderColor: 'base.100', +}; + +const sx: ChakraProps['sx'] = { + '.react-colorful__hue-pointer': colorPickerPointerStyles, + '.react-colorful__saturation-pointer': colorPickerPointerStyles, + '.react-colorful__alpha-pointer': colorPickerPointerStyles, + gap: 5, + flexDir: 'column', +}; + +const colorPickerStyles: CSSProperties = { width: '100%' }; + +const numberInputWidth: ChakraProps['w'] = '3.5rem'; + +const RgbColorPicker = (props: RgbColorPickerProps) => { + const { color, onChange, withNumberInput, ...rest } = props; + const { t } = useTranslation(); + const handleChangeR = useCallback((r: number) => onChange({ ...color, r }), [color, onChange]); + const handleChangeG = useCallback((g: number) => onChange({ ...color, g }), [color, onChange]); + const handleChangeB = useCallback((b: number) => onChange({ ...color, b }), [color, onChange]); + return ( + + + {withNumberInput && ( + + + {t('common.red')[0]} + + + + {t('common.green')[0]} + + + + {t('common.blue')[0]} + + + + )} + + ); +}; + +export default memo(RgbColorPicker); diff --git a/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts b/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts index b31efed970..d765e987eb 100644 --- a/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts +++ b/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts @@ -5,6 +5,7 @@ import { selectControlAdaptersSlice, } from 'features/controlAdapters/store/controlAdaptersSlice'; import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types'; +import { selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice'; import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice'; import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt'; import { selectNodesSlice } from 'features/nodes/store/nodesSlice'; @@ -23,10 +24,12 @@ const selector = createMemoizedSelector( selectSystemSlice, selectNodesSlice, selectDynamicPromptsSlice, + selectControlLayersSlice, activeTabNameSelector, ], - (controlAdapters, generation, system, nodes, dynamicPrompts, activeTabName) => { - const { initialImage, model, positivePrompt } = generation; + (controlAdapters, generation, system, nodes, dynamicPrompts, controlLayers, activeTabName) => { + const { initialImage, model } = generation; + const { positivePrompt } = 
controlLayers.present; const { isConnected } = system; @@ -94,7 +97,41 @@ const selector = createMemoizedSelector( reasons.push(i18n.t('parameters.invoke.noModelSelected')); } - selectControlAdapterAll(controlAdapters).forEach((ca, i) => { + let enabledControlAdapters = selectControlAdapterAll(controlAdapters).filter((ca) => ca.isEnabled); + + if (activeTabName === 'txt2img') { + // Special handling for control layers on txt2img + const enabledControlLayersAdapterIds = controlLayers.present.layers + .filter((l) => l.isEnabled) + .flatMap((layer) => { + if (layer.type === 'regional_guidance_layer') { + return layer.ipAdapterIds; + } + if (layer.type === 'control_adapter_layer') { + return [layer.controlNetId]; + } + if (layer.type === 'ip_adapter_layer') { + return [layer.ipAdapterId]; + } + }); + + enabledControlAdapters = enabledControlAdapters.filter((ca) => enabledControlLayersAdapterIds.includes(ca.id)); + } else { + const allControlLayerAdapterIds = controlLayers.present.layers.flatMap((layer) => { + if (layer.type === 'regional_guidance_layer') { + return layer.ipAdapterIds; + } + if (layer.type === 'control_adapter_layer') { + return [layer.controlNetId]; + } + if (layer.type === 'ip_adapter_layer') { + return [layer.ipAdapterId]; + } + }); + enabledControlAdapters = enabledControlAdapters.filter((ca) => !allControlLayerAdapterIds.includes(ca.id)); + } + + enabledControlAdapters.forEach((ca, i) => { if (!ca.isEnabled) { return; } diff --git a/invokeai/frontend/web/src/common/util/arrayUtils.test.ts b/invokeai/frontend/web/src/common/util/arrayUtils.test.ts new file mode 100644 index 0000000000..5d0fd090f7 --- /dev/null +++ b/invokeai/frontend/web/src/common/util/arrayUtils.test.ts @@ -0,0 +1,85 @@ +import { moveBackward, moveForward, moveToBack, moveToFront } from 'common/util/arrayUtils'; +import { describe, expect, it } from 'vitest'; + +describe('Array Manipulation Functions', () => { + const originalArray = ['a', 'b', 'c', 'd']; + describe('moveForwardOne', () => { + it('should move an item forward by one position', () => { + const array = [...originalArray]; + const result = moveForward(array, (item) => item === 'b'); + expect(result).toEqual(['a', 'c', 'b', 'd']); + }); + + it('should do nothing if the item is at the end', () => { + const array = [...originalArray]; + const result = moveForward(array, (item) => item === 'd'); + expect(result).toEqual(['a', 'b', 'c', 'd']); + }); + + it("should leave the array unchanged if the item isn't in the array", () => { + const array = [...originalArray]; + const result = moveForward(array, (item) => item === 'z'); + expect(result).toEqual(originalArray); + }); + }); + + describe('moveToFront', () => { + it('should move an item to the front', () => { + const array = [...originalArray]; + const result = moveToFront(array, (item) => item === 'c'); + expect(result).toEqual(['c', 'a', 'b', 'd']); + }); + + it('should do nothing if the item is already at the front', () => { + const array = [...originalArray]; + const result = moveToFront(array, (item) => item === 'a'); + expect(result).toEqual(['a', 'b', 'c', 'd']); + }); + + it("should leave the array unchanged if the item isn't in the array", () => { + const array = [...originalArray]; + const result = moveToFront(array, (item) => item === 'z'); + expect(result).toEqual(originalArray); + }); + }); + + describe('moveBackwardsOne', () => { + it('should move an item backward by one position', () => { + const array = [...originalArray]; + const result = moveBackward(array, (item) => item === 
'c');
+      expect(result).toEqual(['a', 'c', 'b', 'd']);
+    });
+
+    it('should do nothing if the item is at the beginning', () => {
+      const array = [...originalArray];
+      const result = moveBackward(array, (item) => item === 'a');
+      expect(result).toEqual(['a', 'b', 'c', 'd']);
+    });
+
+    it("should leave the array unchanged if the item isn't in the array", () => {
+      const array = [...originalArray];
+      const result = moveBackward(array, (item) => item === 'z');
+      expect(result).toEqual(originalArray);
+    });
+  });
+
+  describe('moveToBack', () => {
+    it('should move an item to the back', () => {
+      const array = [...originalArray];
+      const result = moveToBack(array, (item) => item === 'b');
+      expect(result).toEqual(['a', 'c', 'd', 'b']);
+    });
+
+    it('should do nothing if the item is already at the back', () => {
+      const array = [...originalArray];
+      const result = moveToBack(array, (item) => item === 'd');
+      expect(result).toEqual(['a', 'b', 'c', 'd']);
+    });
+
+    it("should leave the array unchanged if the item isn't in the array", () => {
+      const array = [...originalArray];
+      const result = moveToBack(array, (item) => item === 'z');
+      expect(result).toEqual(originalArray);
+    });
+  });
+});
diff --git a/invokeai/frontend/web/src/common/util/arrayUtils.ts b/invokeai/frontend/web/src/common/util/arrayUtils.ts
new file mode 100644
index 0000000000..38c99b63ec
--- /dev/null
+++ b/invokeai/frontend/web/src/common/util/arrayUtils.ts
@@ -0,0 +1,37 @@
+export const moveForward = <T>(array: T[], callback: (item: T) => boolean): T[] => {
+  const index = array.findIndex(callback);
+  if (index >= 0 && index < array.length - 1) {
+    //@ts-expect-error - These indices are safe per the previous check
+    [array[index], array[index + 1]] = [array[index + 1], array[index]];
+  }
+  return array;
+};
+
+export const moveToFront = <T>(array: T[], callback: (item: T) => boolean): T[] => {
+  const index = array.findIndex(callback);
+  if (index > 0) {
+    const [item] = array.splice(index, 1);
+    //@ts-expect-error - These indices are safe per the previous check
+    array.unshift(item);
+  }
+  return array;
+};
+
+export const moveBackward = <T>(array: T[], callback: (item: T) => boolean): T[] => {
+  const index = array.findIndex(callback);
+  if (index > 0) {
+    //@ts-expect-error - These indices are safe per the previous check
+    [array[index], array[index - 1]] = [array[index - 1], array[index]];
+  }
+  return array;
+};
+
+export const moveToBack = <T>(array: T[], callback: (item: T) => boolean): T[] => {
+  const index = array.findIndex(callback);
+  if (index >= 0 && index < array.length - 1) {
+    const [item] = array.splice(index, 1);
+    //@ts-expect-error - These indices are safe per the previous check
+    array.push(item);
+  }
+  return array;
+};
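These helpers mutate the array in place via a destructuring swap and return the same reference; the `@ts-expect-error` comments are needed because strict index typing treats `array[index]` as possibly `undefined` even immediately after the bounds check (presumably `noUncheckedIndexedAccess` in this project's tsconfig; that is an assumption, not something the diff shows). A quick usage sketch:

```typescript
import { moveBackward, moveToFront } from 'common/util/arrayUtils';

type Item = { id: string };
const items: Item[] = [{ id: 'a' }, { id: 'b' }, { id: 'c' }];

// Mutates in place and returns the same array reference.
moveToFront(items, (item) => item.id === 'c');  // ['c', 'a', 'b']
moveBackward(items, (item) => item.id === 'a'); // ['a', 'c', 'b']
```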
diff --git a/invokeai/frontend/web/src/common/util/stopPropagation.ts b/invokeai/frontend/web/src/common/util/stopPropagation.ts
new file mode 100644
index 0000000000..b3481b7c0e
--- /dev/null
+++ b/invokeai/frontend/web/src/common/util/stopPropagation.ts
@@ -0,0 +1,3 @@
+export const stopPropagation = (e: React.MouseEvent) => {
+  e.stopPropagation();
+};
diff --git a/invokeai/frontend/web/src/features/canvas/hooks/useCanvasZoom.ts b/invokeai/frontend/web/src/features/canvas/hooks/useCanvasZoom.ts
index ef6a74ae9c..1434bc9afc 100644
--- a/invokeai/frontend/web/src/features/canvas/hooks/useCanvasZoom.ts
+++ b/invokeai/frontend/web/src/features/canvas/hooks/useCanvasZoom.ts
@@ -10,6 +10,18 @@ import { clamp } from 'lodash-es';
 import type { MutableRefObject } from 'react';
 import { useCallback } from 'react';
 
+export const calculateNewBrushSize = (brushSize: number, delta: number) => {
+  // This equation was derived by fitting a curve to the desired brush sizes and deltas
+  // see https://github.com/invoke-ai/InvokeAI/pull/5542#issuecomment-1915847565
+  const targetDelta = Math.sign(delta) * 0.7363 * Math.pow(1.0394, brushSize);
+  // This needs to be clamped to prevent the delta from getting too large
+  const finalDelta = clamp(targetDelta, -20, 20);
+  // The new brush size is also clamped to prevent it from getting too large or small
+  const newBrushSize = clamp(brushSize + finalDelta, 1, 500);
+
+  return newBrushSize;
+};
+
 const useCanvasWheel = (stageRef: MutableRefObject<Konva.Stage | null>) => {
   const dispatch = useAppDispatch();
   const stageScale = useAppSelector((s) => s.canvas.stageScale);
@@ -36,15 +48,7 @@ const useCanvasWheel = (stageRef: MutableRefObject<Konva.Stage | null>) => {
     }
 
     if ($ctrl.get() || $meta.get()) {
-      // This equation was derived by fitting a curve to the desired brush sizes and deltas
-      // see https://github.com/invoke-ai/InvokeAI/pull/5542#issuecomment-1915847565
-      const targetDelta = Math.sign(delta) * 0.7363 * Math.pow(1.0394, brushSize);
-      // This needs to be clamped to prevent the delta from getting too large
-      const finalDelta = clamp(targetDelta, -20, 20);
-      // The new brush size is also clamped to prevent it from getting too large or small
-      const newBrushSize = clamp(brushSize + finalDelta, 1, 500);
-
-      dispatch(setBrushSize(newBrushSize));
+      dispatch(setBrushSize(calculateNewBrushSize(brushSize, delta)));
     } else {
       const cursorPos = stageRef.current.getPointerPosition();
       let delta = e.evt.deltaY;
diff --git a/invokeai/frontend/web/src/features/canvas/util/blobToDataURL.ts b/invokeai/frontend/web/src/features/canvas/util/blobToDataURL.ts
index 2443396105..f29010c99c 100644
--- a/invokeai/frontend/web/src/features/canvas/util/blobToDataURL.ts
+++ b/invokeai/frontend/web/src/features/canvas/util/blobToDataURL.ts
@@ -7,3 +7,22 @@ export const blobToDataURL = (blob: Blob): Promise<string> => {
     reader.readAsDataURL(blob);
   });
 };
+
+export function imageDataToDataURL(imageData: ImageData): string {
+  const { width, height } = imageData;
+
+  // Create a canvas to transfer the ImageData to
+  const canvas = document.createElement('canvas');
+  canvas.width = width;
+  canvas.height = height;
+
+  // Draw the ImageData onto the canvas
+  const ctx = canvas.getContext('2d');
+  if (!ctx) {
+    throw new Error('Unable to get canvas context');
+  }
+  ctx.putImageData(imageData, 0, 0);
+
+  // Convert the canvas to a data URL (base64)
+  return canvas.toDataURL();
+}
diff --git a/invokeai/frontend/web/src/features/canvas/util/colorToString.ts b/invokeai/frontend/web/src/features/canvas/util/colorToString.ts
index a4b619c5de..25d79fed5a 100644
--- a/invokeai/frontend/web/src/features/canvas/util/colorToString.ts
+++ b/invokeai/frontend/web/src/features/canvas/util/colorToString.ts
@@ -1,6 +1,11 @@
-import type { RgbaColor } from 'react-colorful';
+import type { RgbaColor, RgbColor } from 'react-colorful';
 
 export const rgbaColorToString = (color: RgbaColor): string => {
   const { r, g, b, a } = color;
   return `rgba(${r}, ${g}, ${b}, ${a})`;
 };
+
+export const rgbColorToString = (color: RgbColor): string => {
+  const { r, g, b } = color;
+  return `rgba(${r}, ${g}, ${b})`;
+};
diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx
index fcc816d75f..032e46f477 100644
---
diff --git a/invokeai/frontend/web/src/features/canvas/util/blobToDataURL.ts b/invokeai/frontend/web/src/features/canvas/util/blobToDataURL.ts index 2443396105..f29010c99c 100644 --- a/invokeai/frontend/web/src/features/canvas/util/blobToDataURL.ts +++ b/invokeai/frontend/web/src/features/canvas/util/blobToDataURL.ts @@ -7,3 +7,22 @@ export const blobToDataURL = (blob: Blob): Promise<string> => { reader.readAsDataURL(blob); }); }; + +export function imageDataToDataURL(imageData: ImageData): string { + const { width, height } = imageData; + + // Create a canvas to transfer the ImageData to + const canvas = document.createElement('canvas'); + canvas.width = width; + canvas.height = height; + + // Draw the ImageData onto the canvas + const ctx = canvas.getContext('2d'); + if (!ctx) { + throw new Error('Unable to get canvas context'); + } + ctx.putImageData(imageData, 0, 0); + + // Convert the canvas to a data URL (base64) + return canvas.toDataURL(); +}
diff --git a/invokeai/frontend/web/src/features/canvas/util/colorToString.ts b/invokeai/frontend/web/src/features/canvas/util/colorToString.ts index a4b619c5de..25d79fed5a 100644 --- a/invokeai/frontend/web/src/features/canvas/util/colorToString.ts +++ b/invokeai/frontend/web/src/features/canvas/util/colorToString.ts @@ -1,6 +1,11 @@ -import type { RgbaColor } from 'react-colorful'; +import type { RgbaColor, RgbColor } from 'react-colorful'; export const rgbaColorToString = (color: RgbaColor): string => { const { r, g, b, a } = color; return `rgba(${r}, ${g}, ${b}, ${a})`; }; + +export const rgbColorToString = (color: RgbColor): string => { + const { r, g, b } = color; + return `rgb(${r}, ${g}, ${b})`; +};
diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx index fcc816d75f..032e46f477 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx +++ b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx @@ -113,7 +113,7 @@ const ControlAdapterConfig = (props: { id: string; number: number }) => { - + {controlAdapterType === 'ip_adapter' && }
diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterImagePreview.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterImagePreview.tsx index c136fbe064..56589fe613 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterImagePreview.tsx +++ b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterImagePreview.tsx @@ -13,9 +13,10 @@ import { controlAdapterImageChanged, selectControlAdaptersSlice, } from 'features/controlAdapters/store/controlAdaptersSlice'; +import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice'; import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types'; import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize'; -import { heightChanged, selectOptimalDimension, widthChanged } from 'features/parameters/store/generationSlice'; +import { selectOptimalDimension } from 'features/parameters/store/generationSlice'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; import { memo, useCallback, useEffect, useMemo, useState } from 'react'; import { useTranslation } from 'react-i18next'; @@ -99,8 +100,8 @@ const ControlAdapterImagePreview = ({ isSmall, id }: Props) => { controlImage.width / controlImage.height, optimalDimension * optimalDimension ); - dispatch(widthChanged(width)); - dispatch(heightChanged(height)); + dispatch(widthChanged({ width, updateAspectRatio: true })); + dispatch(heightChanged({ height, updateAspectRatio: true })); } }, [controlImage, activeTabName, dispatch, optimalDimension]);
diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterIPMethod.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterIPMethod.tsx index 7385997804..d7d91ab780 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterIPMethod.tsx +++ b/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterIPMethod.tsx @@ -23,8 +23,8 @@ const ParamControlAdapterIPMethod = ({ id }: Props) => { const options: { label: string; value: IPMethod }[] = useMemo( () => [ { label: t('controlnet.full'), value: 'full' }, - { label: t('controlnet.style'), value: 'style' }, - { label: t('controlnet.composition'), value: 'composition' }, + { label: `${t('controlnet.style')} (${t('common.beta')})`, value: 'style' }, + { label: `${t('controlnet.composition')} (${t('common.beta')})`, value: 'composition' }, ], [t] ); @@ -46,13 +46,9 @@ const ParamControlAdapterIPMethod = ({ id }: Props) => { const value = useMemo(() => options.find((o) => o.value === method), [options, method]); - if (!method) { - return null; - } - return ( - + {t('controlnet.ipAdapterMethod')}
diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterModel.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterModel.tsx index 00c7d5859d..73a7d695b3 100644 --- 
a/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterModel.tsx +++ b/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterModel.tsx @@ -102,13 +102,9 @@ const ParamControlAdapterModel = ({ id }: ParamControlAdapterModelProps) => { ); return ( - + - + { { const selector = useMemo( () => createMemoizedSelector(selectControlAdaptersSlice, (controlAdapters) => { - const cn = selectControlAdapterById(controlAdapters, id); - if (cn && cn?.type === 'ip_adapter') { - return cn.method; - } + const ca = selectControlAdapterById(controlAdapters, id); + assert(ca?.type === 'ip_adapter'); + return ca.method; }), [id] ); diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts index 9a1ce5e984..0c1ac20200 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts @@ -7,7 +7,7 @@ import { buildControlAdapter } from 'features/controlAdapters/util/buildControlA import { buildControlAdapterProcessor } from 'features/controlAdapters/util/buildControlAdapterProcessor'; import { zModelIdentifierField } from 'features/nodes/types/common'; import { merge, uniq } from 'lodash-es'; -import type { ControlNetModelConfig, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types'; +import type { ControlNetModelConfig, ImageDTO, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types'; import { socketInvocationError } from 'services/events/actions'; import { v4 as uuidv4 } from 'uuid'; @@ -134,23 +134,46 @@ export const controlAdaptersSlice = createSlice({ const { id, isEnabled } = action.payload; caAdapter.updateOne(state, { id, changes: { isEnabled } }); }, - controlAdapterImageChanged: ( - state, - action: PayloadAction<{ - id: string; - controlImage: string | null; - }> - ) => { + controlAdapterImageChanged: (state, action: PayloadAction<{ id: string; controlImage: ImageDTO | null }>) => { const { id, controlImage } = action.payload; const ca = selectControlAdapterById(state, id); if (!ca) { return; } - caAdapter.updateOne(state, { - id, - changes: { controlImage, processedControlImage: null }, - }); + if (isControlNetOrT2IAdapter(ca)) { + if (controlImage) { + const { image_name, width, height } = controlImage; + const processorNode = deepClone(ca.processorNode); + const minDim = Math.min(controlImage.width, controlImage.height); + if ('detect_resolution' in processorNode) { + processorNode.detect_resolution = minDim; + } + if ('image_resolution' in processorNode) { + processorNode.image_resolution = minDim; + } + if ('resolution' in processorNode) { + processorNode.resolution = minDim; + } + caAdapter.updateOne(state, { + id, + changes: { + processorNode, + controlImage: image_name, + controlImageDimensions: { width, height }, + processedControlImage: null, + }, + }); + } else { + caAdapter.updateOne(state, { + id, + changes: { controlImage: null, controlImageDimensions: null, processedControlImage: null }, + }); + } + } else { + // ip adapter + caAdapter.updateOne(state, { id, changes: { controlImage: controlImage?.image_name ?? 
null } }); + } if (controlImage !== null && isControlNetOrT2IAdapter(ca) && ca.processorType !== 'none') { state.pendingControlImages.push(id); @@ -160,7 +183,7 @@ export const controlAdaptersSlice = createSlice({ state, action: PayloadAction<{ id: string; - processedControlImage: string | null; + processedControlImage: ImageDTO | null; }> ) => { const { id, processedControlImage } = action.payload; @@ -173,12 +196,24 @@ export const controlAdaptersSlice = createSlice({ return; } - caAdapter.updateOne(state, { - id, - changes: { - processedControlImage, - }, - }); + if (processedControlImage) { + const { image_name, width, height } = processedControlImage; + caAdapter.updateOne(state, { + id, + changes: { + processedControlImage: image_name, + processedControlImageDimensions: { width, height }, + }, + }); + } else { + caAdapter.updateOne(state, { + id, + changes: { + processedControlImage: null, + processedControlImageDimensions: null, + }, + }); + } state.pendingControlImages = state.pendingControlImages.filter((pendingId) => pendingId !== id); }, @@ -192,7 +227,7 @@ export const controlAdaptersSlice = createSlice({ state, action: PayloadAction<{ id: string; - modelConfig: ControlNetModelConfig | T2IAdapterModelConfig | IPAdapterModelConfig; + modelConfig: ControlNetModelConfig | T2IAdapterModelConfig | IPAdapterModelConfig | null; }> ) => { const { id, modelConfig } = action.payload; @@ -201,6 +236,11 @@ export const controlAdaptersSlice = createSlice({ return; } + if (modelConfig === null) { + caAdapter.updateOne(state, { id, changes: { model: null } }); + return; + } + const model = zModelIdentifierField.parse(modelConfig); if (!isControlNetOrT2IAdapter(cn)) { @@ -208,22 +248,36 @@ export const controlAdaptersSlice = createSlice({ return; } - const update: Update = { - id, - changes: { model, shouldAutoConfig: true }, - }; - - update.changes.processedControlImage = null; - if (modelConfig.type === 'ip_adapter') { // should never happen... 
return; } - const processor = buildControlAdapterProcessor(modelConfig); - update.changes.processorType = processor.processorType; - update.changes.processorNode = processor.processorNode; + // We always update the model + const update: Update = { id, changes: { model } }; + // Build the default processor for this model + const processor = buildControlAdapterProcessor(modelConfig); + if (processor.processorType !== cn.processorNode.type) { + // If the processor type has changed, update the processor node + update.changes.shouldAutoConfig = true; + update.changes.processedControlImage = null; + update.changes.processorType = processor.processorType; + update.changes.processorNode = processor.processorNode; + + if (cn.controlImageDimensions) { + const minDim = Math.min(cn.controlImageDimensions.width, cn.controlImageDimensions.height); + if ('detect_resolution' in update.changes.processorNode) { + update.changes.processorNode.detect_resolution = minDim; + } + if ('image_resolution' in update.changes.processorNode) { + update.changes.processorNode.image_resolution = minDim; + } + if ('resolution' in update.changes.processorNode) { + update.changes.processorNode.resolution = minDim; + } + } + } caAdapter.updateOne(state, update); }, controlAdapterWeightChanged: (state, action: PayloadAction<{ id: string; weight: number }>) => { @@ -340,8 +394,23 @@ export const controlAdaptersSlice = createSlice({ if (update.changes.shouldAutoConfig && modelConfig) { const processor = buildControlAdapterProcessor(modelConfig); - update.changes.processorType = processor.processorType; - update.changes.processorNode = processor.processorNode; + if (processor.processorType !== cn.processorNode.type) { + update.changes.processorType = processor.processorType; + update.changes.processorNode = processor.processorNode; + // Copy the image resolution settings over to the new processor node + if (cn.controlImageDimensions) { + const minDim = Math.min(cn.controlImageDimensions.width, cn.controlImageDimensions.height); + if ('detect_resolution' in update.changes.processorNode) { + update.changes.processorNode.detect_resolution = minDim; + } + if ('image_resolution' in update.changes.processorNode) { + update.changes.processorNode.image_resolution = minDim; + } + if ('resolution' in update.changes.processorNode) { + update.changes.processorNode.resolution = minDim; + } + } + } } caAdapter.updateOne(state, update);
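Editor's note: the same three `in`-guarded assignments (`detect_resolution`, `image_resolution`, `resolution`) now appear in `controlAdapterImageChanged`, `controlAdapterModelChanged`, and the `shouldAutoConfig` reducer above. A possible follow-up would be to extract a small helper; this is a sketch only, `applyProcessorResolution` is a hypothetical name and not part of this PR:

```ts
import type { RequiredControlAdapterProcessorNode } from 'features/controlAdapters/store/types';

// Hypothetical helper: pin a processor's resolution fields to the control
// image's smaller edge, mirroring the three `in` checks repeated in the
// reducers above. Mutates the node, so it should be called on an Immer
// draft or a cloned processorNode.
const applyProcessorResolution = (
  processorNode: RequiredControlAdapterProcessorNode,
  width: number,
  height: number
): void => {
  const minDim = Math.min(width, height);
  if ('detect_resolution' in processorNode) {
    processorNode.detect_resolution = minDim;
  }
  if ('image_resolution' in processorNode) {
    processorNode.image_resolution = minDim;
  }
  if ('resolution' in processorNode) {
    processorNode.resolution = minDim;
  }
};
```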
diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/types.ts b/invokeai/frontend/web/src/features/controlAdapters/store/types.ts index 7e2f18af5c..80af59cd01 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/types.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/types.ts @@ -225,7 +225,9 @@ export type ControlNetConfig = { controlMode: ControlMode; resizeMode: ResizeMode; controlImage: string | null; + controlImageDimensions: { width: number; height: number } | null; processedControlImage: string | null; + processedControlImageDimensions: { width: number; height: number } | null; processorType: ControlAdapterProcessorType; processorNode: RequiredControlAdapterProcessorNode; shouldAutoConfig: boolean; @@ -241,7 +243,9 @@ export type T2IAdapterConfig = { endStepPct: number; resizeMode: ResizeMode; controlImage: string | null; + controlImageDimensions: { width: number; height: number } | null; processedControlImage: string | null; + processedControlImageDimensions: { width: number; height: number } | null; processorType: ControlAdapterProcessorType; processorNode: RequiredControlAdapterProcessorNode; shouldAutoConfig: boolean;
diff --git a/invokeai/frontend/web/src/features/controlAdapters/util/buildControlAdapter.ts b/invokeai/frontend/web/src/features/controlAdapters/util/buildControlAdapter.ts index ad7bdba363..7c9c28e2b3 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/util/buildControlAdapter.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/util/buildControlAdapter.ts @@ -20,7 +20,9 @@ export const initialControlNet: Omit = { controlMode: 'balanced', resizeMode: 'just_resize', controlImage: null, + controlImageDimensions: null, processedControlImage: null, + processedControlImageDimensions: null, processorType: 'canny_image_processor', processorNode: CONTROLNET_PROCESSORS.canny_image_processor.buildDefaults() as RequiredCannyImageProcessorInvocation, shouldAutoConfig: true, @@ -35,7 +37,9 @@ export const initialT2IAdapter: Omit = { endStepPct: 1, resizeMode: 'just_resize', controlImage: null, + controlImageDimensions: null, processedControlImage: null, + processedControlImageDimensions: null, processorType: 'canny_image_processor', processorNode: CONTROLNET_PROCESSORS.canny_image_processor.buildDefaults() as RequiredCannyImageProcessorInvocation, shouldAutoConfig: true,
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/AddLayerButton.tsx b/invokeai/frontend/web/src/features/controlLayers/components/AddLayerButton.tsx new file mode 100644 index 0000000000..b521153239 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/AddLayerButton.tsx @@ -0,0 +1,41 @@ +import { Button, Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library'; +import { guidanceLayerAdded } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiPlusBold } from 'react-icons/pi'; + +export const AddLayerButton = memo(() => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const addRegionalGuidanceLayer = useCallback(() => { + dispatch(guidanceLayerAdded('regional_guidance_layer')); + }, [dispatch]); + const addControlAdapterLayer = useCallback(() => { + dispatch(guidanceLayerAdded('control_adapter_layer')); + }, [dispatch]); + const addIPAdapterLayer = useCallback(() => { + dispatch(guidanceLayerAdded('ip_adapter_layer')); + }, [dispatch]); + + return ( + + } variant="ghost"> + {t('controlLayers.addLayer')} + + + } onClick={addRegionalGuidanceLayer}> + {t('controlLayers.regionalGuidanceLayer')} + + } onClick={addControlAdapterLayer}> + {t('controlLayers.globalControlAdapterLayer')} + + } onClick={addIPAdapterLayer}> + {t('controlLayers.globalIPAdapterLayer')} + + + + ); +}); + +AddLayerButton.displayName = 'AddLayerButton';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/AddPromptButtons.tsx b/invokeai/frontend/web/src/features/controlLayers/components/AddPromptButtons.tsx new file mode 100644 index 0000000000..88eac207b2 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/AddPromptButtons.tsx @@ -0,0 +1,70 @@ +import { Button, Flex } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { guidanceLayerIPAdapterAdded } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; +import { useAppDispatch, useAppSelector } from 
'app/store/storeHooks'; +import { + isRegionalGuidanceLayer, + maskLayerNegativePromptChanged, + maskLayerPositivePromptChanged, + selectControlLayersSlice, +} from 'features/controlLayers/store/controlLayersSlice'; +import { useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiPlusBold } from 'react-icons/pi'; +import { assert } from 'tsafe'; +type AddPromptButtonProps = { + layerId: string; +}; + +export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const selectValidActions = useMemo( + () => + createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.find((l) => l.id === layerId); + assert(isRegionalGuidanceLayer(layer), `Layer ${layerId} not found or not an RP layer`); + return { + canAddPositivePrompt: layer.positivePrompt === null, + canAddNegativePrompt: layer.negativePrompt === null, + }; + }), + [layerId] + ); + const validActions = useAppSelector(selectValidActions); + const addPositivePrompt = useCallback(() => { + dispatch(maskLayerPositivePromptChanged({ layerId, prompt: '' })); + }, [dispatch, layerId]); + const addNegativePrompt = useCallback(() => { + dispatch(maskLayerNegativePromptChanged({ layerId, prompt: '' })); + }, [dispatch, layerId]); + const addIPAdapter = useCallback(() => { + dispatch(guidanceLayerIPAdapterAdded(layerId)); + }, [dispatch, layerId]); + + return ( + + + + + + ); +}; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/BrushSize.tsx b/invokeai/frontend/web/src/features/controlLayers/components/BrushSize.tsx new file mode 100644 index 0000000000..a34250c29f --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/BrushSize.tsx @@ -0,0 +1,63 @@ +import { + CompositeNumberInput, + CompositeSlider, + FormControl, + FormLabel, + Popover, + PopoverArrow, + PopoverBody, + PopoverContent, + PopoverTrigger, +} from '@invoke-ai/ui-library'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { brushSizeChanged, initialControlLayersState } from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +const marks = [0, 100, 200, 300]; +const formatPx = (v: number | string) => `${v} px`; + +export const BrushSize = memo(() => { + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + const brushSize = useAppSelector((s) => s.controlLayers.present.brushSize); + const onChange = useCallback( + (v: number) => { + dispatch(brushSizeChanged(Math.round(v))); + }, + [dispatch] + ); + return ( + + {t('controlLayers.brushSize')} + + + + + + + + + + + + + ); +}); + +BrushSize.displayName = 'BrushSize'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/CALayerListItem.tsx b/invokeai/frontend/web/src/features/controlLayers/components/CALayerListItem.tsx new file mode 100644 index 0000000000..f97546c4fe --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/CALayerListItem.tsx @@ -0,0 +1,71 @@ +import { Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import CALayerOpacity from 'features/controlLayers/components/CALayerOpacity'; +import ControlAdapterLayerConfig from 
'features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig'; +import { LayerDeleteButton } from 'features/controlLayers/components/LayerDeleteButton'; +import { LayerMenu } from 'features/controlLayers/components/LayerMenu'; +import { LayerTitle } from 'features/controlLayers/components/LayerTitle'; +import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerVisibilityToggle'; +import { + isControlAdapterLayer, + layerSelected, + selectControlLayersSlice, +} from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback, useMemo } from 'react'; +import { assert } from 'tsafe'; + +type Props = { + layerId: string; +}; + +export const CALayerListItem = memo(({ layerId }: Props) => { + const dispatch = useAppDispatch(); + const selector = useMemo( + () => + createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.find((l) => l.id === layerId); + assert(isControlAdapterLayer(layer), `Layer ${layerId} not found or not a ControlNet layer`); + return { + controlNetId: layer.controlNetId, + isSelected: layerId === controlLayers.present.selectedLayerId, + }; + }), + [layerId] + ); + const { controlNetId, isSelected } = useAppSelector(selector); + const onClickCapture = useCallback(() => { + // Must be capture so that the layer is selected before deleting/resetting/etc + dispatch(layerSelected(layerId)); + }, [dispatch, layerId]); + const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true }); + + return ( + + + + + + + + + + + {isOpen && ( + + + + )} + + + ); +}); + +CALayerListItem.displayName = 'CALayerListItem'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/CALayerOpacity.tsx b/invokeai/frontend/web/src/features/controlLayers/components/CALayerOpacity.tsx new file mode 100644 index 0000000000..a6107da1ec --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/CALayerOpacity.tsx @@ -0,0 +1,98 @@ +import { + CompositeNumberInput, + CompositeSlider, + Flex, + FormControl, + FormLabel, + IconButton, + Popover, + PopoverArrow, + PopoverBody, + PopoverContent, + PopoverTrigger, + Switch, +} from '@invoke-ai/ui-library'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { stopPropagation } from 'common/util/stopPropagation'; +import { useLayerOpacity } from 'features/controlLayers/hooks/layerStateHooks'; +import { isFilterEnabledChanged, layerOpacityChanged } from 'features/controlLayers/store/controlLayersSlice'; +import type { ChangeEvent } from 'react'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiDropHalfFill } from 'react-icons/pi'; + +type Props = { + layerId: string; +}; + +const marks = [0, 25, 50, 75, 100]; +const formatPct = (v: number | string) => `${v} %`; + +const CALayerOpacity = ({ layerId }: Props) => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const { opacity, isFilterEnabled } = useLayerOpacity(layerId); + const onChangeOpacity = useCallback( + (v: number) => { + dispatch(layerOpacityChanged({ layerId, opacity: v / 100 })); + }, + [dispatch, layerId] + ); + const onChangeFilter = useCallback( + (e: ChangeEvent) => { + dispatch(isFilterEnabledChanged({ layerId, isFilterEnabled: e.target.checked })); + }, + [dispatch, layerId] + ); + return ( + + + } + variant="ghost" + onDoubleClick={stopPropagation} + /> + + + + + + + + {t('controlLayers.opacityFilter')} + + + + + {t('controlLayers.opacity')} + + + + + 
+ + + ); +}; + +export default memo(CALayerOpacity); diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersEditor.stories.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersEditor.stories.tsx new file mode 100644 index 0000000000..c0fa306c6b --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersEditor.stories.tsx @@ -0,0 +1,24 @@ +import { Flex } from '@invoke-ai/ui-library'; +import type { Meta, StoryObj } from '@storybook/react'; +import { ControlLayersEditor } from 'features/controlLayers/components/ControlLayersEditor'; + +const meta: Meta = { + title: 'Feature/ControlLayers', + tags: ['autodocs'], + component: ControlLayersEditor, +}; + +export default meta; +type Story = StoryObj; + +const Component = () => { + return ( + + + + ); +}; + +export const Default: Story = { + render: Component, +}; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersEditor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersEditor.tsx new file mode 100644 index 0000000000..e9275426fe --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersEditor.tsx @@ -0,0 +1,24 @@ +/* eslint-disable i18next/no-literal-string */ +import { Flex } from '@invoke-ai/ui-library'; +import { ControlLayersToolbar } from 'features/controlLayers/components/ControlLayersToolbar'; +import { StageComponent } from 'features/controlLayers/components/StageComponent'; +import { memo } from 'react'; + +export const ControlLayersEditor = memo(() => { + return ( + + + + + ); +}); + +ControlLayersEditor.displayName = 'ControlLayersEditor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersPanelContent.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersPanelContent.tsx new file mode 100644 index 0000000000..e2865be356 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersPanelContent.tsx @@ -0,0 +1,59 @@ +/* eslint-disable i18next/no-literal-string */ +import { Flex } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { useAppSelector } from 'app/store/storeHooks'; +import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent'; +import { AddLayerButton } from 'features/controlLayers/components/AddLayerButton'; +import { CALayerListItem } from 'features/controlLayers/components/CALayerListItem'; +import { DeleteAllLayersButton } from 'features/controlLayers/components/DeleteAllLayersButton'; +import { IPLayerListItem } from 'features/controlLayers/components/IPLayerListItem'; +import { RGLayerListItem } from 'features/controlLayers/components/RGLayerListItem'; +import { isRenderableLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice'; +import type { Layer } from 'features/controlLayers/store/types'; +import { partition } from 'lodash-es'; +import { memo } from 'react'; + +const selectLayerIdTypePairs = createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const [renderableLayers, ipAdapterLayers] = partition(controlLayers.present.layers, isRenderableLayer); + return [...ipAdapterLayers, ...renderableLayers].map((l) => ({ id: l.id, type: l.type })).reverse(); +}); + +export const ControlLayersPanelContent = memo(() => { + const layerIdTypePairs = useAppSelector(selectLayerIdTypePairs); + return ( + + + + + + + + 
{layerIdTypePairs.map(({ id, type }) => ( + + ))} + + + + ); +}); + +ControlLayersPanelContent.displayName = 'ControlLayersPanelContent'; + +type LayerWrapperProps = { + id: string; + type: Layer['type']; +}; + +const LayerWrapper = memo(({ id, type }: LayerWrapperProps) => { + if (type === 'regional_guidance_layer') { + return ; + } + if (type === 'control_adapter_layer') { + return ; + } + if (type === 'ip_adapter_layer') { + return ; + } +}); + +LayerWrapper.displayName = 'LayerWrapper';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersSettingsPopover.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersSettingsPopover.tsx new file mode 100644 index 0000000000..89032b7c76 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersSettingsPopover.tsx @@ -0,0 +1,26 @@ +import { Flex, IconButton, Popover, PopoverBody, PopoverContent, PopoverTrigger } from '@invoke-ai/ui-library'; +import { GlobalMaskLayerOpacity } from 'features/controlLayers/components/GlobalMaskLayerOpacity'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { RiSettings4Fill } from 'react-icons/ri'; + +const ControlLayersSettingsPopover = () => { + const { t } = useTranslation(); + + return ( + + + } /> + + + + + + + + + + ); +}; + +export default memo(ControlLayersSettingsPopover);
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersToolbar.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersToolbar.tsx new file mode 100644 index 0000000000..15a74a332a --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersToolbar.tsx @@ -0,0 +1,20 @@ +/* eslint-disable i18next/no-literal-string */ +import { Flex } from '@invoke-ai/ui-library'; +import { BrushSize } from 'features/controlLayers/components/BrushSize'; +import ControlLayersSettingsPopover from 'features/controlLayers/components/ControlLayersSettingsPopover'; +import { ToolChooser } from 'features/controlLayers/components/ToolChooser'; +import { UndoRedoButtonGroup } from 'features/controlLayers/components/UndoRedoButtonGroup'; +import { memo } from 'react'; + +export const ControlLayersToolbar = memo(() => { + return ( + + + + + + + ); +}); + +ControlLayersToolbar.displayName = 'ControlLayersToolbar';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/DeleteAllLayersButton.tsx b/invokeai/frontend/web/src/features/controlLayers/components/DeleteAllLayersButton.tsx new file mode 100644 index 0000000000..c55864afa5 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/DeleteAllLayersButton.tsx @@ -0,0 +1,22 @@ +import { Button } from '@invoke-ai/ui-library'; +import { allLayersDeleted } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiTrashSimpleBold } from 'react-icons/pi'; + +export const DeleteAllLayersButton = memo(() => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const onClick = useCallback(() => { + dispatch(allLayersDeleted()); + }, [dispatch]); + + return ( + + ); +}); + +DeleteAllLayersButton.displayName = 'DeleteAllLayersButton';
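Editor's note on `selectLayerIdTypePairs` in ControlLayersPanelContent above: the `partition` + concat + `reverse` determines panel ordering from state ordering. A small worked sketch (the layer ids here are hypothetical, and `isRenderable` stands in for the slice's `isRenderableLayer` guard):

```ts
import { partition } from 'lodash-es';

// Illustrative layer list in state order, oldest first.
const layers = [
  { id: 'rg1', type: 'regional_guidance_layer' },
  { id: 'ca1', type: 'control_adapter_layer' },
  { id: 'ip1', type: 'ip_adapter_layer' },
  { id: 'ca2', type: 'control_adapter_layer' },
];

// Stand-in for isRenderableLayer: IP adapter layers have no canvas presence.
const isRenderable = (l: { type: string }) => l.type !== 'ip_adapter_layer';
const [renderableLayers, ipAdapterLayers] = partition(layers, isRenderable);

// IP adapter layers are hoisted ahead of renderables, then the whole list is
// reversed, so the newest renderable layer lands at the top of the panel and
// the IP adapter layers end up grouped at the bottom.
const panelOrder = [...ipAdapterLayers, ...renderableLayers].map((l) => l.id).reverse();
// => ['ca2', 'ca1', 'rg1', 'ip1']
```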
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/GlobalMaskLayerOpacity.tsx b/invokeai/frontend/web/src/features/controlLayers/components/GlobalMaskLayerOpacity.tsx new file mode 100644 index 0000000000..40985499db --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/GlobalMaskLayerOpacity.tsx @@ -0,0 +1,54 @@ +import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { + globalMaskLayerOpacityChanged, + initialControlLayersState, +} from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +const marks = [0, 25, 50, 75, 100]; +const formatPct = (v: number | string) => `${v} %`; + +export const GlobalMaskLayerOpacity = memo(() => { + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + const globalMaskLayerOpacity = useAppSelector((s) => + Math.round(s.controlLayers.present.globalMaskLayerOpacity * 100) + ); + const onChange = useCallback( + (v: number) => { + dispatch(globalMaskLayerOpacityChanged(v / 100)); + }, + [dispatch] + ); + return ( + + {t('controlLayers.globalMaskOpacity')} + + + + + + ); +}); + +GlobalMaskLayerOpacity.displayName = 'GlobalMaskLayerOpacity';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/IPLayerListItem.tsx b/invokeai/frontend/web/src/features/controlLayers/components/IPLayerListItem.tsx new file mode 100644 index 0000000000..bdc54373a0 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/IPLayerListItem.tsx @@ -0,0 +1,47 @@ +import { Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { useAppSelector } from 'app/store/storeHooks'; +import ControlAdapterLayerConfig from 'features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig'; +import { LayerDeleteButton } from 'features/controlLayers/components/LayerDeleteButton'; +import { LayerTitle } from 'features/controlLayers/components/LayerTitle'; +import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerVisibilityToggle'; +import { isIPAdapterLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useMemo } from 'react'; +import { assert } from 'tsafe'; + +type Props = { + layerId: string; +}; + +export const IPLayerListItem = memo(({ layerId }: Props) => { + const selector = useMemo( + () => + createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.find((l) => l.id === layerId); + assert(isIPAdapterLayer(layer), `Layer ${layerId} not found or not an IP Adapter layer`); + return layer.ipAdapterId; + }), + [layerId] + ); + const ipAdapterId = useAppSelector(selector); + const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true }); + return ( + + + + + + + + + {isOpen && ( + + + + )} + + + ); +}); + +IPLayerListItem.displayName = 'IPLayerListItem';
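Editor's note: the opacity controls above store a 0–1 float but present 0–100%; the selector scales up with `Math.round(x * 100)` and `onChange` scales back down with `v / 100`. A tiny sketch of the round-trip (the helper names are illustrative, not from the PR):

```ts
// Store -> UI: 0.305 displays as 31 (%)
const toPct = (opacity: number) => Math.round(opacity * 100);
// UI -> store: 31 (%) is stored as 0.31
const fromPct = (pct: number) => pct / 100;

toPct(0.305); // 31
fromPct(toPct(0.305)); // 0.31 — rounding means the round-trip can drift by < 0.005
```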
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/LayerDeleteButton.tsx b/invokeai/frontend/web/src/features/controlLayers/components/LayerDeleteButton.tsx new file mode 100644 index 0000000000..0c74b2a9ea --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/LayerDeleteButton.tsx @@ -0,0 +1,30 @@ +import { IconButton } from '@invoke-ai/ui-library'; +import { guidanceLayerDeleted } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { stopPropagation } from 'common/util/stopPropagation'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiTrashSimpleBold } from 'react-icons/pi'; + +type Props = { layerId: string }; + +export const LayerDeleteButton = memo(({ layerId }: Props) => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const deleteLayer = useCallback(() => { + dispatch(guidanceLayerDeleted(layerId)); + }, [dispatch, layerId]); + return ( + } + onClick={deleteLayer} + onDoubleClick={stopPropagation} // double click expands the layer + /> + ); +}); + +LayerDeleteButton.displayName = 'LayerDeleteButton';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/LayerMenu.tsx b/invokeai/frontend/web/src/features/controlLayers/components/LayerMenu.tsx new file mode 100644 index 0000000000..e5c8cc0aac --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/LayerMenu.tsx @@ -0,0 +1,59 @@ +import { IconButton, Menu, MenuButton, MenuDivider, MenuItem, MenuList } from '@invoke-ai/ui-library'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { stopPropagation } from 'common/util/stopPropagation'; +import { LayerMenuArrangeActions } from 'features/controlLayers/components/LayerMenuArrangeActions'; +import { LayerMenuRGActions } from 'features/controlLayers/components/LayerMenuRGActions'; +import { useLayerType } from 'features/controlLayers/hooks/layerStateHooks'; +import { layerDeleted, layerReset } from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiArrowCounterClockwiseBold, PiDotsThreeVerticalBold, PiTrashSimpleBold } from 'react-icons/pi'; + +type Props = { layerId: string }; + +export const LayerMenu = memo(({ layerId }: Props) => { + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + const layerType = useLayerType(layerId); + const resetLayer = useCallback(() => { + dispatch(layerReset(layerId)); + }, [dispatch, layerId]); + const deleteLayer = useCallback(() => { + dispatch(layerDeleted(layerId)); + }, [dispatch, layerId]); + return ( + + } + onDoubleClick={stopPropagation} // double click expands the layer + /> + + {layerType === 'regional_guidance_layer' && ( + <> + + + + )} + {(layerType === 'regional_guidance_layer' || layerType === 'control_adapter_layer') && ( + <> + + + + )} + {layerType === 'regional_guidance_layer' && ( + }> + {t('accessibility.reset')} + + )} + } color="error.300"> + {t('common.delete')} + + + + ); +}); + +LayerMenu.displayName = 'LayerMenu';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/LayerMenuArrangeActions.tsx b/invokeai/frontend/web/src/features/controlLayers/components/LayerMenuArrangeActions.tsx new file mode 100644 index 0000000000..9c51671a39 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/LayerMenuArrangeActions.tsx @@ -0,0 +1,69 @@ +import { MenuItem } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { + isRenderableLayer, + layerMovedBackward, + layerMovedForward, + layerMovedToBack, + layerMovedToFront, + selectControlLayersSlice, +} from 
'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiArrowDownBold, PiArrowLineDownBold, PiArrowLineUpBold, PiArrowUpBold } from 'react-icons/pi'; +import { assert } from 'tsafe'; + +type Props = { layerId: string }; + +export const LayerMenuArrangeActions = memo(({ layerId }: Props) => { + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + const selectValidActions = useMemo( + () => + createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.find((l) => l.id === layerId); + assert(isRenderableLayer(layer), `Layer ${layerId} not found or not a renderable layer`); + const layerIndex = controlLayers.present.layers.findIndex((l) => l.id === layerId); + const layerCount = controlLayers.present.layers.length; + return { + canMoveForward: layerIndex < layerCount - 1, + canMoveBackward: layerIndex > 0, + canMoveToFront: layerIndex < layerCount - 1, + canMoveToBack: layerIndex > 0, + }; + }), + [layerId] + ); + const validActions = useAppSelector(selectValidActions); + const moveForward = useCallback(() => { + dispatch(layerMovedForward(layerId)); + }, [dispatch, layerId]); + const moveToFront = useCallback(() => { + dispatch(layerMovedToFront(layerId)); + }, [dispatch, layerId]); + const moveBackward = useCallback(() => { + dispatch(layerMovedBackward(layerId)); + }, [dispatch, layerId]); + const moveToBack = useCallback(() => { + dispatch(layerMovedToBack(layerId)); + }, [dispatch, layerId]); + return ( + <> + }> + {t('controlLayers.moveToFront')} + + }> + {t('controlLayers.moveForward')} + + }> + {t('controlLayers.moveBackward')} + + }> + {t('controlLayers.moveToBack')} + + + ); +}); + +LayerMenuArrangeActions.displayName = 'LayerMenuArrangeActions';
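Editor's note: every selector in these components reads `controlLayers.present.…`, and the toolbar mounts an `UndoRedoButtonGroup`, which strongly suggests the slice is wrapped in an undo history (e.g. redux-undo's `{ past, present, future }` shape). A minimal sketch under that assumption, with simplified types, not the actual slice wiring:

```ts
// A minimal sketch, assuming redux-undo; not confirmed by this diff alone.
import undoable from 'redux-undo';

type ControlLayersState = {
  layers: Array<{ id: string; type: string }>;
  selectedLayerId: string | null;
};

declare const controlLayersReducer: (
  state: ControlLayersState | undefined,
  action: { type: string }
) => ControlLayersState;

// Wrapping yields { past, present, future }, which is why selectors read
// `controlLayers.present.layers` rather than `controlLayers.layers`, and why
// the toolbar can offer undo/redo over layer edits.
export const controlLayersSliceReducer = undoable(controlLayersReducer);
```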
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/LayerMenuRGActions.tsx b/invokeai/frontend/web/src/features/controlLayers/components/LayerMenuRGActions.tsx new file mode 100644 index 0000000000..6c2bb4c26b --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/LayerMenuRGActions.tsx @@ -0,0 +1,58 @@ +import { MenuItem } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { guidanceLayerIPAdapterAdded } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { + isRegionalGuidanceLayer, + maskLayerNegativePromptChanged, + maskLayerPositivePromptChanged, + selectControlLayersSlice, +} from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiPlusBold } from 'react-icons/pi'; +import { assert } from 'tsafe'; + +type Props = { layerId: string }; + +export const LayerMenuRGActions = memo(({ layerId }: Props) => { + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + const selectValidActions = useMemo( + () => + createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.find((l) => l.id === layerId); + assert(isRegionalGuidanceLayer(layer), `Layer ${layerId} not found or not an RP layer`); + return { + canAddPositivePrompt: layer.positivePrompt === null, + canAddNegativePrompt: layer.negativePrompt === null, + }; + }), + [layerId] + ); + const validActions = useAppSelector(selectValidActions); + const addPositivePrompt = useCallback(() => { + dispatch(maskLayerPositivePromptChanged({ layerId, prompt: '' })); + }, [dispatch, layerId]); + const addNegativePrompt = useCallback(() => { + dispatch(maskLayerNegativePromptChanged({ layerId, prompt: '' })); + }, [dispatch, layerId]); + const addIPAdapter = useCallback(() => { + dispatch(guidanceLayerIPAdapterAdded(layerId)); + }, [dispatch, layerId]); + return ( + <> + }> + {t('controlLayers.addPositivePrompt')} + + }> + {t('controlLayers.addNegativePrompt')} + + }> + {t('controlLayers.addIPAdapter')} + + + ); +}); + +LayerMenuRGActions.displayName = 'LayerMenuRGActions';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/LayerTitle.tsx b/invokeai/frontend/web/src/features/controlLayers/components/LayerTitle.tsx new file mode 100644 index 0000000000..ec13ff7bcc --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/LayerTitle.tsx @@ -0,0 +1,29 @@ +import { Text } from '@invoke-ai/ui-library'; +import type { Layer } from 'features/controlLayers/store/types'; +import { memo, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; + +type Props = { + type: Layer['type']; +}; + +export const LayerTitle = memo(({ type }: Props) => { + const { t } = useTranslation(); + const title = useMemo(() => { + if (type === 'regional_guidance_layer') { + return t('controlLayers.regionalGuidance'); + } else if (type === 'control_adapter_layer') { + return t('controlLayers.globalControlAdapter'); + } else if (type === 'ip_adapter_layer') { + return t('controlLayers.globalIPAdapter'); + } + }, [t, type]); + + return ( + + {title} + + ); +}); + +LayerTitle.displayName = 'LayerTitle';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/LayerVisibilityToggle.tsx b/invokeai/frontend/web/src/features/controlLayers/components/LayerVisibilityToggle.tsx new file mode 100644 index 0000000000..d2dab39e36 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/LayerVisibilityToggle.tsx @@ -0,0 +1,36 @@ +import { IconButton } from '@invoke-ai/ui-library'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { stopPropagation } from 'common/util/stopPropagation'; +import { useLayerIsVisible } from 'features/controlLayers/hooks/layerStateHooks'; +import { layerVisibilityToggled } from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiCheckBold } from 'react-icons/pi'; + +type Props = { + layerId: string; +}; + +export const LayerVisibilityToggle = memo(({ layerId }: Props) => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const isVisible = useLayerIsVisible(layerId); + const onClick = useCallback(() => { + dispatch(layerVisibilityToggled(layerId)); + }, [dispatch, layerId]); + + return ( + : undefined} + onClick={onClick} + colorScheme="base" + onDoubleClick={stopPropagation} // double click expands the layer + /> + ); +}); + +LayerVisibilityToggle.displayName = 'LayerVisibilityToggle';
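Editor's note: the repeated `onDoubleClick={stopPropagation} // double click expands the layer` on the header buttons above exists because the list-item header itself toggles open/closed on double click; without it, double-clicking the visibility/menu/delete buttons would bubble to the row and collapse the layer. A minimal sketch of the pattern (component and prop names here are hypothetical, not from the PR):

```tsx
import { stopPropagation } from 'common/util/stopPropagation';

// Hypothetical list-item header: the row expands/collapses on double click,
// so action buttons inside it stop the event from bubbling up to the row.
export const RowHeader = ({ onToggle, onDelete }: { onToggle: () => void; onDelete: () => void }) => (
  <div onDoubleClick={onToggle}>
    <button onClick={onDelete} onDoubleClick={stopPropagation}>
      Delete
    </button>
  </div>
);
```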
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerAutoNegativeCheckbox.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerAutoNegativeCheckbox.tsx new file mode 100644 index 0000000000..6f03d4b28d --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerAutoNegativeCheckbox.tsx @@ -0,0 +1,51 @@ +import { Checkbox, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import { createSelector } from '@reduxjs/toolkit'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { + isRegionalGuidanceLayer, + maskLayerAutoNegativeChanged, + selectControlLayersSlice, +} from 'features/controlLayers/store/controlLayersSlice'; +import type { ChangeEvent } from 'react'; +import { memo, useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { assert } from 'tsafe'; + +type Props = { + layerId: string; +}; + +const useAutoNegative = (layerId: string) => { + const selectAutoNegative = useMemo( + () => + createSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.find((l) => l.id === layerId); + assert(isRegionalGuidanceLayer(layer), `Layer ${layerId} not found or not an RP layer`); + return layer.autoNegative; + }), + [layerId] + ); + const autoNegative = useAppSelector(selectAutoNegative); + return autoNegative; +}; + +export const RGLayerAutoNegativeCheckbox = memo(({ layerId }: Props) => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const autoNegative = useAutoNegative(layerId); + const onChange = useCallback( + (e: ChangeEvent) => { + dispatch(maskLayerAutoNegativeChanged({ layerId, autoNegative: e.target.checked ? 'invert' : 'off' })); + }, + [dispatch, layerId] + ); + + return ( + + {t('controlLayers.autoNegative')} + + + ); +}); + +RGLayerAutoNegativeCheckbox.displayName = 'RGLayerAutoNegativeCheckbox';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerColorPicker.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerColorPicker.tsx new file mode 100644 index 0000000000..e76ab57a51 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerColorPicker.tsx @@ -0,0 +1,69 @@ +import { Flex, Popover, PopoverBody, PopoverContent, PopoverTrigger, Tooltip } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import RgbColorPicker from 'common/components/RgbColorPicker'; +import { stopPropagation } from 'common/util/stopPropagation'; +import { rgbColorToString } from 'features/canvas/util/colorToString'; +import { + isRegionalGuidanceLayer, + maskLayerPreviewColorChanged, + selectControlLayersSlice, +} from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback, useMemo } from 'react'; +import type { RgbColor } from 'react-colorful'; +import { useTranslation } from 'react-i18next'; +import { assert } from 'tsafe'; + +type Props = { + layerId: string; +}; + +export const RGLayerColorPicker = memo(({ layerId }: Props) => { + const { t } = useTranslation(); + const selectColor = useMemo( + () => + createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.find((l) => l.id === layerId); + assert(isRegionalGuidanceLayer(layer), `Layer ${layerId} not found or not a regional guidance layer`); + return layer.previewColor; + }), + [layerId] + ); + const color = useAppSelector(selectColor); + const dispatch = useAppDispatch(); + const onColorChange = useCallback( + (color: RgbColor) => { + dispatch(maskLayerPreviewColorChanged({ layerId, color })); + }, + [dispatch, layerId] + ); + return ( + + + + + + + + + + + + + + + ); +}); + +RGLayerColorPicker.displayName = 'RGLayerColorPicker';
diff --git 
a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerIPAdapterList.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerIPAdapterList.tsx new file mode 100644 index 0000000000..464bd41897 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerIPAdapterList.tsx @@ -0,0 +1,80 @@ +import { Divider, Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { guidanceLayerIPAdapterDeleted } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import ControlAdapterLayerConfig from 'features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig'; +import { isRegionalGuidanceLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback, useMemo } from 'react'; +import { PiTrashSimpleBold } from 'react-icons/pi'; +import { assert } from 'tsafe'; + +type Props = { + layerId: string; +}; + +export const RGLayerIPAdapterList = memo(({ layerId }: Props) => { + const selectIPAdapterIds = useMemo( + () => + createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.filter(isRegionalGuidanceLayer).find((l) => l.id === layerId); + assert(layer, `Layer ${layerId} not found`); + return layer.ipAdapterIds; + }), + [layerId] + ); + const ipAdapterIds = useAppSelector(selectIPAdapterIds); + + if (ipAdapterIds.length === 0) { + return null; + } + + return ( + <> + {ipAdapterIds.map((id, index) => ( + + {index > 0 && ( + + + + )} + + + ))} + + ); +}); + +RGLayerIPAdapterList.displayName = 'RGLayerIPAdapterList'; + +type IPAdapterListItemProps = { + layerId: string; + ipAdapterId: string; + ipAdapterNumber: number; +}; + +const RGLayerIPAdapterListItem = memo(({ layerId, ipAdapterId, ipAdapterNumber }: IPAdapterListItemProps) => { + const dispatch = useAppDispatch(); + const onDeleteIPAdapter = useCallback(() => { + dispatch(guidanceLayerIPAdapterDeleted({ layerId, ipAdapterId })); + }, [dispatch, ipAdapterId, layerId]); + + return ( + + + {`IP Adapter ${ipAdapterNumber}`} + + } + aria-label="Delete IP Adapter" + onClick={onDeleteIPAdapter} + variant="ghost" + colorScheme="error" + /> + + + + ); +}); + +RGLayerIPAdapterListItem.displayName = 'RGLayerIPAdapterListItem'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerListItem.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerListItem.tsx new file mode 100644 index 0000000000..3c126cabaa --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerListItem.tsx @@ -0,0 +1,84 @@ +import { Badge, Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { rgbColorToString } from 'features/canvas/util/colorToString'; +import { LayerDeleteButton } from 'features/controlLayers/components/LayerDeleteButton'; +import { LayerMenu } from 'features/controlLayers/components/LayerMenu'; +import { LayerTitle } from 'features/controlLayers/components/LayerTitle'; +import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerVisibilityToggle'; +import { RGLayerColorPicker } from 'features/controlLayers/components/RGLayerColorPicker'; 
+import { RGLayerIPAdapterList } from 'features/controlLayers/components/RGLayerIPAdapterList'; +import { RGLayerNegativePrompt } from 'features/controlLayers/components/RGLayerNegativePrompt'; +import { RGLayerPositivePrompt } from 'features/controlLayers/components/RGLayerPositivePrompt'; +import RGLayerSettingsPopover from 'features/controlLayers/components/RGLayerSettingsPopover'; +import { + isRegionalGuidanceLayer, + layerSelected, + selectControlLayersSlice, +} from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { assert } from 'tsafe'; + +import { AddPromptButtons } from './AddPromptButtons'; + +type Props = { + layerId: string; +}; + +export const RGLayerListItem = memo(({ layerId }: Props) => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const selector = useMemo( + () => + createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.find((l) => l.id === layerId); + assert(isRegionalGuidanceLayer(layer), `Layer ${layerId} not found or not an RP layer`); + return { + color: rgbColorToString(layer.previewColor), + hasPositivePrompt: layer.positivePrompt !== null, + hasNegativePrompt: layer.negativePrompt !== null, + hasIPAdapters: layer.ipAdapterIds.length > 0, + isSelected: layerId === controlLayers.present.selectedLayerId, + autoNegative: layer.autoNegative, + }; + }), + [layerId] + ); + const { autoNegative, color, hasPositivePrompt, hasNegativePrompt, hasIPAdapters, isSelected } = + useAppSelector(selector); + const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true }); + const onClick = useCallback(() => { + dispatch(layerSelected(layerId)); + }, [dispatch, layerId]); + return ( + + + + + + + {autoNegative === 'invert' && ( + + {t('controlLayers.autoNegative')} + + )} + + + + + + {isOpen && ( + + {!hasPositivePrompt && !hasNegativePrompt && !hasIPAdapters && } + {hasPositivePrompt && } + {hasNegativePrompt && } + {hasIPAdapters && } + + )} + + + ); +}); + +RGLayerListItem.displayName = 'RGLayerListItem'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerNegativePrompt.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerNegativePrompt.tsx new file mode 100644 index 0000000000..e869c8809a --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerNegativePrompt.tsx @@ -0,0 +1,58 @@ +import { Box, Textarea } from '@invoke-ai/ui-library'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { RGLayerPromptDeleteButton } from 'features/controlLayers/components/RGLayerPromptDeleteButton'; +import { useLayerNegativePrompt } from 'features/controlLayers/hooks/layerStateHooks'; +import { maskLayerNegativePromptChanged } from 'features/controlLayers/store/controlLayersSlice'; +import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper'; +import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton'; +import { PromptPopover } from 'features/prompt/PromptPopover'; +import { usePrompt } from 'features/prompt/usePrompt'; +import { memo, useCallback, useRef } from 'react'; +import { useTranslation } from 'react-i18next'; + +type Props = { + layerId: string; +}; + +export const RGLayerNegativePrompt = memo(({ layerId }: Props) => { + const prompt = useLayerNegativePrompt(layerId); + const dispatch = useAppDispatch(); + const textareaRef 
= useRef(null); + const { t } = useTranslation(); + const _onChange = useCallback( + (v: string) => { + dispatch(maskLayerNegativePromptChanged({ layerId, prompt: v })); + }, + [dispatch, layerId] + ); + const { onChange, isOpen, onClose, onOpen, onSelect, onKeyDown } = usePrompt({ + prompt, + textareaRef, + onChange: _onChange, + }); + + return ( + + +