From a4f88ff834c3ef04d0f3bc455311aeec5d15a3a8 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Tue, 28 May 2024 15:32:47 +1000
Subject: [PATCH 01/52] feat(events): add `__event_name__` as ClassVar to
 `EventBase`

This improves types for event consumers that need to access the event name.
---
 invokeai/app/services/events/events_common.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py
index 7d3d489bf5..32e5dc7d73 100644
--- a/invokeai/app/services/events/events_common.py
+++ b/invokeai/app/services/events/events_common.py
@@ -1,5 +1,5 @@
 from math import floor
-from typing import TYPE_CHECKING, Any, Coroutine, Generic, Optional, Protocol, TypeAlias, TypeVar
+from typing import TYPE_CHECKING, Any, ClassVar, Coroutine, Generic, Optional, Protocol, TypeAlias, TypeVar
 
 from fastapi_events.handlers.local import local_handler
 from fastapi_events.registry.payload_schema import registry as payload_schema
@@ -33,6 +33,7 @@ class EventBase(BaseModel):
     A timestamp is automatically added to the event when it is created.
     """
 
+    __event_name__: ClassVar[str]
     timestamp: int = Field(description="The timestamp of the event", default_factory=get_timestamp)
 
     model_config = ConfigDict(json_schema_serialization_defaults_required=True)


From 21aa42627b6061441f0961c187510b6000239fd7 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Tue, 28 May 2024 15:35:57 +1000
Subject: [PATCH 02/52] feat(events): add dynamic invocation & result
 validators

This is required to get these event fields to deserialize correctly. If
omitted, pydantic uses `BaseInvocation`/`BaseInvocationOutput`, which is not
correct. This is similar to the workaround in the `Graph` and
`GraphExecutionState` classes where we need to finagle pydantic with manual
validation handling.
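The fix follows the same pattern used in `Graph`/`GraphExecutionState`: a
`mode="plain"` field validator that defers to the runtime-built type adapter
instead of the static base-class annotation. A minimal sketch of the pattern
(the `ExampleEvent` model below is illustrative only; the actual fields and
validators are added in the diff that follows):

```py
from typing import Any

from pydantic import BaseModel, Field, SerializeAsAny, field_validator

from invokeai.app.invocations.baseinvocation import BaseInvocation


class ExampleEvent(BaseModel):
    # Illustrative model, not part of this patch. The field is annotated with the
    # base class so static typing still works, but validation is overridden below.
    invocation: SerializeAsAny[BaseInvocation] = Field(description="The invocation")

    @field_validator("invocation", mode="plain")
    @classmethod
    def _validate_invocation(cls, v: Any) -> BaseInvocation:
        # get_typeadapter() returns a TypeAdapter over the union of all invocation
        # classes registered at runtime, discriminated on their "type" field, so
        # deserialization resolves to the concrete subclass rather than BaseInvocation.
        return BaseInvocation.get_typeadapter().validate_python(v)
```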
--- invokeai/app/services/events/events_common.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py index 32e5dc7d73..3ae4468b83 100644 --- a/invokeai/app/services/events/events_common.py +++ b/invokeai/app/services/events/events_common.py @@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Any, ClassVar, Coroutine, Generic, Optional, P from fastapi_events.handlers.local import local_handler from fastapi_events.registry.payload_schema import registry as payload_schema -from pydantic import BaseModel, ConfigDict, Field, SerializeAsAny +from pydantic import BaseModel, ConfigDict, Field, SerializeAsAny, field_validator from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput from invokeai.app.services.session_processor.session_processor_common import ProgressImage @@ -101,6 +101,14 @@ class InvocationEventBase(QueueItemEventBase): invocation: SerializeAsAny[BaseInvocation] = Field(description="The ID of the invocation") invocation_source_id: str = Field(description="The ID of the prepared invocation's source node") + @field_validator("invocation", mode="plain") + @classmethod + def validate_invocation(cls, v: Any): + """Validates the invocation using the dynamic type adapter.""" + + invocation = BaseInvocation.get_typeadapter().validate_python(v) + return invocation + @payload_schema.register class InvocationStartedEvent(InvocationEventBase): @@ -176,6 +184,14 @@ class InvocationCompleteEvent(InvocationEventBase): result: SerializeAsAny[BaseInvocationOutput] = Field(description="The result of the invocation") + @field_validator("result", mode="plain") + @classmethod + def validate_results(cls, v: Any): + """Validates the invocation result using the dynamic type adapter.""" + + result = BaseInvocationOutput.get_typeadapter().validate_python(v) + return result + @classmethod def build( cls, queue_item: SessionQueueItem, invocation: BaseInvocation, result: BaseInvocationOutput From 994c61b67afb0ef515b89133ffa7a6d9f108e33d Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Mon, 27 May 2024 10:32:49 -0400 Subject: [PATCH 03/52] Add docs to TextualInversionManager and improve types. No changes to functionality. --- invokeai/backend/textual_inversion.py | 34 +++++++++++++++++++-------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/invokeai/backend/textual_inversion.py b/invokeai/backend/textual_inversion.py index f7390979bb..368736617b 100644 --- a/invokeai/backend/textual_inversion.py +++ b/invokeai/backend/textual_inversion.py @@ -1,7 +1,7 @@ """Textual Inversion wrapper class.""" from pathlib import Path -from typing import Dict, List, Optional, Union +from typing import Optional, Union import torch from compel.embeddings_provider import BaseTextualInversionManager @@ -66,33 +66,47 @@ class TextualInversionModelRaw(RawModel): return result -# no type hints for BaseTextualInversionManager? 
-class TextualInversionManager(BaseTextualInversionManager): # type: ignore - pad_tokens: Dict[int, List[int]] - tokenizer: CLIPTokenizer +class TextualInversionManager(BaseTextualInversionManager): + """TextualInversionManager implements the BaseTextualInversionManager ABC from the compel library.""" def __init__(self, tokenizer: CLIPTokenizer): - self.pad_tokens = {} + self.pad_tokens: dict[int, list[int]] = {} self.tokenizer = tokenizer def expand_textual_inversion_token_ids_if_necessary(self, token_ids: list[int]) -> list[int]: + """Given a list of tokens ids, expand any TI tokens to their corresponding pad tokens. + + For example, suppose we have a `` TI with 4 vectors that was added to the tokenizer with the following + mapping of tokens to token_ids: + ``` + : 49408 + : 49409 + : 49410 + : 49411 + ``` + `self.pad_tokens` would be set to `{49408: [49408, 49409, 49410, 49411]}`. + This function is responsible for expanding `49408` in the token_ids list to `[49408, 49409, 49410, 49411]`. + """ + # Short circuit if there are no pad tokens to save a little time. if len(self.pad_tokens) == 0: return token_ids + # This function assumes that compel has not included the BOS and EOS tokens in the token_ids list. We verify + # this assumption here. if token_ids[0] == self.tokenizer.bos_token_id: raise ValueError("token_ids must not start with bos_token_id") if token_ids[-1] == self.tokenizer.eos_token_id: raise ValueError("token_ids must not end with eos_token_id") - new_token_ids = [] + # Expand any TI tokens to their corresponding pad tokens. + new_token_ids: list[int] = [] for token_id in token_ids: new_token_ids.append(token_id) if token_id in self.pad_tokens: new_token_ids.extend(self.pad_tokens[token_id]) - # Do not exceed the max model input size - # The -2 here is compensating for compensate compel.embeddings_provider.get_token_ids(), - # which first removes and then adds back the start and end tokens. + # Do not exceed the max model input size. The -2 here is compensating for + # compel.embeddings_provider.get_token_ids(), which first removes and then adds back the start and end tokens. max_length = list(self.tokenizer.max_model_input_sizes.values())[0] - 2 if len(new_token_ids) > max_length: new_token_ids = new_token_ids[0:max_length] From 3aa1c8d3a88205b133b31688b39648e431178016 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Mon, 27 May 2024 10:35:02 -0400 Subject: [PATCH 04/52] Update TextualInversionManager for compatibility with the latest transformers release. See https://github.com/invoke-ai/InvokeAI/issues/6445. --- invokeai/backend/textual_inversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/textual_inversion.py b/invokeai/backend/textual_inversion.py index 368736617b..005031c95b 100644 --- a/invokeai/backend/textual_inversion.py +++ b/invokeai/backend/textual_inversion.py @@ -107,7 +107,7 @@ class TextualInversionManager(BaseTextualInversionManager): # Do not exceed the max model input size. The -2 here is compensating for # compel.embeddings_provider.get_token_ids(), which first removes and then adds back the start and end tokens. 
- max_length = list(self.tokenizer.max_model_input_sizes.values())[0] - 2 + max_length = self.tokenizer.model_max_length - 2 if len(new_token_ids) > max_length: new_token_ids = new_token_ids[0:max_length] From 829b9ad66bb8f95c19b9aa2744d15112760009e4 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Mon, 27 May 2024 10:53:12 -0400 Subject: [PATCH 05/52] Add a callout about the hackiness of dropping tokens in the TextualInversionManager. --- invokeai/backend/textual_inversion.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/invokeai/backend/textual_inversion.py b/invokeai/backend/textual_inversion.py index 005031c95b..98104f769e 100644 --- a/invokeai/backend/textual_inversion.py +++ b/invokeai/backend/textual_inversion.py @@ -109,6 +109,9 @@ class TextualInversionManager(BaseTextualInversionManager): # compel.embeddings_provider.get_token_ids(), which first removes and then adds back the start and end tokens. max_length = self.tokenizer.model_max_length - 2 if len(new_token_ids) > max_length: + # HACK: If TI token expansion causes us to exceed the max text encoder input length, we silently discard + # tokens. Token expansion should happen in a way that is compatible with compel's default handling of long + # prompts. new_token_ids = new_token_ids[0:max_length] return new_token_ids From 21a60af8812af33d219bd0a0d6b4a916da1967f5 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 28 May 2024 23:01:21 -0400 Subject: [PATCH 06/52] when unlocking models, offload_unlocked_models should prune to vram limit only (#6450) Co-authored-by: Lincoln Stein --- invokeai/backend/model_manager/load/model_cache/model_locker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_manager/load/model_cache/model_locker.py b/invokeai/backend/model_manager/load/model_cache/model_locker.py index a275987773..269ac60479 100644 --- a/invokeai/backend/model_manager/load/model_cache/model_locker.py +++ b/invokeai/backend/model_manager/load/model_cache/model_locker.py @@ -60,5 +60,5 @@ class ModelLocker(ModelLockerBase): self._cache_entry.unlock() if not self._cache.lazy_offloading: - self._cache.offload_unlocked_models(self._cache_entry.size) + self._cache.offload_unlocked_models(0) self._cache.print_cuda_stats() From 66858effa2e1b606185f8bd6adcbd47137275cc1 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Thu, 30 May 2024 11:09:55 +1000 Subject: [PATCH 07/52] docs: add FAQ for fixing controlnet_aux --- docs/help/FAQ.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/help/FAQ.md b/docs/help/FAQ.md index 4c297f442a..25880f7cd2 100644 --- a/docs/help/FAQ.md +++ b/docs/help/FAQ.md @@ -154,6 +154,18 @@ This is caused by an invalid setting in the `invokeai.yaml` configuration file. Check the [configuration docs] for more detail about the settings and how to specify them. +## `ModuleNotFoundError: No module named 'controlnet_aux'` + +`controlnet_aux` is a dependency of Invoke and appears to have been packaged or distributed strangely. Sometimes, it doesn't install correctly. This is outside our control. 
+ +If you encounter this error, the solution is to remove the package from the `pip` cache and re-run the Invoke installer so a fresh, working version of `controlnet_aux` can be downloaded and installed: + +- Run the Invoke launcher +- Choose the developer console option +- Run this command: `pip cache remove controlnet_aux` +- Close the terminal window +- Download and run the [installer](https://github.com/invoke-ai/InvokeAI/releases/latest), selecting your current install location + ## Out of Memory Issues The models are large, VRAM is expensive, and you may find yourself From 843f82c837d0d19b82a5e5fd5071636219314bbc Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Thu, 30 May 2024 11:45:35 +1000 Subject: [PATCH 08/52] fix(ui): remove overly strict constraints on control adapter weight --- .../controlLayers/util/controlAdapters.ts | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.ts b/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.ts index 708e089008..13435bdb7c 100644 --- a/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.ts +++ b/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.ts @@ -1,7 +1,13 @@ import { deepClone } from 'common/util/deepClone'; import { zModelIdentifierField } from 'features/nodes/types/common'; import { merge, omit } from 'lodash-es'; -import type { BaseModelType, ControlNetModelConfig, Graph, ImageDTO, T2IAdapterModelConfig } from 'services/api/types'; +import type { + AnyInvocation, + BaseModelType, + ControlNetModelConfig, + ImageDTO, + T2IAdapterModelConfig, +} from 'services/api/types'; import { z } from 'zod'; const zId = z.string().min(1); @@ -147,7 +153,7 @@ const zBeginEndStepPct = z const zControlAdapterBase = z.object({ id: zId, - weight: z.number().gte(0).lte(1), + weight: z.number().gte(-1).lte(2), image: zImageWithDims.nullable(), processedImage: zImageWithDims.nullable(), processorConfig: zProcessorConfig.nullable(), @@ -183,7 +189,7 @@ export const isIPMethodV2 = (v: unknown): v is IPMethodV2 => zIPMethodV2.safePar export const zIPAdapterConfigV2 = z.object({ id: zId, type: z.literal('ip_adapter'), - weight: z.number().gte(0).lte(1), + weight: z.number().gte(-1).lte(2), method: zIPMethodV2, image: zImageWithDims.nullable(), model: zModelIdentifierField.nullable(), @@ -216,10 +222,7 @@ type ProcessorData = { labelTKey: string; descriptionTKey: string; buildDefaults(baseModel?: BaseModelType): Extract; - buildNode( - image: ImageWithDims, - config: Extract - ): Extract; + buildNode(image: ImageWithDims, config: Extract): Extract; }; const minDim = (image: ImageWithDims): number => Math.min(image.width, image.height); From e257a72f944e6ca191513ac5078fce829dd34752 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 29 May 2024 17:16:33 +1000 Subject: [PATCH 09/52] chore: bump pydantic, fastapi to latest --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3913a6cd1f..bb30747ba8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,10 +55,10 @@ dependencies = [ # Core application dependencies, pinned for reproducible builds. 
"fastapi-events==0.11.0", - "fastapi==0.110.0", + "fastapi==0.111.0", "huggingface-hub==0.23.1", "pydantic-settings==2.2.1", - "pydantic==2.6.3", + "pydantic==2.7.2", "python-socketio==5.11.1", "uvicorn[standard]==0.28.0", From 2f9ebdec694ab99c473fb9a730a3d082e537023b Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 29 May 2024 17:29:51 +1000 Subject: [PATCH 10/52] fix(app): openapi schema generation Some tech debt related to dynamic pydantic schemas for invocations became problematic. Including the invocations and results in the event schemas was breaking pydantic's handling of ref schemas. I don't really understand why - I think it's a pydantic bug in a remote edge case that we are hitting. After many failed attempts I landed on this implementation, which is actually much tidier than what was in there before. - Create pydantic-enabled types for `AnyInvocation` and `AnyInvocationOutput` and use these in place of the janky dynamic unions. Actually, they are kinda the same, but better encapsulated. Use these in `Graph`, `GraphExecutionState`, `InvocationEventBase` and `InvocationCompleteEvent`. - Revise the custom openapi function to work with the new models. - Split out the custom openapi function to a separate file. Add a `post_transform` callback so consumers can customize the output schema. - Update makefile scripts. --- Makefile | 4 + invokeai/app/api_app.py | 92 +----------- invokeai/app/invocations/baseinvocation.py | 20 +-- invokeai/app/services/events/events_common.py | 32 ++--- invokeai/app/services/shared/graph.py | 136 +++++------------- invokeai/app/util/custom_openapi.py | 114 +++++++++++++++ scripts/generate_openapi_schema.py | 5 +- 7 files changed, 177 insertions(+), 226 deletions(-) create mode 100644 invokeai/app/util/custom_openapi.py diff --git a/Makefile b/Makefile index 7344b2e8d2..e858a89e2b 100644 --- a/Makefile +++ b/Makefile @@ -18,6 +18,7 @@ help: @echo "frontend-typegen Generate types for the frontend from the OpenAPI schema" @echo "installer-zip Build the installer .zip file for the current version" @echo "tag-release Tag the GitHub repository with the current version (use at release time only!)" + @echo "openapi Generate the OpenAPI schema for the app, outputting to stdout" # Runs ruff, fixing any safely-fixable errors and formatting ruff: @@ -70,3 +71,6 @@ installer-zip: tag-release: cd installer && ./tag_release.sh +# Generate the OpenAPI Schema for the app +openapi: + python scripts/generate_openapi_schema.py diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index b7da548377..e69d95af71 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -3,9 +3,7 @@ import logging import mimetypes import socket from contextlib import asynccontextmanager -from inspect import signature from pathlib import Path -from typing import Any import torch import uvicorn @@ -13,11 +11,9 @@ from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html -from fastapi.openapi.utils import get_openapi from fastapi.responses import HTMLResponse from fastapi_events.handlers.local import local_handler from fastapi_events.middleware import EventHandlerASGIMiddleware -from pydantic.json_schema import models_json_schema from torch.backends.mps import is_available as is_mps_available # for PyCharm: @@ -25,10 +21,8 @@ from torch.backends.mps import is_available as 
is_mps_available import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import) import invokeai.frontend.web as web_dir from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles -from invokeai.app.invocations.model import ModelIdentifierField from invokeai.app.services.config.config_default import get_config -from invokeai.app.services.events.events_common import EventBase -from invokeai.app.services.session_processor.session_processor_common import ProgressImage +from invokeai.app.util.custom_openapi import get_openapi_func from invokeai.backend.util.devices import TorchDevice from ..backend.util.logging import InvokeAILogger @@ -45,11 +39,6 @@ from .api.routers import ( workflows, ) from .api.sockets import SocketIO -from .invocations.baseinvocation import ( - BaseInvocation, - UIConfigBase, -) -from .invocations.fields import InputFieldJSONSchemaExtra, OutputFieldJSONSchemaExtra app_config = get_config() @@ -119,84 +108,7 @@ app.include_router(app_info.app_router, prefix="/api") app.include_router(session_queue.session_queue_router, prefix="/api") app.include_router(workflows.workflows_router, prefix="/api") - -# Build a custom OpenAPI to include all outputs -# TODO: can outputs be included on metadata of invocation schemas somehow? -def custom_openapi() -> dict[str, Any]: - if app.openapi_schema: - return app.openapi_schema - openapi_schema = get_openapi( - title=app.title, - description="An API for invoking AI image operations", - version="1.0.0", - routes=app.routes, - separate_input_output_schemas=False, # https://fastapi.tiangolo.com/how-to/separate-openapi-schemas/ - ) - - # Add all outputs - all_invocations = BaseInvocation.get_invocations() - output_types = set() - output_type_titles = {} - for invoker in all_invocations: - output_type = signature(invoker.invoke).return_annotation - output_types.add(output_type) - - output_schemas = models_json_schema( - models=[(o, "serialization") for o in output_types], ref_template="#/components/schemas/{model}" - ) - for schema_key, output_schema in output_schemas[1]["$defs"].items(): - # TODO: note that we assume the schema_key here is the TYPE.__name__ - # This could break in some cases, figure out a better way to do it - output_type_titles[schema_key] = output_schema["title"] - openapi_schema["components"]["schemas"][schema_key] = output_schema - openapi_schema["components"]["schemas"][schema_key]["class"] = "output" - - # Some models don't end up in the schemas as standalone definitions - additional_schemas = models_json_schema( - [ - (UIConfigBase, "serialization"), - (InputFieldJSONSchemaExtra, "serialization"), - (OutputFieldJSONSchemaExtra, "serialization"), - (ModelIdentifierField, "serialization"), - (ProgressImage, "serialization"), - ], - ref_template="#/components/schemas/{model}", - ) - for schema_key, schema_json in additional_schemas[1]["$defs"].items(): - openapi_schema["components"]["schemas"][schema_key] = schema_json - - openapi_schema["components"]["schemas"]["InvocationOutputMap"] = { - "type": "object", - "properties": {}, - "required": [], - } - - # Add a reference to the output type to additionalProperties of the invoker schema - for invoker in all_invocations: - invoker_name = invoker.__name__ # type: ignore [attr-defined] # this is a valid attribute - output_type = signature(obj=invoker.invoke).return_annotation - output_type_title = output_type_titles[output_type.__name__] - invoker_schema = openapi_schema["components"]["schemas"][f"{invoker_name}"] - outputs_ref = {"$ref": 
f"#/components/schemas/{output_type_title}"} - invoker_schema["output"] = outputs_ref - openapi_schema["components"]["schemas"]["InvocationOutputMap"]["properties"][invoker.get_type()] = outputs_ref - openapi_schema["components"]["schemas"]["InvocationOutputMap"]["required"].append(invoker.get_type()) - invoker_schema["class"] = "invocation" - - # Add all event schemas - for event in sorted(EventBase.get_events(), key=lambda e: e.__name__): - json_schema = event.model_json_schema(mode="serialization", ref_template="#/components/schemas/{model}") - if "$defs" in json_schema: - for schema_key, schema in json_schema["$defs"].items(): - openapi_schema["components"]["schemas"][schema_key] = schema - del json_schema["$defs"] - openapi_schema["components"]["schemas"][event.__name__] = json_schema - - app.openapi_schema = openapi_schema - return app.openapi_schema - - -app.openapi = custom_openapi # type: ignore [method-assign] # this is a valid assignment +app.openapi = get_openapi_func(app) @app.get("/docs", include_in_schema=False) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 40c7b41cae..9545179e21 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -113,10 +113,10 @@ class BaseInvocationOutput(BaseModel): def get_typeadapter(cls) -> TypeAdapter[Any]: """Gets a pydantc TypeAdapter for the union of all invocation output types.""" if not cls._typeadapter: - InvocationOutputsUnion = TypeAliasType( - "InvocationOutputsUnion", Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")] + AnyInvocationOutput = TypeAliasType( + "AnyInvocationOutput", Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")] ) - cls._typeadapter = TypeAdapter(InvocationOutputsUnion) + cls._typeadapter = TypeAdapter(AnyInvocationOutput) return cls._typeadapter @classmethod @@ -125,12 +125,13 @@ class BaseInvocationOutput(BaseModel): return (i.get_type() for i in BaseInvocationOutput.get_outputs()) @staticmethod - def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: + def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocationOutput]) -> None: """Adds various UI-facing attributes to the invocation output's OpenAPI schema.""" # Because we use a pydantic Literal field with default value for the invocation type, # it will be typed as optional in the OpenAPI schema. Make it required manually. 
if "required" not in schema or not isinstance(schema["required"], list): schema["required"] = [] + schema["class"] = "output" schema["required"].extend(["type"]) @classmethod @@ -182,10 +183,10 @@ class BaseInvocation(ABC, BaseModel): def get_typeadapter(cls) -> TypeAdapter[Any]: """Gets a pydantc TypeAdapter for the union of all invocation types.""" if not cls._typeadapter: - InvocationsUnion = TypeAliasType( - "InvocationsUnion", Annotated[Union[tuple(cls._invocation_classes)], Field(discriminator="type")] + AnyInvocation = TypeAliasType( + "AnyInvocation", Annotated[Union[tuple(cls._invocation_classes)], Field(discriminator="type")] ) - cls._typeadapter = TypeAdapter(InvocationsUnion) + cls._typeadapter = TypeAdapter(AnyInvocation) return cls._typeadapter @classmethod @@ -221,7 +222,7 @@ class BaseInvocation(ABC, BaseModel): return signature(cls.invoke).return_annotation @staticmethod - def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel], *args, **kwargs) -> None: + def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocation]) -> None: """Adds various UI-facing attributes to the invocation's OpenAPI schema.""" uiconfig = cast(UIConfigBase | None, getattr(model_class, "UIConfig", None)) if uiconfig is not None: @@ -237,6 +238,7 @@ class BaseInvocation(ABC, BaseModel): schema["version"] = uiconfig.version if "required" not in schema or not isinstance(schema["required"], list): schema["required"] = [] + schema["class"] = "invocation" schema["required"].extend(["type", "id"]) @abstractmethod @@ -310,7 +312,7 @@ class BaseInvocation(ABC, BaseModel): protected_namespaces=(), validate_assignment=True, json_schema_extra=json_schema_extra, - json_schema_serialization_defaults_required=True, + json_schema_serialization_defaults_required=False, coerce_numbers_to_str=True, ) diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py index 3ae4468b83..0adcaa2ab1 100644 --- a/invokeai/app/services/events/events_common.py +++ b/invokeai/app/services/events/events_common.py @@ -3,9 +3,8 @@ from typing import TYPE_CHECKING, Any, ClassVar, Coroutine, Generic, Optional, P from fastapi_events.handlers.local import local_handler from fastapi_events.registry.payload_schema import registry as payload_schema -from pydantic import BaseModel, ConfigDict, Field, SerializeAsAny, field_validator +from pydantic import BaseModel, ConfigDict, Field -from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput from invokeai.app.services.session_processor.session_processor_common import ProgressImage from invokeai.app.services.session_queue.session_queue_common import ( QUEUE_ITEM_STATUS, @@ -14,6 +13,7 @@ from invokeai.app.services.session_queue.session_queue_common import ( SessionQueueItem, SessionQueueStatus, ) +from invokeai.app.services.shared.graph import AnyInvocation, AnyInvocationOutput from invokeai.app.util.misc import get_timestamp from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState @@ -98,17 +98,9 @@ class InvocationEventBase(QueueItemEventBase): item_id: int = Field(description="The ID of the queue item") batch_id: str = Field(description="The ID of the queue batch") session_id: str = Field(description="The ID of the session (aka graph execution state)") - invocation: SerializeAsAny[BaseInvocation] = Field(description="The ID of the invocation") + invocation: AnyInvocation 
= Field(description="The ID of the invocation") invocation_source_id: str = Field(description="The ID of the prepared invocation's source node") - @field_validator("invocation", mode="plain") - @classmethod - def validate_invocation(cls, v: Any): - """Validates the invocation using the dynamic type adapter.""" - - invocation = BaseInvocation.get_typeadapter().validate_python(v) - return invocation - @payload_schema.register class InvocationStartedEvent(InvocationEventBase): @@ -117,7 +109,7 @@ class InvocationStartedEvent(InvocationEventBase): __event_name__ = "invocation_started" @classmethod - def build(cls, queue_item: SessionQueueItem, invocation: BaseInvocation) -> "InvocationStartedEvent": + def build(cls, queue_item: SessionQueueItem, invocation: AnyInvocation) -> "InvocationStartedEvent": return cls( queue_id=queue_item.queue_id, item_id=queue_item.item_id, @@ -144,7 +136,7 @@ class InvocationDenoiseProgressEvent(InvocationEventBase): def build( cls, queue_item: SessionQueueItem, - invocation: BaseInvocation, + invocation: AnyInvocation, intermediate_state: PipelineIntermediateState, progress_image: ProgressImage, ) -> "InvocationDenoiseProgressEvent": @@ -182,19 +174,11 @@ class InvocationCompleteEvent(InvocationEventBase): __event_name__ = "invocation_complete" - result: SerializeAsAny[BaseInvocationOutput] = Field(description="The result of the invocation") - - @field_validator("result", mode="plain") - @classmethod - def validate_results(cls, v: Any): - """Validates the invocation result using the dynamic type adapter.""" - - result = BaseInvocationOutput.get_typeadapter().validate_python(v) - return result + result: AnyInvocationOutput = Field(description="The result of the invocation") @classmethod def build( - cls, queue_item: SessionQueueItem, invocation: BaseInvocation, result: BaseInvocationOutput + cls, queue_item: SessionQueueItem, invocation: AnyInvocation, result: AnyInvocationOutput ) -> "InvocationCompleteEvent": return cls( queue_id=queue_item.queue_id, @@ -223,7 +207,7 @@ class InvocationErrorEvent(InvocationEventBase): def build( cls, queue_item: SessionQueueItem, - invocation: BaseInvocation, + invocation: AnyInvocation, error_type: str, error_message: str, error_traceback: str, diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py index 8508d2484c..7f5b277ad8 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -2,11 +2,12 @@ import copy import itertools -from typing import Annotated, Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints +from typing import Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints import networkx as nx from pydantic import ( BaseModel, + GetCoreSchemaHandler, GetJsonSchemaHandler, ValidationError, field_validator, @@ -277,73 +278,46 @@ class CollectInvocation(BaseInvocation): return CollectInvocationOutput(collection=copy.copy(self.collection)) +class AnyInvocation(BaseInvocation): + @classmethod + def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler): + return BaseInvocation.get_typeadapter().core_schema + + @classmethod + def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue: + # Nodes are too powerful, we have to make our own OpenAPI schema manually + # No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually + oneOf: list[dict[str, str]] = [] + for i in 
BaseInvocation.get_invocations(): + oneOf.append({"$ref": f"#/components/schemas/{i.__name__}"}) + return {"oneOf": oneOf} + + +class AnyInvocationOutput(BaseInvocationOutput): + @classmethod + def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler): + return BaseInvocationOutput.get_typeadapter().core_schema + + @classmethod + def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue: + # Nodes are too powerful, we have to make our own OpenAPI schema manually + # No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually + + oneOf: list[dict[str, str]] = [] + for i in BaseInvocationOutput.get_outputs(): + oneOf.append({"$ref": f"#/components/schemas/{i.__name__}"}) + return {"oneOf": oneOf} + + class Graph(BaseModel): id: str = Field(description="The id of this graph", default_factory=uuid_string) # TODO: use a list (and never use dict in a BaseModel) because pydantic/fastapi hates me - nodes: dict[str, BaseInvocation] = Field(description="The nodes in this graph", default_factory=dict) + nodes: dict[str, AnyInvocation] = Field(description="The nodes in this graph", default_factory=dict) edges: list[Edge] = Field( description="The connections between nodes and their fields in this graph", default_factory=list, ) - @field_validator("nodes", mode="plain") - @classmethod - def validate_nodes(cls, v: dict[str, Any]): - """Validates the nodes in the graph by retrieving a union of all node types and validating each node.""" - - # Invocations register themselves as their python modules are executed. The union of all invocations is - # constructed at runtime. We use pydantic to validate `Graph.nodes` using that union. - # - # It's possible that when `graph.py` is executed, not all invocation-containing modules will have executed. If - # we construct the invocation union as `graph.py` is executed, we may miss some invocations. Those missing - # invocations will cause a graph to fail if they are used. - # - # We can get around this by validating the nodes in the graph using a "plain" validator, which overrides the - # pydantic validation entirely. This allows us to validate the nodes using the union of invocations at runtime. - # - # This same pattern is used in `GraphExecutionState`. - - nodes: dict[str, BaseInvocation] = {} - typeadapter = BaseInvocation.get_typeadapter() - for node_id, node in v.items(): - nodes[node_id] = typeadapter.validate_python(node) - return nodes - - @classmethod - def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue: - # We use a "plain" validator to validate the nodes in the graph. Pydantic is unable to create a JSON Schema for - # fields that use "plain" validators, so we have to hack around this. Also, we need to add all invocations to - # the generated schema as options for the `nodes` field. - # - # The workaround is to create a new BaseModel that has the same fields as `Graph` but without the validator and - # with the invocation union as the type for the `nodes` field. Pydantic then generates the JSON Schema as - # expected. - # - # You might be tempted to do something like this: - # - # ```py - # cloned_model = create_model(cls.__name__, __base__=cls, nodes=...) - # delattr(cloned_model, "validate_nodes") - # cloned_model.model_rebuild(force=True) - # json_schema = handler(cloned_model.__pydantic_core_schema__) - # ``` - # - # Unfortunately, this does not work. 
Calling `handler` here results in infinite recursion as pydantic attempts - # to build the JSON Schema for the cloned model. Instead, we have to manually clone the model. - # - # This same pattern is used in `GraphExecutionState`. - - class Graph(BaseModel): - id: Optional[str] = Field(default=None, description="The id of this graph") - nodes: dict[ - str, Annotated[Union[tuple(BaseInvocation._invocation_classes)], Field(discriminator="type")] - ] = Field(description="The nodes in this graph") - edges: list[Edge] = Field(description="The connections between nodes and their fields in this graph") - - json_schema = handler(Graph.__pydantic_core_schema__) - json_schema = handler.resolve_ref_schema(json_schema) - return json_schema - def add_node(self, node: BaseInvocation) -> None: """Adds a node to a graph @@ -774,7 +748,7 @@ class GraphExecutionState(BaseModel): ) # The results of executed nodes - results: dict[str, BaseInvocationOutput] = Field(description="The results of node executions", default_factory=dict) + results: dict[str, AnyInvocationOutput] = Field(description="The results of node executions", default_factory=dict) # Errors raised when executing nodes errors: dict[str, str] = Field(description="Errors raised when executing nodes", default_factory=dict) @@ -791,52 +765,12 @@ class GraphExecutionState(BaseModel): default_factory=dict, ) - @field_validator("results", mode="plain") - @classmethod - def validate_results(cls, v: dict[str, BaseInvocationOutput]): - """Validates the results in the GES by retrieving a union of all output types and validating each result.""" - - # See the comment in `Graph.validate_nodes` for an explanation of this logic. - results: dict[str, BaseInvocationOutput] = {} - typeadapter = BaseInvocationOutput.get_typeadapter() - for result_id, result in v.items(): - results[result_id] = typeadapter.validate_python(result) - return results - @field_validator("graph") def graph_is_valid(cls, v: Graph): """Validates that the graph is valid""" v.validate_self() return v - @classmethod - def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue: - # See the comment in `Graph.__get_pydantic_json_schema__` for an explanation of this logic. 
- class GraphExecutionState(BaseModel): - """Tracks the state of a graph execution""" - - id: str = Field(description="The id of the execution state") - graph: Graph = Field(description="The graph being executed") - execution_graph: Graph = Field(description="The expanded graph of activated and executed nodes") - executed: set[str] = Field(description="The set of node ids that have been executed") - executed_history: list[str] = Field( - description="The list of node ids that have been executed, in order of execution" - ) - results: dict[ - str, Annotated[Union[tuple(BaseInvocationOutput._output_classes)], Field(discriminator="type")] - ] = Field(description="The results of node executions") - errors: dict[str, str] = Field(description="Errors raised when executing nodes") - prepared_source_mapping: dict[str, str] = Field( - description="The map of prepared nodes to original graph nodes" - ) - source_prepared_mapping: dict[str, set[str]] = Field( - description="The map of original graph nodes to prepared nodes" - ) - - json_schema = handler(GraphExecutionState.__pydantic_core_schema__) - json_schema = handler.resolve_ref_schema(json_schema) - return json_schema - def next(self) -> Optional[BaseInvocation]: """Gets the next node ready to execute.""" diff --git a/invokeai/app/util/custom_openapi.py b/invokeai/app/util/custom_openapi.py new file mode 100644 index 0000000000..9313f63b84 --- /dev/null +++ b/invokeai/app/util/custom_openapi.py @@ -0,0 +1,114 @@ +from typing import Any, Callable, Optional + +from fastapi import FastAPI +from fastapi.openapi.utils import get_openapi +from pydantic.json_schema import models_json_schema + +from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, UIConfigBase +from invokeai.app.invocations.fields import InputFieldJSONSchemaExtra, OutputFieldJSONSchemaExtra +from invokeai.app.invocations.model import ModelIdentifierField +from invokeai.app.services.events.events_common import EventBase +from invokeai.app.services.session_processor.session_processor_common import ProgressImage + + +def move_defs_to_top_level(openapi_schema: dict[str, Any], component_schema: dict[str, Any]) -> None: + """Moves a component schema's $defs to the top level of the openapi schema. Useful when generating a schema + for a single model that needs to be added back to the top level of the schema. Mutates openapi_schema and + component_schema.""" + + defs = component_schema.pop("$defs", {}) + for schema_key, json_schema in defs.items(): + if schema_key in openapi_schema["components"]["schemas"]: + continue + openapi_schema["components"]["schemas"][schema_key] = json_schema + + +def get_openapi_func( + app: FastAPI, post_transform: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None +) -> Callable[[], dict[str, Any]]: + """Gets the OpenAPI schema generator function. + + Args: + app (FastAPI): The FastAPI app to generate the schema for. + post_transform (Optional[Callable[[dict[str, Any]], dict[str, Any]]], optional): A function to apply to the + generated schema before returning it. Defaults to None. + + Returns: + Callable[[], dict[str, Any]]: The OpenAPI schema generator function. When first called, the generated schema is + cached in `app.openapi_schema`. On subsequent calls, the cached schema is returned. This caching behaviour + matches FastAPI's default schema generation caching. 
+ """ + + def openapi() -> dict[str, Any]: + if app.openapi_schema: + return app.openapi_schema + + openapi_schema = get_openapi( + title=app.title, + description="An API for invoking AI image operations", + version="1.0.0", + routes=app.routes, + separate_input_output_schemas=False, # https://fastapi.tiangolo.com/how-to/separate-openapi-schemas/ + ) + + # We'll create a map of invocation type to output schema to make some types simpler on the client. + invocation_output_map_properties: dict[str, Any] = {} + invocation_output_map_required: list[str] = [] + + # We need to manually add all outputs to the schema - pydantic doesn't add them because they aren't used directly. + for output in BaseInvocationOutput.get_outputs(): + json_schema = output.model_json_schema(mode="serialization", ref_template="#/components/schemas/{model}") + move_defs_to_top_level(openapi_schema, json_schema) + openapi_schema["components"]["schemas"][output.__name__] = json_schema + + # Technically, invocations are added to the schema by pydantic, but we still need to manually set their output + # property, so we'll just do it all manually. + for invocation in BaseInvocation.get_invocations(): + json_schema = invocation.model_json_schema( + mode="serialization", ref_template="#/components/schemas/{model}" + ) + move_defs_to_top_level(openapi_schema, json_schema) + output_title = invocation.get_output_annotation().__name__ + outputs_ref = {"$ref": f"#/components/schemas/{output_title}"} + json_schema["output"] = outputs_ref + openapi_schema["components"]["schemas"][invocation.__name__] = json_schema + + # Add this invocation and its output to the output map + invocation_type = invocation.get_type() + invocation_output_map_properties[invocation_type] = json_schema["output"] + invocation_output_map_required.append(invocation_type) + + # Add the output map to the schema + openapi_schema["components"]["schemas"]["InvocationOutputMap"] = { + "type": "object", + "properties": invocation_output_map_properties, + "required": invocation_output_map_required, + } + + # Some models don't end up in the schemas as standalone definitions because they aren't used directly in the API. + # We need to add them manually here. WARNING: Pydantic can choke if you call `model.model_json_schema()` to get + # a schema. This has something to do with schema refs - not totally clear. For whatever reason, using + # `models_json_schema` seems to work fine. 
+ additional_models = [ + *EventBase.get_events(), + UIConfigBase, + InputFieldJSONSchemaExtra, + OutputFieldJSONSchemaExtra, + ModelIdentifierField, + ProgressImage, + ] + + additional_schemas = models_json_schema( + [(m, "serialization") for m in additional_models], + ref_template="#/components/schemas/{model}", + ) + # additional_schemas[1] is a dict of $defs that we need to add to the top level of the schema + move_defs_to_top_level(openapi_schema, additional_schemas[1]) + + if post_transform is not None: + openapi_schema = post_transform(openapi_schema) + + app.openapi_schema = openapi_schema + return app.openapi_schema + + return openapi diff --git a/scripts/generate_openapi_schema.py b/scripts/generate_openapi_schema.py index dd1f5b85dd..70baa194dc 100644 --- a/scripts/generate_openapi_schema.py +++ b/scripts/generate_openapi_schema.py @@ -7,9 +7,10 @@ def main(): # Change working directory to the repo root os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) - from invokeai.app.api_app import custom_openapi + from invokeai.app.api_app import app + from invokeai.app.util.custom_openapi import get_openapi_func - schema = custom_openapi() + schema = get_openapi_func(app)() json.dump(schema, sys.stdout, indent=2) From 7590f3005e1e72e97670a6e03a044a0cbed62e13 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 29 May 2024 17:30:37 +1000 Subject: [PATCH 11/52] chore(ui): typegen --- .../frontend/web/src/services/api/schema.ts | 3771 ++++++++++------- 1 file changed, 2193 insertions(+), 1578 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 67b39237b1..9ecd78e3fd 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -606,7 +606,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The mask image to convert. */ + /** + * @description The mask image to convert. 
+ * @default null + */ image?: components["schemas"]["ImageField"]; /** * Invert @@ -621,6 +624,8 @@ export type components = { */ type: "alpha_mask_to_tensor"; }; + AnyInvocation: components["schemas"]["ControlNetInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StringSplitInvocation"] | 
components["schemas"]["FloatInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | 
components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["PairTileImageInvocation"]; + AnyInvocationOutput: components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["String2Output"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["SDXLModelLoaderOutput"]; /** * AppConfig * @description App Config Response @@ -841,9 +846,15 @@ export type components = { * @description Creates a blank image and forwards it to the pipeline */ BlankImageInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -920,9 +931,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Latents tensor */ + /** + * @description Latents tensor + * @default null + */ latents_a?: components["schemas"]["LatentsField"]; - /** @description Latents tensor */ + /** + * @description Latents tensor + * @default null + */ latents_b?: components["schemas"]["LatentsField"]; /** * Alpha @@ -1208,23 +1225,6 @@ export type components = { */ type: "boolean_collection"; }; - /** - * BooleanCollectionOutput - * @description Base class for nodes 
that output a collection of booleans - */ - BooleanCollectionOutput: { - /** - * Collection - * @description The output boolean collection - */ - collection: boolean[]; - /** - * type - * @default boolean_collection_output - * @constant - */ - type: "boolean_collection_output"; - }; /** * Boolean Primitive * @description A boolean primitive value @@ -1260,23 +1260,6 @@ export type components = { */ type: "boolean"; }; - /** - * BooleanOutput - * @description Base class for nodes that output a single boolean - */ - BooleanOutput: { - /** - * Value - * @description The output boolean - */ - value: boolean; - /** - * type - * @default boolean_output - * @constant - */ - type: "boolean_output"; - }; /** CLIPField */ CLIPField: { /** @description Info to load tokenizer submodel */ @@ -1294,23 +1277,6 @@ export type components = { */ loras: components["schemas"]["LoRAField"][]; }; - /** - * CLIPOutput - * @description Base class for invocations that output a CLIP field - */ - CLIPOutput: { - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip: components["schemas"]["CLIPField"]; - /** - * type - * @default clip_output - * @constant - */ - type: "clip_output"; - }; /** * CLIP Skip * @description Skip layers in clip text_encoder model. @@ -1336,6 +1302,7 @@ export type components = { /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip?: components["schemas"]["CLIPField"]; /** @@ -1351,24 +1318,6 @@ export type components = { */ type: "clip_skip"; }; - /** - * CLIPSkipInvocationOutput - * @description CLIP skip node output - */ - CLIPSkipInvocationOutput: { - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip: components["schemas"]["CLIPField"] | null; - /** - * type - * @default clip_skip_output - * @constant - */ - type: "clip_skip_output"; - }; /** * CLIPVisionDiffusersConfig * @description Model config for CLIPVision. @@ -1399,7 +1348,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -1412,13 +1360,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
- * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -1440,9 +1386,15 @@ export type components = { * @description Infills transparent areas of an image using OpenCV Inpainting */ CV2InfillInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -1461,7 +1413,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * type @@ -1647,20 +1602,6 @@ export type components = { */ type: "calculate_image_tiles_min_overlap"; }; - /** CalculateImageTilesOutput */ - CalculateImageTilesOutput: { - /** - * Tiles - * @description The tiles coordinates that cover a particular image shape. - */ - tiles: components["schemas"]["Tile"][]; - /** - * type - * @default calculate_image_tiles_output - * @constant - */ - type: "calculate_image_tiles_output"; - }; /** * CancelByBatchIDsResult * @description Result of canceling by list of batch ids @@ -1677,9 +1618,15 @@ export type components = { * @description Canny edge detection for ControlNet */ CannyImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -1698,7 +1645,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Detect Resolution @@ -1736,9 +1686,15 @@ export type components = { * @description Combines two images by using the mask provided. Intended for use on the Unified Canvas. 
*/ CanvasPasteBackInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -1757,11 +1713,20 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The source image */ + /** + * @description The source image + * @default null + */ source_image?: components["schemas"]["ImageField"]; - /** @description The target image */ + /** + * @description The target image + * @default null + */ target_image?: components["schemas"]["ImageField"]; - /** @description The mask to use when pasting */ + /** + * @description The mask to use when pasting + * @default null + */ mask?: components["schemas"]["ImageField"]; /** * Mask Blur @@ -1798,7 +1763,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to crop */ + /** + * @description The image to crop + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Left @@ -1867,6 +1835,7 @@ export type components = { /** * Collection Item * @description The item to collect (all inputs must be of the same type) + * @default null */ item?: unknown; /** @@ -1882,46 +1851,21 @@ export type components = { */ type: "collect"; }; - /** CollectInvocationOutput */ - CollectInvocationOutput: { - /** - * Collection - * @description The collection of input items - */ - collection: unknown[]; - /** - * type - * @default collect_output - * @constant - */ - type: "collect_output"; - }; - /** - * ColorCollectionOutput - * @description Base class for nodes that output a collection of colors - */ - ColorCollectionOutput: { - /** - * Collection - * @description The output colors - */ - collection: components["schemas"]["ColorField"][]; - /** - * type - * @default color_collection_output - * @constant - */ - type: "color_collection_output"; - }; /** * Color Correct * @description Shifts the colors of a target image to match the reference image, optionally * using a mask to only color-correct certain regions of the target image. 
*/ ColorCorrectInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -1940,11 +1884,20 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to color-correct */ + /** + * @description The image to color-correct + * @default null + */ image?: components["schemas"]["ImageField"]; - /** @description Reference image for color-correction */ + /** + * @description Reference image for color-correction + * @default null + */ reference?: components["schemas"]["ImageField"]; - /** @description Mask to use when applying color-correction */ + /** + * @description Mask to use when applying color-correction + * @default null + */ mask?: components["schemas"]["ImageField"] | null; /** * Mask Blur Radius @@ -2029,9 +1982,15 @@ export type components = { * @description Generates a color map from the provided image */ ColorMapImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -2050,7 +2009,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Color Map Tile Size @@ -2065,20 +2027,6 @@ export type components = { */ type: "color_map_image_processor"; }; - /** - * ColorOutput - * @description Base class for nodes that output a single color - */ - ColorOutput: { - /** @description The output color */ - color: components["schemas"]["ColorField"]; - /** - * type - * @default color_output - * @constant - */ - type: "color_output"; - }; /** * Prompt * @description Parse prompt using compel package to conditioning. @@ -2110,9 +2058,13 @@ export type components = { /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip?: components["schemas"]["CLIPField"]; - /** @description A mask defining the region that this conditioning prompt applies to. */ + /** + * @description A mask defining the region that this conditioning prompt applies to. 
+ * @default null + */ mask?: components["schemas"]["TensorField"] | null; /** * type @@ -2156,23 +2108,6 @@ export type components = { */ type: "conditioning_collection"; }; - /** - * ConditioningCollectionOutput - * @description Base class for nodes that output a collection of conditioning tensors - */ - ConditioningCollectionOutput: { - /** - * Collection - * @description The output conditioning tensors - */ - collection: components["schemas"]["ConditioningField"][]; - /** - * type - * @default conditioning_collection_output - * @constant - */ - type: "conditioning_collection_output"; - }; /** * ConditioningField * @description A conditioning tensor primitive value @@ -2183,10 +2118,7 @@ export type components = { * @description The name of conditioning tensor */ conditioning_name: string; - /** - * @description The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True. - * @default null - */ + /** @description The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True. */ mask?: components["schemas"]["TensorField"] | null; }; /** @@ -2211,7 +2143,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Conditioning tensor */ + /** + * @description Conditioning tensor + * @default null + */ conditioning?: components["schemas"]["ConditioningField"]; /** * type @@ -2220,28 +2155,20 @@ export type components = { */ type: "conditioning"; }; - /** - * ConditioningOutput - * @description Base class for nodes that output a single conditioning tensor - */ - ConditioningOutput: { - /** @description Conditioning tensor */ - conditioning: components["schemas"]["ConditioningField"]; - /** - * type - * @default conditioning_output - * @constant - */ - type: "conditioning_output"; - }; /** * Content Shuffle Processor * @description Applies content shuffle processing to image */ ContentShuffleImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -2260,7 +2187,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Detect Resolution @@ -2348,10 +2278,7 @@ export type components = { * @description Model config for ControlNet models (diffusers version). */ ControlNetCheckpointConfig: { - /** - * @description Default settings for this model - * @default null - */ + /** @description Default settings for this model */ default_settings?: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** * Key @@ -2378,7 +2305,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -2391,13 +2317,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
- * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -2428,10 +2352,7 @@ export type components = { * @description Model config for ControlNet models (diffusers version). */ ControlNetDiffusersConfig: { - /** - * @description Default settings for this model - * @default null - */ + /** @description Default settings for this model */ default_settings?: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** * Key @@ -2458,7 +2379,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -2471,13 +2391,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -2517,9 +2435,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The control image */ + /** + * @description The control image + * @default null + */ image?: components["schemas"]["ImageField"]; - /** @description ControlNet model to load */ + /** + * @description ControlNet model to load + * @default null + */ control_model?: components["schemas"]["ModelIdentifierField"]; /** * Control Weight @@ -2601,20 +2525,6 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; - /** - * ControlOutput - * @description node output for ControlNet info - */ - ControlOutput: { - /** @description ControlNet(s) to apply */ - control: components["schemas"]["ControlField"]; - /** - * type - * @default control_output - * @constant - */ - type: "control_output"; - }; /** * Core Metadata * @description Collects core generation metadata into a MetadataField @@ -2640,162 +2550,202 @@ export type components = { /** * Generation Mode * @description The generation mode that output this image + * @default null */ generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint") | null; /** * Positive Prompt * @description The positive prompt parameter + * @default null */ positive_prompt?: string | null; /** * Negative Prompt * @description The negative prompt parameter + * @default null */ negative_prompt?: string | null; /** * Width * @description The width parameter + * @default null */ width?: number | null; /** * Height * @description The height parameter + * @default null */ height?: number | null; /** * Seed * @description The seed used for noise generation + * @default null */ seed?: number | null; /** * Rand Device * @description The device used for random number generation + * @default null */ rand_device?: string | null; /** * Cfg Scale * @description The classifier-free guidance scale parameter + * @default null */ cfg_scale?: number | null; /** * Cfg Rescale Multiplier * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR + * @default null */ cfg_rescale_multiplier?: number | null; /** * Steps * @description The number of steps used for inference + * @default null */ steps?: number | null; /** * Scheduler * @description The scheduler used for inference + * @default null */ scheduler?: string | null; /** * Seamless X * @description Whether seamless tiling was used on 
the X axis + * @default null */ seamless_x?: boolean | null; /** * Seamless Y * @description Whether seamless tiling was used on the Y axis + * @default null */ seamless_y?: boolean | null; /** * Clip Skip * @description The number of skipped CLIP layers + * @default null */ clip_skip?: number | null; - /** @description The main model used for inference */ + /** + * @description The main model used for inference + * @default null + */ model?: components["schemas"]["ModelIdentifierField"] | null; /** * Controlnets * @description The ControlNets used for inference + * @default null */ controlnets?: components["schemas"]["ControlNetMetadataField"][] | null; /** * Ipadapters * @description The IP Adapters used for inference + * @default null */ ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; /** * T2Iadapters * @description The IP Adapters used for inference + * @default null */ t2iAdapters?: components["schemas"]["T2IAdapterMetadataField"][] | null; /** * Loras * @description The LoRAs used for inference + * @default null */ loras?: components["schemas"]["LoRAMetadataField"][] | null; /** * Strength * @description The strength used for latents-to-latents + * @default null */ strength?: number | null; /** * Init Image * @description The name of the initial image + * @default null */ init_image?: string | null; - /** @description The VAE used for decoding, if the main model's default was not used */ + /** + * @description The VAE used for decoding, if the main model's default was not used + * @default null + */ vae?: components["schemas"]["ModelIdentifierField"] | null; /** * Hrf Enabled * @description Whether or not high resolution fix was enabled. + * @default null */ hrf_enabled?: boolean | null; /** * Hrf Method * @description The high resolution fix upscale method. + * @default null */ hrf_method?: string | null; /** * Hrf Strength * @description The high resolution fix img2img strength used in the upscale pass. 
+ * @default null */ hrf_strength?: number | null; /** * Positive Style Prompt * @description The positive style prompt parameter + * @default null */ positive_style_prompt?: string | null; /** * Negative Style Prompt * @description The negative style prompt parameter + * @default null */ negative_style_prompt?: string | null; - /** @description The SDXL Refiner model used */ + /** + * @description The SDXL Refiner model used + * @default null + */ refiner_model?: components["schemas"]["ModelIdentifierField"] | null; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner + * @default null */ refiner_cfg_scale?: number | null; /** * Refiner Steps * @description The number of steps used for the refiner + * @default null */ refiner_steps?: number | null; /** * Refiner Scheduler * @description The scheduler used for the refiner + * @default null */ refiner_scheduler?: string | null; /** * Refiner Positive Aesthetic Score * @description The aesthetic score used for the refiner + * @default null */ refiner_positive_aesthetic_score?: number | null; /** * Refiner Negative Aesthetic Score * @description The aesthetic score used for the refiner + * @default null */ refiner_negative_aesthetic_score?: number | null; /** * Refiner Start * @description The start value used for refiner denoising + * @default null */ refiner_start?: number | null; /** @@ -2828,11 +2778,20 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description VAE */ + /** + * @description VAE + * @default null + */ vae?: components["schemas"]["VAEField"]; - /** @description Image which will be masked */ + /** + * @description Image which will be masked + * @default null + */ image?: components["schemas"]["ImageField"] | null; - /** @description The mask to use when pasting */ + /** + * @description The mask to use when pasting + * @default null + */ mask?: components["schemas"]["ImageField"]; /** * Tiled @@ -2875,7 +2834,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Image which will be masked */ + /** + * @description Image which will be masked + * @default null + */ mask?: components["schemas"]["ImageField"]; /** * Edge Radius @@ -2898,16 +2860,19 @@ export type components = { /** * [OPTIONAL] Image * @description OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE + * @default null */ image?: components["schemas"]["ImageField"] | null; /** * [OPTIONAL] UNet * @description OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE + * @default null */ unet?: components["schemas"]["UNetField"] | null; /** * [OPTIONAL] VAE * @description OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE + * @default null */ vae?: components["schemas"]["VAEField"] | null; /** @@ -2952,26 +2917,33 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Latents tensor */ + /** + * @description Latents tensor + * @default null + */ latents?: components["schemas"]["LatentsField"]; /** * X * @description The left x coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * @default null */ x?: number; /** * Y * @description The top y coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. 
+ * @default null */ y?: number; /** * Width * @description The width (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * @default null */ width?: number; /** * Height * @description The height (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * @default null */ height?: number; /** @@ -3004,9 +2976,15 @@ export type components = { * @description Simple inpaint using opencv. */ CvInpaintInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -3025,9 +3003,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to inpaint */ + /** + * @description The image to inpaint + * @default null + */ image?: components["schemas"]["ImageField"]; - /** @description The mask to use when inpainting */ + /** + * @description The mask to use when inpainting + * @default null + */ mask?: components["schemas"]["ImageField"]; /** * type @@ -3041,9 +3025,15 @@ export type components = { * @description Generates an openpose pose from an image using DWPose */ DWOpenposeImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -3062,7 +3052,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Draw Body @@ -3140,14 +3133,19 @@ export type components = { /** * Positive Conditioning * @description Positive conditioning tensor + * @default null */ positive_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][]; /** * Negative Conditioning * @description Negative conditioning tensor + * @default null */ negative_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][]; - /** @description Noise tensor */ + /** + * @description Noise tensor + * @default null + */ noise?: components["schemas"]["LatentsField"] | null; /** * Steps @@ -3183,18 +3181,24 @@ export type components = { /** * UNet * @description UNet (scheduler, LoRAs) + * @default null */ unet?: components["schemas"]["UNetField"]; - /** Control */ + /** + * Control + * @default null + */ control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; /** * IP-Adapter * @description IP-Adapter to apply + * @default null */ ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; /** * T2I-Adapter * @description T2I-Adapter(s) to apply + * @default null */ t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; /** @@ -3203,9 +3207,15 @@ export type components = { 
* @default 0 */ cfg_rescale_multiplier?: number; - /** @description Latents tensor */ + /** + * @description Latents tensor + * @default null + */ latents?: components["schemas"]["LatentsField"] | null; - /** @description The mask to use for the operation */ + /** + * @description The mask to use for the operation + * @default null + */ denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; /** * type @@ -3227,7 +3237,6 @@ export type components = { /** * Masked Latents Name * @description The name of the masked image latents - * @default null */ masked_latents_name?: string | null; /** @@ -3237,28 +3246,20 @@ export type components = { */ gradient?: boolean; }; - /** - * DenoiseMaskOutput - * @description Base class for nodes that output a single image - */ - DenoiseMaskOutput: { - /** @description Mask for denoise model run */ - denoise_mask: components["schemas"]["DenoiseMaskField"]; - /** - * type - * @default denoise_mask_output - * @constant - */ - type: "denoise_mask_output"; - }; /** * Depth Anything Processor * @description Generates a depth map based on the Depth Anything algorithm */ DepthAnythingImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -3277,7 +3278,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Model Size @@ -3453,6 +3457,7 @@ export type components = { /** * Prompt * @description The prompt to parse with dynamicprompts + * @default null */ prompt?: string; /** @@ -3486,9 +3491,15 @@ export type components = { * @description Upscales an image using RealESRGAN. */ ESRGANInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -3507,7 +3518,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The input image */ + /** + * @description The input image + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Model Name @@ -3586,9 +3600,15 @@ export type components = { * @description Outputs an image with detected face IDs printed on each face. For use with other FaceTools. 
*/ FaceIdentifierInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -3607,7 +3627,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Image to face detect */ + /** + * @description Image to face detect + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Minimum Confidence @@ -3633,7 +3656,10 @@ export type components = { * @description Face mask creation using mediapipe face detection */ FaceMaskInvocation: { - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -3652,7 +3678,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Image to face detect */ + /** + * @description Image to face detect + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Face Ids @@ -3697,38 +3726,15 @@ export type components = { */ type: "face_mask_detection"; }; - /** - * FaceMaskOutput - * @description Base class for FaceMask output - */ - FaceMaskOutput: { - /** @description The output image */ - image: components["schemas"]["ImageField"]; - /** - * Width - * @description The width of the image in pixels - */ - width: number; - /** - * Height - * @description The height of the image in pixels - */ - height: number; - /** - * type - * @default face_mask_output - * @constant - */ - type: "face_mask_output"; - /** @description The output mask */ - mask: components["schemas"]["ImageField"]; - }; /** * FaceOff * @description Bound, extract, and mask a face from an image using MediaPipe detection */ FaceOffInvocation: { - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -3747,7 +3753,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Image for face detection */ + /** + * @description Image for face detection + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Face Id @@ -3792,42 +3801,6 @@ export type components = { */ type: "face_off"; }; - /** - * FaceOffOutput - * @description Base class for FaceOff Output - */ - FaceOffOutput: { - /** @description The output image */ - image: components["schemas"]["ImageField"]; - /** - * Width - * @description The width of the image in pixels - */ - width: number; - /** - * Height - * @description The height of the image in pixels - */ - height: number; - /** - * type - * @default face_off_output - * @constant - */ - type: "face_off_output"; - /** @description The output mask */ - mask: components["schemas"]["ImageField"]; - /** - * X - * @description The x coordinate of the bounding box's left side - */ - x: number; - /** - * Y - * @description The y coordinate of the bounding box's top side - */ - y: number; - }; /** * Float Collection Primitive * @description A collection of float primitive values @@ -3863,23 +3836,6 @@ export type components = { */ type: "float_collection"; }; - /** 
- * FloatCollectionOutput - * @description Base class for nodes that output a collection of floats - */ - FloatCollectionOutput: { - /** - * Collection - * @description The float collection - */ - collection: number[]; - /** - * type - * @default float_collection_output - * @constant - */ - type: "float_collection_output"; - }; /** * Float Primitive * @description A float primitive value @@ -4010,23 +3966,6 @@ export type components = { */ type: "float_math"; }; - /** - * FloatOutput - * @description Base class for nodes that output a single float - */ - FloatOutput: { - /** - * Value - * @description The output float - */ - value: number; - /** - * type - * @default float_output - * @constant - */ - type: "float_output"; - }; /** * Float To Integer * @description Rounds a float number to (a multiple of) an integer. @@ -4145,6 +4084,7 @@ export type components = { /** * UNet * @description UNet (scheduler, LoRAs) + * @default null */ unet?: components["schemas"]["UNetField"]; /** @@ -4178,41 +4118,25 @@ export type components = { */ type: "freeu"; }; - /** - * GradientMaskOutput - * @description Outputs a denoise mask and an image representing the total gradient of the mask. - */ - GradientMaskOutput: { - /** @description Mask for denoise model run */ - denoise_mask: components["schemas"]["DenoiseMaskField"]; - /** @description Image representing the total gradient area of the mask. For paste-back purposes. */ - expanded_mask_area: components["schemas"]["ImageField"]; - /** - * type - * @default gradient_mask_output - * @constant - */ - type: "gradient_mask_output"; - }; /** Graph */ Graph: { /** * Id * @description The id of this graph */ - id?: string | null; + id?: string; /** * Nodes * @description The nodes in this graph */ - nodes: { - [key: string]: components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["RangeInvocation"] | 
components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | 
components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["IterateInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["StringReplaceInvocation"]; + nodes?: { + [key: string]: components["schemas"]["AnyInvocation"]; }; /** * Edges * @description The connections between nodes and their fields in this graph */ - edges: components["schemas"]["Edge"][]; + edges?: components["schemas"]["Edge"][]; }; /** * GraphExecutionState @@ -4223,47 +4147,47 @@ export type components = { * Id * @description The id of the execution state */ - id: string; + id?: string; /** @description The graph being executed */ graph: components["schemas"]["Graph"]; /** @description The expanded graph of activated and executed nodes */ - execution_graph: components["schemas"]["Graph"]; + execution_graph?: components["schemas"]["Graph"]; /** * Executed * @description The set of node ids that have been executed */ - executed: string[]; + executed?: string[]; /** * Executed History * @description The list of node ids that have been executed, in order of execution */ - executed_history: string[]; + executed_history?: string[]; /** * Results * @description The results of node executions */ - results: { - [key: string]: components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["String2Output"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | 
components["schemas"]["T2IAdapterOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["ConditioningCollectionOutput"]; + results?: { + [key: string]: components["schemas"]["AnyInvocationOutput"]; }; /** * Errors * @description Errors raised when executing nodes */ - errors: { + errors?: { [key: string]: string; }; /** * Prepared Source Mapping * @description The map of prepared nodes to original graph nodes */ - prepared_source_mapping: { + prepared_source_mapping?: { [key: string]: string; }; /** * Source Prepared Mapping * @description The map of original graph nodes to prepared nodes */ - source_prepared_mapping: { + source_prepared_mapping?: { [key: string]: string[]; }; }; @@ -4299,9 +4223,15 @@ export type components = { * @description Applies HED edge detection to image */ HedImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -4320,7 +4250,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Detect Resolution @@ -4369,7 +4302,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to resize */ + /** + * @description The image to resize + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Width @@ -4476,7 +4412,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -4489,13 +4424,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
- * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -4545,10 +4478,7 @@ export type components = { * @default 1 */ end_step_percent?: number; - /** - * @description The bool mask associated with this IP-Adapter. Excluded regions should be set to False, included regions should be set to True. - * @default null - */ + /** @description The bool mask associated with this IP-Adapter. Excluded regions should be set to False, included regions should be set to True. */ mask?: components["schemas"]["TensorField"] | null; }; /** @@ -4576,11 +4506,13 @@ export type components = { /** * Image * @description The IP-Adapter image prompt(s). + * @default null */ image?: components["schemas"]["ImageField"] | components["schemas"]["ImageField"][]; /** * IP-Adapter Model * @description The IP-Adapter model. + * @default null */ ip_adapter_model?: components["schemas"]["ModelIdentifierField"]; /** @@ -4615,7 +4547,10 @@ export type components = { * @default 1 */ end_step_percent?: number; - /** @description A mask defining the region that this IP-Adapter applies to. */ + /** + * @description A mask defining the region that this IP-Adapter applies to. + * @default null + */ mask?: components["schemas"]["TensorField"] | null; /** * type @@ -4654,7 +4589,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -4667,13 +4601,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -4727,20 +4659,6 @@ export type components = { */ end_step_percent: number; }; - /** IPAdapterOutput */ - IPAdapterOutput: { - /** - * IP-Adapter - * @description IP-Adapter to apply - */ - ip_adapter: components["schemas"]["IPAdapterField"]; - /** - * type - * @default ip_adapter_output - * @constant - */ - type: "ip_adapter_output"; - }; /** * Ideal Size * @description Calculates the ideal size for generation to avoid duplication @@ -4775,7 +4693,10 @@ export type components = { * @default 576 */ height?: number; - /** @description UNet (scheduler, LoRAs) */ + /** + * @description UNet (scheduler, LoRAs) + * @default null + */ unet?: components["schemas"]["UNetField"]; /** * Multiplier @@ -4790,36 +4711,20 @@ export type components = { */ type: "ideal_size"; }; - /** - * IdealSizeOutput - * @description Base class for invocations that output an image - */ - IdealSizeOutput: { - /** - * Width - * @description The ideal width of the image (in pixels) - */ - width: number; - /** - * Height - * @description The ideal height of the image (in pixels) - */ - height: number; - /** - * type - * @default ideal_size_output - * @constant - */ - type: "ideal_size_output"; - }; /** * Blur Image * @description Blurs an image */ ImageBlurInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -4838,7 +4743,10 @@ export type 
components = { * @default true */ use_cache?: boolean; - /** @description The image to blur */ + /** + * @description The image to blur + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Radius @@ -4877,9 +4785,15 @@ export type components = { * @description Gets a channel from an image. */ ImageChannelInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -4898,7 +4812,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to get the channel from */ + /** + * @description The image to get the channel from + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -4919,9 +4836,15 @@ export type components = { * @description Scale a specific color channel of an image. */ ImageChannelMultiplyInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -4940,11 +4863,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to adjust */ + /** + * @description The image to adjust + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Channel * @description Which channel to adjust + * @default null * @enum {string} */ channel?: "Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)"; @@ -4972,9 +4899,15 @@ export type components = { * @description Add or subtract a value from a specific color channel of an image. 
*/ ImageChannelOffsetInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -4993,11 +4926,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to adjust */ + /** + * @description The image to adjust + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Channel * @description Which channel to adjust + * @default null * @enum {string} */ channel?: "Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)"; @@ -5039,6 +4976,7 @@ export type components = { /** * Collection * @description The collection of image values + * @default null */ collection?: components["schemas"]["ImageField"][]; /** @@ -5048,31 +4986,20 @@ export type components = { */ type: "image_collection"; }; - /** - * ImageCollectionOutput - * @description Base class for nodes that output a collection of images - */ - ImageCollectionOutput: { - /** - * Collection - * @description The output images - */ - collection: components["schemas"]["ImageField"][]; - /** - * type - * @default image_collection_output - * @constant - */ - type: "image_collection_output"; - }; /** * Convert Image Mode * @description Converts an image to a different mode. */ ImageConvertInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5091,7 +5018,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to convert */ + /** + * @description The image to convert + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Mode @@ -5112,9 +5042,15 @@ export type components = { * @description Crops an image to a specified box. The box can be outside of the image. */ ImageCropInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5133,7 +5069,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to crop */ + /** + * @description The image to crop + * @default null + */ image?: components["schemas"]["ImageField"]; /** * X @@ -5262,9 +5201,15 @@ export type components = { * @description Adjusts the Hue of an image. 
*/ ImageHueAdjustmentInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5283,7 +5228,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to adjust */ + /** + * @description The image to adjust + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Hue @@ -5303,9 +5251,15 @@ export type components = { * @description Inverse linear interpolation of all pixels of an image */ ImageInverseLerpInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5324,7 +5278,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to lerp */ + /** + * @description The image to lerp + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Min @@ -5367,7 +5324,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to load */ + /** + * @description The image to load + * @default null + */ image?: components["schemas"]["ImageField"]; /** * type @@ -5381,9 +5341,15 @@ export type components = { * @description Linear interpolation of all pixels of an image */ ImageLerpInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5402,7 +5368,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to lerp */ + /** + * @description The image to lerp + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Min @@ -5428,7 +5397,10 @@ export type components = { * @description Convert a mask image to a tensor. Converts the image to grayscale and uses thresholding at the specified value. */ ImageMaskToTensorInvocation: { - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5447,7 +5419,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The mask image to convert. */ + /** + * @description The mask image to convert. + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Cutoff @@ -5473,9 +5448,15 @@ export type components = { * @description Multiplies two images together using `PIL.ImageChops.multiply()`. 
*/ ImageMultiplyInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5494,9 +5475,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The first image to multiply */ + /** + * @description The first image to multiply + * @default null + */ image1?: components["schemas"]["ImageField"]; - /** @description The second image to multiply */ + /** + * @description The second image to multiply + * @default null + */ image2?: components["schemas"]["ImageField"]; /** * type @@ -5510,9 +5497,15 @@ export type components = { * @description Add blur to NSFW-flagged images */ ImageNSFWBlurInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5531,7 +5524,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to check */ + /** + * @description The image to check + * @default null + */ image?: components["schemas"]["ImageField"]; /** * type @@ -5540,38 +5536,20 @@ export type components = { */ type: "img_nsfw"; }; - /** - * ImageOutput - * @description Base class for nodes that output a single image - */ - ImageOutput: { - /** @description The output image */ - image: components["schemas"]["ImageField"]; - /** - * Width - * @description The width of the image in pixels - */ - width: number; - /** - * Height - * @description The height of the image in pixels - */ - height: number; - /** - * type - * @default image_output - * @constant - */ - type: "image_output"; - }; /** * Paste Image * @description Pastes an image into another image. 
*/ ImagePasteInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5590,11 +5568,20 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The base image */ + /** + * @description The base image + * @default null + */ base_image?: components["schemas"]["ImageField"]; - /** @description The image to paste */ + /** + * @description The image to paste + * @default null + */ image?: components["schemas"]["ImageField"]; - /** @description The mask to use when pasting */ + /** + * @description The mask to use when pasting + * @default null + */ mask?: components["schemas"]["ImageField"] | null; /** * X @@ -5656,9 +5643,15 @@ export type components = { * @description Resizes an image to specific dimensions */ ImageResizeInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5677,7 +5670,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to resize */ + /** + * @description The image to resize + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Width @@ -5710,9 +5706,15 @@ export type components = { * @description Scales an image by a factor */ ImageScaleInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5731,7 +5733,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to scale */ + /** + * @description The image to scale + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Scale Factor @@ -5775,9 +5780,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to encode */ + /** + * @description The image to encode + * @default null + */ image?: components["schemas"]["ImageField"]; - /** @description VAE */ + /** + * @description VAE + * @default null + */ vae?: components["schemas"]["VAEField"]; /** * Tiled @@ -5824,9 +5835,15 @@ export type components = { * @description Add an invisible watermark to an image */ ImageWatermarkInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5845,7 +5862,10 @@ export type components = { 
* @default true */ use_cache?: boolean; - /** @description The image to check */ + /** + * @description The image to check + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Text @@ -5886,9 +5906,15 @@ export type components = { * @description Infills transparent areas of an image with a solid color */ InfillColorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5907,7 +5933,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * @description The color to use to infill @@ -5931,9 +5960,15 @@ export type components = { * @description Infills transparent areas of an image using the PatchMatch algorithm */ InfillPatchMatchInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -5952,7 +5987,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Downscale @@ -5979,9 +6017,15 @@ export type components = { * @description Infills transparent areas of an image with tiles of the image */ InfillTileInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -6000,7 +6044,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Tile Size @@ -6062,23 +6109,6 @@ export type components = { */ type: "integer_collection"; }; - /** - * IntegerCollectionOutput - * @description Base class for nodes that output a collection of integers - */ - IntegerCollectionOutput: { - /** - * Collection - * @description The int collection - */ - collection: number[]; - /** - * type - * @default integer_collection_output - * @constant - */ - type: "integer_collection_output"; - }; /** * Integer Primitive * @description An integer primitive value @@ -6162,23 +6192,6 @@ export type components = { */ type: "integer_math"; }; - /** - * IntegerOutput - * @description Base class for nodes that output a single integer - */ - IntegerOutput: { - /** - * Value - * @description The output integer - */ - value: number; - /** - * type - * @default integer_output - * @constant - */ - type: 
"integer_output"; - }; /** * Invert Tensor Mask * @description Inverts a tensor mask. @@ -6201,7 +6214,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The tensor mask to convert. */ + /** + * @description The tensor mask to convert. + * @default null + */ mask?: components["schemas"]["TensorField"]; /** * type @@ -6279,42 +6295,21 @@ export type components = { */ type: "iterate"; }; - /** - * IterateInvocationOutput - * @description Used to connect iteration outputs. Will be expanded to a specific output. - */ - IterateInvocationOutput: { - /** - * Collection Item - * @description The item being iterated over - */ - item: unknown; - /** - * Index - * @description The index of the item - */ - index: number; - /** - * Total - * @description The total number of items - */ - total: number; - /** - * type - * @default iterate_output - * @constant - */ - type: "iterate_output"; - }; JsonValue: unknown; /** * LaMa Infill * @description Infills transparent areas of an image using the LaMa model */ LaMaInfillInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -6333,7 +6328,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * type @@ -6367,6 +6365,7 @@ export type components = { /** * Collection * @description The collection of latents tensors + * @default null */ collection?: components["schemas"]["LatentsField"][]; /** @@ -6376,23 +6375,6 @@ export type components = { */ type: "latents_collection"; }; - /** - * LatentsCollectionOutput - * @description Base class for nodes that output a collection of latents tensors - */ - LatentsCollectionOutput: { - /** - * Collection - * @description Latents tensor - */ - collection: components["schemas"]["LatentsField"][]; - /** - * type - * @default latents_collection_output - * @constant - */ - type: "latents_collection_output"; - }; /** * LatentsField * @description A latents tensor primitive field @@ -6406,7 +6388,6 @@ export type components = { /** * Seed * @description Seed used to generate this latents - * @default null */ seed?: number | null; }; @@ -6432,7 +6413,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The latents tensor */ + /** + * @description The latents tensor + * @default null + */ latents?: components["schemas"]["LatentsField"]; /** * type @@ -6441,38 +6425,20 @@ export type components = { */ type: "latents"; }; - /** - * LatentsOutput - * @description Base class for nodes that output a single latents tensor - */ - LatentsOutput: { - /** @description Latents tensor */ - latents: components["schemas"]["LatentsField"]; - /** - * Width - * @description Width of output (px) - */ - width: number; - /** - * Height - * @description Height of output (px) - */ - height: number; - /** - * type - * @default latents_output - * @constant - */ - type: "latents_output"; - }; /** * Latents to Image * @description Generates an image from latents. 
*/ LatentsToImageInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -6491,9 +6457,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Latents tensor */ + /** + * @description Latents tensor + * @default null + */ latents?: components["schemas"]["LatentsField"]; - /** @description VAE */ + /** + * @description VAE + * @default null + */ vae?: components["schemas"]["VAEField"]; /** * Tiled @@ -6519,9 +6491,15 @@ export type components = { * @description Applies leres processing to image */ LeresImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -6540,7 +6518,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Thr A @@ -6584,9 +6565,15 @@ export type components = { * @description Applies line art anime processing to image */ LineartAnimeImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -6605,7 +6592,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Detect Resolution @@ -6631,9 +6621,15 @@ export type components = { * @description Applies line art processing to image */ LineartImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -6652,7 +6648,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Detect Resolution @@ -6704,16 +6703,19 @@ export type components = { /** * LoRAs * @description LoRA models and weights. May be a single LoRA or collection. 
+ * @default null */ loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][]; /** * UNet * @description UNet (scheduler, LoRAs) + * @default null */ unet?: components["schemas"]["UNetField"] | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip?: components["schemas"]["CLIPField"] | null; /** @@ -6753,7 +6755,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -6766,13 +6767,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -6784,7 +6783,6 @@ export type components = { /** * Trigger Phrases * @description Set of trigger phrases for this model - * @default null */ trigger_phrases?: string[] | null; /** @@ -6829,6 +6827,7 @@ export type components = { /** * LoRA * @description LoRA model to load + * @default null */ lora?: components["schemas"]["ModelIdentifierField"]; /** @@ -6840,11 +6839,13 @@ export type components = { /** * UNet * @description UNet (scheduler, LoRAs) + * @default null */ unet?: components["schemas"]["UNetField"] | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip?: components["schemas"]["CLIPField"] | null; /** @@ -6854,30 +6855,6 @@ export type components = { */ type: "lora_loader"; }; - /** - * LoRALoaderOutput - * @description Model loader output - */ - LoRALoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet: components["schemas"]["UNetField"] | null; - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip: components["schemas"]["CLIPField"] | null; - /** - * type - * @default lora_loader_output - * @constant - */ - type: "lora_loader_output"; - }; /** * LoRALyCORISConfig * @description Model config for LoRA/Lycoris models. @@ -6908,7 +6885,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -6921,13 +6897,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -6939,7 +6913,6 @@ export type components = { /** * Trigger Phrases * @description Set of trigger phrases for this model - * @default null */ trigger_phrases?: string[] | null; /** @@ -6987,6 +6960,7 @@ export type components = { /** * LoRA * @description LoRA model to load + * @default null */ lora?: components["schemas"]["ModelIdentifierField"]; /** @@ -7002,23 +6976,6 @@ export type components = { */ type: "lora_selector"; }; - /** - * LoRASelectorOutput - * @description Model loader output - */ - LoRASelectorOutput: { - /** - * LoRA - * @description LoRA model and weight - */ - lora: components["schemas"]["LoRAField"]; - /** - * type - * @default lora_selector_output - * @constant - */ - type: "lora_selector_output"; - }; /** * LocalModelSource * @description A local file or directory path. 
@@ -7073,7 +7030,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -7086,13 +7042,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -7104,13 +7058,9 @@ export type components = { /** * Trigger Phrases * @description Set of trigger phrases for this model - * @default null */ trigger_phrases?: string[] | null; - /** - * @description Default settings for this model - * @default null - */ + /** @description Default settings for this model */ default_settings?: components["schemas"]["MainModelDefaultSettings"] | null; /** @default normal */ variant?: components["schemas"]["ModelVariantType"]; @@ -7168,7 +7118,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -7181,13 +7130,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -7199,13 +7146,9 @@ export type components = { /** * Trigger Phrases * @description Set of trigger phrases for this model - * @default null */ trigger_phrases?: string[] | null; - /** - * @description Default settings for this model - * @default null - */ + /** @description Default settings for this model */ default_settings?: components["schemas"]["MainModelDefaultSettings"] | null; /** @default normal */ variant?: components["schemas"]["ModelVariantType"]; @@ -7223,49 +7166,41 @@ export type components = { /** * Vae * @description Default VAE for this model (model key) - * @default null */ vae?: string | null; /** * Vae Precision * @description Default VAE precision for this model - * @default null */ vae_precision?: ("fp16" | "fp32") | null; /** * Scheduler * @description Default scheduler for this model - * @default null */ scheduler?: ("ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd") | null; /** * Steps * @description Default number of steps for this model - * @default null */ steps?: number | null; /** * Cfg Scale * @description Default CFG Scale for this model - * @default null */ cfg_scale?: number | null; /** * Cfg Rescale Multiplier * @description Default CFG Rescale Multiplier for this model - * @default null */ cfg_rescale_multiplier?: number | null; /** * Width * @description Default width for this model - * @default null */ width?: number | null; /** * Height * @description Default height for this model - * @default null */ height?: number | null; }; @@ -7291,7 +7226,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Main model (UNet, VAE, CLIP) to load */ + /** + * @description Main model (UNet, VAE, CLIP) to load + * @default null + */ model?: components["schemas"]["ModelIdentifierField"]; /** * type @@ -7305,9 +7243,15 @@ export type components = { * @description Combine two masks together by 
multiplying them using `PIL.ImageChops.multiply()`. */ MaskCombineInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -7326,9 +7270,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The first mask to combine */ + /** + * @description The first mask to combine + * @default null + */ mask1?: components["schemas"]["ImageField"]; - /** @description The second image to combine */ + /** + * @description The second image to combine + * @default null + */ mask2?: components["schemas"]["ImageField"]; /** * type @@ -7342,9 +7292,15 @@ export type components = { * @description Applies an edge mask to an image */ MaskEdgeInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -7363,26 +7319,33 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to apply the mask to */ + /** + * @description The image to apply the mask to + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Edge Size * @description The size of the edge + * @default null */ edge_size?: number; /** * Edge Blur * @description The amount of blur on the edge + * @default null */ edge_blur?: number; /** * Low Threshold * @description First threshold for the hysteresis procedure in Canny edge detection + * @default null */ low_threshold?: number; /** * High Threshold * @description Second threshold for the hysteresis procedure in Canny edge detection + * @default null */ high_threshold?: number; /** @@ -7397,9 +7360,15 @@ export type components = { * @description Extracts the alpha channel of an image as a mask. 
*/ MaskFromAlphaInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -7418,7 +7387,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to create the mask from */ + /** + * @description The image to create the mask from + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Invert @@ -7438,9 +7410,15 @@ export type components = { * @description Generate a mask for a particular color in an ID Map */ MaskFromIDInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -7459,9 +7437,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to create the mask from */ + /** + * @description The image to create the mask from + * @default null + */ image?: components["schemas"]["ImageField"]; - /** @description ID color to mask */ + /** + * @description ID color to mask + * @default null + */ color?: components["schemas"]["ColorField"]; /** * Threshold @@ -7482,38 +7466,20 @@ export type components = { */ type: "mask_from_id"; }; - /** - * MaskOutput - * @description A torch mask tensor. - */ - MaskOutput: { - /** @description The mask. */ - mask: components["schemas"]["TensorField"]; - /** - * Width - * @description The width of the mask in pixels. - */ - width: number; - /** - * Height - * @description The height of the mask in pixels. - */ - height: number; - /** - * type - * @default mask_output - * @constant - */ - type: "mask_output"; - }; /** * Mediapipe Face Processor * @description Applies mediapipe face processing to image */ MediapipeFaceProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -7532,7 +7498,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Max Faces @@ -7590,6 +7559,7 @@ export type components = { /** * Collection * @description Collection of Metadata + * @default null */ collection?: components["schemas"]["MetadataField"][]; /** @@ -7604,9 +7574,15 @@ export type components = { * @description Merge multiple tile images into a single image. 
*/ MergeTilesToImageInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -7628,6 +7604,7 @@ export type components = { /** * Tiles With Images * @description A list of tile images with tile properties. + * @default null */ tiles_with_images?: components["schemas"]["TileWithImage"][]; /** @@ -7681,6 +7658,7 @@ export type components = { /** * Items * @description A single metadata item or collection of metadata items + * @default null */ items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"]; /** @@ -7728,11 +7706,13 @@ export type components = { /** * Label * @description Label for this metadata item + * @default null */ label?: string; /** * Value * @description The value for this metadata item (may be any type) + * @default null */ value?: unknown; /** @@ -7742,39 +7722,20 @@ export type components = { */ type: "metadata_item"; }; - /** - * MetadataItemOutput - * @description Metadata Item Output - */ - MetadataItemOutput: { - /** @description Metadata Item */ - item: components["schemas"]["MetadataItemField"]; - /** - * type - * @default metadata_item_output - * @constant - */ - type: "metadata_item_output"; - }; - /** MetadataOutput */ - MetadataOutput: { - /** @description Metadata Dict */ - metadata: components["schemas"]["MetadataField"]; - /** - * type - * @default metadata_output - * @constant - */ - type: "metadata_output"; - }; /** * Midas Depth Processor * @description Applies Midas depth processing to image */ MidasDepthImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -7793,7 +7754,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * A Mult @@ -7831,9 +7795,15 @@ export type components = { * @description Applies MLSD processing to image */ MlsdImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -7852,7 +7822,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Detect Resolution @@ -7912,10 +7885,7 @@ export type components = { base: components["schemas"]["BaseModelType"]; /** @description The model's type */ type: components["schemas"]["ModelType"]; - /** 
- * @description The submodel to load, if this is a main model - * @default null - */ + /** @description The submodel to load, if this is a main model */ submodel_type?: components["schemas"]["SubModelType"] | null; }; /** @@ -7945,6 +7915,7 @@ export type components = { /** * Model * @description The model to select + * @default null */ model?: components["schemas"]["ModelIdentifierField"]; /** @@ -7954,23 +7925,6 @@ export type components = { */ type: "model_identifier"; }; - /** - * ModelIdentifierOutput - * @description Model identifier output - */ - ModelIdentifierOutput: { - /** - * Model - * @description Model identifier - */ - model: components["schemas"]["ModelIdentifierField"]; - /** - * type - * @default model_identifier_output - * @constant - */ - type: "model_identifier_output"; - }; /** * ModelInstallJob * @description Object that tracks the current status of an install request. @@ -8051,33 +8005,6 @@ export type components = { */ error_traceback?: string | null; }; - /** - * ModelLoaderOutput - * @description Model loader output - */ - ModelLoaderOutput: { - /** - * VAE - * @description VAE - */ - vae: components["schemas"]["VAEField"]; - /** - * type - * @default model_loader_output - * @constant - */ - type: "model_loader_output"; - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip: components["schemas"]["CLIPField"]; - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; - }; /** * ModelRecordChanges * @description A set of changes to apply to a model. @@ -8269,38 +8196,20 @@ export type components = { */ type: "noise"; }; - /** - * NoiseOutput - * @description Invocation noise output - */ - NoiseOutput: { - /** @description Noise tensor */ - noise: components["schemas"]["LatentsField"]; - /** - * Width - * @description Width of output (px) - */ - width: number; - /** - * Height - * @description Height of output (px) - */ - height: number; - /** - * type - * @default noise_output - * @constant - */ - type: "noise_output"; - }; /** * Normal BAE Processor * @description Applies NormalBae processing to image */ NormalbaeImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -8319,7 +8228,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Detect Resolution @@ -8436,9 +8348,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The tile image. */ + /** + * @description The tile image. + * @default null + */ image?: components["schemas"]["ImageField"]; - /** @description The tile properties. */ + /** + * @description The tile properties. + * @default null + */ tile?: components["schemas"]["Tile"]; /** * type @@ -8447,25 +8365,20 @@ export type components = { */ type: "pair_tile_image"; }; - /** PairTileImageOutput */ - PairTileImageOutput: { - /** @description A tile description with its corresponding image. 
*/ - tile_with_image: components["schemas"]["TileWithImage"]; - /** - * type - * @default pair_tile_image_output - * @constant - */ - type: "pair_tile_image_output"; - }; /** * PIDI Processor * @description Applies PIDI processing to image */ PidiImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -8484,7 +8397,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Detect Resolution @@ -8542,16 +8458,19 @@ export type components = { /** * File Path * @description Path to prompt text file + * @default null */ file_path?: string; /** * Pre Prompt * @description String to prepend to each prompt + * @default null */ pre_prompt?: string | null; /** * Post Prompt * @description String to append to each prompt + * @default null */ post_prompt?: string | null; /** @@ -8824,7 +8743,10 @@ export type components = { * @description Create a rectangular mask. */ RectangleMaskInvocation: { - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -8846,31 +8768,37 @@ export type components = { /** * Width * @description The width of the entire mask. + * @default null */ width?: number; /** * Height * @description The height of the entire mask. + * @default null */ height?: number; /** * X Left * @description The left x-coordinate of the rectangular masked region (inclusive). + * @default null */ x_left?: number; /** * Y Top * @description The top y-coordinate of the rectangular masked region (inclusive). + * @default null */ y_top?: number; /** * Rectangle Width * @description The width of the rectangular masked region. + * @default null */ rectangle_width?: number; /** * Rectangle Height * @description The height of the rectangular masked region. + * @default null */ rectangle_height?: number; /** @@ -8938,16 +8866,21 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Latents tensor */ + /** + * @description Latents tensor + * @default null + */ latents?: components["schemas"]["LatentsField"]; /** * Width * @description Width of output (px) + * @default null */ width?: number; /** * Height * @description Width of output (px) + * @default null */ height?: number; /** @@ -9088,14 +9021,19 @@ export type components = { /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip?: components["schemas"]["CLIPField"]; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip2?: components["schemas"]["CLIPField"]; - /** @description A mask defining the region that this conditioning prompt applies to. */ + /** + * @description A mask defining the region that this conditioning prompt applies to. 
+ * @default null + */ mask?: components["schemas"]["TensorField"] | null; /** * type @@ -9129,21 +9067,25 @@ export type components = { /** * LoRAs * @description LoRA models and weights. May be a single LoRA or collection. + * @default null */ loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][]; /** * UNet * @description UNet (scheduler, LoRAs) + * @default null */ unet?: components["schemas"]["UNetField"] | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip?: components["schemas"]["CLIPField"] | null; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip2?: components["schemas"]["CLIPField"] | null; /** @@ -9178,6 +9120,7 @@ export type components = { /** * LoRA * @description LoRA model to load + * @default null */ lora?: components["schemas"]["ModelIdentifierField"]; /** @@ -9189,16 +9132,19 @@ export type components = { /** * UNet * @description UNet (scheduler, LoRAs) + * @default null */ unet?: components["schemas"]["UNetField"] | null; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip?: components["schemas"]["CLIPField"] | null; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ clip2?: components["schemas"]["CLIPField"] | null; /** @@ -9208,36 +9154,6 @@ export type components = { */ type: "sdxl_lora_loader"; }; - /** - * SDXLLoRALoaderOutput - * @description SDXL LoRA Loader Output - */ - SDXLLoRALoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet: components["schemas"]["UNetField"] | null; - /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip: components["schemas"]["CLIPField"] | null; - /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip2: components["schemas"]["CLIPField"] | null; - /** - * type - * @default sdxl_lora_loader_output - * @constant - */ - type: "sdxl_lora_loader_output"; - }; /** * SDXL Main Model * @description Loads an sdxl base model, outputting its submodels. @@ -9260,7 +9176,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load */ + /** + * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load + * @default null + */ model?: components["schemas"]["ModelIdentifierField"]; /** * type @@ -9269,38 +9188,6 @@ export type components = { */ type: "sdxl_model_loader"; }; - /** - * SDXLModelLoaderOutput - * @description SDXL base model loader output - */ - SDXLModelLoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; - /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip: components["schemas"]["CLIPField"]; - /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip2: components["schemas"]["CLIPField"]; - /** - * VAE - * @description VAE - */ - vae: components["schemas"]["VAEField"]; - /** - * type - * @default sdxl_model_loader_output - * @constant - */ - type: "sdxl_model_loader_output"; - }; /** * SDXL Refiner Prompt * @description Parse prompt using compel package to conditioning. 
@@ -9355,7 +9242,10 @@ export type components = { * @default 6 */ aesthetic_score?: number; - /** @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ + /** + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ clip2?: components["schemas"]["CLIPField"]; /** * type @@ -9386,7 +9276,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load */ + /** + * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load + * @default null + */ model?: components["schemas"]["ModelIdentifierField"]; /** * type @@ -9395,33 +9288,6 @@ export type components = { */ type: "sdxl_refiner_model_loader"; }; - /** - * SDXLRefinerModelLoaderOutput - * @description SDXL refiner model loader output - */ - SDXLRefinerModelLoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; - /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip2: components["schemas"]["CLIPField"]; - /** - * VAE - * @description VAE - */ - vae: components["schemas"]["VAEField"]; - /** - * type - * @default sdxl_refiner_model_loader_output - * @constant - */ - type: "sdxl_refiner_model_loader_output"; - }; /** * SQLiteDirection * @enum {string} @@ -9432,9 +9298,15 @@ export type components = { * @description Saves an image. Unlike an image primitive, this invocation stores a copy of the image. */ SaveImageInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -9453,7 +9325,10 @@ export type components = { * @default false */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * type @@ -9484,11 +9359,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Latents tensor */ + /** + * @description Latents tensor + * @default null + */ latents?: components["schemas"]["LatentsField"]; /** * Scale Factor * @description The factor by which to scale + * @default null */ scale_factor?: number; /** @@ -9547,21 +9426,6 @@ export type components = { */ type: "scheduler"; }; - /** SchedulerOutput */ - SchedulerOutput: { - /** - * Scheduler - * @description Scheduler to use during inference - * @enum {string} - */ - scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; - /** - * type - * @default scheduler_output - * @constant - */ - type: "scheduler_output"; - }; /** * SchedulerPredictionType * @description Scheduler prediction type. 
@@ -9593,11 +9457,13 @@ export type components = { /** * UNet * @description UNet (scheduler, LoRAs) + * @default null */ unet?: components["schemas"]["UNetField"] | null; /** * VAE * @description VAE model to load + * @default null */ vae?: components["schemas"]["VAEField"] | null; /** @@ -9619,38 +9485,20 @@ export type components = { */ type: "seamless"; }; - /** - * SeamlessModeOutput - * @description Modified Seamless Model output - */ - SeamlessModeOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet: components["schemas"]["UNetField"] | null; - /** - * VAE - * @description VAE - * @default null - */ - vae: components["schemas"]["VAEField"] | null; - /** - * type - * @default seamless_output - * @constant - */ - type: "seamless_output"; - }; /** * Segment Anything Processor * @description Applies segment anything processing to image */ SegmentAnythingProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -9669,7 +9517,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Detect Resolution @@ -9942,7 +9793,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to show */ + /** + * @description The image to show + * @default null + */ image?: components["schemas"]["ImageField"]; /** * type @@ -10047,11 +9901,13 @@ export type components = { /** * Pre Start Value * @description value before easing start + * @default null */ pre_start_value?: number | null; /** * Post End Value * @description value after easing end + * @default null */ post_end_value?: number | null; /** @@ -10073,28 +9929,6 @@ export type components = { */ type: "step_param_easing"; }; - /** - * String2Output - * @description Base class for invocations that output two strings - */ - String2Output: { - /** - * String 1 - * @description string 1 - */ - string_1: string; - /** - * String 2 - * @description string 2 - */ - string_2: string; - /** - * type - * @default string_2_output - * @constant - */ - type: "string_2_output"; - }; /** * String Collection Primitive * @description A collection of string primitive values @@ -10130,23 +9964,6 @@ export type components = { */ type: "string_collection"; }; - /** - * StringCollectionOutput - * @description Base class for nodes that output a collection of strings - */ - StringCollectionOutput: { - /** - * Collection - * @description The output strings - */ - collection: string[]; - /** - * type - * @default string_collection_output - * @constant - */ - type: "string_collection_output"; - }; /** * String Primitive * @description A string primitive value @@ -10270,45 +10087,6 @@ export type components = { */ type: "string_join_three"; }; - /** - * StringOutput - * @description Base class for nodes that output a single string - */ - StringOutput: { - /** - * Value - * @description The output string - */ - value: string; - /** - * type - * @default string_output - * @constant - */ - type: "string_output"; - }; - /** - * StringPosNegOutput - * 
@description Base class for invocations that output a positive and negative string - */ - StringPosNegOutput: { - /** - * Positive String - * @description Positive string - */ - positive_string: string; - /** - * Negative String - * @description Negative string - */ - negative_string: string; - /** - * type - * @default string_pos_neg_output - * @constant - */ - type: "string_pos_neg_output"; - }; /** * String Replace * @description Replaces the search string with the replace string @@ -10490,10 +10268,7 @@ export type components = { * @description Model config for T2I. */ T2IAdapterConfig: { - /** - * @description Default settings for this model - * @default null - */ + /** @description Default settings for this model */ default_settings?: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** * Key @@ -10520,7 +10295,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -10533,13 +10307,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -10610,11 +10382,15 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The IP-Adapter image prompt. */ + /** + * @description The IP-Adapter image prompt. + * @default null + */ image?: components["schemas"]["ImageField"]; /** * T2I-Adapter Model * @description The T2I-Adapter model. + * @default null */ t2i_adapter_model?: components["schemas"]["ModelIdentifierField"]; /** @@ -10683,20 +10459,6 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; - /** T2IAdapterOutput */ - T2IAdapterOutput: { - /** - * T2I Adapter - * @description T2I-Adapter(s) to apply - */ - t2i_adapter: components["schemas"]["T2IAdapterField"]; - /** - * type - * @default t2i_adapter_output - * @constant - */ - type: "t2i_adapter_output"; - }; /** TBLR */ TBLR: { /** Top */ @@ -10749,7 +10511,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -10762,13 +10523,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -10814,7 +10573,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -10827,13 +10585,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
- * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -10861,9 +10617,15 @@ export type components = { * @description Tile resampler processor */ TileResamplerProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -10882,7 +10644,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Down Sampling Rate @@ -10919,7 +10684,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The tile to split into properties. */ + /** + * @description The tile to split into properties. + * @default null + */ tile?: components["schemas"]["Tile"]; /** * type @@ -10928,65 +10696,6 @@ export type components = { */ type: "tile_to_properties"; }; - /** TileToPropertiesOutput */ - TileToPropertiesOutput: { - /** - * Coords Left - * @description Left coordinate of the tile relative to its parent image. - */ - coords_left: number; - /** - * Coords Right - * @description Right coordinate of the tile relative to its parent image. - */ - coords_right: number; - /** - * Coords Top - * @description Top coordinate of the tile relative to its parent image. - */ - coords_top: number; - /** - * Coords Bottom - * @description Bottom coordinate of the tile relative to its parent image. - */ - coords_bottom: number; - /** - * Width - * @description The width of the tile. Equal to coords_right - coords_left. - */ - width: number; - /** - * Height - * @description The height of the tile. Equal to coords_bottom - coords_top. - */ - height: number; - /** - * Overlap Top - * @description Overlap between this tile and its top neighbor. - */ - overlap_top: number; - /** - * Overlap Bottom - * @description Overlap between this tile and its bottom neighbor. - */ - overlap_bottom: number; - /** - * Overlap Left - * @description Overlap between this tile and its left neighbor. - */ - overlap_left: number; - /** - * Overlap Right - * @description Overlap between this tile and its right neighbor. - */ - overlap_right: number; - /** - * type - * @default tile_to_properties_output - * @constant - */ - type: "tile_to_properties_output"; - }; /** TileWithImage */ TileWithImage: { tile: components["schemas"]["Tile"]; @@ -11008,29 +10717,9 @@ export type components = { * @description Axes("x" and "y") to which apply seamless */ seamless_axes?: string[]; - /** - * @description FreeU configuration - * @default null - */ + /** @description FreeU configuration */ freeu_config?: components["schemas"]["FreeUConfig"] | null; }; - /** - * UNetOutput - * @description Base class for invocations that output a UNet field. - */ - UNetOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; - /** - * type - * @default unet_output - * @constant - */ - type: "unet_output"; - }; /** * URLModelSource * @description A generic URL point to a checkpoint file. 
@@ -11055,9 +10744,15 @@ export type components = { * @description Applies an unsharp mask filter to an image */ UnsharpMaskInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -11076,7 +10771,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to use */ + /** + * @description The image to use + * @default null + */ image?: components["schemas"]["ImageField"]; /** * Radius @@ -11140,7 +10838,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -11153,13 +10850,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -11215,7 +10910,6 @@ export type components = { /** * Description * @description Model description - * @default null */ description?: string | null; /** @@ -11228,13 +10922,11 @@ export type components = { /** * Source Api Response * @description The original API response from the source, as stringified JSON. - * @default null */ source_api_response?: string | null; /** * Cover Image * @description Url for image to preview model - * @default null */ cover_image?: string | null; /** @@ -11285,6 +10977,7 @@ export type components = { /** * VAE * @description VAE model to load + * @default null */ vae_model?: components["schemas"]["ModelIdentifierField"]; /** @@ -11294,23 +10987,6 @@ export type components = { */ type: "vae_loader"; }; - /** - * VAEOutput - * @description Base class for invocations that output a VAE field - */ - VAEOutput: { - /** - * VAE - * @description VAE - */ - vae: components["schemas"]["VAEField"]; - /** - * type - * @default vae_output - * @constant - */ - type: "vae_output"; - }; /** ValidationError */ ValidationError: { /** Location */ @@ -11550,9 +11226,15 @@ export type components = { * @description Applies Zoe depth processing to image */ ZoeDepthImageProcessorInvocation: { - /** @description The board to save the image to */ + /** + * @description The board to save the image to + * @default null + */ board?: components["schemas"]["BoardField"] | null; - /** @description Optional metadata to be saved with the image */ + /** + * @description Optional metadata to be saved with the image + * @default null + */ metadata?: components["schemas"]["MetadataField"] | null; /** * Id @@ -11571,7 +11253,10 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description The image to process */ + /** + * @description The image to process + * @default null + */ image?: components["schemas"]["ImageField"]; /** * type @@ -11581,331 +11266,1104 @@ export type components = { type: "zoe_depth_image_processor"; }; /** - * Classification - * @description The classification of an Invocation. - * - `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation. 
- * - `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term. - * - `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation. - * @enum {string} + * FloatCollectionOutput + * @description Base class for nodes that output a collection of floats */ - Classification: "stable" | "beta" | "prototype"; - /** - * FieldKind - * @description The kind of field. - * - `Input`: An input field on a node. - * - `Output`: An output field on a node. - * - `Internal`: A field which is treated as an input, but cannot be used in node definitions. Metadata is - * one example. It is provided to nodes via the WithMetadata class, and we want to reserve the field name - * "metadata" for this on all nodes. `FieldKind` is used to short-circuit the field name validation logic, - * allowing "metadata" for that field. - * - `NodeAttribute`: The field is a node attribute. These are fields which are not inputs or outputs, - * but which are used to store information about the node. For example, the `id` and `type` fields are node - * attributes. - * - * The presence of this in `json_schema_extra["field_kind"]` is used when initializing node schemas on app - * startup, and when generating the OpenAPI schema for the workflow editor. - * @enum {string} - */ - FieldKind: "input" | "output" | "internal" | "node_attribute"; - /** - * Input - * @description The type of input a field accepts. - * - `Input.Direct`: The field must have its value provided directly, when the invocation and field are instantiated. - * - `Input.Connection`: The field must have its value provided by a connection. - * - `Input.Any`: The field may have its value provided either directly or by a connection. - * @enum {string} - */ - Input: "connection" | "direct" | "any"; - /** - * InputFieldJSONSchemaExtra - * @description Extra attributes to be added to input fields and their OpenAPI schema. Used during graph execution, - * and by the workflow editor during schema parsing and UI rendering. - */ - InputFieldJSONSchemaExtra: { - input: components["schemas"]["Input"]; - /** Orig Required */ - orig_required: boolean; - field_kind: components["schemas"]["FieldKind"]; + FloatCollectionOutput: { /** - * Default - * @default null + * Collection + * @description The float collection */ - default: unknown; + collection: number[]; /** - * Orig Default - * @default null + * type + * @default float_collection_output + * @constant */ - orig_default: unknown; - /** - * Ui Hidden - * @default false - */ - ui_hidden: boolean; - /** @default null */ - ui_type: components["schemas"]["UIType"] | null; - /** @default null */ - ui_component: components["schemas"]["UIComponent"] | null; - /** - * Ui Order - * @default null - */ - ui_order: number | null; - /** - * Ui Choice Labels - * @default null - */ - ui_choice_labels: { - [key: string]: string; - } | null; + type: "float_collection_output"; }; /** - * OutputFieldJSONSchemaExtra - * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor - * during schema parsing and UI rendering. 
+ * ConditioningOutput + * @description Base class for nodes that output a single conditioning tensor */ - OutputFieldJSONSchemaExtra: { - field_kind: components["schemas"]["FieldKind"]; - /** Ui Hidden */ - ui_hidden: boolean; - ui_type: components["schemas"]["UIType"] | null; - /** Ui Order */ - ui_order: number | null; + ConditioningOutput: { + /** @description Conditioning tensor */ + conditioning: components["schemas"]["ConditioningField"]; + /** + * type + * @default conditioning_output + * @constant + */ + type: "conditioning_output"; }; /** - * ProgressImage - * @description The progress image sent intermittently during processing + * IntegerOutput + * @description Base class for nodes that output a single integer */ - ProgressImage: { + IntegerOutput: { + /** + * Value + * @description The output integer + */ + value: number; + /** + * type + * @default integer_output + * @constant + */ + type: "integer_output"; + }; + /** + * DenoiseMaskOutput + * @description Base class for nodes that output a single image + */ + DenoiseMaskOutput: { + /** @description Mask for denoise model run */ + denoise_mask: components["schemas"]["DenoiseMaskField"]; + /** + * type + * @default denoise_mask_output + * @constant + */ + type: "denoise_mask_output"; + }; + /** SchedulerOutput */ + SchedulerOutput: { + /** + * Scheduler + * @description Scheduler to use during inference + * @enum {string} + */ + scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; + /** + * type + * @default scheduler_output + * @constant + */ + type: "scheduler_output"; + }; + /** + * IntegerCollectionOutput + * @description Base class for nodes that output a collection of integers + */ + IntegerCollectionOutput: { + /** + * Collection + * @description The int collection + */ + collection: number[]; + /** + * type + * @default integer_collection_output + * @constant + */ + type: "integer_collection_output"; + }; + /** PairTileImageOutput */ + PairTileImageOutput: { + /** @description A tile description with its corresponding image. */ + tile_with_image: components["schemas"]["TileWithImage"]; + /** + * type + * @default pair_tile_image_output + * @constant + */ + type: "pair_tile_image_output"; + }; + /** + * GradientMaskOutput + * @description Outputs a denoise mask and an image representing the total gradient of the mask. + */ + GradientMaskOutput: { + /** @description Mask for denoise model run */ + denoise_mask: components["schemas"]["DenoiseMaskField"]; + /** @description Image representing the total gradient area of the mask. For paste-back purposes. 
*/ + expanded_mask_area: components["schemas"]["ImageField"]; + /** + * type + * @default gradient_mask_output + * @constant + */ + type: "gradient_mask_output"; + }; + /** CollectInvocationOutput */ + CollectInvocationOutput: { + /** + * Collection + * @description The collection of input items + */ + collection: unknown[]; + /** + * type + * @default collect_output + * @constant + */ + type: "collect_output"; + }; + /** + * CLIPSkipInvocationOutput + * @description CLIP skip node output + */ + CLIPSkipInvocationOutput: { + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip: components["schemas"]["CLIPField"] | null; + /** + * type + * @default clip_skip_output + * @constant + */ + type: "clip_skip_output"; + }; + /** + * ConditioningCollectionOutput + * @description Base class for nodes that output a collection of conditioning tensors + */ + ConditioningCollectionOutput: { + /** + * Collection + * @description The output conditioning tensors + */ + collection: components["schemas"]["ConditioningField"][]; + /** + * type + * @default conditioning_collection_output + * @constant + */ + type: "conditioning_collection_output"; + }; + /** + * LoRALoaderOutput + * @description Model loader output + */ + LoRALoaderOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + * @default null + */ + unet: components["schemas"]["UNetField"] | null; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip: components["schemas"]["CLIPField"] | null; + /** + * type + * @default lora_loader_output + * @constant + */ + type: "lora_loader_output"; + }; + /** + * FaceOffOutput + * @description Base class for FaceOff Output + */ + FaceOffOutput: { + /** @description The output image */ + image: components["schemas"]["ImageField"]; /** * Width - * @description The effective width of the image in pixels + * @description The width of the image in pixels */ width: number; /** * Height - * @description The effective height of the image in pixels + * @description The height of the image in pixels */ height: number; /** - * Dataurl - * @description The image data as a b64 data URL + * type + * @default face_off_output + * @constant */ - dataURL: string; + type: "face_off_output"; + /** @description The output mask */ + mask: components["schemas"]["ImageField"]; + /** + * X + * @description The x coordinate of the bounding box's left side + */ + x: number; + /** + * Y + * @description The y coordinate of the bounding box's top side + */ + y: number; }; /** - * UIComponent - * @description The type of UI component to use for a field, used to override the default components, which are - * inferred from the field type. - * @enum {string} + * FaceMaskOutput + * @description Base class for FaceMask output */ - UIComponent: "none" | "textarea" | "slider"; - /** - * UIConfigBase - * @description Provides additional node configuration to the UI. - * This is used internally by the @invocation decorator logic. Do not use this directly. 
- */ - UIConfigBase: { + FaceMaskOutput: { + /** @description The output image */ + image: components["schemas"]["ImageField"]; /** - * Tags - * @description The node's tags + * Width + * @description The width of the image in pixels */ - tags: string[] | null; + width: number; /** - * Title - * @description The node's display name - * @default null + * Height + * @description The height of the image in pixels */ - title: string | null; + height: number; /** - * Category - * @description The node's category - * @default null + * type + * @default face_mask_output + * @constant */ - category: string | null; - /** - * Version - * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". - */ - version: string; - /** - * Node Pack - * @description Whether or not this is a custom node - * @default null - */ - node_pack: string | null; - /** - * @description The node's classification - * @default stable - */ - classification: components["schemas"]["Classification"]; + type: "face_mask_output"; + /** @description The output mask */ + mask: components["schemas"]["ImageField"]; }; /** - * UIType - * @description Type hints for the UI for situations in which the field type is not enough to infer the correct UI type. - * - * - Model Fields - * The most common node-author-facing use will be for model fields. Internally, there is no difference - * between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the - * base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that - * the field is an SDXL main model field. - * - * - Any Field - * We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to - * indicate that the field accepts any type. Use with caution. This cannot be used on outputs. - * - * - Scheduler Field - * Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field. - * - * - Internal Fields - * Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate - * handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These - * should not be used by node authors. - * - * - DEPRECATED Fields - * These types are deprecated and should not be used by node authors. A warning will be logged if one is - * used, and the type will be ignored. They are included here for backwards compatibility. 
- * @enum {string} + * LatentsCollectionOutput + * @description Base class for nodes that output a collection of latents tensors */ - UIType: "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VAEModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "T2IAdapterModelField" | "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_IsIntermediate" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict"; + LatentsCollectionOutput: { + /** + * Collection + * @description Latents tensor + */ + collection: components["schemas"]["LatentsField"][]; + /** + * type + * @default latents_collection_output + * @constant + */ + type: "latents_collection_output"; + }; + /** + * ImageOutput + * @description Base class for nodes that output a single image + */ + ImageOutput: { + /** @description The output image */ + image: components["schemas"]["ImageField"]; + /** + * Width + * @description The width of the image in pixels + */ + width: number; + /** + * Height + * @description The height of the image in pixels + */ + height: number; + /** + * type + * @default image_output + * @constant + */ + type: "image_output"; + }; + /** + * ColorOutput + * @description Base class for nodes that output a single color + */ + ColorOutput: { + /** @description The output color */ + color: components["schemas"]["ColorField"]; + /** + * type + * @default color_output + * @constant + */ + type: "color_output"; + }; + /** + * VAEOutput + * @description Base class for invocations that output a VAE field + */ + VAEOutput: { + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * type + * @default vae_output + * @constant + */ + type: "vae_output"; + }; + /** + * LoRASelectorOutput + * @description Model loader output + */ + LoRASelectorOutput: { + /** + * LoRA + * @description LoRA model and weight + */ + lora: components["schemas"]["LoRAField"]; + /** + * type + * @default lora_selector_output + * @constant + */ + type: "lora_selector_output"; + }; + /** + * FloatOutput + * @description Base class for nodes that output a single float + */ + FloatOutput: { + /** + * Value + * @description The output float + */ + value: number; + /** + * type + * @default float_output + * @constant + */ + type: "float_output"; + }; + /** + * ModelLoaderOutput + * @description Model loader output + */ + ModelLoaderOutput: { + /** + * VAE + * @description VAE + */ 
+ vae: components["schemas"]["VAEField"]; + /** + * type + * @default model_loader_output + * @constant + */ + type: "model_loader_output"; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip: components["schemas"]["CLIPField"]; + /** + * UNet + * @description UNet (scheduler, LoRAs) + */ + unet: components["schemas"]["UNetField"]; + }; + /** + * ModelIdentifierOutput + * @description Model identifier output + */ + ModelIdentifierOutput: { + /** + * Model + * @description Model identifier + */ + model: components["schemas"]["ModelIdentifierField"]; + /** + * type + * @default model_identifier_output + * @constant + */ + type: "model_identifier_output"; + }; + /** + * StringOutput + * @description Base class for nodes that output a single string + */ + StringOutput: { + /** + * Value + * @description The output string + */ + value: string; + /** + * type + * @default string_output + * @constant + */ + type: "string_output"; + }; + /** + * StringPosNegOutput + * @description Base class for invocations that output a positive and negative string + */ + StringPosNegOutput: { + /** + * Positive String + * @description Positive string + */ + positive_string: string; + /** + * Negative String + * @description Negative string + */ + negative_string: string; + /** + * type + * @default string_pos_neg_output + * @constant + */ + type: "string_pos_neg_output"; + }; + /** MetadataOutput */ + MetadataOutput: { + /** @description Metadata Dict */ + metadata: components["schemas"]["MetadataField"]; + /** + * type + * @default metadata_output + * @constant + */ + type: "metadata_output"; + }; + /** + * CLIPOutput + * @description Base class for invocations that output a CLIP field + */ + CLIPOutput: { + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip: components["schemas"]["CLIPField"]; + /** + * type + * @default clip_output + * @constant + */ + type: "clip_output"; + }; + /** + * IterateInvocationOutput + * @description Used to connect iteration outputs. Will be expanded to a specific output. 
+ */ + IterateInvocationOutput: { + /** + * Collection Item + * @description The item being iterated over + */ + item: unknown; + /** + * Index + * @description The index of the item + */ + index: number; + /** + * Total + * @description The total number of items + */ + total: number; + /** + * type + * @default iterate_output + * @constant + */ + type: "iterate_output"; + }; + /** + * SDXLRefinerModelLoaderOutput + * @description SDXL refiner model loader output + */ + SDXLRefinerModelLoaderOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + */ + unet: components["schemas"]["UNetField"]; + /** + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip2: components["schemas"]["CLIPField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * type + * @default sdxl_refiner_model_loader_output + * @constant + */ + type: "sdxl_refiner_model_loader_output"; + }; + /** + * BooleanCollectionOutput + * @description Base class for nodes that output a collection of booleans + */ + BooleanCollectionOutput: { + /** + * Collection + * @description The output boolean collection + */ + collection: boolean[]; + /** + * type + * @default boolean_collection_output + * @constant + */ + type: "boolean_collection_output"; + }; + /** + * LatentsOutput + * @description Base class for nodes that output a single latents tensor + */ + LatentsOutput: { + /** @description Latents tensor */ + latents: components["schemas"]["LatentsField"]; + /** + * Width + * @description Width of output (px) + */ + width: number; + /** + * Height + * @description Height of output (px) + */ + height: number; + /** + * type + * @default latents_output + * @constant + */ + type: "latents_output"; + }; + /** + * SDXLLoRALoaderOutput + * @description SDXL LoRA Loader Output + */ + SDXLLoRALoaderOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + * @default null + */ + unet: components["schemas"]["UNetField"] | null; + /** + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip: components["schemas"]["CLIPField"] | null; + /** + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip2: components["schemas"]["CLIPField"] | null; + /** + * type + * @default sdxl_lora_loader_output + * @constant + */ + type: "sdxl_lora_loader_output"; + }; + /** + * BooleanOutput + * @description Base class for nodes that output a single boolean + */ + BooleanOutput: { + /** + * Value + * @description The output boolean + */ + value: boolean; + /** + * type + * @default boolean_output + * @constant + */ + type: "boolean_output"; + }; + /** + * NoiseOutput + * @description Invocation noise output + */ + NoiseOutput: { + /** @description Noise tensor */ + noise: components["schemas"]["LatentsField"]; + /** + * Width + * @description Width of output (px) + */ + width: number; + /** + * Height + * @description Height of output (px) + */ + height: number; + /** + * type + * @default noise_output + * @constant + */ + type: "noise_output"; + }; + /** CalculateImageTilesOutput */ + CalculateImageTilesOutput: { + /** + * Tiles + * @description The tiles coordinates that cover a particular image shape. 
+ */ + tiles: components["schemas"]["Tile"][]; + /** + * type + * @default calculate_image_tiles_output + * @constant + */ + type: "calculate_image_tiles_output"; + }; + /** + * String2Output + * @description Base class for invocations that output two strings + */ + String2Output: { + /** + * String 1 + * @description string 1 + */ + string_1: string; + /** + * String 2 + * @description string 2 + */ + string_2: string; + /** + * type + * @default string_2_output + * @constant + */ + type: "string_2_output"; + }; + /** T2IAdapterOutput */ + T2IAdapterOutput: { + /** + * T2I Adapter + * @description T2I-Adapter(s) to apply + */ + t2i_adapter: components["schemas"]["T2IAdapterField"]; + /** + * type + * @default t2i_adapter_output + * @constant + */ + type: "t2i_adapter_output"; + }; + /** + * UNetOutput + * @description Base class for invocations that output a UNet field. + */ + UNetOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + */ + unet: components["schemas"]["UNetField"]; + /** + * type + * @default unet_output + * @constant + */ + type: "unet_output"; + }; + /** TileToPropertiesOutput */ + TileToPropertiesOutput: { + /** + * Coords Left + * @description Left coordinate of the tile relative to its parent image. + */ + coords_left: number; + /** + * Coords Right + * @description Right coordinate of the tile relative to its parent image. + */ + coords_right: number; + /** + * Coords Top + * @description Top coordinate of the tile relative to its parent image. + */ + coords_top: number; + /** + * Coords Bottom + * @description Bottom coordinate of the tile relative to its parent image. + */ + coords_bottom: number; + /** + * Width + * @description The width of the tile. Equal to coords_right - coords_left. + */ + width: number; + /** + * Height + * @description The height of the tile. Equal to coords_bottom - coords_top. + */ + height: number; + /** + * Overlap Top + * @description Overlap between this tile and its top neighbor. + */ + overlap_top: number; + /** + * Overlap Bottom + * @description Overlap between this tile and its bottom neighbor. + */ + overlap_bottom: number; + /** + * Overlap Left + * @description Overlap between this tile and its left neighbor. + */ + overlap_left: number; + /** + * Overlap Right + * @description Overlap between this tile and its right neighbor. + */ + overlap_right: number; + /** + * type + * @default tile_to_properties_output + * @constant + */ + type: "tile_to_properties_output"; + }; + /** + * ColorCollectionOutput + * @description Base class for nodes that output a collection of colors + */ + ColorCollectionOutput: { + /** + * Collection + * @description The output colors + */ + collection: components["schemas"]["ColorField"][]; + /** + * type + * @default color_collection_output + * @constant + */ + type: "color_collection_output"; + }; + /** + * ImageCollectionOutput + * @description Base class for nodes that output a collection of images + */ + ImageCollectionOutput: { + /** + * Collection + * @description The output images + */ + collection: components["schemas"]["ImageField"][]; + /** + * type + * @default image_collection_output + * @constant + */ + type: "image_collection_output"; + }; + /** + * MaskOutput + * @description A torch mask tensor. + */ + MaskOutput: { + /** @description The mask. */ + mask: components["schemas"]["TensorField"]; + /** + * Width + * @description The width of the mask in pixels. + */ + width: number; + /** + * Height + * @description The height of the mask in pixels. 
+ */ + height: number; + /** + * type + * @default mask_output + * @constant + */ + type: "mask_output"; + }; + /** + * SeamlessModeOutput + * @description Modified Seamless Model output + */ + SeamlessModeOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + * @default null + */ + unet: components["schemas"]["UNetField"] | null; + /** + * VAE + * @description VAE + * @default null + */ + vae: components["schemas"]["VAEField"] | null; + /** + * type + * @default seamless_output + * @constant + */ + type: "seamless_output"; + }; + /** + * MetadataItemOutput + * @description Metadata Item Output + */ + MetadataItemOutput: { + /** @description Metadata Item */ + item: components["schemas"]["MetadataItemField"]; + /** + * type + * @default metadata_item_output + * @constant + */ + type: "metadata_item_output"; + }; + /** + * StringCollectionOutput + * @description Base class for nodes that output a collection of strings + */ + StringCollectionOutput: { + /** + * Collection + * @description The output strings + */ + collection: string[]; + /** + * type + * @default string_collection_output + * @constant + */ + type: "string_collection_output"; + }; + /** IPAdapterOutput */ + IPAdapterOutput: { + /** + * IP-Adapter + * @description IP-Adapter to apply + */ + ip_adapter: components["schemas"]["IPAdapterField"]; + /** + * type + * @default ip_adapter_output + * @constant + */ + type: "ip_adapter_output"; + }; + /** + * IdealSizeOutput + * @description Base class for invocations that output an image + */ + IdealSizeOutput: { + /** + * Width + * @description The ideal width of the image (in pixels) + */ + width: number; + /** + * Height + * @description The ideal height of the image (in pixels) + */ + height: number; + /** + * type + * @default ideal_size_output + * @constant + */ + type: "ideal_size_output"; + }; + /** + * ControlOutput + * @description node output for ControlNet info + */ + ControlOutput: { + /** @description ControlNet(s) to apply */ + control: components["schemas"]["ControlField"]; + /** + * type + * @default control_output + * @constant + */ + type: "control_output"; + }; + /** + * SDXLModelLoaderOutput + * @description SDXL base model loader output + */ + SDXLModelLoaderOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + */ + unet: components["schemas"]["UNetField"]; + /** + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip: components["schemas"]["CLIPField"]; + /** + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip2: components["schemas"]["CLIPField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * type + * @default sdxl_model_loader_output + * @constant + */ + type: "sdxl_model_loader_output"; + }; InvocationOutputMap: { - float_collection: components["schemas"]["FloatCollectionOutput"]; - infill_patchmatch: components["schemas"]["ImageOutput"]; - lora_selector: components["schemas"]["LoRASelectorOutput"]; - img_conv: components["schemas"]["ImageOutput"]; - midas_depth_image_processor: components["schemas"]["ImageOutput"]; - invert_tensor_mask: components["schemas"]["MaskOutput"]; - integer: components["schemas"]["IntegerOutput"]; - color_map_image_processor: components["schemas"]["ImageOutput"]; - color_correct: components["schemas"]["ImageOutput"]; - string_collection: components["schemas"]["StringCollectionOutput"]; - merge_metadata: components["schemas"]["MetadataOutput"]; - img_hue_adjust: 
components["schemas"]["ImageOutput"]; - string_split_neg: components["schemas"]["StringPosNegOutput"]; - face_identifier: components["schemas"]["ImageOutput"]; controlnet: components["schemas"]["ControlOutput"]; - float_to_int: components["schemas"]["IntegerOutput"]; - lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; - freeu: components["schemas"]["UNetOutput"]; + rand_int: components["schemas"]["IntegerOutput"]; + clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; + color: components["schemas"]["ColorOutput"]; + iterate: components["schemas"]["IterateInvocationOutput"]; + tile_image_processor: components["schemas"]["ImageOutput"]; + depth_anything_image_processor: components["schemas"]["ImageOutput"]; + lora_selector: components["schemas"]["LoRASelectorOutput"]; + crop_latents: components["schemas"]["LatentsOutput"]; + img_resize: components["schemas"]["ImageOutput"]; + mask_combine: components["schemas"]["ImageOutput"]; + prompt_from_file: components["schemas"]["StringCollectionOutput"]; + ideal_size: components["schemas"]["IdealSizeOutput"]; + image_mask_to_tensor: components["schemas"]["MaskOutput"]; + rand_float: components["schemas"]["FloatOutput"]; + infill_rgba: components["schemas"]["ImageOutput"]; + color_correct: components["schemas"]["ImageOutput"]; + ip_adapter: components["schemas"]["IPAdapterOutput"]; + rectangle_mask: components["schemas"]["MaskOutput"]; img_scale: components["schemas"]["ImageOutput"]; - calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; - sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - unsharp_mask: components["schemas"]["ImageOutput"]; + boolean_collection: components["schemas"]["BooleanCollectionOutput"]; + calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; + esrgan: components["schemas"]["ImageOutput"]; + normalbae_image_processor: components["schemas"]["ImageOutput"]; + img_ilerp: components["schemas"]["ImageOutput"]; + img_mul: components["schemas"]["ImageOutput"]; + dynamic_prompt: components["schemas"]["StringCollectionOutput"]; + cv_inpaint: components["schemas"]["ImageOutput"]; + lscale: components["schemas"]["LatentsOutput"]; + range_of_size: components["schemas"]["IntegerCollectionOutput"]; + color_map_image_processor: components["schemas"]["ImageOutput"]; + conditioning: components["schemas"]["ConditioningOutput"]; dw_openpose_image_processor: components["schemas"]["ImageOutput"]; img_blur: components["schemas"]["ImageOutput"]; - infill_cv2: components["schemas"]["ImageOutput"]; - face_mask_detection: components["schemas"]["FaceMaskOutput"]; - t2i_adapter: components["schemas"]["T2IAdapterOutput"]; - core_metadata: components["schemas"]["MetadataOutput"]; - rand_float: components["schemas"]["FloatOutput"]; - mediapipe_face_processor: components["schemas"]["ImageOutput"]; - img_resize: components["schemas"]["ImageOutput"]; - latents_collection: components["schemas"]["LatentsCollectionOutput"]; - float_math: components["schemas"]["FloatOutput"]; - range: components["schemas"]["IntegerCollectionOutput"]; - zoe_depth_image_processor: components["schemas"]["ImageOutput"]; - image_mask_to_tensor: components["schemas"]["MaskOutput"]; - sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; - i2l: components["schemas"]["LatentsOutput"]; - integer_math: components["schemas"]["IntegerOutput"]; - sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; - seamless: components["schemas"]["SeamlessModeOutput"]; save_image: 
components["schemas"]["ImageOutput"]; - lresize: components["schemas"]["LatentsOutput"]; - color: components["schemas"]["ColorOutput"]; - img_chan: components["schemas"]["ImageOutput"]; - l2i: components["schemas"]["ImageOutput"]; - lblend: components["schemas"]["LatentsOutput"]; - img_watermark: components["schemas"]["ImageOutput"]; - image: components["schemas"]["ImageOutput"]; - lineart_anime_image_processor: components["schemas"]["ImageOutput"]; - sub: components["schemas"]["IntegerOutput"]; - rand_int: components["schemas"]["IntegerOutput"]; - main_model_loader: components["schemas"]["ModelLoaderOutput"]; - calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; - face_off: components["schemas"]["FaceOffOutput"]; - image_collection: components["schemas"]["ImageCollectionOutput"]; - mlsd_image_processor: components["schemas"]["ImageOutput"]; - boolean_collection: components["schemas"]["BooleanCollectionOutput"]; string: components["schemas"]["StringOutput"]; - mask_from_id: components["schemas"]["ImageOutput"]; - noise: components["schemas"]["NoiseOutput"]; - img_mul: components["schemas"]["ImageOutput"]; - pair_tile_image: components["schemas"]["PairTileImageOutput"]; - content_shuffle_image_processor: components["schemas"]["ImageOutput"]; - range_of_size: components["schemas"]["IntegerCollectionOutput"]; - latents: components["schemas"]["LatentsOutput"]; - add: components["schemas"]["IntegerOutput"]; - div: components["schemas"]["IntegerOutput"]; - blank_image: components["schemas"]["ImageOutput"]; - dynamic_prompt: components["schemas"]["StringCollectionOutput"]; - mask_combine: components["schemas"]["ImageOutput"]; - img_nsfw: components["schemas"]["ImageOutput"]; - sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; + img_chan: components["schemas"]["ImageOutput"]; step_param_easing: components["schemas"]["FloatCollectionOutput"]; + midas_depth_image_processor: components["schemas"]["ImageOutput"]; + unsharp_mask: components["schemas"]["ImageOutput"]; + sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + integer: components["schemas"]["IntegerOutput"]; img_channel_offset: components["schemas"]["ImageOutput"]; - create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; - rectangle_mask: components["schemas"]["MaskOutput"]; - vae_loader: components["schemas"]["VAEOutput"]; - integer_collection: components["schemas"]["IntegerCollectionOutput"]; - leres_image_processor: components["schemas"]["ImageOutput"]; - lineart_image_processor: components["schemas"]["ImageOutput"]; + image: components["schemas"]["ImageOutput"]; round_float: components["schemas"]["FloatOutput"]; - infill_lama: components["schemas"]["ImageOutput"]; - string_join_three: components["schemas"]["StringOutput"]; - collect: components["schemas"]["CollectInvocationOutput"]; - boolean: components["schemas"]["BooleanOutput"]; - create_gradient_mask: components["schemas"]["GradientMaskOutput"]; - string_split: components["schemas"]["String2Output"]; - show_image: components["schemas"]["ImageOutput"]; - mask_edge: components["schemas"]["ImageOutput"]; + mediapipe_face_processor: components["schemas"]["ImageOutput"]; + infill_cv2: components["schemas"]["ImageOutput"]; random_range: components["schemas"]["IntegerCollectionOutput"]; - float_range: components["schemas"]["FloatCollectionOutput"]; - conditioning: components["schemas"]["ConditioningOutput"]; - cv_inpaint: components["schemas"]["ImageOutput"]; - string_join: components["schemas"]["StringOutput"]; 
- sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; - lora_loader: components["schemas"]["LoRALoaderOutput"]; - compel: components["schemas"]["ConditioningOutput"]; - tomask: components["schemas"]["ImageOutput"]; - esrgan: components["schemas"]["ImageOutput"]; - denoise_latents: components["schemas"]["LatentsOutput"]; - img_ilerp: components["schemas"]["ImageOutput"]; - crop_latents: components["schemas"]["LatentsOutput"]; - prompt_from_file: components["schemas"]["StringCollectionOutput"]; - merge_tiles_to_image: components["schemas"]["ImageOutput"]; - tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; - infill_tile: components["schemas"]["ImageOutput"]; - segment_anything_processor: components["schemas"]["ImageOutput"]; - lscale: components["schemas"]["LatentsOutput"]; - scheduler: components["schemas"]["SchedulerOutput"]; - ideal_size: components["schemas"]["IdealSizeOutput"]; - img_paste: components["schemas"]["ImageOutput"]; - img_channel_multiply: components["schemas"]["ImageOutput"]; - mul: components["schemas"]["IntegerOutput"]; - model_identifier: components["schemas"]["ModelIdentifierOutput"]; - depth_anything_image_processor: components["schemas"]["ImageOutput"]; - metadata_item: components["schemas"]["MetadataItemOutput"]; - alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; - tile_image_processor: components["schemas"]["ImageOutput"]; - pidi_image_processor: components["schemas"]["ImageOutput"]; - calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; - canny_image_processor: components["schemas"]["ImageOutput"]; - hed_image_processor: components["schemas"]["ImageOutput"]; - metadata: components["schemas"]["MetadataOutput"]; - clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; - canvas_paste_back: components["schemas"]["ImageOutput"]; - sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - iterate: components["schemas"]["IterateInvocationOutput"]; - heuristic_resize: components["schemas"]["ImageOutput"]; - infill_rgba: components["schemas"]["ImageOutput"]; - ip_adapter: components["schemas"]["IPAdapterOutput"]; - img_pad_crop: components["schemas"]["ImageOutput"]; + float_math: components["schemas"]["FloatOutput"]; + sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; + string_collection: components["schemas"]["StringCollectionOutput"]; img_lerp: components["schemas"]["ImageOutput"]; - img_crop: components["schemas"]["ImageOutput"]; - float: components["schemas"]["FloatOutput"]; - conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; - normalbae_image_processor: components["schemas"]["ImageOutput"]; + face_off: components["schemas"]["FaceOffOutput"]; + sub: components["schemas"]["IntegerOutput"]; + merge_tiles_to_image: components["schemas"]["ImageOutput"]; + boolean: components["schemas"]["BooleanOutput"]; + integer_math: components["schemas"]["IntegerOutput"]; string_replace: components["schemas"]["StringOutput"]; + metadata: components["schemas"]["MetadataOutput"]; + invert_tensor_mask: components["schemas"]["MaskOutput"]; + img_conv: components["schemas"]["ImageOutput"]; + merge_metadata: components["schemas"]["MetadataOutput"]; + img_watermark: components["schemas"]["ImageOutput"]; + float_to_int: components["schemas"]["IntegerOutput"]; + float_range: components["schemas"]["FloatCollectionOutput"]; + string_split: components["schemas"]["String2Output"]; + float: components["schemas"]["FloatOutput"]; + blank_image: 
components["schemas"]["ImageOutput"]; + collect: components["schemas"]["CollectInvocationOutput"]; + infill_tile: components["schemas"]["ImageOutput"]; + seamless: components["schemas"]["SeamlessModeOutput"]; + create_gradient_mask: components["schemas"]["GradientMaskOutput"]; + lora_loader: components["schemas"]["LoRALoaderOutput"]; + zoe_depth_image_processor: components["schemas"]["ImageOutput"]; + string_join: components["schemas"]["StringOutput"]; + show_image: components["schemas"]["ImageOutput"]; + t2i_adapter: components["schemas"]["T2IAdapterOutput"]; + create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; + metadata_item: components["schemas"]["MetadataItemOutput"]; + main_model_loader: components["schemas"]["ModelLoaderOutput"]; + hed_image_processor: components["schemas"]["ImageOutput"]; + core_metadata: components["schemas"]["MetadataOutput"]; + conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; + content_shuffle_image_processor: components["schemas"]["ImageOutput"]; + lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; + model_identifier: components["schemas"]["ModelIdentifierOutput"]; + mlsd_image_processor: components["schemas"]["ImageOutput"]; + lresize: components["schemas"]["LatentsOutput"]; + tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; + div: components["schemas"]["IntegerOutput"]; + calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; + infill_lama: components["schemas"]["ImageOutput"]; + tomask: components["schemas"]["ImageOutput"]; + canny_image_processor: components["schemas"]["ImageOutput"]; + img_hue_adjust: components["schemas"]["ImageOutput"]; + i2l: components["schemas"]["LatentsOutput"]; + float_collection: components["schemas"]["FloatCollectionOutput"]; + integer_collection: components["schemas"]["IntegerCollectionOutput"]; + face_identifier: components["schemas"]["ImageOutput"]; + sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; + compel: components["schemas"]["ConditioningOutput"]; + mask_edge: components["schemas"]["ImageOutput"]; + face_mask_detection: components["schemas"]["FaceMaskOutput"]; + scheduler: components["schemas"]["SchedulerOutput"]; + image_collection: components["schemas"]["ImageCollectionOutput"]; + infill_patchmatch: components["schemas"]["ImageOutput"]; + freeu: components["schemas"]["UNetOutput"]; + pidi_image_processor: components["schemas"]["ImageOutput"]; + img_channel_multiply: components["schemas"]["ImageOutput"]; + sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; + l2i: components["schemas"]["ImageOutput"]; + segment_anything_processor: components["schemas"]["ImageOutput"]; + lblend: components["schemas"]["LatentsOutput"]; + vae_loader: components["schemas"]["VAEOutput"]; + canvas_paste_back: components["schemas"]["ImageOutput"]; + leres_image_processor: components["schemas"]["ImageOutput"]; + mask_from_id: components["schemas"]["ImageOutput"]; + latents_collection: components["schemas"]["LatentsCollectionOutput"]; + range: components["schemas"]["IntegerCollectionOutput"]; + img_pad_crop: components["schemas"]["ImageOutput"]; + img_crop: components["schemas"]["ImageOutput"]; + img_nsfw: components["schemas"]["ImageOutput"]; + denoise_latents: components["schemas"]["LatentsOutput"]; + mul: components["schemas"]["IntegerOutput"]; + noise: components["schemas"]["NoiseOutput"]; + sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + alpha_mask_to_tensor: 
components["schemas"]["MaskOutput"]; + lineart_anime_image_processor: components["schemas"]["ImageOutput"]; + img_paste: components["schemas"]["ImageOutput"]; + latents: components["schemas"]["LatentsOutput"]; + calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; + string_split_neg: components["schemas"]["StringPosNegOutput"]; + lineart_image_processor: components["schemas"]["ImageOutput"]; + heuristic_resize: components["schemas"]["ImageOutput"]; + add: components["schemas"]["IntegerOutput"]; + string_join_three: components["schemas"]["StringOutput"]; + sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; + pair_tile_image: components["schemas"]["PairTileImageOutput"]; }; /** * BatchEnqueuedEvent @@ -12026,6 +12484,15 @@ export type components = { */ bulk_download_item_name: string; }; + /** + * Classification + * @description The classification of an Invocation. + * - `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation. + * - `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term. + * - `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation. + * @enum {string} + */ + Classification: "stable" | "beta" | "prototype"; /** * DownloadCancelledEvent * @description Event model for download_cancelled @@ -12147,35 +12614,74 @@ export type components = { download_path: string; }; /** - * BaseInvocation - * @description All invocations must use the `@invocation` decorator to provide their unique type. + * FieldKind + * @description The kind of field. + * - `Input`: An input field on a node. + * - `Output`: An output field on a node. + * - `Internal`: A field which is treated as an input, but cannot be used in node definitions. Metadata is + * one example. It is provided to nodes via the WithMetadata class, and we want to reserve the field name + * "metadata" for this on all nodes. `FieldKind` is used to short-circuit the field name validation logic, + * allowing "metadata" for that field. + * - `NodeAttribute`: The field is a node attribute. These are fields which are not inputs or outputs, + * but which are used to store information about the node. For example, the `id` and `type` fields are node + * attributes. + * + * The presence of this in `json_schema_extra["field_kind"]` is used when initializing node schemas on app + * startup, and when generating the OpenAPI schema for the workflow editor. + * @enum {string} */ - BaseInvocation: { + FieldKind: "input" | "output" | "internal" | "node_attribute"; + /** + * Input + * @description The type of input a field accepts. + * - `Input.Direct`: The field must have its value provided directly, when the invocation and field are instantiated. + * - `Input.Connection`: The field must have its value provided by a connection. + * - `Input.Any`: The field may have its value provided either directly or by a connection. + * @enum {string} + */ + Input: "connection" | "direct" | "any"; + /** + * InputFieldJSONSchemaExtra + * @description Extra attributes to be added to input fields and their OpenAPI schema. 
Used during graph execution, + * and by the workflow editor during schema parsing and UI rendering. + */ + InputFieldJSONSchemaExtra: { + input: components["schemas"]["Input"]; + /** Orig Required */ + orig_required: boolean; + field_kind: components["schemas"]["FieldKind"]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Default + * @default null */ - id: string; + default: unknown; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. + * Orig Default + * @default null + */ + orig_default: unknown; + /** + * Ui Hidden * @default false */ - is_intermediate: boolean; + ui_hidden: boolean; + /** @default null */ + ui_type: components["schemas"]["UIType"] | null; + /** @default null */ + ui_component: components["schemas"]["UIComponent"] | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Ui Order + * @default null */ - use_cache: boolean; + ui_order: number | null; + /** + * Ui Choice Labels + * @default null + */ + ui_choice_labels: { + [key: string]: string; + } | null; }; - /** - * BaseInvocationOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. - */ - BaseInvocationOutput: Record; /** * InvocationCompleteEvent * @description Event model for invocation_complete @@ -12207,14 +12713,14 @@ export type components = { */ session_id: string; /** @description The ID of the invocation */ - invocation: components["schemas"]["BaseInvocation"]; + invocation: components["schemas"]["AnyInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node */ invocation_source_id: string; /** @description The result of the invocation */ - result: components["schemas"]["BaseInvocationOutput"]; + result: components["schemas"]["AnyInvocationOutput"]; }; /** * InvocationDenoiseProgressEvent @@ -12247,7 +12753,7 @@ export type components = { */ session_id: string; /** @description The ID of the invocation */ - invocation: components["schemas"]["BaseInvocation"]; + invocation: components["schemas"]["AnyInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -12307,7 +12813,7 @@ export type components = { */ session_id: string; /** @description The ID of the invocation */ - invocation: components["schemas"]["BaseInvocation"]; + invocation: components["schemas"]["AnyInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -12372,7 +12878,7 @@ export type components = { */ session_id: string; /** @description The ID of the invocation */ - invocation: components["schemas"]["BaseInvocation"]; + invocation: components["schemas"]["AnyInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -12589,6 +13095,40 @@ export type components = { */ submodel_type: components["schemas"]["SubModelType"] | null; }; + /** + * OutputFieldJSONSchemaExtra + * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor + * during schema parsing and UI rendering. 
+ */ + OutputFieldJSONSchemaExtra: { + field_kind: components["schemas"]["FieldKind"]; + /** Ui Hidden */ + ui_hidden: boolean; + ui_type: components["schemas"]["UIType"] | null; + /** Ui Order */ + ui_order: number | null; + }; + /** + * ProgressImage + * @description The progress image sent intermittently during processing + */ + ProgressImage: { + /** + * Width + * @description The effective width of the image in pixels + */ + width: number; + /** + * Height + * @description The effective height of the image in pixels + */ + height: number; + /** + * Dataurl + * @description The image data as a b64 data URL + */ + dataURL: string; + }; /** * QueueClearedEvent * @description Event model for queue_cleared @@ -12688,6 +13228,81 @@ export type components = { */ session_id: string; }; + /** + * UIComponent + * @description The type of UI component to use for a field, used to override the default components, which are + * inferred from the field type. + * @enum {string} + */ + UIComponent: "none" | "textarea" | "slider"; + /** + * UIConfigBase + * @description Provides additional node configuration to the UI. + * This is used internally by the @invocation decorator logic. Do not use this directly. + */ + UIConfigBase: { + /** + * Tags + * @description The node's tags + */ + tags: string[] | null; + /** + * Title + * @description The node's display name + * @default null + */ + title: string | null; + /** + * Category + * @description The node's category + * @default null + */ + category: string | null; + /** + * Version + * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". + */ + version: string; + /** + * Node Pack + * @description Whether or not this is a custom node + * @default null + */ + node_pack: string | null; + /** + * @description The node's classification + * @default stable + */ + classification: components["schemas"]["Classification"]; + }; + /** + * UIType + * @description Type hints for the UI for situations in which the field type is not enough to infer the correct UI type. + * + * - Model Fields + * The most common node-author-facing use will be for model fields. Internally, there is no difference + * between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the + * base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that + * the field is an SDXL main model field. + * + * - Any Field + * We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to + * indicate that the field accepts any type. Use with caution. This cannot be used on outputs. + * + * - Scheduler Field + * Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field. + * + * - Internal Fields + * Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate + * handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These + * should not be used by node authors. + * + * - DEPRECATED Fields + * These types are deprecated and should not be used by node authors. A warning will be logged if one is + * used, and the type will be ignored. They are included here for backwards compatibility. 
+ * @enum {string} + */ + UIType: "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VAEModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "T2IAdapterModelField" | "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_IsIntermediate" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict"; }; responses: never; parameters: never; From 5a4d10467bfc584c755fd26b99d3474782be189b Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 29 May 2024 17:30:56 +1000 Subject: [PATCH 12/52] feat(ui): use updated types --- .../listeners/controlAdapterPreprocessor.ts | 3 +-- .../listeners/controlNetImageProcessed.ts | 3 +-- .../socketio/socketInvocationComplete.ts | 3 +-- .../inspector/InspectorOutputsTab.tsx | 5 ++--- .../web/src/features/nodes/types/common.ts | 1 - .../nodes/util/graph/buildNodesGraph.ts | 5 ++--- .../frontend/web/src/services/api/types.ts | 5 ++--- .../frontend/web/src/services/events/types.ts | 19 +++++-------------- 8 files changed, 14 insertions(+), 30 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor.ts index 581146c25c..ba04947a2d 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor.ts @@ -13,7 +13,6 @@ import { isControlAdapterLayer, } from 'features/controlLayers/store/controlLayersSlice'; import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters'; -import { isImageOutput } from 'features/nodes/types/common'; import { toast } from 'features/toast/toast'; import { t } from 'i18next'; import { isEqual } from 'lodash-es'; @@ -139,7 +138,7 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni // We still have to check the output type assert( - isImageOutput(invocationCompleteAction.payload.data.result), + invocationCompleteAction.payload.data.result.type === 'image_output', `Processor did not return an image output, got: ${invocationCompleteAction.payload.data.result}` ); const { image_name } = invocationCompleteAction.payload.data.result.image; diff --git 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts index 1e485b31d5..574dad00eb 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts @@ -9,7 +9,6 @@ import { selectControlAdapterById, } from 'features/controlAdapters/store/controlAdaptersSlice'; import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types'; -import { isImageOutput } from 'features/nodes/types/common'; import { toast } from 'features/toast/toast'; import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; @@ -74,7 +73,7 @@ export const addControlNetImageProcessedListener = (startAppListening: AppStartL ); // We still have to check the output type - if (isImageOutput(invocationCompleteAction.payload.data.result)) { + if (invocationCompleteAction.payload.data.result.type === 'image_output') { const { image_name } = invocationCompleteAction.payload.data.result.image; // Wait for the ImageDTO to be received diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts index 1a04f9493a..2841493ca6 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts @@ -11,7 +11,6 @@ import { } from 'features/gallery/store/gallerySlice'; import { IMAGE_CATEGORIES } from 'features/gallery/store/types'; import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState'; -import { isImageOutput } from 'features/nodes/types/common'; import { zNodeStatus } from 'features/nodes/types/invocation'; import { CANVAS_OUTPUT } from 'features/nodes/util/graph/constants'; import { boardsApi } from 'services/api/endpoints/boards'; @@ -33,7 +32,7 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi const { result, invocation_source_id } = data; // This complete event has an associated image output - if (isImageOutput(data.result) && !nodeTypeDenylist.includes(data.invocation.type)) { + if (data.result.type === 'image_output' && !nodeTypeDenylist.includes(data.invocation.type)) { const { image_name } = data.result.image; const { canvas, gallery } = getState(); diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx index d4150243b9..59a603e7f1 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx @@ -11,8 +11,7 @@ import { selectLastSelectedNode } from 'features/nodes/store/selectors'; import { isInvocationNode } from 'features/nodes/types/invocation'; import { memo, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import type { ImageOutput } from 'services/api/types'; -import type { AnyResult } from 'services/events/types'; +import 
type { ImageOutput, S } from 'services/api/types'; import ImageOutputPreview from './outputs/ImageOutputPreview'; @@ -66,4 +65,4 @@ const InspectorOutputsTab = () => { export default memo(InspectorOutputsTab); -const getKey = (result: AnyResult, i: number) => `${result.type}-${i}`; +const getKey = (result: S['AnyInvocationOutput'], i: number) => `${result.type}-${i}`; diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts index ca44259995..54e126af3a 100644 --- a/invokeai/frontend/web/src/features/nodes/types/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/common.ts @@ -144,5 +144,4 @@ const zImageOutput = z.object({ type: z.literal('image_output'), }); export type ImageOutput = z.infer; -export const isImageOutput = (output: unknown): output is ImageOutput => zImageOutput.safeParse(output).success; // #endregion diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts index 130da8bf15..8f880a46a7 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts @@ -1,8 +1,7 @@ import type { NodesState } from 'features/nodes/store/types'; import { isInvocationNode } from 'features/nodes/types/invocation'; import { omit, reduce } from 'lodash-es'; -import type { Graph } from 'services/api/types'; -import type { AnyInvocation } from 'services/events/types'; +import type { Graph, S } from 'services/api/types'; import { v4 as uuidv4 } from 'uuid'; /** @@ -82,7 +81,7 @@ export const buildNodesGraph = (nodesState: NodesState): Graph => { parsedEdges.forEach((edge) => { const destination_node = parsedNodes[edge.destination.node_id]; const field = edge.destination.field; - parsedNodes[edge.destination.node_id] = omit(destination_node, field) as AnyInvocation; + parsedNodes[edge.destination.node_id] = omit(destination_node, field) as S['AnyInvocation']; }); // Assemble! diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 3522d719fb..17b63f6f7c 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -122,7 +122,6 @@ export type ModelInstallStatus = S['InstallStatus']; // Graphs export type Graph = S['Graph']; export type NonNullableGraph = O.Required; -export type GraphExecutionState = S['GraphExecutionState']; export type Batch = S['Batch']; export type SessionQueueItemDTO = S['SessionQueueItemDTO']; export type WorkflowRecordOrderBy = S['WorkflowRecordOrderBy']; @@ -132,10 +131,10 @@ export type WorkflowRecordListItemDTO = S['WorkflowRecordListItemDTO']; type KeysOfUnion = T extends T ? 
keyof T : never; export type AnyInvocation = Exclude< - Graph['nodes'][string], + S['AnyInvocation'], S['CoreMetadataInvocation'] | S['MetadataInvocation'] | S['MetadataItemInvocation'] | S['MergeMetadataInvocation'] >; -export type AnyInvocationIncMetadata = S['Graph']['nodes'][string]; +export type AnyInvocationIncMetadata = S['AnyInvocation']; export type InvocationType = AnyInvocation['type']; type InvocationOutputMap = S['InvocationOutputMap']; diff --git a/invokeai/frontend/web/src/services/events/types.ts b/invokeai/frontend/web/src/services/events/types.ts index 3a7de93627..a84049cc28 100644 --- a/invokeai/frontend/web/src/services/events/types.ts +++ b/invokeai/frontend/web/src/services/events/types.ts @@ -1,21 +1,12 @@ -import type { Graph, GraphExecutionState, S } from 'services/api/types'; - -export type AnyInvocation = NonNullable[string]>; - -export type AnyResult = NonNullable; +import type { S } from 'services/api/types'; export type ModelLoadStartedEvent = S['ModelLoadStartedEvent']; export type ModelLoadCompleteEvent = S['ModelLoadCompleteEvent']; -export type InvocationStartedEvent = Omit & { invocation: AnyInvocation }; -export type InvocationDenoiseProgressEvent = Omit & { - invocation: AnyInvocation; -}; -export type InvocationCompleteEvent = Omit & { - result: AnyResult; - invocation: AnyInvocation; -}; -export type InvocationErrorEvent = Omit & { invocation: AnyInvocation }; +export type InvocationStartedEvent = S['InvocationStartedEvent']; +export type InvocationDenoiseProgressEvent = S['InvocationDenoiseProgressEvent']; +export type InvocationCompleteEvent = S['InvocationCompleteEvent']; +export type InvocationErrorEvent = S['InvocationErrorEvent']; export type ProgressImage = InvocationDenoiseProgressEvent['progress_image']; export type ModelInstallDownloadProgressEvent = S['ModelInstallDownloadProgressEvent']; From 5beec8211aad2371287c7639cad48cf6a0f65cf7 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 29 May 2024 21:00:02 +1000 Subject: [PATCH 13/52] feat(api): sort openapi schemas Reduces the constant changes to the frontend client types due to inconsistent ordering of pydantic models. 
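In short, the ordering is made deterministic in two places. A minimal sketch of the idea (illustrative helper names, not the actual functions):

    # Illustrative sketch: deterministic ordering for the generated OpenAPI schema.
    from typing import Any

    def build_one_of(schema_names: list[str]) -> dict[str, Any]:
        # Emit the $ref entries in sorted order so the oneOf union is stable between runs.
        return {"oneOf": [{"$ref": f"#/components/schemas/{n}"} for n in sorted(schema_names)]}

    def sort_components(openapi_schema: dict[str, Any]) -> dict[str, Any]:
        # Sort components/schemas by key so regenerated client types stop reordering.
        schemas = openapi_schema["components"]["schemas"]
        openapi_schema["components"]["schemas"] = dict(sorted(schemas.items()))
        return openapi_schema

Sorting is cosmetic as far as the API is concerned; it only removes churn from the regenerated schema.ts.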
--- invokeai/app/services/shared/graph.py | 10 ++++++---- invokeai/app/util/custom_openapi.py | 2 ++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py index 7f5b277ad8..1a60a8cc0e 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -288,8 +288,9 @@ class AnyInvocation(BaseInvocation): # Nodes are too powerful, we have to make our own OpenAPI schema manually # No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually oneOf: list[dict[str, str]] = [] - for i in BaseInvocation.get_invocations(): - oneOf.append({"$ref": f"#/components/schemas/{i.__name__}"}) + names = [i.__name__ for i in BaseInvocation.get_invocations()] + for name in sorted(names): + oneOf.append({"$ref": f"#/components/schemas/{name}"}) return {"oneOf": oneOf} @@ -304,8 +305,9 @@ class AnyInvocationOutput(BaseInvocationOutput): # No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually oneOf: list[dict[str, str]] = [] - for i in BaseInvocationOutput.get_outputs(): - oneOf.append({"$ref": f"#/components/schemas/{i.__name__}"}) + names = [i.__name__ for i in BaseInvocationOutput.get_outputs()] + for name in sorted(names): + oneOf.append({"$ref": f"#/components/schemas/{name}"}) return {"oneOf": oneOf} diff --git a/invokeai/app/util/custom_openapi.py b/invokeai/app/util/custom_openapi.py index 9313f63b84..50259c12cc 100644 --- a/invokeai/app/util/custom_openapi.py +++ b/invokeai/app/util/custom_openapi.py @@ -108,6 +108,8 @@ def get_openapi_func( if post_transform is not None: openapi_schema = post_transform(openapi_schema) + openapi_schema["components"]["schemas"] = dict(sorted(openapi_schema["components"]["schemas"].items())) + app.openapi_schema = openapi_schema return app.openapi_schema From 50d3030471dd26f0d5a761f9ae4511092aaffe6e Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 29 May 2024 21:02:29 +1000 Subject: [PATCH 14/52] feat(app): dynamic type adapters for invocations & outputs Keep track of whether or not the typeadapter needs to be updated. Allows for dynamic invocation and output unions. 
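The pattern is a cached TypeAdapter plus an invalidation flag: registering a new class marks the cache stale, and the union is rebuilt lazily on the next access. A condensed sketch with a hypothetical registry class (the real code lives on BaseInvocation and BaseInvocationOutput):

    # Sketch of a lazily rebuilt TypeAdapter guarded by a needs-update flag (hypothetical names).
    from typing import Annotated, Any, ClassVar, Optional, Union

    from pydantic import BaseModel, Field, TypeAdapter
    from typing_extensions import TypeAliasType

    class InvocationRegistry:
        _classes: ClassVar[set[type[BaseModel]]] = set()
        _typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
        _typeadapter_needs_update: ClassVar[bool] = False

        @classmethod
        def register(cls, model: type[BaseModel]) -> None:
            cls._classes.add(model)
            cls._typeadapter_needs_update = True  # invalidate; rebuild on next access

        @classmethod
        def get_typeadapter(cls) -> TypeAdapter[Any]:
            if cls._typeadapter is None or cls._typeadapter_needs_update:
                # Each registered model is assumed to carry a literal "type" discriminator.
                any_model = TypeAliasType(
                    "AnyRegisteredModel",
                    Annotated[Union[tuple(cls._classes)], Field(discriminator="type")],
                )
                cls._typeadapter = TypeAdapter(any_model)
                cls._typeadapter_needs_update = False
            return cls._typeadapter

This keeps registration cheap no matter how many custom nodes load; the union is only recomputed when the adapter is actually requested.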
--- invokeai/app/invocations/baseinvocation.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 9545179e21..1d169f0a82 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -98,11 +98,13 @@ class BaseInvocationOutput(BaseModel): _output_classes: ClassVar[set[BaseInvocationOutput]] = set() _typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None + _typeadapter_needs_update: ClassVar[bool] = False @classmethod def register_output(cls, output: BaseInvocationOutput) -> None: """Registers an invocation output.""" cls._output_classes.add(output) + cls._typeadapter_needs_update = True @classmethod def get_outputs(cls) -> Iterable[BaseInvocationOutput]: @@ -112,11 +114,12 @@ class BaseInvocationOutput(BaseModel): @classmethod def get_typeadapter(cls) -> TypeAdapter[Any]: """Gets a pydantc TypeAdapter for the union of all invocation output types.""" - if not cls._typeadapter: + if not cls._typeadapter or cls._typeadapter_needs_update: AnyInvocationOutput = TypeAliasType( "AnyInvocationOutput", Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")] ) cls._typeadapter = TypeAdapter(AnyInvocationOutput) + cls._typeadapter_needs_update = False return cls._typeadapter @classmethod @@ -168,6 +171,7 @@ class BaseInvocation(ABC, BaseModel): _invocation_classes: ClassVar[set[BaseInvocation]] = set() _typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None + _typeadapter_needs_update: ClassVar[bool] = False @classmethod def get_type(cls) -> str: @@ -178,15 +182,17 @@ class BaseInvocation(ABC, BaseModel): def register_invocation(cls, invocation: BaseInvocation) -> None: """Registers an invocation.""" cls._invocation_classes.add(invocation) + cls._typeadapter_needs_update = True @classmethod def get_typeadapter(cls) -> TypeAdapter[Any]: """Gets a pydantc TypeAdapter for the union of all invocation types.""" - if not cls._typeadapter: + if not cls._typeadapter or cls._typeadapter_needs_update: AnyInvocation = TypeAliasType( "AnyInvocation", Annotated[Union[tuple(cls._invocation_classes)], Field(discriminator="type")] ) cls._typeadapter = TypeAdapter(AnyInvocation) + cls._typeadapter_needs_update = False return cls._typeadapter @classmethod From ac56ab79a7f6d2a9ca3e7c76e37f123d11c90b7d Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 29 May 2024 21:05:42 +1000 Subject: [PATCH 15/52] fix(app): add dynamic validator to AnyInvocation & AnyInvocationOutput This fixes the tests and slightly changes output types. 
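Rather than reusing the type adapter's core schema (which is frozen at class-definition time), AnyInvocation and AnyInvocationOutput now install a plain validator that calls the adapter when validation runs. A self-contained sketch of the same pattern with stand-in models (FooNode, BarNode, and get_typeadapter are placeholders for the real registry):

    # Sketch: route validation through a dynamically built TypeAdapter at call time.
    from typing import Any, Literal, Union

    from pydantic import BaseModel, GetCoreSchemaHandler, TypeAdapter
    from pydantic_core import core_schema

    class FooNode(BaseModel):
        type: Literal["foo"] = "foo"
        value: int

    class BarNode(BaseModel):
        type: Literal["bar"] = "bar"
        value: str

    def get_typeadapter() -> TypeAdapter[Any]:
        # Stand-in for BaseInvocation.get_typeadapter(); the real union is rebuilt
        # whenever a new node class registers.
        return TypeAdapter(Union[FooNode, BarNode])

    class AnyNode:
        @classmethod
        def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
            def validate(v: Any) -> Any:
                return get_typeadapter().validate_python(v)

            return core_schema.no_info_plain_validator_function(validate)

    class Wrapper(BaseModel):
        node: AnyNode

    print(Wrapper.model_validate({"node": {"type": "bar", "value": "hi"}}).node)

Because the lookup happens inside the validator, nodes registered after import (e.g. custom nodes) still deserialize to their concrete classes.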
--- invokeai/app/services/shared/graph.py | 22 ++++++++++++++++------ tests/test_node_graph.py | 3 ++- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py index 1a60a8cc0e..d745e73823 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -14,7 +14,7 @@ from pydantic import ( ) from pydantic.fields import Field from pydantic.json_schema import JsonSchemaValue -from pydantic_core import CoreSchema +from pydantic_core import core_schema # Importing * is bad karma but needed here for node detection from invokeai.app.invocations import * # noqa: F401 F403 @@ -280,11 +280,16 @@ class CollectInvocation(BaseInvocation): class AnyInvocation(BaseInvocation): @classmethod - def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler): - return BaseInvocation.get_typeadapter().core_schema + def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + def validate_invocation(v: Any) -> "AnyInvocation": + return BaseInvocation.get_typeadapter().validate_python(v) + + return core_schema.no_info_plain_validator_function(validate_invocation) @classmethod - def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue: + def __get_pydantic_json_schema__( + cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: # Nodes are too powerful, we have to make our own OpenAPI schema manually # No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually oneOf: list[dict[str, str]] = [] @@ -297,10 +302,15 @@ class AnyInvocation(BaseInvocation): class AnyInvocationOutput(BaseInvocationOutput): @classmethod def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler): - return BaseInvocationOutput.get_typeadapter().core_schema + def validate_invocation_output(v: Any) -> "AnyInvocationOutput": + return BaseInvocationOutput.get_typeadapter().validate_python(v) + + return core_schema.no_info_plain_validator_function(validate_invocation_output) @classmethod - def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue: + def __get_pydantic_json_schema__( + cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: # Nodes are too powerful, we have to make our own OpenAPI schema manually # No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually diff --git a/tests/test_node_graph.py b/tests/test_node_graph.py index 87a4948af4..861f1bd07b 100644 --- a/tests/test_node_graph.py +++ b/tests/test_node_graph.py @@ -1,5 +1,6 @@ import pytest from pydantic import TypeAdapter +from pydantic.json_schema import models_json_schema from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -713,4 +714,4 @@ def test_iterate_accepts_collection(): def test_graph_can_generate_schema(): # Not throwing on this line is sufficient # NOTE: if this test fails, it's PROBABLY because a new invocation type is breaking schema generation - _ = Graph.model_json_schema() + models_json_schema([(Graph, "serialization")]) From 7cb32d3d8323687b58f0ff2b85bc981ddf592ecf Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 29 May 2024 21:06:03 +1000 Subject: [PATCH 16/52] chore(ui): 
typegen --- .../frontend/web/src/services/api/schema.ts | 4348 +++++++++-------- 1 file changed, 2300 insertions(+), 2048 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 9ecd78e3fd..5482b57c0b 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -581,6 +581,7 @@ export type components = { * type * @default add * @constant + * @enum {string} */ type: "add"; }; @@ -621,11 +622,10 @@ export type components = { * type * @default alpha_mask_to_tensor * @constant + * @enum {string} */ type: "alpha_mask_to_tensor"; }; - AnyInvocation: components["schemas"]["ControlNetInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["BooleanInvocation"] | 
components["schemas"]["IntegerMathInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] 
| components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["PairTileImageInvocation"]; - AnyInvocationOutput: components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["String2Output"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["SDXLModelLoaderOutput"]; /** * AppConfig * @description App Config Response @@ -748,6 +748,7 @@ export type components = { * Type * @default basemetadata * @constant + * @enum {string} */ type?: "basemetadata"; }; @@ -798,6 +799,42 @@ export type components = { */ items?: (string | number)[]; }; + /** + * BatchEnqueuedEvent + * @description Event model for batch_enqueued + */ + BatchEnqueuedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Batch Id + * @description The ID of the batch + */ + batch_id: string; + /** + * Enqueued + * 
@description The number of invocations enqueued + */ + enqueued: number; + /** + * Requested + * @description The number of invocations initially requested to be enqueued (may be less than enqueued if queue was full) + */ + requested: number; + /** + * Priority + * @description The priority of the batch + */ + priority: number; + }; /** BatchStatus */ BatchStatus: { /** @@ -906,6 +943,7 @@ export type components = { * type * @default blank_image * @constant + * @enum {string} */ type: "blank_image"; }; @@ -951,6 +989,7 @@ export type components = { * type * @default lblend * @constant + * @enum {string} */ type: "lblend"; }; @@ -1222,9 +1261,28 @@ export type components = { * type * @default boolean_collection * @constant + * @enum {string} */ type: "boolean_collection"; }; + /** + * BooleanCollectionOutput + * @description Base class for nodes that output a collection of booleans + */ + BooleanCollectionOutput: { + /** + * Collection + * @description The output boolean collection + */ + collection: boolean[]; + /** + * type + * @default boolean_collection_output + * @constant + * @enum {string} + */ + type: "boolean_collection_output"; + }; /** * Boolean Primitive * @description A boolean primitive value @@ -1257,9 +1315,111 @@ export type components = { * type * @default boolean * @constant + * @enum {string} */ type: "boolean"; }; + /** + * BooleanOutput + * @description Base class for nodes that output a single boolean + */ + BooleanOutput: { + /** + * Value + * @description The output boolean + */ + value: boolean; + /** + * type + * @default boolean_output + * @constant + * @enum {string} + */ + type: "boolean_output"; + }; + /** + * BulkDownloadCompleteEvent + * @description Event model for bulk_download_complete + */ + BulkDownloadCompleteEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Bulk Download Id + * @description The ID of the bulk image download + */ + bulk_download_id: string; + /** + * Bulk Download Item Id + * @description The ID of the bulk image download item + */ + bulk_download_item_id: string; + /** + * Bulk Download Item Name + * @description The name of the bulk image download item + */ + bulk_download_item_name: string; + }; + /** + * BulkDownloadErrorEvent + * @description Event model for bulk_download_error + */ + BulkDownloadErrorEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Bulk Download Id + * @description The ID of the bulk image download + */ + bulk_download_id: string; + /** + * Bulk Download Item Id + * @description The ID of the bulk image download item + */ + bulk_download_item_id: string; + /** + * Bulk Download Item Name + * @description The name of the bulk image download item + */ + bulk_download_item_name: string; + /** + * Error + * @description The error message + */ + error: string; + }; + /** + * BulkDownloadStartedEvent + * @description Event model for bulk_download_started + */ + BulkDownloadStartedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Bulk Download Id + * @description The ID of the bulk image download + */ + bulk_download_id: string; + /** + * Bulk Download Item Id + * @description The ID of the bulk image download item + */ + bulk_download_item_id: string; + /** + * Bulk Download Item Name + * @description The name of the bulk image download item + */ + bulk_download_item_name: string; + }; /** CLIPField */ CLIPField: { /** @description Info to load 
tokenizer submodel */ @@ -1277,6 +1437,24 @@ export type components = { */ loras: components["schemas"]["LoRAField"][]; }; + /** + * CLIPOutput + * @description Base class for invocations that output a CLIP field + */ + CLIPOutput: { + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip: components["schemas"]["CLIPField"]; + /** + * type + * @default clip_output + * @constant + * @enum {string} + */ + type: "clip_output"; + }; /** * CLIP Skip * @description Skip layers in clip text_encoder model. @@ -1315,9 +1493,29 @@ export type components = { * type * @default clip_skip * @constant + * @enum {string} */ type: "clip_skip"; }; + /** + * CLIPSkipInvocationOutput + * @description CLIP skip node output + */ + CLIPSkipInvocationOutput: { + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip: components["schemas"]["CLIPField"] | null; + /** + * type + * @default clip_skip_output + * @constant + * @enum {string} + */ + type: "clip_skip_output"; + }; /** * CLIPVisionDiffusersConfig * @description Model config for CLIPVision. @@ -1370,6 +1568,7 @@ export type components = { /** * Format * @constant + * @enum {string} */ format: "diffusers"; /** @default */ @@ -1378,6 +1577,7 @@ export type components = { * Type * @default clip_vision * @constant + * @enum {string} */ type: "clip_vision"; }; @@ -1422,6 +1622,7 @@ export type components = { * type * @default infill_cv2 * @constant + * @enum {string} */ type: "infill_cv2"; }; @@ -1481,6 +1682,7 @@ export type components = { * type * @default calculate_image_tiles_even_split * @constant + * @enum {string} */ type: "calculate_image_tiles_even_split"; }; @@ -1540,6 +1742,7 @@ export type components = { * type * @default calculate_image_tiles * @constant + * @enum {string} */ type: "calculate_image_tiles"; }; @@ -1599,9 +1802,25 @@ export type components = { * type * @default calculate_image_tiles_min_overlap * @constant + * @enum {string} */ type: "calculate_image_tiles_min_overlap"; }; + /** CalculateImageTilesOutput */ + CalculateImageTilesOutput: { + /** + * Tiles + * @description The tiles coordinates that cover a particular image shape. + */ + tiles: components["schemas"]["Tile"][]; + /** + * type + * @default calculate_image_tiles_output + * @constant + * @enum {string} + */ + type: "calculate_image_tiles_output"; + }; /** * CancelByBatchIDsResult * @description Result of canceling by list of batch ids @@ -1678,6 +1897,7 @@ export type components = { * type * @default canny_image_processor * @constant + * @enum {string} */ type: "canny_image_processor"; }; @@ -1738,6 +1958,7 @@ export type components = { * type * @default canvas_paste_back * @constant + * @enum {string} */ type: "canvas_paste_back"; }; @@ -1796,9 +2017,19 @@ export type components = { * type * @default img_pad_crop * @constant + * @enum {string} */ type: "img_pad_crop"; }; + /** + * Classification + * @description The classification of an Invocation. + * - `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation. + * - `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term. + * - `Prototype`: The invocation is not yet stable and may be removed from the application at any time. 
Workflows built around this invocation may break, and we are *not* committed to supporting this invocation. + * @enum {string} + */ + Classification: "stable" | "beta" | "prototype"; /** * ClearResult * @description Result of clearing the session queue @@ -1848,9 +2079,43 @@ export type components = { * type * @default collect * @constant + * @enum {string} */ type: "collect"; }; + /** CollectInvocationOutput */ + CollectInvocationOutput: { + /** + * Collection + * @description The collection of input items + */ + collection: unknown[]; + /** + * type + * @default collect_output + * @constant + * @enum {string} + */ + type: "collect_output"; + }; + /** + * ColorCollectionOutput + * @description Base class for nodes that output a collection of colors + */ + ColorCollectionOutput: { + /** + * Collection + * @description The output colors + */ + collection: components["schemas"]["ColorField"][]; + /** + * type + * @default color_collection_output + * @constant + * @enum {string} + */ + type: "color_collection_output"; + }; /** * Color Correct * @description Shifts the colors of a target image to match the reference image, optionally @@ -1909,6 +2174,7 @@ export type components = { * type * @default color_correct * @constant + * @enum {string} */ type: "color_correct"; }; @@ -1974,6 +2240,7 @@ export type components = { * type * @default color * @constant + * @enum {string} */ type: "color"; }; @@ -2024,9 +2291,25 @@ export type components = { * type * @default color_map_image_processor * @constant + * @enum {string} */ type: "color_map_image_processor"; }; + /** + * ColorOutput + * @description Base class for nodes that output a single color + */ + ColorOutput: { + /** @description The output color */ + color: components["schemas"]["ColorField"]; + /** + * type + * @default color_output + * @constant + * @enum {string} + */ + type: "color_output"; + }; /** * Prompt * @description Parse prompt using compel package to conditioning. @@ -2070,6 +2353,7 @@ export type components = { * type * @default compel * @constant + * @enum {string} */ type: "compel"; }; @@ -2105,9 +2389,28 @@ export type components = { * type * @default conditioning_collection * @constant + * @enum {string} */ type: "conditioning_collection"; }; + /** + * ConditioningCollectionOutput + * @description Base class for nodes that output a collection of conditioning tensors + */ + ConditioningCollectionOutput: { + /** + * Collection + * @description The output conditioning tensors + */ + collection: components["schemas"]["ConditioningField"][]; + /** + * type + * @default conditioning_collection_output + * @constant + * @enum {string} + */ + type: "conditioning_collection_output"; + }; /** * ConditioningField * @description A conditioning tensor primitive value @@ -2118,7 +2421,10 @@ export type components = { * @description The name of conditioning tensor */ conditioning_name: string; - /** @description The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True. */ + /** + * @description The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True. 
+ * @default null + */ mask?: components["schemas"]["TensorField"] | null; }; /** @@ -2152,9 +2458,25 @@ export type components = { * type * @default conditioning * @constant + * @enum {string} */ type: "conditioning"; }; + /** + * ConditioningOutput + * @description Base class for nodes that output a single conditioning tensor + */ + ConditioningOutput: { + /** @description Conditioning tensor */ + conditioning: components["schemas"]["ConditioningField"]; + /** + * type + * @default conditioning_output + * @constant + * @enum {string} + */ + type: "conditioning_output"; + }; /** * Content Shuffle Processor * @description Applies content shuffle processing to image @@ -2226,6 +2548,7 @@ export type components = { * type * @default content_shuffle_image_processor * @constant + * @enum {string} */ type: "content_shuffle_image_processor"; }; @@ -2328,6 +2651,7 @@ export type components = { * Format * @default checkpoint * @constant + * @enum {string} */ format: "checkpoint"; /** @@ -2344,6 +2668,7 @@ export type components = { * Type * @default controlnet * @constant + * @enum {string} */ type: "controlnet"; }; @@ -2402,6 +2727,7 @@ export type components = { * Format * @default diffusers * @constant + * @enum {string} */ format: "diffusers"; /** @default */ @@ -2410,6 +2736,7 @@ export type components = { * Type * @default controlnet * @constant + * @enum {string} */ type: "controlnet"; }; @@ -2481,6 +2808,7 @@ export type components = { * type * @default controlnet * @constant + * @enum {string} */ type: "controlnet"; }; @@ -2488,7 +2816,10 @@ export type components = { ControlNetMetadataField: { /** @description The control image */ image: components["schemas"]["ImageField"]; - /** @description The control image, after processing. */ + /** + * @description The control image, after processing. 
+ * @default null + */ processed_image?: components["schemas"]["ImageField"] | null; /** @description The ControlNet model to use */ control_model: components["schemas"]["ModelIdentifierField"]; @@ -2525,6 +2856,21 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; + /** + * ControlOutput + * @description node output for ControlNet info + */ + ControlOutput: { + /** @description ControlNet(s) to apply */ + control: components["schemas"]["ControlField"]; + /** + * type + * @default control_output + * @constant + * @enum {string} + */ + type: "control_output"; + }; /** * Core Metadata * @description Collects core generation metadata into a MetadataField @@ -2752,6 +3098,7 @@ export type components = { * type * @default core_metadata * @constant + * @enum {string} */ type: "core_metadata"; [key: string]: unknown; @@ -2809,6 +3156,7 @@ export type components = { * type * @default create_denoise_mask * @constant + * @enum {string} */ type: "create_denoise_mask"; }; @@ -2891,6 +3239,7 @@ export type components = { * type * @default create_gradient_mask * @constant + * @enum {string} */ type: "create_gradient_mask"; }; @@ -2950,6 +3299,7 @@ export type components = { * type * @default crop_latents * @constant + * @enum {string} */ type: "crop_latents"; }; @@ -3017,6 +3367,7 @@ export type components = { * type * @default cv_inpaint * @constant + * @enum {string} */ type: "cv_inpaint"; }; @@ -3082,6 +3433,7 @@ export type components = { * type * @default dw_openpose_image_processor * @constant + * @enum {string} */ type: "dw_openpose_image_processor"; }; @@ -3221,6 +3573,7 @@ export type components = { * type * @default denoise_latents * @constant + * @enum {string} */ type: "denoise_latents"; }; @@ -3237,6 +3590,7 @@ export type components = { /** * Masked Latents Name * @description The name of the masked image latents + * @default null */ masked_latents_name?: string | null; /** @@ -3246,6 +3600,21 @@ export type components = { */ gradient?: boolean; }; + /** + * DenoiseMaskOutput + * @description Base class for nodes that output a single image + */ + DenoiseMaskOutput: { + /** @description Mask for denoise model run */ + denoise_mask: components["schemas"]["DenoiseMaskField"]; + /** + * type + * @default denoise_mask_output + * @constant + * @enum {string} + */ + type: "denoise_mask_output"; + }; /** * Depth Anything Processor * @description Generates a depth map based on the Depth Anything algorithm @@ -3300,6 +3669,7 @@ export type components = { * type * @default depth_anything_image_processor * @constant + * @enum {string} */ type: "depth_anything_image_processor"; }; @@ -3341,9 +3711,78 @@ export type components = { * type * @default div * @constant + * @enum {string} */ type: "div"; }; + /** + * DownloadCancelledEvent + * @description Event model for download_cancelled + */ + DownloadCancelledEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Source + * @description The source of the download + */ + source: string; + }; + /** + * DownloadCompleteEvent + * @description Event model for download_complete + */ + DownloadCompleteEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Source + * @description The source of the download + */ + source: string; + /** + * Download Path + * @description The local path where the download is saved + */ + download_path: string; + /** + * Total Bytes + * @description The total 
number of bytes downloaded + */ + total_bytes: number; + }; + /** + * DownloadErrorEvent + * @description Event model for download_error + */ + DownloadErrorEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Source + * @description The source of the download + */ + source: string; + /** + * Error Type + * @description The type of error + */ + error_type: string; + /** + * Error + * @description The error message + */ + error: string; + }; /** * DownloadJob * @description Class to monitor and control a model download request. @@ -3432,6 +3871,58 @@ export type components = { * @enum {string} */ DownloadJobStatus: "waiting" | "running" | "completed" | "cancelled" | "error"; + /** + * DownloadProgressEvent + * @description Event model for download_progress + */ + DownloadProgressEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Source + * @description The source of the download + */ + source: string; + /** + * Download Path + * @description The local path where the download is saved + */ + download_path: string; + /** + * Current Bytes + * @description The number of bytes downloaded so far + */ + current_bytes: number; + /** + * Total Bytes + * @description The total number of bytes to be downloaded + */ + total_bytes: number; + }; + /** + * DownloadStartedEvent + * @description Event model for download_started + */ + DownloadStartedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Source + * @description The source of the download + */ + source: string; + /** + * Download Path + * @description The local path where the download is saved + */ + download_path: string; + }; /** * Dynamic Prompt * @description Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator @@ -3476,6 +3967,7 @@ export type components = { * type * @default dynamic_prompt * @constant + * @enum {string} */ type: "dynamic_prompt"; }; @@ -3540,6 +4032,7 @@ export type components = { * type * @default esrgan * @constant + * @enum {string} */ type: "esrgan"; }; @@ -3648,6 +4141,7 @@ export type components = { * type * @default face_identifier * @constant + * @enum {string} */ type: "face_identifier"; }; @@ -3723,9 +4217,37 @@ export type components = { * type * @default face_mask_detection * @constant + * @enum {string} */ type: "face_mask_detection"; }; + /** + * FaceMaskOutput + * @description Base class for FaceMask output + */ + FaceMaskOutput: { + /** @description The output image */ + image: components["schemas"]["ImageField"]; + /** + * Width + * @description The width of the image in pixels + */ + width: number; + /** + * Height + * @description The height of the image in pixels + */ + height: number; + /** + * type + * @default face_mask_output + * @constant + * @enum {string} + */ + type: "face_mask_output"; + /** @description The output mask */ + mask: components["schemas"]["ImageField"]; + }; /** * FaceOff * @description Bound, extract, and mask a face from an image using MediaPipe detection @@ -3798,9 +4320,65 @@ export type components = { * type * @default face_off * @constant + * @enum {string} */ type: "face_off"; }; + /** + * FaceOffOutput + * @description Base class for FaceOff Output + */ + FaceOffOutput: { + /** @description The output image */ + image: components["schemas"]["ImageField"]; + /** + * Width + * @description The width of the image in pixels + */ + width: number; + /** + * Height + * @description 
The height of the image in pixels + */ + height: number; + /** + * type + * @default face_off_output + * @constant + * @enum {string} + */ + type: "face_off_output"; + /** @description The output mask */ + mask: components["schemas"]["ImageField"]; + /** + * X + * @description The x coordinate of the bounding box's left side + */ + x: number; + /** + * Y + * @description The y coordinate of the bounding box's top side + */ + y: number; + }; + /** + * FieldKind + * @description The kind of field. + * - `Input`: An input field on a node. + * - `Output`: An output field on a node. + * - `Internal`: A field which is treated as an input, but cannot be used in node definitions. Metadata is + * one example. It is provided to nodes via the WithMetadata class, and we want to reserve the field name + * "metadata" for this on all nodes. `FieldKind` is used to short-circuit the field name validation logic, + * allowing "metadata" for that field. + * - `NodeAttribute`: The field is a node attribute. These are fields which are not inputs or outputs, + * but which are used to store information about the node. For example, the `id` and `type` fields are node + * attributes. + * + * The presence of this in `json_schema_extra["field_kind"]` is used when initializing node schemas on app + * startup, and when generating the OpenAPI schema for the workflow editor. + * @enum {string} + */ + FieldKind: "input" | "output" | "internal" | "node_attribute"; /** * Float Collection Primitive * @description A collection of float primitive values @@ -3833,9 +4411,28 @@ export type components = { * type * @default float_collection * @constant + * @enum {string} */ type: "float_collection"; }; + /** + * FloatCollectionOutput + * @description Base class for nodes that output a collection of floats + */ + FloatCollectionOutput: { + /** + * Collection + * @description The float collection + */ + collection: number[]; + /** + * type + * @default float_collection_output + * @constant + * @enum {string} + */ + type: "float_collection_output"; + }; /** * Float Primitive * @description A float primitive value @@ -3868,6 +4465,7 @@ export type components = { * type * @default float * @constant + * @enum {string} */ type: "float"; }; @@ -3915,6 +4513,7 @@ export type components = { * type * @default float_range * @constant + * @enum {string} */ type: "float_range"; }; @@ -3963,9 +4562,28 @@ export type components = { * type * @default float_math * @constant + * @enum {string} */ type: "float_math"; }; + /** + * FloatOutput + * @description Base class for nodes that output a single float + */ + FloatOutput: { + /** + * Value + * @description The output float + */ + value: number; + /** + * type + * @default float_output + * @constant + * @enum {string} + */ + type: "float_output"; + }; /** * Float To Integer * @description Rounds a float number to (a multiple of) an integer. @@ -4011,6 +4629,7 @@ export type components = { * type * @default float_to_int * @constant + * @enum {string} */ type: "float_to_int"; }; @@ -4115,9 +4734,27 @@ export type components = { * type * @default freeu * @constant + * @enum {string} */ type: "freeu"; }; + /** + * GradientMaskOutput + * @description Outputs a denoise mask and an image representing the total gradient of the mask. + */ + GradientMaskOutput: { + /** @description Mask for denoise model run */ + denoise_mask: components["schemas"]["DenoiseMaskField"]; + /** @description Image representing the total gradient area of the mask. For paste-back purposes. 
*/ + expanded_mask_area: components["schemas"]["ImageField"]; + /** + * type + * @default gradient_mask_output + * @constant + * @enum {string} + */ + type: "gradient_mask_output"; + }; /** Graph */ Graph: { /** @@ -4130,7 +4767,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["AnyInvocation"]; + [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | 
components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | 
components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; }; /** * Edges @@ -4167,7 +4804,7 @@ export type components = { * @description The results of node executions */ results?: { - [key: string]: components["schemas"]["AnyInvocationOutput"]; + [key: string]: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"]; }; /** * Errors @@ -4210,6 +4847,7 @@ export type components = { * Type * @default hf * @constant + * @enum {string} */ type?: "hf"; }; @@ -4277,6 +4915,7 @@ export type components = { * type * @default hed_image_processor * @constant + * @enum {string} */ type: "hed_image_processor"; }; @@ -4323,6 +4962,7 @@ export type components = { * type * @default heuristic_resize * @constant + * @enum {string} */ type: "heuristic_resize"; }; @@ -4345,6 +4985,7 @@ export type components = { * Type * @default huggingface * @constant + * @enum {string} */ type?: "huggingface"; /** @@ -4435,11 +5076,13 @@ export type components = { * Type * @default ip_adapter * @constant + * @enum {string} */ type: "ip_adapter"; /** * Format * @constant + * @enum {string} */ format: 
"checkpoint"; }; @@ -4478,7 +5121,10 @@ export type components = { * @default 1 */ end_step_percent?: number; - /** @description The bool mask associated with this IP-Adapter. Excluded regions should be set to False, included regions should be set to True. */ + /** + * @description The bool mask associated with this IP-Adapter. Excluded regions should be set to False, included regions should be set to True. + * @default null + */ mask?: components["schemas"]["TensorField"] | null; }; /** @@ -4556,6 +5202,7 @@ export type components = { * type * @default ip_adapter * @constant + * @enum {string} */ type: "ip_adapter"; }; @@ -4612,6 +5259,7 @@ export type components = { * Type * @default ip_adapter * @constant + * @enum {string} */ type: "ip_adapter"; /** Image Encoder Model Id */ @@ -4619,6 +5267,7 @@ export type components = { /** * Format * @constant + * @enum {string} */ format: "invokeai"; }; @@ -4659,6 +5308,21 @@ export type components = { */ end_step_percent: number; }; + /** IPAdapterOutput */ + IPAdapterOutput: { + /** + * IP-Adapter + * @description IP-Adapter to apply + */ + ip_adapter: components["schemas"]["IPAdapterField"]; + /** + * type + * @default ip_adapter_output + * @constant + * @enum {string} + */ + type: "ip_adapter_output"; + }; /** * Ideal Size * @description Calculates the ideal size for generation to avoid duplication @@ -4708,9 +5372,33 @@ export type components = { * type * @default ideal_size * @constant + * @enum {string} */ type: "ideal_size"; }; + /** + * IdealSizeOutput + * @description Base class for invocations that output an image + */ + IdealSizeOutput: { + /** + * Width + * @description The ideal width of the image (in pixels) + */ + width: number; + /** + * Height + * @description The ideal height of the image (in pixels) + */ + height: number; + /** + * type + * @default ideal_size_output + * @constant + * @enum {string} + */ + type: "ideal_size_output"; + }; /** * Blur Image * @description Blurs an image @@ -4765,6 +5453,7 @@ export type components = { * type * @default img_blur * @constant + * @enum {string} */ type: "img_blur"; }; @@ -4828,6 +5517,7 @@ export type components = { * type * @default img_chan * @constant + * @enum {string} */ type: "img_chan"; }; @@ -4891,6 +5581,7 @@ export type components = { * type * @default img_channel_multiply * @constant + * @enum {string} */ type: "img_channel_multiply"; }; @@ -4948,6 +5639,7 @@ export type components = { * type * @default img_channel_offset * @constant + * @enum {string} */ type: "img_channel_offset"; }; @@ -4983,9 +5675,28 @@ export type components = { * type * @default image_collection * @constant + * @enum {string} */ type: "image_collection"; }; + /** + * ImageCollectionOutput + * @description Base class for nodes that output a collection of images + */ + ImageCollectionOutput: { + /** + * Collection + * @description The output images + */ + collection: components["schemas"]["ImageField"][]; + /** + * type + * @default image_collection_output + * @constant + * @enum {string} + */ + type: "image_collection_output"; + }; /** * Convert Image Mode * @description Converts an image to a different mode. 
@@ -5034,6 +5745,7 @@ export type components = { * type * @default img_conv * @constant + * @enum {string} */ type: "img_conv"; }; @@ -5102,6 +5814,7 @@ export type components = { * type * @default img_crop * @constant + * @enum {string} */ type: "img_crop"; }; @@ -5243,6 +5956,7 @@ export type components = { * type * @default img_hue_adjust * @constant + * @enum {string} */ type: "img_hue_adjust"; }; @@ -5299,6 +6013,7 @@ export type components = { * type * @default img_ilerp * @constant + * @enum {string} */ type: "img_ilerp"; }; @@ -5333,6 +6048,7 @@ export type components = { * type * @default image * @constant + * @enum {string} */ type: "image"; }; @@ -5389,6 +6105,7 @@ export type components = { * type * @default img_lerp * @constant + * @enum {string} */ type: "img_lerp"; }; @@ -5440,6 +6157,7 @@ export type components = { * type * @default image_mask_to_tensor * @constant + * @enum {string} */ type: "image_mask_to_tensor"; }; @@ -5489,6 +6207,7 @@ export type components = { * type * @default img_mul * @constant + * @enum {string} */ type: "img_mul"; }; @@ -5533,9 +6252,35 @@ export type components = { * type * @default img_nsfw * @constant + * @enum {string} */ type: "img_nsfw"; }; + /** + * ImageOutput + * @description Base class for nodes that output a single image + */ + ImageOutput: { + /** @description The output image */ + image: components["schemas"]["ImageField"]; + /** + * Width + * @description The width of the image in pixels + */ + width: number; + /** + * Height + * @description The height of the image in pixels + */ + height: number; + /** + * type + * @default image_output + * @constant + * @enum {string} + */ + type: "image_output"; + }; /** * Paste Image * @description Pastes an image into another image. @@ -5605,6 +6350,7 @@ export type components = { * type * @default img_paste * @constant + * @enum {string} */ type: "img_paste"; }; @@ -5698,6 +6444,7 @@ export type components = { * type * @default img_resize * @constant + * @enum {string} */ type: "img_resize"; }; @@ -5755,6 +6502,7 @@ export type components = { * type * @default img_scale * @constant + * @enum {string} */ type: "img_scale"; }; @@ -5806,6 +6554,7 @@ export type components = { * type * @default i2l * @constant + * @enum {string} */ type: "i2l"; }; @@ -5877,6 +6626,7 @@ export type components = { * type * @default img_watermark * @constant + * @enum {string} */ type: "img_watermark"; }; @@ -5952,6 +6702,7 @@ export type components = { * type * @default infill_rgba * @constant + * @enum {string} */ type: "infill_rgba"; }; @@ -6009,6 +6760,7 @@ export type components = { * type * @default infill_patchmatch * @constant + * @enum {string} */ type: "infill_patchmatch"; }; @@ -6065,9 +6817,61 @@ export type components = { * type * @default infill_tile * @constant + * @enum {string} */ type: "infill_tile"; }; + /** + * Input + * @description The type of input a field accepts. + * - `Input.Direct`: The field must have its value provided directly, when the invocation and field are instantiated. + * - `Input.Connection`: The field must have its value provided by a connection. + * - `Input.Any`: The field may have its value provided either directly or by a connection. + * @enum {string} + */ + Input: "connection" | "direct" | "any"; + /** + * InputFieldJSONSchemaExtra + * @description Extra attributes to be added to input fields and their OpenAPI schema. Used during graph execution, + * and by the workflow editor during schema parsing and UI rendering. 
+ */ + InputFieldJSONSchemaExtra: { + input: components["schemas"]["Input"]; + /** Orig Required */ + orig_required: boolean; + field_kind: components["schemas"]["FieldKind"]; + /** + * Default + * @default null + */ + default: unknown; + /** + * Orig Default + * @default null + */ + orig_default: unknown; + /** + * Ui Hidden + * @default false + */ + ui_hidden: boolean; + /** @default null */ + ui_type: components["schemas"]["UIType"] | null; + /** @default null */ + ui_component: components["schemas"]["UIComponent"] | null; + /** + * Ui Order + * @default null + */ + ui_order: number | null; + /** + * Ui Choice Labels + * @default null + */ + ui_choice_labels: { + [key: string]: string; + } | null; + }; /** * InstallStatus * @description State of an install job running in the background. @@ -6106,9 +6910,28 @@ export type components = { * type * @default integer_collection * @constant + * @enum {string} */ type: "integer_collection"; }; + /** + * IntegerCollectionOutput + * @description Base class for nodes that output a collection of integers + */ + IntegerCollectionOutput: { + /** + * Collection + * @description The int collection + */ + collection: number[]; + /** + * type + * @default integer_collection_output + * @constant + * @enum {string} + */ + type: "integer_collection_output"; + }; /** * Integer Primitive * @description An integer primitive value @@ -6141,6 +6964,7 @@ export type components = { * type * @default integer * @constant + * @enum {string} */ type: "integer"; }; @@ -6189,9 +7013,28 @@ export type components = { * type * @default integer_math * @constant + * @enum {string} */ type: "integer_math"; }; + /** + * IntegerOutput + * @description Base class for nodes that output a single integer + */ + IntegerOutput: { + /** + * Value + * @description The output integer + */ + value: number; + /** + * type + * @default integer_output + * @constant + * @enum {string} + */ + type: "integer_output"; + }; /** * Invert Tensor Mask * @description Inverts a tensor mask. 
@@ -6223,6 +7066,7 @@ export type components = { * type * @default invert_tensor_mask * @constant + * @enum {string} */ type: "invert_tensor_mask"; }; @@ -6254,6 +7098,364 @@ export type components = { */ max_size: number; }; + /** + * InvocationCompleteEvent + * @description Event model for invocation_complete + */ + InvocationCompleteEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Item Id + * @description The ID of the queue item + */ + item_id: number; + /** + * Batch Id + * @description The ID of the queue batch + */ + batch_id: string; + /** + * Session Id + * @description The ID of the session (aka graph execution state) + */ + session_id: string; + /** + * Invocation + * @description The ID of the invocation + */ + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | 
components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | 
components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + /** + * Invocation Source Id + * @description The ID of the prepared invocation's source node + */ + invocation_source_id: string; + /** + * Result + * @description The result of the invocation + */ + result: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"]; + }; + /** + * InvocationDenoiseProgressEvent + * @description Event model for invocation_denoise_progress + */ + InvocationDenoiseProgressEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + 
* Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Item Id + * @description The ID of the queue item + */ + item_id: number; + /** + * Batch Id + * @description The ID of the queue batch + */ + batch_id: string; + /** + * Session Id + * @description The ID of the session (aka graph execution state) + */ + session_id: string; + /** + * Invocation + * @description The ID of the invocation + */ + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | 
components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | 
components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + /** + * Invocation Source Id + * @description The ID of the prepared invocation's source node + */ + invocation_source_id: string; + /** @description The progress image sent at each step during processing */ + progress_image: components["schemas"]["ProgressImage"]; + /** + * Step + * @description The current step of the invocation + */ + step: number; + /** + * Total Steps + * @description The total number of steps in the invocation + */ + total_steps: number; + /** + * Order + * @description The order of the invocation in the session + */ + order: number; + /** + * Percentage + * @description The percentage of completion of the invocation + */ + percentage: number; + }; + /** + * InvocationErrorEvent + * @description Event model for invocation_error + */ + InvocationErrorEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Item Id + * @description The ID of the queue item + */ + item_id: number; + /** + * Batch Id + * @description The ID of the queue batch + */ + batch_id: string; + /** + * Session Id + * @description The ID of the session (aka graph execution state) + */ + session_id: string; + /** + * Invocation + * @description The ID of the invocation + */ + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | 
components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | 
components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + /** + * Invocation Source Id + * @description The ID of the prepared invocation's source node + */ + invocation_source_id: string; + /** + * Error Type + * @description The error type + */ + error_type: string; + /** + * Error Message + * @description The error message + */ + error_message: string; + /** + * Error Traceback + * @description The error traceback + */ + error_traceback: string; + /** + * User Id + * @description The ID of the user who created the invocation + * @default null + */ + user_id: string | null; + /** + * Project Id + * @description The ID of the user who created the invocation + * @default null + */ + project_id: string | null; + }; + InvocationOutputMap: { + pidi_image_processor: components["schemas"]["ImageOutput"]; + image_mask_to_tensor: components["schemas"]["MaskOutput"]; + vae_loader: components["schemas"]["VAEOutput"]; + collect: components["schemas"]["CollectInvocationOutput"]; + string_join_three: components["schemas"]["StringOutput"]; + content_shuffle_image_processor: components["schemas"]["ImageOutput"]; + random_range: components["schemas"]["IntegerCollectionOutput"]; + ip_adapter: components["schemas"]["IPAdapterOutput"]; + step_param_easing: components["schemas"]["FloatCollectionOutput"]; + core_metadata: components["schemas"]["MetadataOutput"]; + main_model_loader: components["schemas"]["ModelLoaderOutput"]; + leres_image_processor: components["schemas"]["ImageOutput"]; + calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; + color_correct: components["schemas"]["ImageOutput"]; + calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; + float_range: 
components["schemas"]["FloatCollectionOutput"]; + infill_cv2: components["schemas"]["ImageOutput"]; + img_channel_multiply: components["schemas"]["ImageOutput"]; + img_pad_crop: components["schemas"]["ImageOutput"]; + sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; + face_mask_detection: components["schemas"]["FaceMaskOutput"]; + infill_lama: components["schemas"]["ImageOutput"]; + mask_combine: components["schemas"]["ImageOutput"]; + sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; + segment_anything_processor: components["schemas"]["ImageOutput"]; + merge_metadata: components["schemas"]["MetadataOutput"]; + img_ilerp: components["schemas"]["ImageOutput"]; + heuristic_resize: components["schemas"]["ImageOutput"]; + cv_inpaint: components["schemas"]["ImageOutput"]; + div: components["schemas"]["IntegerOutput"]; + pair_tile_image: components["schemas"]["PairTileImageOutput"]; + float_math: components["schemas"]["FloatOutput"]; + img_channel_offset: components["schemas"]["ImageOutput"]; + canvas_paste_back: components["schemas"]["ImageOutput"]; + canny_image_processor: components["schemas"]["ImageOutput"]; + integer_collection: components["schemas"]["IntegerCollectionOutput"]; + freeu: components["schemas"]["UNetOutput"]; + lresize: components["schemas"]["LatentsOutput"]; + range_of_size: components["schemas"]["IntegerCollectionOutput"]; + depth_anything_image_processor: components["schemas"]["ImageOutput"]; + float_to_int: components["schemas"]["IntegerOutput"]; + rand_int: components["schemas"]["IntegerOutput"]; + lineart_anime_image_processor: components["schemas"]["ImageOutput"]; + string_split: components["schemas"]["String2Output"]; + img_nsfw: components["schemas"]["ImageOutput"]; + string: components["schemas"]["StringOutput"]; + mask_edge: components["schemas"]["ImageOutput"]; + i2l: components["schemas"]["LatentsOutput"]; + face_identifier: components["schemas"]["ImageOutput"]; + compel: components["schemas"]["ConditioningOutput"]; + esrgan: components["schemas"]["ImageOutput"]; + seamless: components["schemas"]["SeamlessModeOutput"]; + mask_from_id: components["schemas"]["ImageOutput"]; + invert_tensor_mask: components["schemas"]["MaskOutput"]; + rectangle_mask: components["schemas"]["MaskOutput"]; + conditioning: components["schemas"]["ConditioningOutput"]; + t2i_adapter: components["schemas"]["T2IAdapterOutput"]; + string_collection: components["schemas"]["StringCollectionOutput"]; + show_image: components["schemas"]["ImageOutput"]; + dw_openpose_image_processor: components["schemas"]["ImageOutput"]; + string_split_neg: components["schemas"]["StringPosNegOutput"]; + conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; + infill_patchmatch: components["schemas"]["ImageOutput"]; + img_conv: components["schemas"]["ImageOutput"]; + unsharp_mask: components["schemas"]["ImageOutput"]; + metadata_item: components["schemas"]["MetadataItemOutput"]; + image: components["schemas"]["ImageOutput"]; + image_collection: components["schemas"]["ImageCollectionOutput"]; + tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; + lblend: components["schemas"]["LatentsOutput"]; + float: components["schemas"]["FloatOutput"]; + boolean_collection: components["schemas"]["BooleanCollectionOutput"]; + color: components["schemas"]["ColorOutput"]; + midas_depth_image_processor: components["schemas"]["ImageOutput"]; + zoe_depth_image_processor: components["schemas"]["ImageOutput"]; + infill_rgba: 
components["schemas"]["ImageOutput"]; + mlsd_image_processor: components["schemas"]["ImageOutput"]; + merge_tiles_to_image: components["schemas"]["ImageOutput"]; + prompt_from_file: components["schemas"]["StringCollectionOutput"]; + boolean: components["schemas"]["BooleanOutput"]; + create_gradient_mask: components["schemas"]["GradientMaskOutput"]; + rand_float: components["schemas"]["FloatOutput"]; + img_mul: components["schemas"]["ImageOutput"]; + controlnet: components["schemas"]["ControlOutput"]; + latents_collection: components["schemas"]["LatentsCollectionOutput"]; + img_lerp: components["schemas"]["ImageOutput"]; + noise: components["schemas"]["NoiseOutput"]; + iterate: components["schemas"]["IterateInvocationOutput"]; + lineart_image_processor: components["schemas"]["ImageOutput"]; + tomask: components["schemas"]["ImageOutput"]; + integer: components["schemas"]["IntegerOutput"]; + create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; + clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; + denoise_latents: components["schemas"]["LatentsOutput"]; + string_join: components["schemas"]["StringOutput"]; + scheduler: components["schemas"]["SchedulerOutput"]; + model_identifier: components["schemas"]["ModelIdentifierOutput"]; + normalbae_image_processor: components["schemas"]["ImageOutput"]; + face_off: components["schemas"]["FaceOffOutput"]; + hed_image_processor: components["schemas"]["ImageOutput"]; + img_paste: components["schemas"]["ImageOutput"]; + img_chan: components["schemas"]["ImageOutput"]; + img_watermark: components["schemas"]["ImageOutput"]; + l2i: components["schemas"]["ImageOutput"]; + string_replace: components["schemas"]["StringOutput"]; + color_map_image_processor: components["schemas"]["ImageOutput"]; + tile_image_processor: components["schemas"]["ImageOutput"]; + crop_latents: components["schemas"]["LatentsOutput"]; + sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + add: components["schemas"]["IntegerOutput"]; + sub: components["schemas"]["IntegerOutput"]; + img_scale: components["schemas"]["ImageOutput"]; + range: components["schemas"]["IntegerCollectionOutput"]; + dynamic_prompt: components["schemas"]["StringCollectionOutput"]; + img_crop: components["schemas"]["ImageOutput"]; + infill_tile: components["schemas"]["ImageOutput"]; + img_resize: components["schemas"]["ImageOutput"]; + mediapipe_face_processor: components["schemas"]["ImageOutput"]; + sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; + lora_selector: components["schemas"]["LoRASelectorOutput"]; + img_hue_adjust: components["schemas"]["ImageOutput"]; + latents: components["schemas"]["LatentsOutput"]; + lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; + img_blur: components["schemas"]["ImageOutput"]; + ideal_size: components["schemas"]["IdealSizeOutput"]; + float_collection: components["schemas"]["FloatCollectionOutput"]; + blank_image: components["schemas"]["ImageOutput"]; + integer_math: components["schemas"]["IntegerOutput"]; + lora_loader: components["schemas"]["LoRALoaderOutput"]; + metadata: components["schemas"]["MetadataOutput"]; + sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + round_float: components["schemas"]["FloatOutput"]; + sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; + mul: components["schemas"]["IntegerOutput"]; + alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; + lscale: components["schemas"]["LatentsOutput"]; + save_image: 
components["schemas"]["ImageOutput"]; + calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; + }; + /** + * InvocationStartedEvent + * @description Event model for invocation_started + */ + InvocationStartedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Item Id + * @description The ID of the queue item + */ + item_id: number; + /** + * Batch Id + * @description The ID of the queue batch + */ + batch_id: string; + /** + * Session Id + * @description The ID of the session (aka graph execution state) + */ + session_id: string; + /** + * Invocation + * @description The ID of the invocation + */ + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | 
components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | 
components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + /** + * Invocation Source Id + * @description The ID of the prepared invocation's source node + */ + invocation_source_id: string; + }; /** * IterateInvocation * @description Iterates over a list of items @@ -6292,9 +7494,38 @@ export type components = { * type * @default iterate * @constant + * @enum {string} */ type: "iterate"; }; + /** + * IterateInvocationOutput + * @description Used to connect iteration outputs. Will be expanded to a specific output. + */ + IterateInvocationOutput: { + /** + * Collection Item + * @description The item being iterated over + */ + item: unknown; + /** + * Index + * @description The index of the item + */ + index: number; + /** + * Total + * @description The total number of items + */ + total: number; + /** + * type + * @default iterate_output + * @constant + * @enum {string} + */ + type: "iterate_output"; + }; JsonValue: unknown; /** * LaMa Infill @@ -6337,6 +7568,7 @@ export type components = { * type * @default infill_lama * @constant + * @enum {string} */ type: "infill_lama"; }; @@ -6372,9 +7604,28 @@ export type components = { * type * @default latents_collection * @constant + * @enum {string} */ type: "latents_collection"; }; + /** + * LatentsCollectionOutput + * @description Base class for nodes that output a collection of latents tensors + */ + LatentsCollectionOutput: { + /** + * Collection + * @description Latents tensor + */ + collection: components["schemas"]["LatentsField"][]; + /** + * type + * @default latents_collection_output + * @constant + * @enum {string} + */ + type: "latents_collection_output"; + }; /** * LatentsField * @description A latents tensor primitive field @@ -6388,6 +7639,7 @@ export type components = { /** * Seed * @description Seed used to generate this latents + * @default null */ seed?: number | null; }; @@ -6422,9 +7674,35 @@ export type components = { * type * @default latents * @constant + * @enum {string} */ type: "latents"; }; + /** + * LatentsOutput + * @description Base class for nodes that output a single latents tensor + */ + LatentsOutput: { + /** @description Latents tensor */ + latents: components["schemas"]["LatentsField"]; + /** + * Width + * @description Width of output (px) + */ + width: number; + /** + * Height + * @description Height of output (px) + */ + height: number; + /** + * type + * @default latents_output + * @constant + * @enum {string} + */ + type: "latents_output"; + }; /** * Latents to Image * @description Generates an image from latents. 
@@ -6483,6 +7761,7 @@ export type components = { * type * @default l2i * @constant + * @enum {string} */ type: "l2i"; }; @@ -6557,6 +7836,7 @@ export type components = { * type * @default leres_image_processor * @constant + * @enum {string} */ type: "leres_image_processor"; }; @@ -6613,6 +7893,7 @@ export type components = { * type * @default lineart_anime_image_processor * @constant + * @enum {string} */ type: "lineart_anime_image_processor"; }; @@ -6675,6 +7956,7 @@ export type components = { * type * @default lineart_image_processor * @constant + * @enum {string} */ type: "lineart_image_processor"; }; @@ -6722,6 +8004,7 @@ export type components = { * type * @default lora_collection_loader * @constant + * @enum {string} */ type: "lora_collection_loader"; }; @@ -6778,6 +8061,7 @@ export type components = { * Type * @default lora * @constant + * @enum {string} */ type: "lora"; /** @@ -6789,6 +8073,7 @@ export type components = { * Format * @default diffusers * @constant + * @enum {string} */ format: "diffusers"; }; @@ -6852,9 +8137,35 @@ export type components = { * type * @default lora_loader * @constant + * @enum {string} */ type: "lora_loader"; }; + /** + * LoRALoaderOutput + * @description Model loader output + */ + LoRALoaderOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + * @default null + */ + unet: components["schemas"]["UNetField"] | null; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip: components["schemas"]["CLIPField"] | null; + /** + * type + * @default lora_loader_output + * @constant + * @enum {string} + */ + type: "lora_loader_output"; + }; /** * LoRALyCORISConfig * @description Model config for LoRA/Lycoris models. @@ -6908,6 +8219,7 @@ export type components = { * Type * @default lora * @constant + * @enum {string} */ type: "lora"; /** @@ -6919,6 +8231,7 @@ export type components = { * Format * @default lycoris * @constant + * @enum {string} */ format: "lycoris"; }; @@ -6973,9 +8286,28 @@ export type components = { * type * @default lora_selector * @constant + * @enum {string} */ type: "lora_selector"; }; + /** + * LoRASelectorOutput + * @description Model loader output + */ + LoRASelectorOutput: { + /** + * LoRA + * @description LoRA model and weight + */ + lora: components["schemas"]["LoRAField"]; + /** + * type + * @default lora_selector_output + * @constant + * @enum {string} + */ + type: "lora_selector_output"; + }; /** * LocalModelSource * @description A local file or directory path. 
@@ -6992,6 +8324,7 @@ export type components = { * Type * @default local * @constant + * @enum {string} */ type?: "local"; }; @@ -7053,6 +8386,7 @@ export type components = { * Type * @default main * @constant + * @enum {string} */ type: "main"; /** @@ -7068,6 +8402,7 @@ export type components = { * Format * @default checkpoint * @constant + * @enum {string} */ format: "checkpoint"; /** @@ -7141,6 +8476,7 @@ export type components = { * Type * @default main * @constant + * @enum {string} */ type: "main"; /** @@ -7156,6 +8492,7 @@ export type components = { * Format * @default diffusers * @constant + * @enum {string} */ format: "diffusers"; /** @default */ @@ -7235,6 +8572,7 @@ export type components = { * type * @default main_model_loader * @constant + * @enum {string} */ type: "main_model_loader"; }; @@ -7284,6 +8622,7 @@ export type components = { * type * @default mask_combine * @constant + * @enum {string} */ type: "mask_combine"; }; @@ -7352,6 +8691,7 @@ export type components = { * type * @default mask_edge * @constant + * @enum {string} */ type: "mask_edge"; }; @@ -7402,6 +8742,7 @@ export type components = { * type * @default tomask * @constant + * @enum {string} */ type: "tomask"; }; @@ -7463,9 +8804,35 @@ export type components = { * type * @default mask_from_id * @constant + * @enum {string} */ type: "mask_from_id"; }; + /** + * MaskOutput + * @description A torch mask tensor. + */ + MaskOutput: { + /** @description The mask. */ + mask: components["schemas"]["TensorField"]; + /** + * Width + * @description The width of the mask in pixels. + */ + width: number; + /** + * Height + * @description The height of the mask in pixels. + */ + height: number; + /** + * type + * @default mask_output + * @constant + * @enum {string} + */ + type: "mask_output"; + }; /** * Mediapipe Face Processor * @description Applies mediapipe face processing to image @@ -7531,6 +8898,7 @@ export type components = { * type * @default mediapipe_face_processor * @constant + * @enum {string} */ type: "mediapipe_face_processor"; }; @@ -7566,6 +8934,7 @@ export type components = { * type * @default merge_metadata * @constant + * @enum {string} */ type: "merge_metadata"; }; @@ -7624,6 +8993,7 @@ export type components = { * type * @default merge_tiles_to_image * @constant + * @enum {string} */ type: "merge_tiles_to_image"; }; @@ -7665,6 +9035,7 @@ export type components = { * type * @default metadata * @constant + * @enum {string} */ type: "metadata"; }; @@ -7719,9 +9090,37 @@ export type components = { * type * @default metadata_item * @constant + * @enum {string} */ type: "metadata_item"; }; + /** + * MetadataItemOutput + * @description Metadata Item Output + */ + MetadataItemOutput: { + /** @description Metadata Item */ + item: components["schemas"]["MetadataItemField"]; + /** + * type + * @default metadata_item_output + * @constant + * @enum {string} + */ + type: "metadata_item_output"; + }; + /** MetadataOutput */ + MetadataOutput: { + /** @description Metadata Dict */ + metadata: components["schemas"]["MetadataField"]; + /** + * type + * @default metadata_output + * @constant + * @enum {string} + */ + type: "metadata_output"; + }; /** * Midas Depth Processor * @description Applies Midas depth processing to image @@ -7787,6 +9186,7 @@ export type components = { * type * @default midas_depth_image_processor * @constant + * @enum {string} */ type: "midas_depth_image_processor"; }; @@ -7855,6 +9255,7 @@ export type components = { * type * @default mlsd_image_processor * @constant + * @enum {string} */ type: 
"mlsd_image_processor"; }; @@ -7885,7 +9286,10 @@ export type components = { base: components["schemas"]["BaseModelType"]; /** @description The model's type */ type: components["schemas"]["ModelType"]; - /** @description The submodel to load, if this is a main model */ + /** + * @description The submodel to load, if this is a main model + * @default null + */ submodel_type?: components["schemas"]["SubModelType"] | null; }; /** @@ -7922,9 +9326,175 @@ export type components = { * type * @default model_identifier * @constant + * @enum {string} */ type: "model_identifier"; }; + /** + * ModelIdentifierOutput + * @description Model identifier output + */ + ModelIdentifierOutput: { + /** + * Model + * @description Model identifier + */ + model: components["schemas"]["ModelIdentifierField"]; + /** + * type + * @default model_identifier_output + * @constant + * @enum {string} + */ + type: "model_identifier_output"; + }; + /** + * ModelInstallCancelledEvent + * @description Event model for model_install_cancelled + */ + ModelInstallCancelledEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Id + * @description The ID of the install job + */ + id: number; + /** + * Source + * @description Source of the model; local path, repo_id or url + */ + source: string; + }; + /** + * ModelInstallCompleteEvent + * @description Event model for model_install_complete + */ + ModelInstallCompleteEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Id + * @description The ID of the install job + */ + id: number; + /** + * Source + * @description Source of the model; local path, repo_id or url + */ + source: string; + /** + * Key + * @description Model config record key + */ + key: string; + /** + * Total Bytes + * @description Size of the model (may be None for installation of a local path) + */ + total_bytes: number | null; + }; + /** + * ModelInstallDownloadProgressEvent + * @description Event model for model_install_download_progress + */ + ModelInstallDownloadProgressEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Id + * @description The ID of the install job + */ + id: number; + /** + * Source + * @description Source of the model; local path, repo_id or url + */ + source: string; + /** + * Local Path + * @description Where model is downloading to + */ + local_path: string; + /** + * Bytes + * @description Number of bytes downloaded so far + */ + bytes: number; + /** + * Total Bytes + * @description Total size of download, including all files + */ + total_bytes: number; + /** + * Parts + * @description Progress of downloading URLs that comprise the model, if any + */ + parts: ({ + [key: string]: number | string; + })[]; + }; + /** + * ModelInstallDownloadsCompleteEvent + * @description Emitted once when an install job becomes active. 
+ */ + ModelInstallDownloadsCompleteEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Id + * @description The ID of the install job + */ + id: number; + /** + * Source + * @description Source of the model; local path, repo_id or url + */ + source: string; + }; + /** + * ModelInstallErrorEvent + * @description Event model for model_install_error + */ + ModelInstallErrorEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Id + * @description The ID of the install job + */ + id: number; + /** + * Source + * @description Source of the model; local path, repo_id or url + */ + source: string; + /** + * Error Type + * @description The name of the exception + */ + error_type: string; + /** + * Error + * @description A text description of the exception + */ + error: string; + }; /** * ModelInstallJob * @description Object that tracks the current status of an install request. @@ -8005,6 +9575,97 @@ export type components = { */ error_traceback?: string | null; }; + /** + * ModelInstallStartedEvent + * @description Event model for model_install_started + */ + ModelInstallStartedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Id + * @description The ID of the install job + */ + id: number; + /** + * Source + * @description Source of the model; local path, repo_id or url + */ + source: string; + }; + /** + * ModelLoadCompleteEvent + * @description Event model for model_load_complete + */ + ModelLoadCompleteEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Config + * @description The model's config + */ + config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"]; + /** + * @description The submodel type, if any + * @default null + */ + submodel_type: components["schemas"]["SubModelType"] | null; + }; + /** + * ModelLoadStartedEvent + * @description Event model for model_load_started + */ + ModelLoadStartedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Config + * @description The model's config + */ + config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | 
components["schemas"]["CLIPVisionDiffusersConfig"]; + /** + * @description The submodel type, if any + * @default null + */ + submodel_type: components["schemas"]["SubModelType"] | null; + }; + /** + * ModelLoaderOutput + * @description Model loader output + */ + ModelLoaderOutput: { + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * type + * @default model_loader_output + * @constant + * @enum {string} + */ + type: "model_loader_output"; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip: components["schemas"]["CLIPField"]; + /** + * UNet + * @description UNet (scheduler, LoRAs) + */ + unet: components["schemas"]["UNetField"]; + }; /** * ModelRecordChanges * @description A set of changes to apply to a model. @@ -8122,6 +9783,7 @@ export type components = { * type * @default mul * @constant + * @enum {string} */ type: "mul"; }; @@ -8193,9 +9855,35 @@ export type components = { * type * @default noise * @constant + * @enum {string} */ type: "noise"; }; + /** + * NoiseOutput + * @description Invocation noise output + */ + NoiseOutput: { + /** @description Noise tensor */ + noise: components["schemas"]["LatentsField"]; + /** + * Width + * @description Width of output (px) + */ + width: number; + /** + * Height + * @description Height of output (px) + */ + height: number; + /** + * type + * @default noise_output + * @constant + * @enum {string} + */ + type: "noise_output"; + }; /** * Normal BAE Processor * @description Applies NormalBae processing to image @@ -8249,6 +9937,7 @@ export type components = { * type * @default normalbae_image_processor * @constant + * @enum {string} */ type: "normalbae_image_processor"; }; @@ -8298,6 +9987,19 @@ export type components = { */ items: components["schemas"]["ImageDTO"][]; }; + /** + * OutputFieldJSONSchemaExtra + * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor + * during schema parsing and UI rendering. + */ + OutputFieldJSONSchemaExtra: { + field_kind: components["schemas"]["FieldKind"]; + /** Ui Hidden */ + ui_hidden: boolean; + ui_type: components["schemas"]["UIType"] | null; + /** Ui Order */ + ui_order: number | null; + }; /** PaginatedResults[WorkflowRecordListItemDTO] */ PaginatedResults_WorkflowRecordListItemDTO_: { /** @@ -8362,9 +10064,22 @@ export type components = { * type * @default pair_tile_image * @constant + * @enum {string} */ type: "pair_tile_image"; }; + /** PairTileImageOutput */ + PairTileImageOutput: { + /** @description A tile description with its corresponding image. 
*/ + tile_with_image: components["schemas"]["TileWithImage"]; + /** + * type + * @default pair_tile_image_output + * @constant + * @enum {string} + */ + type: "pair_tile_image_output"; + }; /** * PIDI Processor * @description Applies PIDI processing to image @@ -8430,9 +10145,31 @@ export type components = { * type * @default pidi_image_processor * @constant + * @enum {string} */ type: "pidi_image_processor"; }; + /** + * ProgressImage + * @description The progress image sent intermittently during processing + */ + ProgressImage: { + /** + * Width + * @description The effective width of the image in pixels + */ + width: number; + /** + * Height + * @description The effective height of the image in pixels + */ + height: number; + /** + * Dataurl + * @description The image data as a b64 data URL + */ + dataURL: string; + }; /** * Prompts from File * @description Loads prompts from a text file @@ -8489,6 +10226,7 @@ export type components = { * type * @default prompt_from_file * @constant + * @enum {string} */ type: "prompt_from_file"; }; @@ -8503,6 +10241,105 @@ export type components = { */ deleted: number; }; + /** + * QueueClearedEvent + * @description Event model for queue_cleared + */ + QueueClearedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + }; + /** + * QueueItemStatusChangedEvent + * @description Event model for queue_item_status_changed + */ + QueueItemStatusChangedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Item Id + * @description The ID of the queue item + */ + item_id: number; + /** + * Batch Id + * @description The ID of the queue batch + */ + batch_id: string; + /** + * Status + * @description The new status of the queue item + * @enum {string} + */ + status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + /** + * Error Type + * @description The error type, if any + * @default null + */ + error_type: string | null; + /** + * Error Message + * @description The error message, if any + * @default null + */ + error_message: string | null; + /** + * Error Traceback + * @description The error traceback, if any + * @default null + */ + error_traceback: string | null; + /** + * Created At + * @description The timestamp when the queue item was created + * @default null + */ + created_at: string | null; + /** + * Updated At + * @description The timestamp when the queue item was last updated + * @default null + */ + updated_at: string | null; + /** + * Started At + * @description The timestamp when the queue item was started + * @default null + */ + started_at: string | null; + /** + * Completed At + * @description The timestamp when the queue item was completed + * @default null + */ + completed_at: string | null; + /** @description The status of the batch */ + batch_status: components["schemas"]["BatchStatus"]; + /** @description The status of the queue */ + queue_status: components["schemas"]["SessionQueueStatus"]; + /** + * Session Id + * @description The ID of the session (aka graph execution state) + */ + session_id: string; + }; /** * Random Float * @description Outputs a single random float @@ -8547,6 +10384,7 @@ export type components = { * type * @default rand_float * @constant + * @enum {string} */ type: "rand_float"; }; @@ -8588,6 +10426,7 @@ export type components = { * type * 
@default rand_int * @constant + * @enum {string} */ type: "rand_int"; }; @@ -8641,6 +10480,7 @@ export type components = { * type * @default random_range * @constant + * @enum {string} */ type: "random_range"; }; @@ -8688,6 +10528,7 @@ export type components = { * type * @default range * @constant + * @enum {string} */ type: "range"; }; @@ -8735,6 +10576,7 @@ export type components = { * type * @default range_of_size * @constant + * @enum {string} */ type: "range_of_size"; }; @@ -8805,6 +10647,7 @@ export type components = { * type * @default rectangle_mask * @constant + * @enum {string} */ type: "rectangle_mask"; }; @@ -8900,6 +10743,7 @@ export type components = { * type * @default lresize * @constant + * @enum {string} */ type: "lresize"; }; @@ -8951,6 +10795,7 @@ export type components = { * type * @default round_float * @constant + * @enum {string} */ type: "round_float"; }; @@ -9039,6 +10884,7 @@ export type components = { * type * @default sdxl_compel_prompt * @constant + * @enum {string} */ type: "sdxl_compel_prompt"; }; @@ -9092,6 +10938,7 @@ export type components = { * type * @default sdxl_lora_collection_loader * @constant + * @enum {string} */ type: "sdxl_lora_collection_loader"; }; @@ -9151,9 +10998,41 @@ export type components = { * type * @default sdxl_lora_loader * @constant + * @enum {string} */ type: "sdxl_lora_loader"; }; + /** + * SDXLLoRALoaderOutput + * @description SDXL LoRA Loader Output + */ + SDXLLoRALoaderOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + * @default null + */ + unet: components["schemas"]["UNetField"] | null; + /** + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip: components["schemas"]["CLIPField"] | null; + /** + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip2: components["schemas"]["CLIPField"] | null; + /** + * type + * @default sdxl_lora_loader_output + * @constant + * @enum {string} + */ + type: "sdxl_lora_loader_output"; + }; /** * SDXL Main Model * @description Loads an sdxl base model, outputting its submodels. @@ -9185,9 +11064,43 @@ export type components = { * type * @default sdxl_model_loader * @constant + * @enum {string} */ type: "sdxl_model_loader"; }; + /** + * SDXLModelLoaderOutput + * @description SDXL base model loader output + */ + SDXLModelLoaderOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + */ + unet: components["schemas"]["UNetField"]; + /** + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip: components["schemas"]["CLIPField"]; + /** + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip2: components["schemas"]["CLIPField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * type + * @default sdxl_model_loader_output + * @constant + * @enum {string} + */ + type: "sdxl_model_loader_output"; + }; /** * SDXL Refiner Prompt * @description Parse prompt using compel package to conditioning. 
@@ -9251,6 +11164,7 @@ export type components = { * type * @default sdxl_refiner_compel_prompt * @constant + * @enum {string} */ type: "sdxl_refiner_compel_prompt"; }; @@ -9285,9 +11199,38 @@ export type components = { * type * @default sdxl_refiner_model_loader * @constant + * @enum {string} */ type: "sdxl_refiner_model_loader"; }; + /** + * SDXLRefinerModelLoaderOutput + * @description SDXL refiner model loader output + */ + SDXLRefinerModelLoaderOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + */ + unet: components["schemas"]["UNetField"]; + /** + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip2: components["schemas"]["CLIPField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * type + * @default sdxl_refiner_model_loader_output + * @constant + * @enum {string} + */ + type: "sdxl_refiner_model_loader_output"; + }; /** * SQLiteDirection * @enum {string} @@ -9334,6 +11277,7 @@ export type components = { * type * @default save_image * @constant + * @enum {string} */ type: "save_image"; }; @@ -9387,6 +11331,7 @@ export type components = { * type * @default lscale * @constant + * @enum {string} */ type: "lscale"; }; @@ -9423,9 +11368,26 @@ export type components = { * type * @default scheduler * @constant + * @enum {string} */ type: "scheduler"; }; + /** SchedulerOutput */ + SchedulerOutput: { + /** + * Scheduler + * @description Scheduler to use during inference + * @enum {string} + */ + scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; + /** + * type + * @default scheduler_output + * @constant + * @enum {string} + */ + type: "scheduler_output"; + }; /** * SchedulerPredictionType * @description Scheduler prediction type. 
@@ -9482,9 +11444,35 @@ export type components = { * type * @default seamless * @constant + * @enum {string} */ type: "seamless"; }; + /** + * SeamlessModeOutput + * @description Modified Seamless Model output + */ + SeamlessModeOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + * @default null + */ + unet: components["schemas"]["UNetField"] | null; + /** + * VAE + * @description VAE + * @default null + */ + vae: components["schemas"]["VAEField"] | null; + /** + * type + * @default seamless_output + * @constant + * @enum {string} + */ + type: "seamless_output"; + }; /** * Segment Anything Processor * @description Applies segment anything processing to image @@ -9538,6 +11526,7 @@ export type components = { * type * @default segment_anything_processor * @constant + * @enum {string} */ type: "segment_anything_processor"; }; @@ -9802,6 +11791,7 @@ export type components = { * type * @default show_image * @constant + * @enum {string} */ type: "show_image"; }; @@ -9926,9 +11916,33 @@ export type components = { * type * @default step_param_easing * @constant + * @enum {string} */ type: "step_param_easing"; }; + /** + * String2Output + * @description Base class for invocations that output two strings + */ + String2Output: { + /** + * String 1 + * @description string 1 + */ + string_1: string; + /** + * String 2 + * @description string 2 + */ + string_2: string; + /** + * type + * @default string_2_output + * @constant + * @enum {string} + */ + type: "string_2_output"; + }; /** * String Collection Primitive * @description A collection of string primitive values @@ -9961,9 +11975,28 @@ export type components = { * type * @default string_collection * @constant + * @enum {string} */ type: "string_collection"; }; + /** + * StringCollectionOutput + * @description Base class for nodes that output a collection of strings + */ + StringCollectionOutput: { + /** + * Collection + * @description The output strings + */ + collection: string[]; + /** + * type + * @default string_collection_output + * @constant + * @enum {string} + */ + type: "string_collection_output"; + }; /** * String Primitive * @description A string primitive value @@ -9996,6 +12029,7 @@ export type components = { * type * @default string * @constant + * @enum {string} */ type: "string"; }; @@ -10037,6 +12071,7 @@ export type components = { * type * @default string_join * @constant + * @enum {string} */ type: "string_join"; }; @@ -10084,9 +12119,51 @@ export type components = { * type * @default string_join_three * @constant + * @enum {string} */ type: "string_join_three"; }; + /** + * StringOutput + * @description Base class for nodes that output a single string + */ + StringOutput: { + /** + * Value + * @description The output string + */ + value: string; + /** + * type + * @default string_output + * @constant + * @enum {string} + */ + type: "string_output"; + }; + /** + * StringPosNegOutput + * @description Base class for invocations that output a positive and negative string + */ + StringPosNegOutput: { + /** + * Positive String + * @description Positive string + */ + positive_string: string; + /** + * Negative String + * @description Negative string + */ + negative_string: string; + /** + * type + * @default string_pos_neg_output + * @constant + * @enum {string} + */ + type: "string_pos_neg_output"; + }; /** * String Replace * @description Replaces the search string with the replace string @@ -10137,6 +12214,7 @@ export type components = { * type * @default string_replace * @constant + * @enum {string} */ type: 
"string_replace"; }; @@ -10178,6 +12256,7 @@ export type components = { * type * @default string_split * @constant + * @enum {string} */ type: "string_split"; }; @@ -10213,6 +12292,7 @@ export type components = { * type * @default string_split_neg * @constant + * @enum {string} */ type: "string_split_neg"; }; @@ -10260,6 +12340,7 @@ export type components = { * type * @default sub * @constant + * @enum {string} */ type: "sub"; }; @@ -10317,6 +12398,7 @@ export type components = { /** * Format * @constant + * @enum {string} */ format: "diffusers"; /** @default */ @@ -10325,6 +12407,7 @@ export type components = { * Type * @default t2i_adapter * @constant + * @enum {string} */ type: "t2i_adapter"; }; @@ -10422,6 +12505,7 @@ export type components = { * type * @default t2i_adapter * @constant + * @enum {string} */ type: "t2i_adapter"; }; @@ -10429,7 +12513,10 @@ export type components = { T2IAdapterMetadataField: { /** @description The control image. */ image: components["schemas"]["ImageField"]; - /** @description The control image, after processing. */ + /** + * @description The control image, after processing. + * @default null + */ processed_image?: components["schemas"]["ImageField"] | null; /** @description The T2I-Adapter model to use. */ t2i_adapter_model: components["schemas"]["ModelIdentifierField"]; @@ -10459,6 +12546,21 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; + /** T2IAdapterOutput */ + T2IAdapterOutput: { + /** + * T2I Adapter + * @description T2I-Adapter(s) to apply + */ + t2i_adapter: components["schemas"]["T2IAdapterField"]; + /** + * type + * @default t2i_adapter_output + * @constant + * @enum {string} + */ + type: "t2i_adapter_output"; + }; /** TBLR */ TBLR: { /** Top */ @@ -10534,12 +12636,14 @@ export type components = { * Type * @default embedding * @constant + * @enum {string} */ type: "embedding"; /** * Format * @default embedding_file * @constant + * @enum {string} */ format: "embedding_file"; }; @@ -10596,12 +12700,14 @@ export type components = { * Type * @default embedding * @constant + * @enum {string} */ type: "embedding"; /** * Format * @default embedding_folder * @constant + * @enum {string} */ format: "embedding_folder"; }; @@ -10659,6 +12765,7 @@ export type components = { * type * @default tile_image_processor * @constant + * @enum {string} */ type: "tile_image_processor"; }; @@ -10693,14 +12800,150 @@ export type components = { * type * @default tile_to_properties * @constant + * @enum {string} */ type: "tile_to_properties"; }; + /** TileToPropertiesOutput */ + TileToPropertiesOutput: { + /** + * Coords Left + * @description Left coordinate of the tile relative to its parent image. + */ + coords_left: number; + /** + * Coords Right + * @description Right coordinate of the tile relative to its parent image. + */ + coords_right: number; + /** + * Coords Top + * @description Top coordinate of the tile relative to its parent image. + */ + coords_top: number; + /** + * Coords Bottom + * @description Bottom coordinate of the tile relative to its parent image. + */ + coords_bottom: number; + /** + * Width + * @description The width of the tile. Equal to coords_right - coords_left. + */ + width: number; + /** + * Height + * @description The height of the tile. Equal to coords_bottom - coords_top. + */ + height: number; + /** + * Overlap Top + * @description Overlap between this tile and its top neighbor. 
+ */ + overlap_top: number; + /** + * Overlap Bottom + * @description Overlap between this tile and its bottom neighbor. + */ + overlap_bottom: number; + /** + * Overlap Left + * @description Overlap between this tile and its left neighbor. + */ + overlap_left: number; + /** + * Overlap Right + * @description Overlap between this tile and its right neighbor. + */ + overlap_right: number; + /** + * type + * @default tile_to_properties_output + * @constant + * @enum {string} + */ + type: "tile_to_properties_output"; + }; /** TileWithImage */ TileWithImage: { tile: components["schemas"]["Tile"]; image: components["schemas"]["ImageField"]; }; + /** + * UIComponent + * @description The type of UI component to use for a field, used to override the default components, which are + * inferred from the field type. + * @enum {string} + */ + UIComponent: "none" | "textarea" | "slider"; + /** + * UIConfigBase + * @description Provides additional node configuration to the UI. + * This is used internally by the @invocation decorator logic. Do not use this directly. + */ + UIConfigBase: { + /** + * Tags + * @description The node's tags + */ + tags: string[] | null; + /** + * Title + * @description The node's display name + * @default null + */ + title: string | null; + /** + * Category + * @description The node's category + * @default null + */ + category: string | null; + /** + * Version + * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". + */ + version: string; + /** + * Node Pack + * @description Whether or not this is a custom node + * @default null + */ + node_pack: string | null; + /** + * @description The node's classification + * @default stable + */ + classification: components["schemas"]["Classification"]; + }; + /** + * UIType + * @description Type hints for the UI for situations in which the field type is not enough to infer the correct UI type. + * + * - Model Fields + * The most common node-author-facing use will be for model fields. Internally, there is no difference + * between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the + * base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that + * the field is an SDXL main model field. + * + * - Any Field + * We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to + * indicate that the field accepts any type. Use with caution. This cannot be used on outputs. + * + * - Scheduler Field + * Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field. + * + * - Internal Fields + * Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate + * handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These + * should not be used by node authors. + * + * - DEPRECATED Fields + * These types are deprecated and should not be used by node authors. A warning will be logged if one is + * used, and the type will be ignored. They are included here for backwards compatibility. 
+ * @enum {string} + */ + UIType: "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VAEModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "T2IAdapterModelField" | "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_IsIntermediate" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict"; /** UNetField */ UNetField: { /** @description Info to load unet submodel */ @@ -10717,9 +12960,30 @@ export type components = { * @description Axes("x" and "y") to which apply seamless */ seamless_axes?: string[]; - /** @description FreeU configuration */ + /** + * @description FreeU configuration + * @default null + */ freeu_config?: components["schemas"]["FreeUConfig"] | null; }; + /** + * UNetOutput + * @description Base class for invocations that output a UNet field. + */ + UNetOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + */ + unet: components["schemas"]["UNetField"]; + /** + * type + * @default unet_output + * @constant + * @enum {string} + */ + type: "unet_output"; + }; /** * URLModelSource * @description A generic URL point to a checkpoint file. 
@@ -10736,6 +13000,7 @@ export type components = { * Type * @default url * @constant + * @enum {string} */ type?: "url"; }; @@ -10792,6 +13057,7 @@ export type components = { * type * @default unsharp_mask * @constant + * @enum {string} */ type: "unsharp_mask"; }; @@ -10861,6 +13127,7 @@ export type components = { * Format * @default checkpoint * @constant + * @enum {string} */ format: "checkpoint"; /** @@ -10877,6 +13144,7 @@ export type components = { * Type * @default vae * @constant + * @enum {string} */ type: "vae"; }; @@ -10933,12 +13201,14 @@ export type components = { * Type * @default vae * @constant + * @enum {string} */ type: "vae"; /** * Format * @default diffusers * @constant + * @enum {string} */ format: "diffusers"; }; @@ -10984,9 +13254,28 @@ export type components = { * type * @default vae_loader * @constant + * @enum {string} */ type: "vae_loader"; }; + /** + * VAEOutput + * @description Base class for invocations that output a VAE field + */ + VAEOutput: { + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * type + * @default vae_output + * @constant + * @enum {string} + */ + type: "vae_output"; + }; /** ValidationError */ ValidationError: { /** Location */ @@ -11262,2047 +13551,10 @@ export type components = { * type * @default zoe_depth_image_processor * @constant + * @enum {string} */ type: "zoe_depth_image_processor"; }; - /** - * FloatCollectionOutput - * @description Base class for nodes that output a collection of floats - */ - FloatCollectionOutput: { - /** - * Collection - * @description The float collection - */ - collection: number[]; - /** - * type - * @default float_collection_output - * @constant - */ - type: "float_collection_output"; - }; - /** - * ConditioningOutput - * @description Base class for nodes that output a single conditioning tensor - */ - ConditioningOutput: { - /** @description Conditioning tensor */ - conditioning: components["schemas"]["ConditioningField"]; - /** - * type - * @default conditioning_output - * @constant - */ - type: "conditioning_output"; - }; - /** - * IntegerOutput - * @description Base class for nodes that output a single integer - */ - IntegerOutput: { - /** - * Value - * @description The output integer - */ - value: number; - /** - * type - * @default integer_output - * @constant - */ - type: "integer_output"; - }; - /** - * DenoiseMaskOutput - * @description Base class for nodes that output a single image - */ - DenoiseMaskOutput: { - /** @description Mask for denoise model run */ - denoise_mask: components["schemas"]["DenoiseMaskField"]; - /** - * type - * @default denoise_mask_output - * @constant - */ - type: "denoise_mask_output"; - }; - /** SchedulerOutput */ - SchedulerOutput: { - /** - * Scheduler - * @description Scheduler to use during inference - * @enum {string} - */ - scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; - /** - * type - * @default scheduler_output - * @constant - */ - type: "scheduler_output"; - }; - /** - * IntegerCollectionOutput - * @description Base class for nodes that output a collection of integers - */ - IntegerCollectionOutput: { - /** - * Collection - * @description The int collection - */ - collection: number[]; - /** - * type - * @default integer_collection_output - * @constant - */ - type: 
"integer_collection_output"; - }; - /** PairTileImageOutput */ - PairTileImageOutput: { - /** @description A tile description with its corresponding image. */ - tile_with_image: components["schemas"]["TileWithImage"]; - /** - * type - * @default pair_tile_image_output - * @constant - */ - type: "pair_tile_image_output"; - }; - /** - * GradientMaskOutput - * @description Outputs a denoise mask and an image representing the total gradient of the mask. - */ - GradientMaskOutput: { - /** @description Mask for denoise model run */ - denoise_mask: components["schemas"]["DenoiseMaskField"]; - /** @description Image representing the total gradient area of the mask. For paste-back purposes. */ - expanded_mask_area: components["schemas"]["ImageField"]; - /** - * type - * @default gradient_mask_output - * @constant - */ - type: "gradient_mask_output"; - }; - /** CollectInvocationOutput */ - CollectInvocationOutput: { - /** - * Collection - * @description The collection of input items - */ - collection: unknown[]; - /** - * type - * @default collect_output - * @constant - */ - type: "collect_output"; - }; - /** - * CLIPSkipInvocationOutput - * @description CLIP skip node output - */ - CLIPSkipInvocationOutput: { - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip: components["schemas"]["CLIPField"] | null; - /** - * type - * @default clip_skip_output - * @constant - */ - type: "clip_skip_output"; - }; - /** - * ConditioningCollectionOutput - * @description Base class for nodes that output a collection of conditioning tensors - */ - ConditioningCollectionOutput: { - /** - * Collection - * @description The output conditioning tensors - */ - collection: components["schemas"]["ConditioningField"][]; - /** - * type - * @default conditioning_collection_output - * @constant - */ - type: "conditioning_collection_output"; - }; - /** - * LoRALoaderOutput - * @description Model loader output - */ - LoRALoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet: components["schemas"]["UNetField"] | null; - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip: components["schemas"]["CLIPField"] | null; - /** - * type - * @default lora_loader_output - * @constant - */ - type: "lora_loader_output"; - }; - /** - * FaceOffOutput - * @description Base class for FaceOff Output - */ - FaceOffOutput: { - /** @description The output image */ - image: components["schemas"]["ImageField"]; - /** - * Width - * @description The width of the image in pixels - */ - width: number; - /** - * Height - * @description The height of the image in pixels - */ - height: number; - /** - * type - * @default face_off_output - * @constant - */ - type: "face_off_output"; - /** @description The output mask */ - mask: components["schemas"]["ImageField"]; - /** - * X - * @description The x coordinate of the bounding box's left side - */ - x: number; - /** - * Y - * @description The y coordinate of the bounding box's top side - */ - y: number; - }; - /** - * FaceMaskOutput - * @description Base class for FaceMask output - */ - FaceMaskOutput: { - /** @description The output image */ - image: components["schemas"]["ImageField"]; - /** - * Width - * @description The width of the image in pixels - */ - width: number; - /** - * Height - * @description The height of the image in pixels - */ - height: number; - /** - * type - * @default face_mask_output - * @constant - */ 
- type: "face_mask_output"; - /** @description The output mask */ - mask: components["schemas"]["ImageField"]; - }; - /** - * LatentsCollectionOutput - * @description Base class for nodes that output a collection of latents tensors - */ - LatentsCollectionOutput: { - /** - * Collection - * @description Latents tensor - */ - collection: components["schemas"]["LatentsField"][]; - /** - * type - * @default latents_collection_output - * @constant - */ - type: "latents_collection_output"; - }; - /** - * ImageOutput - * @description Base class for nodes that output a single image - */ - ImageOutput: { - /** @description The output image */ - image: components["schemas"]["ImageField"]; - /** - * Width - * @description The width of the image in pixels - */ - width: number; - /** - * Height - * @description The height of the image in pixels - */ - height: number; - /** - * type - * @default image_output - * @constant - */ - type: "image_output"; - }; - /** - * ColorOutput - * @description Base class for nodes that output a single color - */ - ColorOutput: { - /** @description The output color */ - color: components["schemas"]["ColorField"]; - /** - * type - * @default color_output - * @constant - */ - type: "color_output"; - }; - /** - * VAEOutput - * @description Base class for invocations that output a VAE field - */ - VAEOutput: { - /** - * VAE - * @description VAE - */ - vae: components["schemas"]["VAEField"]; - /** - * type - * @default vae_output - * @constant - */ - type: "vae_output"; - }; - /** - * LoRASelectorOutput - * @description Model loader output - */ - LoRASelectorOutput: { - /** - * LoRA - * @description LoRA model and weight - */ - lora: components["schemas"]["LoRAField"]; - /** - * type - * @default lora_selector_output - * @constant - */ - type: "lora_selector_output"; - }; - /** - * FloatOutput - * @description Base class for nodes that output a single float - */ - FloatOutput: { - /** - * Value - * @description The output float - */ - value: number; - /** - * type - * @default float_output - * @constant - */ - type: "float_output"; - }; - /** - * ModelLoaderOutput - * @description Model loader output - */ - ModelLoaderOutput: { - /** - * VAE - * @description VAE - */ - vae: components["schemas"]["VAEField"]; - /** - * type - * @default model_loader_output - * @constant - */ - type: "model_loader_output"; - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip: components["schemas"]["CLIPField"]; - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; - }; - /** - * ModelIdentifierOutput - * @description Model identifier output - */ - ModelIdentifierOutput: { - /** - * Model - * @description Model identifier - */ - model: components["schemas"]["ModelIdentifierField"]; - /** - * type - * @default model_identifier_output - * @constant - */ - type: "model_identifier_output"; - }; - /** - * StringOutput - * @description Base class for nodes that output a single string - */ - StringOutput: { - /** - * Value - * @description The output string - */ - value: string; - /** - * type - * @default string_output - * @constant - */ - type: "string_output"; - }; - /** - * StringPosNegOutput - * @description Base class for invocations that output a positive and negative string - */ - StringPosNegOutput: { - /** - * Positive String - * @description Positive string - */ - positive_string: string; - /** - * Negative String - * @description Negative string - */ - negative_string: string; - /** - * 
type - * @default string_pos_neg_output - * @constant - */ - type: "string_pos_neg_output"; - }; - /** MetadataOutput */ - MetadataOutput: { - /** @description Metadata Dict */ - metadata: components["schemas"]["MetadataField"]; - /** - * type - * @default metadata_output - * @constant - */ - type: "metadata_output"; - }; - /** - * CLIPOutput - * @description Base class for invocations that output a CLIP field - */ - CLIPOutput: { - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip: components["schemas"]["CLIPField"]; - /** - * type - * @default clip_output - * @constant - */ - type: "clip_output"; - }; - /** - * IterateInvocationOutput - * @description Used to connect iteration outputs. Will be expanded to a specific output. - */ - IterateInvocationOutput: { - /** - * Collection Item - * @description The item being iterated over - */ - item: unknown; - /** - * Index - * @description The index of the item - */ - index: number; - /** - * Total - * @description The total number of items - */ - total: number; - /** - * type - * @default iterate_output - * @constant - */ - type: "iterate_output"; - }; - /** - * SDXLRefinerModelLoaderOutput - * @description SDXL refiner model loader output - */ - SDXLRefinerModelLoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; - /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip2: components["schemas"]["CLIPField"]; - /** - * VAE - * @description VAE - */ - vae: components["schemas"]["VAEField"]; - /** - * type - * @default sdxl_refiner_model_loader_output - * @constant - */ - type: "sdxl_refiner_model_loader_output"; - }; - /** - * BooleanCollectionOutput - * @description Base class for nodes that output a collection of booleans - */ - BooleanCollectionOutput: { - /** - * Collection - * @description The output boolean collection - */ - collection: boolean[]; - /** - * type - * @default boolean_collection_output - * @constant - */ - type: "boolean_collection_output"; - }; - /** - * LatentsOutput - * @description Base class for nodes that output a single latents tensor - */ - LatentsOutput: { - /** @description Latents tensor */ - latents: components["schemas"]["LatentsField"]; - /** - * Width - * @description Width of output (px) - */ - width: number; - /** - * Height - * @description Height of output (px) - */ - height: number; - /** - * type - * @default latents_output - * @constant - */ - type: "latents_output"; - }; - /** - * SDXLLoRALoaderOutput - * @description SDXL LoRA Loader Output - */ - SDXLLoRALoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet: components["schemas"]["UNetField"] | null; - /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip: components["schemas"]["CLIPField"] | null; - /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip2: components["schemas"]["CLIPField"] | null; - /** - * type - * @default sdxl_lora_loader_output - * @constant - */ - type: "sdxl_lora_loader_output"; - }; - /** - * BooleanOutput - * @description Base class for nodes that output a single boolean - */ - BooleanOutput: { - /** - * Value - * @description The output boolean - */ - value: boolean; - /** - * type - * @default boolean_output - * @constant - */ - type: "boolean_output"; - }; - /** - * 
NoiseOutput - * @description Invocation noise output - */ - NoiseOutput: { - /** @description Noise tensor */ - noise: components["schemas"]["LatentsField"]; - /** - * Width - * @description Width of output (px) - */ - width: number; - /** - * Height - * @description Height of output (px) - */ - height: number; - /** - * type - * @default noise_output - * @constant - */ - type: "noise_output"; - }; - /** CalculateImageTilesOutput */ - CalculateImageTilesOutput: { - /** - * Tiles - * @description The tiles coordinates that cover a particular image shape. - */ - tiles: components["schemas"]["Tile"][]; - /** - * type - * @default calculate_image_tiles_output - * @constant - */ - type: "calculate_image_tiles_output"; - }; - /** - * String2Output - * @description Base class for invocations that output two strings - */ - String2Output: { - /** - * String 1 - * @description string 1 - */ - string_1: string; - /** - * String 2 - * @description string 2 - */ - string_2: string; - /** - * type - * @default string_2_output - * @constant - */ - type: "string_2_output"; - }; - /** T2IAdapterOutput */ - T2IAdapterOutput: { - /** - * T2I Adapter - * @description T2I-Adapter(s) to apply - */ - t2i_adapter: components["schemas"]["T2IAdapterField"]; - /** - * type - * @default t2i_adapter_output - * @constant - */ - type: "t2i_adapter_output"; - }; - /** - * UNetOutput - * @description Base class for invocations that output a UNet field. - */ - UNetOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; - /** - * type - * @default unet_output - * @constant - */ - type: "unet_output"; - }; - /** TileToPropertiesOutput */ - TileToPropertiesOutput: { - /** - * Coords Left - * @description Left coordinate of the tile relative to its parent image. - */ - coords_left: number; - /** - * Coords Right - * @description Right coordinate of the tile relative to its parent image. - */ - coords_right: number; - /** - * Coords Top - * @description Top coordinate of the tile relative to its parent image. - */ - coords_top: number; - /** - * Coords Bottom - * @description Bottom coordinate of the tile relative to its parent image. - */ - coords_bottom: number; - /** - * Width - * @description The width of the tile. Equal to coords_right - coords_left. - */ - width: number; - /** - * Height - * @description The height of the tile. Equal to coords_bottom - coords_top. - */ - height: number; - /** - * Overlap Top - * @description Overlap between this tile and its top neighbor. - */ - overlap_top: number; - /** - * Overlap Bottom - * @description Overlap between this tile and its bottom neighbor. - */ - overlap_bottom: number; - /** - * Overlap Left - * @description Overlap between this tile and its left neighbor. - */ - overlap_left: number; - /** - * Overlap Right - * @description Overlap between this tile and its right neighbor. 
- */ - overlap_right: number; - /** - * type - * @default tile_to_properties_output - * @constant - */ - type: "tile_to_properties_output"; - }; - /** - * ColorCollectionOutput - * @description Base class for nodes that output a collection of colors - */ - ColorCollectionOutput: { - /** - * Collection - * @description The output colors - */ - collection: components["schemas"]["ColorField"][]; - /** - * type - * @default color_collection_output - * @constant - */ - type: "color_collection_output"; - }; - /** - * ImageCollectionOutput - * @description Base class for nodes that output a collection of images - */ - ImageCollectionOutput: { - /** - * Collection - * @description The output images - */ - collection: components["schemas"]["ImageField"][]; - /** - * type - * @default image_collection_output - * @constant - */ - type: "image_collection_output"; - }; - /** - * MaskOutput - * @description A torch mask tensor. - */ - MaskOutput: { - /** @description The mask. */ - mask: components["schemas"]["TensorField"]; - /** - * Width - * @description The width of the mask in pixels. - */ - width: number; - /** - * Height - * @description The height of the mask in pixels. - */ - height: number; - /** - * type - * @default mask_output - * @constant - */ - type: "mask_output"; - }; - /** - * SeamlessModeOutput - * @description Modified Seamless Model output - */ - SeamlessModeOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet: components["schemas"]["UNetField"] | null; - /** - * VAE - * @description VAE - * @default null - */ - vae: components["schemas"]["VAEField"] | null; - /** - * type - * @default seamless_output - * @constant - */ - type: "seamless_output"; - }; - /** - * MetadataItemOutput - * @description Metadata Item Output - */ - MetadataItemOutput: { - /** @description Metadata Item */ - item: components["schemas"]["MetadataItemField"]; - /** - * type - * @default metadata_item_output - * @constant - */ - type: "metadata_item_output"; - }; - /** - * StringCollectionOutput - * @description Base class for nodes that output a collection of strings - */ - StringCollectionOutput: { - /** - * Collection - * @description The output strings - */ - collection: string[]; - /** - * type - * @default string_collection_output - * @constant - */ - type: "string_collection_output"; - }; - /** IPAdapterOutput */ - IPAdapterOutput: { - /** - * IP-Adapter - * @description IP-Adapter to apply - */ - ip_adapter: components["schemas"]["IPAdapterField"]; - /** - * type - * @default ip_adapter_output - * @constant - */ - type: "ip_adapter_output"; - }; - /** - * IdealSizeOutput - * @description Base class for invocations that output an image - */ - IdealSizeOutput: { - /** - * Width - * @description The ideal width of the image (in pixels) - */ - width: number; - /** - * Height - * @description The ideal height of the image (in pixels) - */ - height: number; - /** - * type - * @default ideal_size_output - * @constant - */ - type: "ideal_size_output"; - }; - /** - * ControlOutput - * @description node output for ControlNet info - */ - ControlOutput: { - /** @description ControlNet(s) to apply */ - control: components["schemas"]["ControlField"]; - /** - * type - * @default control_output - * @constant - */ - type: "control_output"; - }; - /** - * SDXLModelLoaderOutput - * @description SDXL base model loader output - */ - SDXLModelLoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; - /** - * CLIP 1 - * 
@description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip: components["schemas"]["CLIPField"]; - /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip2: components["schemas"]["CLIPField"]; - /** - * VAE - * @description VAE - */ - vae: components["schemas"]["VAEField"]; - /** - * type - * @default sdxl_model_loader_output - * @constant - */ - type: "sdxl_model_loader_output"; - }; - InvocationOutputMap: { - controlnet: components["schemas"]["ControlOutput"]; - rand_int: components["schemas"]["IntegerOutput"]; - clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; - color: components["schemas"]["ColorOutput"]; - iterate: components["schemas"]["IterateInvocationOutput"]; - tile_image_processor: components["schemas"]["ImageOutput"]; - depth_anything_image_processor: components["schemas"]["ImageOutput"]; - lora_selector: components["schemas"]["LoRASelectorOutput"]; - crop_latents: components["schemas"]["LatentsOutput"]; - img_resize: components["schemas"]["ImageOutput"]; - mask_combine: components["schemas"]["ImageOutput"]; - prompt_from_file: components["schemas"]["StringCollectionOutput"]; - ideal_size: components["schemas"]["IdealSizeOutput"]; - image_mask_to_tensor: components["schemas"]["MaskOutput"]; - rand_float: components["schemas"]["FloatOutput"]; - infill_rgba: components["schemas"]["ImageOutput"]; - color_correct: components["schemas"]["ImageOutput"]; - ip_adapter: components["schemas"]["IPAdapterOutput"]; - rectangle_mask: components["schemas"]["MaskOutput"]; - img_scale: components["schemas"]["ImageOutput"]; - boolean_collection: components["schemas"]["BooleanCollectionOutput"]; - calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; - esrgan: components["schemas"]["ImageOutput"]; - normalbae_image_processor: components["schemas"]["ImageOutput"]; - img_ilerp: components["schemas"]["ImageOutput"]; - img_mul: components["schemas"]["ImageOutput"]; - dynamic_prompt: components["schemas"]["StringCollectionOutput"]; - cv_inpaint: components["schemas"]["ImageOutput"]; - lscale: components["schemas"]["LatentsOutput"]; - range_of_size: components["schemas"]["IntegerCollectionOutput"]; - color_map_image_processor: components["schemas"]["ImageOutput"]; - conditioning: components["schemas"]["ConditioningOutput"]; - dw_openpose_image_processor: components["schemas"]["ImageOutput"]; - img_blur: components["schemas"]["ImageOutput"]; - save_image: components["schemas"]["ImageOutput"]; - string: components["schemas"]["StringOutput"]; - img_chan: components["schemas"]["ImageOutput"]; - step_param_easing: components["schemas"]["FloatCollectionOutput"]; - midas_depth_image_processor: components["schemas"]["ImageOutput"]; - unsharp_mask: components["schemas"]["ImageOutput"]; - sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - integer: components["schemas"]["IntegerOutput"]; - img_channel_offset: components["schemas"]["ImageOutput"]; - image: components["schemas"]["ImageOutput"]; - round_float: components["schemas"]["FloatOutput"]; - mediapipe_face_processor: components["schemas"]["ImageOutput"]; - infill_cv2: components["schemas"]["ImageOutput"]; - random_range: components["schemas"]["IntegerCollectionOutput"]; - float_math: components["schemas"]["FloatOutput"]; - sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; - string_collection: components["schemas"]["StringCollectionOutput"]; - img_lerp: components["schemas"]["ImageOutput"]; - face_off: 
components["schemas"]["FaceOffOutput"]; - sub: components["schemas"]["IntegerOutput"]; - merge_tiles_to_image: components["schemas"]["ImageOutput"]; - boolean: components["schemas"]["BooleanOutput"]; - integer_math: components["schemas"]["IntegerOutput"]; - string_replace: components["schemas"]["StringOutput"]; - metadata: components["schemas"]["MetadataOutput"]; - invert_tensor_mask: components["schemas"]["MaskOutput"]; - img_conv: components["schemas"]["ImageOutput"]; - merge_metadata: components["schemas"]["MetadataOutput"]; - img_watermark: components["schemas"]["ImageOutput"]; - float_to_int: components["schemas"]["IntegerOutput"]; - float_range: components["schemas"]["FloatCollectionOutput"]; - string_split: components["schemas"]["String2Output"]; - float: components["schemas"]["FloatOutput"]; - blank_image: components["schemas"]["ImageOutput"]; - collect: components["schemas"]["CollectInvocationOutput"]; - infill_tile: components["schemas"]["ImageOutput"]; - seamless: components["schemas"]["SeamlessModeOutput"]; - create_gradient_mask: components["schemas"]["GradientMaskOutput"]; - lora_loader: components["schemas"]["LoRALoaderOutput"]; - zoe_depth_image_processor: components["schemas"]["ImageOutput"]; - string_join: components["schemas"]["StringOutput"]; - show_image: components["schemas"]["ImageOutput"]; - t2i_adapter: components["schemas"]["T2IAdapterOutput"]; - create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; - metadata_item: components["schemas"]["MetadataItemOutput"]; - main_model_loader: components["schemas"]["ModelLoaderOutput"]; - hed_image_processor: components["schemas"]["ImageOutput"]; - core_metadata: components["schemas"]["MetadataOutput"]; - conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; - content_shuffle_image_processor: components["schemas"]["ImageOutput"]; - lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; - model_identifier: components["schemas"]["ModelIdentifierOutput"]; - mlsd_image_processor: components["schemas"]["ImageOutput"]; - lresize: components["schemas"]["LatentsOutput"]; - tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; - div: components["schemas"]["IntegerOutput"]; - calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; - infill_lama: components["schemas"]["ImageOutput"]; - tomask: components["schemas"]["ImageOutput"]; - canny_image_processor: components["schemas"]["ImageOutput"]; - img_hue_adjust: components["schemas"]["ImageOutput"]; - i2l: components["schemas"]["LatentsOutput"]; - float_collection: components["schemas"]["FloatCollectionOutput"]; - integer_collection: components["schemas"]["IntegerCollectionOutput"]; - face_identifier: components["schemas"]["ImageOutput"]; - sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; - compel: components["schemas"]["ConditioningOutput"]; - mask_edge: components["schemas"]["ImageOutput"]; - face_mask_detection: components["schemas"]["FaceMaskOutput"]; - scheduler: components["schemas"]["SchedulerOutput"]; - image_collection: components["schemas"]["ImageCollectionOutput"]; - infill_patchmatch: components["schemas"]["ImageOutput"]; - freeu: components["schemas"]["UNetOutput"]; - pidi_image_processor: components["schemas"]["ImageOutput"]; - img_channel_multiply: components["schemas"]["ImageOutput"]; - sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; - l2i: components["schemas"]["ImageOutput"]; - segment_anything_processor: 
components["schemas"]["ImageOutput"]; - lblend: components["schemas"]["LatentsOutput"]; - vae_loader: components["schemas"]["VAEOutput"]; - canvas_paste_back: components["schemas"]["ImageOutput"]; - leres_image_processor: components["schemas"]["ImageOutput"]; - mask_from_id: components["schemas"]["ImageOutput"]; - latents_collection: components["schemas"]["LatentsCollectionOutput"]; - range: components["schemas"]["IntegerCollectionOutput"]; - img_pad_crop: components["schemas"]["ImageOutput"]; - img_crop: components["schemas"]["ImageOutput"]; - img_nsfw: components["schemas"]["ImageOutput"]; - denoise_latents: components["schemas"]["LatentsOutput"]; - mul: components["schemas"]["IntegerOutput"]; - noise: components["schemas"]["NoiseOutput"]; - sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; - lineart_anime_image_processor: components["schemas"]["ImageOutput"]; - img_paste: components["schemas"]["ImageOutput"]; - latents: components["schemas"]["LatentsOutput"]; - calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; - string_split_neg: components["schemas"]["StringPosNegOutput"]; - lineart_image_processor: components["schemas"]["ImageOutput"]; - heuristic_resize: components["schemas"]["ImageOutput"]; - add: components["schemas"]["IntegerOutput"]; - string_join_three: components["schemas"]["StringOutput"]; - sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; - pair_tile_image: components["schemas"]["PairTileImageOutput"]; - }; - /** - * BatchEnqueuedEvent - * @description Event model for batch_enqueued - */ - BatchEnqueuedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Batch Id - * @description The ID of the batch - */ - batch_id: string; - /** - * Enqueued - * @description The number of invocations enqueued - */ - enqueued: number; - /** - * Requested - * @description The number of invocations initially requested to be enqueued (may be less than enqueued if queue was full) - */ - requested: number; - /** - * Priority - * @description The priority of the batch - */ - priority: number; - }; - /** - * BulkDownloadCompleteEvent - * @description Event model for bulk_download_complete - */ - BulkDownloadCompleteEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Bulk Download Id - * @description The ID of the bulk image download - */ - bulk_download_id: string; - /** - * Bulk Download Item Id - * @description The ID of the bulk image download item - */ - bulk_download_item_id: string; - /** - * Bulk Download Item Name - * @description The name of the bulk image download item - */ - bulk_download_item_name: string; - }; - /** - * BulkDownloadErrorEvent - * @description Event model for bulk_download_error - */ - BulkDownloadErrorEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Bulk Download Id - * @description The ID of the bulk image download - */ - bulk_download_id: string; - /** - * Bulk Download Item Id - * @description The ID of the bulk image download item - */ - bulk_download_item_id: string; - /** - * Bulk Download Item Name - * @description The name of the bulk image download item - */ - bulk_download_item_name: string; - /** - * Error - * @description The error message - */ - error: 
string; - }; - /** - * BulkDownloadStartedEvent - * @description Event model for bulk_download_started - */ - BulkDownloadStartedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Bulk Download Id - * @description The ID of the bulk image download - */ - bulk_download_id: string; - /** - * Bulk Download Item Id - * @description The ID of the bulk image download item - */ - bulk_download_item_id: string; - /** - * Bulk Download Item Name - * @description The name of the bulk image download item - */ - bulk_download_item_name: string; - }; - /** - * Classification - * @description The classification of an Invocation. - * - `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation. - * - `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term. - * - `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation. - * @enum {string} - */ - Classification: "stable" | "beta" | "prototype"; - /** - * DownloadCancelledEvent - * @description Event model for download_cancelled - */ - DownloadCancelledEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Source - * @description The source of the download - */ - source: string; - }; - /** - * DownloadCompleteEvent - * @description Event model for download_complete - */ - DownloadCompleteEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Source - * @description The source of the download - */ - source: string; - /** - * Download Path - * @description The local path where the download is saved - */ - download_path: string; - /** - * Total Bytes - * @description The total number of bytes downloaded - */ - total_bytes: number; - }; - /** - * DownloadErrorEvent - * @description Event model for download_error - */ - DownloadErrorEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Source - * @description The source of the download - */ - source: string; - /** - * Error Type - * @description The type of error - */ - error_type: string; - /** - * Error - * @description The error message - */ - error: string; - }; - /** - * DownloadProgressEvent - * @description Event model for download_progress - */ - DownloadProgressEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Source - * @description The source of the download - */ - source: string; - /** - * Download Path - * @description The local path where the download is saved - */ - download_path: string; - /** - * Current Bytes - * @description The number of bytes downloaded so far - */ - current_bytes: number; - /** - * Total Bytes - * @description The total number of bytes to be downloaded - */ - total_bytes: number; - }; - /** - * DownloadStartedEvent - * @description Event model for download_started - */ - DownloadStartedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Source - * @description The source of the download - */ - source: string; - /** - * 
Download Path - * @description The local path where the download is saved - */ - download_path: string; - }; - /** - * FieldKind - * @description The kind of field. - * - `Input`: An input field on a node. - * - `Output`: An output field on a node. - * - `Internal`: A field which is treated as an input, but cannot be used in node definitions. Metadata is - * one example. It is provided to nodes via the WithMetadata class, and we want to reserve the field name - * "metadata" for this on all nodes. `FieldKind` is used to short-circuit the field name validation logic, - * allowing "metadata" for that field. - * - `NodeAttribute`: The field is a node attribute. These are fields which are not inputs or outputs, - * but which are used to store information about the node. For example, the `id` and `type` fields are node - * attributes. - * - * The presence of this in `json_schema_extra["field_kind"]` is used when initializing node schemas on app - * startup, and when generating the OpenAPI schema for the workflow editor. - * @enum {string} - */ - FieldKind: "input" | "output" | "internal" | "node_attribute"; - /** - * Input - * @description The type of input a field accepts. - * - `Input.Direct`: The field must have its value provided directly, when the invocation and field are instantiated. - * - `Input.Connection`: The field must have its value provided by a connection. - * - `Input.Any`: The field may have its value provided either directly or by a connection. - * @enum {string} - */ - Input: "connection" | "direct" | "any"; - /** - * InputFieldJSONSchemaExtra - * @description Extra attributes to be added to input fields and their OpenAPI schema. Used during graph execution, - * and by the workflow editor during schema parsing and UI rendering. - */ - InputFieldJSONSchemaExtra: { - input: components["schemas"]["Input"]; - /** Orig Required */ - orig_required: boolean; - field_kind: components["schemas"]["FieldKind"]; - /** - * Default - * @default null - */ - default: unknown; - /** - * Orig Default - * @default null - */ - orig_default: unknown; - /** - * Ui Hidden - * @default false - */ - ui_hidden: boolean; - /** @default null */ - ui_type: components["schemas"]["UIType"] | null; - /** @default null */ - ui_component: components["schemas"]["UIComponent"] | null; - /** - * Ui Order - * @default null - */ - ui_order: number | null; - /** - * Ui Choice Labels - * @default null - */ - ui_choice_labels: { - [key: string]: string; - } | null; - }; - /** - * InvocationCompleteEvent - * @description Event model for invocation_complete - */ - InvocationCompleteEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Item Id - * @description The ID of the queue item - */ - item_id: number; - /** - * Batch Id - * @description The ID of the queue batch - */ - batch_id: string; - /** - * Session Id - * @description The ID of the session (aka graph execution state) - */ - session_id: string; - /** @description The ID of the invocation */ - invocation: components["schemas"]["AnyInvocation"]; - /** - * Invocation Source Id - * @description The ID of the prepared invocation's source node - */ - invocation_source_id: string; - /** @description The result of the invocation */ - result: components["schemas"]["AnyInvocationOutput"]; - }; - /** - * InvocationDenoiseProgressEvent - * @description Event model for invocation_denoise_progress - */ - InvocationDenoiseProgressEvent: { - 
/** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Item Id - * @description The ID of the queue item - */ - item_id: number; - /** - * Batch Id - * @description The ID of the queue batch - */ - batch_id: string; - /** - * Session Id - * @description The ID of the session (aka graph execution state) - */ - session_id: string; - /** @description The ID of the invocation */ - invocation: components["schemas"]["AnyInvocation"]; - /** - * Invocation Source Id - * @description The ID of the prepared invocation's source node - */ - invocation_source_id: string; - /** @description The progress image sent at each step during processing */ - progress_image: components["schemas"]["ProgressImage"]; - /** - * Step - * @description The current step of the invocation - */ - step: number; - /** - * Total Steps - * @description The total number of steps in the invocation - */ - total_steps: number; - /** - * Order - * @description The order of the invocation in the session - */ - order: number; - /** - * Percentage - * @description The percentage of completion of the invocation - */ - percentage: number; - }; - /** - * InvocationErrorEvent - * @description Event model for invocation_error - */ - InvocationErrorEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Item Id - * @description The ID of the queue item - */ - item_id: number; - /** - * Batch Id - * @description The ID of the queue batch - */ - batch_id: string; - /** - * Session Id - * @description The ID of the session (aka graph execution state) - */ - session_id: string; - /** @description The ID of the invocation */ - invocation: components["schemas"]["AnyInvocation"]; - /** - * Invocation Source Id - * @description The ID of the prepared invocation's source node - */ - invocation_source_id: string; - /** - * Error Type - * @description The error type - */ - error_type: string; - /** - * Error Message - * @description The error message - */ - error_message: string; - /** - * Error Traceback - * @description The error traceback - */ - error_traceback: string; - /** - * User Id - * @description The ID of the user who created the invocation - * @default null - */ - user_id: string | null; - /** - * Project Id - * @description The ID of the user who created the invocation - * @default null - */ - project_id: string | null; - }; - /** - * InvocationStartedEvent - * @description Event model for invocation_started - */ - InvocationStartedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Item Id - * @description The ID of the queue item - */ - item_id: number; - /** - * Batch Id - * @description The ID of the queue batch - */ - batch_id: string; - /** - * Session Id - * @description The ID of the session (aka graph execution state) - */ - session_id: string; - /** @description The ID of the invocation */ - invocation: components["schemas"]["AnyInvocation"]; - /** - * Invocation Source Id - * @description The ID of the prepared invocation's source node - */ - invocation_source_id: string; - }; - /** - * ModelInstallCancelledEvent - * @description Event model for model_install_cancelled - */ - ModelInstallCancelledEvent: { - /** - * Timestamp - * 
@description The timestamp of the event - */ - timestamp: number; - /** - * Id - * @description The ID of the install job - */ - id: number; - /** - * Source - * @description Source of the model; local path, repo_id or url - */ - source: string; - }; - /** - * ModelInstallCompleteEvent - * @description Event model for model_install_complete - */ - ModelInstallCompleteEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Id - * @description The ID of the install job - */ - id: number; - /** - * Source - * @description Source of the model; local path, repo_id or url - */ - source: string; - /** - * Key - * @description Model config record key - */ - key: string; - /** - * Total Bytes - * @description Size of the model (may be None for installation of a local path) - */ - total_bytes: number | null; - }; - /** - * ModelInstallDownloadProgressEvent - * @description Event model for model_install_download_progress - */ - ModelInstallDownloadProgressEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Id - * @description The ID of the install job - */ - id: number; - /** - * Source - * @description Source of the model; local path, repo_id or url - */ - source: string; - /** - * Local Path - * @description Where model is downloading to - */ - local_path: string; - /** - * Bytes - * @description Number of bytes downloaded so far - */ - bytes: number; - /** - * Total Bytes - * @description Total size of download, including all files - */ - total_bytes: number; - /** - * Parts - * @description Progress of downloading URLs that comprise the model, if any - */ - parts: ({ - [key: string]: number | string; - })[]; - }; - /** - * ModelInstallDownloadsCompleteEvent - * @description Emitted once when an install job becomes active. 
- */ - ModelInstallDownloadsCompleteEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Id - * @description The ID of the install job - */ - id: number; - /** - * Source - * @description Source of the model; local path, repo_id or url - */ - source: string; - }; - /** - * ModelInstallErrorEvent - * @description Event model for model_install_error - */ - ModelInstallErrorEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Id - * @description The ID of the install job - */ - id: number; - /** - * Source - * @description Source of the model; local path, repo_id or url - */ - source: string; - /** - * Error Type - * @description The name of the exception - */ - error_type: string; - /** - * Error - * @description A text description of the exception - */ - error: string; - }; - /** - * ModelInstallStartedEvent - * @description Event model for model_install_started - */ - ModelInstallStartedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Id - * @description The ID of the install job - */ - id: number; - /** - * Source - * @description Source of the model; local path, repo_id or url - */ - source: string; - }; - /** - * ModelLoadCompleteEvent - * @description Event model for model_load_complete - */ - ModelLoadCompleteEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Config - * @description The model's config - */ - config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"]; - /** - * @description The submodel type, if any - * @default null - */ - submodel_type: components["schemas"]["SubModelType"] | null; - }; - /** - * ModelLoadStartedEvent - * @description Event model for model_load_started - */ - ModelLoadStartedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Config - * @description The model's config - */ - config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"]; - /** - * @description The submodel type, if any - * @default null - */ - submodel_type: components["schemas"]["SubModelType"] | null; - }; - /** - * 
OutputFieldJSONSchemaExtra - * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor - * during schema parsing and UI rendering. - */ - OutputFieldJSONSchemaExtra: { - field_kind: components["schemas"]["FieldKind"]; - /** Ui Hidden */ - ui_hidden: boolean; - ui_type: components["schemas"]["UIType"] | null; - /** Ui Order */ - ui_order: number | null; - }; - /** - * ProgressImage - * @description The progress image sent intermittently during processing - */ - ProgressImage: { - /** - * Width - * @description The effective width of the image in pixels - */ - width: number; - /** - * Height - * @description The effective height of the image in pixels - */ - height: number; - /** - * Dataurl - * @description The image data as a b64 data URL - */ - dataURL: string; - }; - /** - * QueueClearedEvent - * @description Event model for queue_cleared - */ - QueueClearedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - }; - /** - * QueueItemStatusChangedEvent - * @description Event model for queue_item_status_changed - */ - QueueItemStatusChangedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Item Id - * @description The ID of the queue item - */ - item_id: number; - /** - * Batch Id - * @description The ID of the queue batch - */ - batch_id: string; - /** - * Status - * @description The new status of the queue item - * @enum {string} - */ - status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; - /** - * Error Type - * @description The error type, if any - * @default null - */ - error_type: string | null; - /** - * Error Message - * @description The error message, if any - * @default null - */ - error_message: string | null; - /** - * Error Traceback - * @description The error traceback, if any - * @default null - */ - error_traceback: string | null; - /** - * Created At - * @description The timestamp when the queue item was created - * @default null - */ - created_at: string | null; - /** - * Updated At - * @description The timestamp when the queue item was last updated - * @default null - */ - updated_at: string | null; - /** - * Started At - * @description The timestamp when the queue item was started - * @default null - */ - started_at: string | null; - /** - * Completed At - * @description The timestamp when the queue item was completed - * @default null - */ - completed_at: string | null; - /** @description The status of the batch */ - batch_status: components["schemas"]["BatchStatus"]; - /** @description The status of the queue */ - queue_status: components["schemas"]["SessionQueueStatus"]; - /** - * Session Id - * @description The ID of the session (aka graph execution state) - */ - session_id: string; - }; - /** - * UIComponent - * @description The type of UI component to use for a field, used to override the default components, which are - * inferred from the field type. - * @enum {string} - */ - UIComponent: "none" | "textarea" | "slider"; - /** - * UIConfigBase - * @description Provides additional node configuration to the UI. - * This is used internally by the @invocation decorator logic. Do not use this directly. 
- */ - UIConfigBase: { - /** - * Tags - * @description The node's tags - */ - tags: string[] | null; - /** - * Title - * @description The node's display name - * @default null - */ - title: string | null; - /** - * Category - * @description The node's category - * @default null - */ - category: string | null; - /** - * Version - * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". - */ - version: string; - /** - * Node Pack - * @description Whether or not this is a custom node - * @default null - */ - node_pack: string | null; - /** - * @description The node's classification - * @default stable - */ - classification: components["schemas"]["Classification"]; - }; - /** - * UIType - * @description Type hints for the UI for situations in which the field type is not enough to infer the correct UI type. - * - * - Model Fields - * The most common node-author-facing use will be for model fields. Internally, there is no difference - * between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the - * base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that - * the field is an SDXL main model field. - * - * - Any Field - * We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to - * indicate that the field accepts any type. Use with caution. This cannot be used on outputs. - * - * - Scheduler Field - * Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field. - * - * - Internal Fields - * Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate - * handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These - * should not be used by node authors. - * - * - DEPRECATED Fields - * These types are deprecated and should not be used by node authors. A warning will be logged if one is - * used, and the type will be ignored. They are included here for backwards compatibility. 
- * @enum {string} - */ - UIType: "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VAEModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "T2IAdapterModelField" | "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_IsIntermediate" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict"; }; responses: never; parameters: never; From a983f27aada6befa157f184426b22380a78110fe Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 29 May 2024 21:06:16 +1000 Subject: [PATCH 17/52] fix(ui): update types --- .../components/sidePanel/inspector/InspectorOutputsTab.tsx | 4 ++-- .../web/src/features/nodes/util/graph/buildNodesGraph.ts | 4 ++-- invokeai/frontend/web/src/services/api/types.ts | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx index 59a603e7f1..b0c6d778b8 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx @@ -11,7 +11,7 @@ import { selectLastSelectedNode } from 'features/nodes/store/selectors'; import { isInvocationNode } from 'features/nodes/types/invocation'; import { memo, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import type { ImageOutput, S } from 'services/api/types'; +import type { AnyInvocationOutput, ImageOutput } from 'services/api/types'; import ImageOutputPreview from './outputs/ImageOutputPreview'; @@ -65,4 +65,4 @@ const InspectorOutputsTab = () => { export default memo(InspectorOutputsTab); -const getKey = (result: S['AnyInvocationOutput'], i: number) => `${result.type}-${i}`; +const getKey = (result: AnyInvocationOutput, i: number) => `${result.type}-${i}`; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts index 8f880a46a7..094dc529de 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts @@ -1,7 +1,7 @@ import type { NodesState } from 
'features/nodes/store/types'; import { isInvocationNode } from 'features/nodes/types/invocation'; import { omit, reduce } from 'lodash-es'; -import type { Graph, S } from 'services/api/types'; +import type { AnyInvocation, Graph } from 'services/api/types'; import { v4 as uuidv4 } from 'uuid'; /** @@ -81,7 +81,7 @@ export const buildNodesGraph = (nodesState: NodesState): Graph => { parsedEdges.forEach((edge) => { const destination_node = parsedNodes[edge.destination.node_id]; const field = edge.destination.field; - parsedNodes[edge.destination.node_id] = omit(destination_node, field) as S['AnyInvocation']; + parsedNodes[edge.destination.node_id] = omit(destination_node, field) as AnyInvocation; }); // Assemble! diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 17b63f6f7c..90ddf3cca1 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -131,14 +131,14 @@ export type WorkflowRecordListItemDTO = S['WorkflowRecordListItemDTO']; type KeysOfUnion = T extends T ? keyof T : never; export type AnyInvocation = Exclude< - S['AnyInvocation'], + NonNullable[string], S['CoreMetadataInvocation'] | S['MetadataInvocation'] | S['MetadataItemInvocation'] | S['MergeMetadataInvocation'] >; -export type AnyInvocationIncMetadata = S['AnyInvocation']; +export type AnyInvocationIncMetadata = NonNullable[string]; export type InvocationType = AnyInvocation['type']; type InvocationOutputMap = S['InvocationOutputMap']; -type AnyInvocationOutput = InvocationOutputMap[InvocationType]; +export type AnyInvocationOutput = InvocationOutputMap[InvocationType]; export type Invocation = Extract; // export type InvocationOutput = InvocationOutputMap[T]; From c2eef93476b88ee20e534fee93017a3fa63891d4 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 13:15:00 +1000 Subject: [PATCH 18/52] feat(ui): wip slider implementations --- invokeai/frontend/web/package.json | 2 + invokeai/frontend/web/pnpm-lock.yaml | 33 ++++ .../ImageViewer/ImageSliderComparison.tsx | 77 +++++++++ .../ImageViewer/ImageSliderComparison2.tsx | 148 ++++++++++++++++ .../ImageViewer/ImageSliderComparison3.tsx | 162 ++++++++++++++++++ .../components/ImageViewer/ImageViewer.tsx | 11 +- 6 files changed, 431 insertions(+), 2 deletions(-) create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison2.tsx create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index f2210e4c68..f5189f23df 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -59,8 +59,10 @@ "@dnd-kit/sortable": "^8.0.0", "@dnd-kit/utilities": "^3.2.2", "@fontsource-variable/inter": "^5.0.18", + "@img-comparison-slider/react": "^8.0.2", "@invoke-ai/ui-library": "^0.0.25", "@nanostores/react": "^0.7.2", + "@reactuses/core": "^5.0.14", "@reduxjs/toolkit": "2.2.3", "@roarr/browser-log-writer": "^1.3.0", "chakra-react-select": "^4.7.6", diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml index 64189f0d82..d805591721 100644 --- a/invokeai/frontend/web/pnpm-lock.yaml +++ b/invokeai/frontend/web/pnpm-lock.yaml @@ -29,12 +29,18 @@ dependencies: 
'@fontsource-variable/inter': specifier: ^5.0.18 version: 5.0.18 + '@img-comparison-slider/react': + specifier: ^8.0.2 + version: 8.0.2 '@invoke-ai/ui-library': specifier: ^0.0.25 version: 0.0.25(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.18)(@internationalized/date@3.5.3)(@types/react@18.3.1)(i18next@23.11.3)(react-dom@18.3.1)(react@18.3.1) '@nanostores/react': specifier: ^0.7.2 version: 0.7.2(nanostores@0.10.3)(react@18.3.1) + '@reactuses/core': + specifier: ^5.0.14 + version: 5.0.14(react@18.3.1) '@reduxjs/toolkit': specifier: 2.2.3 version: 2.2.3(react-redux@9.1.2)(react@18.3.1) @@ -3544,6 +3550,12 @@ packages: resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==} dev: true + /@img-comparison-slider/react@8.0.2: + resolution: {integrity: sha512-Him0yhbXpMXdnV6R3XE3LiXcMRhSXFMsbk6I7ct5HxO2YpK/BAGz3ub+7+akJRnK2XI7c3vQqvoIE507N1K4SA==} + dependencies: + img-comparison-slider: 8.0.6 + dev: false + /@internationalized/date@3.5.3: resolution: {integrity: sha512-X9bi8NAEHAjD8yzmPYT2pdJsbe+tYSEBAfowtlxJVJdZR3aK8Vg7ZUT1Fm5M47KLzp/M1p1VwAaeSma3RT7biw==} dependencies: @@ -3982,6 +3994,18 @@ packages: - immer dev: false + /@reactuses/core@5.0.14(react@18.3.1): + resolution: {integrity: sha512-lg640pRPOPT0HZ8XQAA1VRZ47fLIvSd2JrUTtKpzm4t3MtZvza+w2RHBGgPsdmtiLV3GsJJC9x5ge7XOQmiJ/Q==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + js-cookie: 3.0.5 + lodash-es: 4.17.21 + react: 18.3.1 + screenfull: 5.2.0 + use-sync-external-store: 1.2.2(react@18.3.1) + dev: false + /@reduxjs/toolkit@2.2.3(react-redux@9.1.2)(react@18.3.1): resolution: {integrity: sha512-76dll9EnJXg4EVcI5YNxZA/9hSAmZsFqzMmNRHvIlzw2WS/twfcVX3ysYrWGJMClwEmChQFC4yRq74tn6fdzRA==} peerDependencies: @@ -9223,6 +9247,10 @@ packages: engines: {node: '>= 4'} dev: true + /img-comparison-slider@8.0.6: + resolution: {integrity: sha512-ej4de7mWyjcXZvDgHq8K2a/dG8Vv+qYTdUjZa3cVILf316rLtDrHyGbh9fPvixmAFgbs30zTLfmaRDa7abjtzw==} + dev: false + /immer@10.1.1: resolution: {integrity: sha512-s2MPrmjovJcoMaHtx6K11Ra7oD05NT97w1IC5zpMkT6Atjr7H8LjaDd81iIxUYpMKSRRNMJE703M1Fhr/TctHw==} dev: false @@ -9668,6 +9696,11 @@ packages: resolution: {integrity: sha512-HvdH2LzI/EAZcUwA8+0nKNtWHqS+ZmijLA30RwZA0bo7ToCckjK5MkGhjED9KoRcXO6BaGI3I9UIzSA1FKFPOQ==} dev: false + /js-cookie@3.0.5: + resolution: {integrity: sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==} + engines: {node: '>=14'} + dev: false + /js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx new file mode 100644 index 0000000000..2cb5034b30 --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx @@ -0,0 +1,77 @@ +import { ImgComparisonSlider } from '@img-comparison-slider/react'; +import { Flex, Icon, Image, Text } from '@invoke-ai/ui-library'; +import { useAppSelector } from 'app/store/storeHooks'; +import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors'; +import { atom } from 'nanostores'; +import { memo } from 'react'; +import { PiCaretLeftBold, PiCaretRightBold } 
from 'react-icons/pi'; +import { useMeasure } from 'react-use'; +import type { ImageDTO } from 'services/api/types'; + +const $compareWith = atom(null); + +export const ImageSliderComparison = memo(() => { + const [containerRef, containerDims] = useMeasure(); + const lastSelectedImage = useAppSelector(selectLastSelectedImage); + const imageToCompare = useAppSelector((s) => s.gallery.selection[0]); + // const imageToCompare = useStore($imageToCompare); + const { imageA, imageB } = useAppSelector((s) => { + const images = s.gallery.selection.slice(-2); + return { imageA: images[0] ?? null, imageB: images[1] ?? null }; + }); + + if (!imageA || !imageB) { + return ( + + Select images to compare + + ); + } + + return ( + + + + {imageA.image_name} + {imageB.image_name} + + + + + + + + ); +}); + +ImageSliderComparison.displayName = 'ImageSliderComparison'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison2.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison2.tsx new file mode 100644 index 0000000000..a6f441c7a4 --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison2.tsx @@ -0,0 +1,148 @@ +import { Box, Flex, Icon, Image } from '@invoke-ai/ui-library'; +import { useAppSelector } from 'app/store/storeHooks'; +import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview'; +import { memo, useCallback, useRef } from 'react'; +import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; + +const INITIAL_POS = '50%'; +const HANDLE_WIDTH = 2; +const HANDLE_WIDTH_PX = `${HANDLE_WIDTH}px`; +const HANDLE_HITBOX = 20; +const HANDLE_HITBOX_PX = `${HANDLE_HITBOX}px`; +const HANDLE_LEFT_INITIAL_PX = `calc(${INITIAL_POS} - ${HANDLE_HITBOX / 2}px)`; +const HANDLE_INNER_LEFT_INITIAL_PX = `${HANDLE_HITBOX / 2 - HANDLE_WIDTH / 2}px`; + +export const ImageSliderComparison = memo(() => { + const containerRef = useRef(null); + const imageAContainerRef = useRef(null); + const handleRef = useRef(null); + + const updateHandlePos = useCallback((clientX: number) => { + if (!containerRef.current || !imageAContainerRef.current || !handleRef.current) { + return; + } + const { x, width } = containerRef.current.getBoundingClientRect(); + const rawHandlePos = ((clientX - x) * 100) / width; + const handleWidthPct = (HANDLE_WIDTH * 100) / width; + const newHandlePos = Math.min(100 - handleWidthPct, Math.max(0, rawHandlePos)); + imageAContainerRef.current.style.width = `${newHandlePos}%`; + handleRef.current.style.left = `calc(${newHandlePos}% - ${HANDLE_HITBOX / 2}px)`; + }, []); + + const onMouseMove = useCallback( + (e: MouseEvent) => { + updateHandlePos(e.clientX); + }, + [updateHandlePos] + ); + + const onMouseUp = useCallback(() => { + window.removeEventListener('mousemove', onMouseMove); + }, [onMouseMove]); + + const onMouseDown = useCallback( + (e: React.MouseEvent) => { + updateHandlePos(e.clientX); + window.addEventListener('mouseup', onMouseUp, { once: true }); + window.addEventListener('mousemove', onMouseMove); + }, + [onMouseMove, onMouseUp, updateHandlePos] + ); + + const { imageA, imageB } = useAppSelector((s) => { + const images = s.gallery.selection.slice(-2); + return { imageA: images[0] ?? null, imageB: images[1] ?? 
null }; + }); + + if (imageA && !imageB) { + return ; + } + + if (!imageA || !imageB) { + return null; + } + + return ( + + + + + + + + + + + + + + + + + ); +}); + +ImageSliderComparison.displayName = 'ImageSliderComparison'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx new file mode 100644 index 0000000000..a69a31e2c5 --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx @@ -0,0 +1,162 @@ +import { Box, Flex, Icon } from '@invoke-ai/ui-library'; +import { useMeasure } from '@reactuses/core'; +import { memo, useCallback, useMemo, useRef } from 'react'; +import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; +import type { ImageDTO } from 'services/api/types'; + +const INITIAL_POS = '50%'; +const HANDLE_WIDTH = 2; +const HANDLE_WIDTH_PX = `${HANDLE_WIDTH}px`; +const HANDLE_HITBOX = 20; +const HANDLE_HITBOX_PX = `${HANDLE_HITBOX}px`; +const HANDLE_LEFT_INITIAL_PX = `calc(${INITIAL_POS} - ${HANDLE_HITBOX / 2}px)`; +const HANDLE_INNER_LEFT_INITIAL_PX = `${HANDLE_HITBOX / 2 - HANDLE_WIDTH / 2}px`; + +type Props = { + firstImage: ImageDTO; + secondImage: ImageDTO; +}; + +export const ImageSliderComparison = memo(({ firstImage, secondImage }: Props) => { + const secondImageContainerRef = useRef(null); + const handleRef = useRef(null); + const containerRef = useRef(null); + const [containerSize] = useMeasure(containerRef); + + const updateHandlePos = useCallback((clientX: number) => { + if (!secondImageContainerRef.current || !handleRef.current || !containerRef.current) { + return; + } + const { x, width } = containerRef.current.getBoundingClientRect(); + const rawHandlePos = ((clientX - x) * 100) / width; + const handleWidthPct = (HANDLE_WIDTH * 100) / width; + const newHandlePos = Math.min(100 - handleWidthPct, Math.max(0, rawHandlePos)); + secondImageContainerRef.current.style.width = `${newHandlePos}%`; + handleRef.current.style.left = `calc(${newHandlePos}% - ${HANDLE_HITBOX / 2}px)`; + }, []); + + const onMouseMove = useCallback( + (e: MouseEvent) => { + updateHandlePos(e.clientX); + }, + [updateHandlePos] + ); + + const onMouseUp = useCallback(() => { + window.removeEventListener('mousemove', onMouseMove); + }, [onMouseMove]); + + const onMouseDown = useCallback( + (e: React.MouseEvent) => { + updateHandlePos(e.clientX); + window.addEventListener('mouseup', onMouseUp, { once: true }); + window.addEventListener('mousemove', onMouseMove); + }, + [onMouseMove, onMouseUp, updateHandlePos] + ); + + const fittedSize = useMemo(() => { + let width = containerSize.width; + let height = containerSize.height; + const aspectRatio = firstImage.width / firstImage.height; + if (firstImage.width > firstImage.height) { + width = firstImage.width; + height = width / aspectRatio; + } else { + height = firstImage.height; + width = height * aspectRatio; + } + return { width, height }; + }, [containerSize.height, containerSize.width, firstImage.height, firstImage.width]); + + console.log({ containerSize, fittedSize }); + + return ( + + + + + + + + + + + + + + + + ); +}); + +ImageSliderComparison.displayName = 'ImageSliderComparison'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx index 7064e553dc..4de793ea43 100644 --- 
a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx @@ -1,5 +1,7 @@ import { Flex } from '@invoke-ai/ui-library'; import { useAppSelector } from 'app/store/storeHooks'; +import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview'; +import { ImageSliderComparison } from 'features/gallery/components/ImageViewer/ImageSliderComparison3'; import { ToggleMetadataViewerButton } from 'features/gallery/components/ImageViewer/ToggleMetadataViewerButton'; import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton'; import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer'; @@ -9,7 +11,6 @@ import { memo, useMemo } from 'react'; import { useHotkeys } from 'react-hotkeys-hook'; import CurrentImageButtons from './CurrentImageButtons'; -import CurrentImagePreview from './CurrentImagePreview'; import { ViewerToggleMenu } from './ViewerToggleMenu'; const VIEWER_ENABLED_TABS: InvokeTabName[] = ['canvas', 'generation', 'workflows']; @@ -28,6 +29,11 @@ export const ImageViewer = memo(() => { useHotkeys('z', onToggle, { enabled: isViewerEnabled }, [isViewerEnabled, onToggle]); useHotkeys('esc', onClose, { enabled: isViewerEnabled }, [isViewerEnabled, onClose]); + const { firstImage, secondImage } = useAppSelector((s) => { + const images = s.gallery.selection.slice(-2); + return { firstImage: images[0] ?? null, secondImage: images[0] ? images[1] ?? null : null }; + }); + if (!shouldShowViewer) { return null; } @@ -64,7 +70,8 @@ export const ImageViewer = memo(() => { - + {firstImage && !secondImage && } + {firstImage && secondImage && } ); }); From 72bbcb2d9499e174a9db3b01e6d501c2d1f66122 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 13:42:19 +1000 Subject: [PATCH 19/52] feat(ui): slider working for all aspect ratios --- .../ImageViewer/ImageSliderComparison3.tsx | 55 ++++++++++++------- 1 file changed, 34 insertions(+), 21 deletions(-) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx index a69a31e2c5..e84b4f5d6a 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx @@ -1,5 +1,6 @@ import { Box, Flex, Icon } from '@invoke-ai/ui-library'; import { useMeasure } from '@reactuses/core'; +import type { Dimensions } from 'features/canvas/store/canvasTypes'; import { memo, useCallback, useMemo, useRef } from 'react'; import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; import type { ImageDTO } from 'services/api/types'; @@ -55,24 +56,32 @@ export const ImageSliderComparison = memo(({ firstImage, secondImage }: Props) = [onMouseMove, onMouseUp, updateHandlePos] ); - const fittedSize = useMemo(() => { - let width = containerSize.width; - let height = containerSize.height; - const aspectRatio = firstImage.width / firstImage.height; - if (firstImage.width > firstImage.height) { - width = firstImage.width; - height = width / aspectRatio; + const fittedSize = useMemo(() => { + // Fit the first image to the container + const targetAspectRatio = containerSize.width / containerSize.height; + const imageAspectRatio = 
firstImage.width / firstImage.height; + + if (firstImage.width <= containerSize.width && firstImage.height <= containerSize.height) { + return { width: firstImage.width, height: firstImage.height }; + } + + let width: number; + let height: number; + + if (imageAspectRatio > targetAspectRatio) { + // Image is wider than container's aspect ratio + width = containerSize.width; + height = width / imageAspectRatio; } else { - height = firstImage.height; - width = height * aspectRatio; + // Image is taller than container's aspect ratio + height = containerSize.height; + width = height * imageAspectRatio; } return { width, height }; - }, [containerSize.height, containerSize.width, firstImage.height, firstImage.width]); - - console.log({ containerSize, fittedSize }); + }, [containerSize.height, containerSize.width, firstImage]); return ( - + - + @@ -151,7 +165,6 @@ export const ImageSliderComparison = memo(({ firstImage, secondImage }: Props) = left={0} onMouseDown={onMouseDown} userSelect="none" - bg="rgba(255,0,0,0.3)" /> From 7a4bbd092e1586a5a37824fcd116735c3620b9c0 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 15:18:31 +1000 Subject: [PATCH 20/52] feat(ui): revised image comparison slider Should work for any components and image now. --- invokeai/frontend/web/public/locales/en.json | 4 +- .../features/controlLayers/util/renderers.ts | 2 +- .../ImageViewer/ImageSliderComparison3.tsx | 363 +++++++++++------- .../components/ImageViewer/ImageViewer.tsx | 15 +- 4 files changed, 248 insertions(+), 136 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index f7a91ef756..43df1534e5 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -375,7 +375,9 @@ "bulkDownloadRequestFailed": "Problem Preparing Download", "bulkDownloadFailed": "Download Failed", "problemDeletingImages": "Problem Deleting Images", - "problemDeletingImagesDesc": "One or more images could not be deleted" + "problemDeletingImagesDesc": "One or more images could not be deleted", + "firstImage": "First Image", + "secondImage": "Second Image" }, "hotkeys": { "searchHotkeys": "Search Hotkeys", diff --git a/invokeai/frontend/web/src/features/controlLayers/util/renderers.ts b/invokeai/frontend/web/src/features/controlLayers/util/renderers.ts index 25ac30387b..79933e6b00 100644 --- a/invokeai/frontend/web/src/features/controlLayers/util/renderers.ts +++ b/invokeai/frontend/web/src/features/controlLayers/util/renderers.ts @@ -54,7 +54,7 @@ const BBOX_SELECTED_STROKE = 'rgba(78, 190, 255, 1)'; const BRUSH_BORDER_INNER_COLOR = 'rgba(0,0,0,1)'; const BRUSH_BORDER_OUTER_COLOR = 'rgba(255,255,255,0.8)'; // This is invokeai/frontend/web/public/assets/images/transparent_bg.png as a dataURL -const STAGE_BG_DATAURL = +export const STAGE_BG_DATAURL = 
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAAEsmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS41LjAiPgogPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iCiAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyIKICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIgogICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgZXhpZjpQaXhlbFhEaW1lbnNpb249IjIwIgogICBleGlmOlBpeGVsWURpbWVuc2lvbj0iMjAiCiAgIGV4aWY6Q29sb3JTcGFjZT0iMSIKICAgdGlmZjpJbWFnZVdpZHRoPSIyMCIKICAgdGlmZjpJbWFnZUxlbmd0aD0iMjAiCiAgIHRpZmY6UmVzb2x1dGlvblVuaXQ9IjIiCiAgIHRpZmY6WFJlc29sdXRpb249IjMwMC8xIgogICB0aWZmOllSZXNvbHV0aW9uPSIzMDAvMSIKICAgcGhvdG9zaG9wOkNvbG9yTW9kZT0iMyIKICAgcGhvdG9zaG9wOklDQ1Byb2ZpbGU9InNSR0IgSUVDNjE5NjYtMi4xIgogICB4bXA6TW9kaWZ5RGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCIKICAgeG1wOk1ldGFkYXRhRGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCI+CiAgIDx4bXBNTTpIaXN0b3J5PgogICAgPHJkZjpTZXE+CiAgICAgPHJkZjpsaQogICAgICBzdEV2dDphY3Rpb249InByb2R1Y2VkIgogICAgICBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZmZpbml0eSBQaG90byAxLjEwLjgiCiAgICAgIHN0RXZ0OndoZW49IjIwMjQtMDQtMjNUMDg6MjA6NDcrMTA6MDAiLz4KICAgIDwvcmRmOlNlcT4KICAgPC94bXBNTTpIaXN0b3J5PgogIDwvcmRmOkRlc2NyaXB0aW9uPgogPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KPD94cGFja2V0IGVuZD0iciI/Pn9pdVgAAAGBaUNDUHNSR0IgSUVDNjE5NjYtMi4xAAAokXWR3yuDURjHP5uJmKghFy6WxpVpqMWNMgm1tGbKr5vt3S+1d3t73y3JrXKrKHHj1wV/AbfKtVJESq53TdywXs9rakv2nJ7zfM73nOfpnOeAPZJRVMPhAzWb18NTAffC4pK7oYiDTjpw4YgqhjYeCgWpaR8P2Kx457Vq1T73rzXHE4YCtkbhMUXT88LTwsG1vGbxrnC7ko7Ghc+F+3W5oPC9pcfKXLQ4VeYvi/VIeALsbcLuVBXHqlhJ66qwvByPmikov/exXuJMZOfnJPaId2MQZooAbmaYZAI/g4zK7MfLEAOyoka+7yd/lpzkKjJrrKOzSoo0efpFLUj1hMSk6AkZGdat/v/tq5EcHipXdwag/sU033qhYQdK26b5eWyapROoe4arbCU/dwQj76JvVzTPIbRuwsV1RYvtweUWdD1pUT36I9WJ25NJeD2DlkVw3ULTcrlnv/ucPkJkQ77qBvYPoE/Ot658AxagZ8FoS/a7AAAACXBIWXMAAC4jAAAuIwF4pT92AAAAL0lEQVQ4jWM8ffo0A25gYmKCR5YJjxxBMKp5ZGhm/P//Px7pM2fO0MrmUc0jQzMAB2EIhZC3pUYAAAAASUVORK5CYII='; const mapId = (object: { id: string }) => object.id; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx index e84b4f5d6a..fbb3cef3a7 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx @@ -1,175 +1,274 @@ -import { Box, Flex, Icon } from '@invoke-ai/ui-library'; -import { useMeasure } from '@reactuses/core'; +import { Box, Flex, Icon, Image, Text } from '@invoke-ai/ui-library'; +import { useMeasure, type UseMeasureRect } from '@reactuses/core'; import type { Dimensions } from 'features/canvas/store/canvasTypes'; -import { memo, useCallback, useMemo, useRef } from 'react'; +import { STAGE_BG_DATAURL } from 'features/controlLayers/util/renderers'; +import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react'; +import { useTranslation } from 'react-i18next'; import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; import type { ImageDTO } from 'services/api/types'; 
+const DROP_SHADOW = 'drop-shadow(0px 0px 4px rgb(0, 0, 0))'; const INITIAL_POS = '50%'; const HANDLE_WIDTH = 2; const HANDLE_WIDTH_PX = `${HANDLE_WIDTH}px`; const HANDLE_HITBOX = 20; const HANDLE_HITBOX_PX = `${HANDLE_HITBOX}px`; +const HANDLE_INNER_LEFT_PX = `${HANDLE_HITBOX / 2 - HANDLE_WIDTH / 2}px`; const HANDLE_LEFT_INITIAL_PX = `calc(${INITIAL_POS} - ${HANDLE_HITBOX / 2}px)`; -const HANDLE_INNER_LEFT_INITIAL_PX = `${HANDLE_HITBOX / 2 - HANDLE_WIDTH / 2}px`; type Props = { + /** + * The first image to compare + */ firstImage: ImageDTO; + /** + * The second image to compare + */ secondImage: ImageDTO; + /** + * The size of the container, used for sizing. + * If not provided, an internal container will be used, but this can cause a flicker effect as the component is first rendered. + */ + containerSize?: UseMeasureRect; + /** + * The ref of the container, used for sizing. + * If not provided, an internal container will be used, but this can cause a flicker effect as the component is first rendered. + */ + containerRef?: React.RefObject; }; -export const ImageSliderComparison = memo(({ firstImage, secondImage }: Props) => { - const secondImageContainerRef = useRef(null); - const handleRef = useRef(null); - const containerRef = useRef(null); - const [containerSize] = useMeasure(containerRef); +export const ImageSliderComparison = memo( + ({ firstImage, secondImage, containerSize: containerSizeProp, containerRef: containerRefProp }: Props) => { + const { t } = useTranslation(); + // How far the handle is from the left - this will be a CSS calculation that takes into account the handle width + const [left, setLeft] = useState(HANDLE_LEFT_INITIAL_PX); + // How wide the first image is + const [width, setWidth] = useState(INITIAL_POS); + const handleRef = useRef(null); + // If the container size is not provided, use an internal ref and measure - can cause flicker on mount tho + const _containerRef = useRef(null); + const [_containerSize] = useMeasure(_containerRef); + const containerRef = useMemo(() => containerRefProp ?? _containerRef, [containerRefProp, _containerRef]); + const containerSize = useMemo(() => containerSizeProp ?? 
_containerSize, [containerSizeProp, _containerSize]); + // To keep things smooth, we use RAF to update the handle position & gate it to 60fps + const rafRef = useRef(null); + const lastMoveTimeRef = useRef(0); - const updateHandlePos = useCallback((clientX: number) => { - if (!secondImageContainerRef.current || !handleRef.current || !containerRef.current) { - return; - } - const { x, width } = containerRef.current.getBoundingClientRect(); - const rawHandlePos = ((clientX - x) * 100) / width; - const handleWidthPct = (HANDLE_WIDTH * 100) / width; - const newHandlePos = Math.min(100 - handleWidthPct, Math.max(0, rawHandlePos)); - secondImageContainerRef.current.style.width = `${newHandlePos}%`; - handleRef.current.style.left = `calc(${newHandlePos}% - ${HANDLE_HITBOX / 2}px)`; - }, []); + const updateHandlePos = useCallback( + (clientX: number) => { + if (!handleRef.current || !containerRef.current) { + return; + } + lastMoveTimeRef.current = performance.now(); + const { x, width } = containerRef.current.getBoundingClientRect(); + const rawHandlePos = ((clientX - x) * 100) / width; + const handleWidthPct = (HANDLE_WIDTH * 100) / width; + const newHandlePos = Math.min(100 - handleWidthPct, Math.max(0, rawHandlePos)); + setWidth(`${newHandlePos}%`); + setLeft(`calc(${newHandlePos}% - ${HANDLE_HITBOX / 2}px)`); + }, + [containerRef] + ); - const onMouseMove = useCallback( - (e: MouseEvent) => { - updateHandlePos(e.clientX); - }, - [updateHandlePos] - ); + const onMouseMove = useCallback( + (e: MouseEvent) => { + if (rafRef.current === null && performance.now() > lastMoveTimeRef.current + 1000 / 60) { + rafRef.current = window.requestAnimationFrame(() => { + updateHandlePos(e.clientX); + rafRef.current = null; + }); + } + }, + [updateHandlePos] + ); - const onMouseUp = useCallback(() => { - window.removeEventListener('mousemove', onMouseMove); - }, [onMouseMove]); + const onMouseUp = useCallback(() => { + window.removeEventListener('mousemove', onMouseMove); + }, [onMouseMove]); - const onMouseDown = useCallback( - (e: React.MouseEvent) => { - updateHandlePos(e.clientX); - window.addEventListener('mouseup', onMouseUp, { once: true }); - window.addEventListener('mousemove', onMouseMove); - }, - [onMouseMove, onMouseUp, updateHandlePos] - ); + const onMouseDown = useCallback( + (e: React.MouseEvent) => { + // Update the handle position immediately on click + updateHandlePos(e.clientX); + window.addEventListener('mouseup', onMouseUp, { once: true }); + window.addEventListener('mousemove', onMouseMove); + }, + [onMouseMove, onMouseUp, updateHandlePos] + ); - const fittedSize = useMemo(() => { - // Fit the first image to the container - const targetAspectRatio = containerSize.width / containerSize.height; - const imageAspectRatio = firstImage.width / firstImage.height; + const fittedSize = useMemo(() => { + // Fit the first image to the container + if (containerSize.width === 0 || containerSize.height === 0) { + return { width: firstImage.width, height: firstImage.height }; + } + const targetAspectRatio = containerSize.width / containerSize.height; + const imageAspectRatio = firstImage.width / firstImage.height; - if (firstImage.width <= containerSize.width && firstImage.height <= containerSize.height) { - return { width: firstImage.width, height: firstImage.height }; - } + if (firstImage.width <= containerSize.width && firstImage.height <= containerSize.height) { + return { width: firstImage.width, height: firstImage.height }; + } - let width: number; - let height: number; + let width: number; + let 
height: number; - if (imageAspectRatio > targetAspectRatio) { - // Image is wider than container's aspect ratio - width = containerSize.width; - height = width / imageAspectRatio; - } else { - // Image is taller than container's aspect ratio - height = containerSize.height; - width = height * imageAspectRatio; - } - return { width, height }; - }, [containerSize.height, containerSize.width, firstImage]); + if (imageAspectRatio > targetAspectRatio) { + // Image is wider than container's aspect ratio + width = containerSize.width; + height = width / imageAspectRatio; + } else { + // Image is taller than container's aspect ratio + height = containerSize.height; + width = height * imageAspectRatio; + } + return { width, height }; + }, [containerSize, firstImage.height, firstImage.width]); - return ( - - - () => { + if (rafRef.current !== null) { + cancelAnimationFrame(rafRef.current); + } + }, + [] + ); + + return ( + + - - + - - + {t('gallery.secondImage')} + + + + + {t('gallery.firstImage')} + + + + + + + + - - - + + + - - ); -}); + ); + } +); ImageSliderComparison.displayName = 'ImageSliderComparison'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx index 4de793ea43..5669ec5550 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx @@ -1,4 +1,5 @@ import { Flex } from '@invoke-ai/ui-library'; +import { useMeasure } from '@reactuses/core'; import { useAppSelector } from 'app/store/storeHooks'; import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview'; import { ImageSliderComparison } from 'features/gallery/components/ImageViewer/ImageSliderComparison3'; @@ -7,7 +8,7 @@ import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/To import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer'; import type { InvokeTabName } from 'features/ui/store/tabMap'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; -import { memo, useMemo } from 'react'; +import { memo, useMemo, useRef } from 'react'; import { useHotkeys } from 'react-hotkeys-hook'; import CurrentImageButtons from './CurrentImageButtons'; @@ -19,6 +20,8 @@ export const ImageViewer = memo(() => { const { isOpen, onToggle, onClose } = useImageViewer(); const activeTabName = useAppSelector(activeTabNameSelector); const isViewerEnabled = useMemo(() => VIEWER_ENABLED_TABS.includes(activeTabName), [activeTabName]); + const containerRef = useRef(null); + const [containerSize] = useMeasure(containerRef); const shouldShowViewer = useMemo(() => { if (!isViewerEnabled) { return false; @@ -40,6 +43,7 @@ export const ImageViewer = memo(() => { return ( { {firstImage && !secondImage && } - {firstImage && secondImage && } + {firstImage && secondImage && ( + + )} ); }); From 1af53aed608cdbaad0be9409edd29e13ba73d9a8 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 15:34:43 +1000 Subject: [PATCH 21/52] feat(ui): fix image comparison slider resizing/aspect ratio jank --- .../ImageViewer/ImageSliderComparison.tsx | 302 ++++++++++++++---- .../ImageViewer/ImageSliderComparison2.tsx | 148 --------- .../ImageViewer/ImageSliderComparison3.tsx | 274 ---------------- .../components/ImageViewer/ImageViewer.tsx | 20 +- 4 files changed, 252 
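For reference, the fittedSize calculation shown above fits the first image into the measured container by clamping on whichever axis overflows, preserving aspect ratio and never upscaling. The standalone sketch below restates that logic; the Dimensions type here is a local stand-in for the app's canvas Dimensions type and the function name is illustrative.

    type Dimensions = { width: number; height: number };

    // Fit `image` inside `container`, preserving aspect ratio and never upscaling.
    const fitToContainer = (image: Dimensions, container: Dimensions): Dimensions => {
      if (container.width === 0 || container.height === 0) {
        // Container not measured yet: fall back to the image's natural size.
        return { width: image.width, height: image.height };
      }
      if (image.width <= container.width && image.height <= container.height) {
        // Image already fits: keep its natural size.
        return { width: image.width, height: image.height };
      }
      const imageAspectRatio = image.width / image.height;
      const containerAspectRatio = container.width / container.height;
      if (imageAspectRatio > containerAspectRatio) {
        // Image is wider than the container: constrain by width.
        const width = container.width;
        return { width, height: width / imageAspectRatio };
      }
      // Image is taller than the container: constrain by height.
      const height = container.height;
      return { width: height * imageAspectRatio, height };
    };

    // Example: fitToContainer({ width: 1024, height: 1024 }, { width: 800, height: 600 })
    // returns { width: 600, height: 600 }.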
insertions(+), 492 deletions(-) delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison2.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx index 2cb5034b30..3965cd5fd0 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx @@ -1,74 +1,260 @@ -import { ImgComparisonSlider } from '@img-comparison-slider/react'; -import { Flex, Icon, Image, Text } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; -import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors'; -import { atom } from 'nanostores'; -import { memo } from 'react'; +import { Box, Flex, Icon, Image, Text } from '@invoke-ai/ui-library'; +import type { UseMeasureRect } from '@reactuses/core'; +import type { Dimensions } from 'features/canvas/store/canvasTypes'; +import { STAGE_BG_DATAURL } from 'features/controlLayers/util/renderers'; +import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react'; +import { useTranslation } from 'react-i18next'; import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; -import { useMeasure } from 'react-use'; import type { ImageDTO } from 'services/api/types'; -const $compareWith = atom(null); +const DROP_SHADOW = 'drop-shadow(0px 0px 4px rgb(0, 0, 0))'; +const INITIAL_POS = '50%'; +const HANDLE_WIDTH = 2; +const HANDLE_WIDTH_PX = `${HANDLE_WIDTH}px`; +const HANDLE_HITBOX = 20; +const HANDLE_HITBOX_PX = `${HANDLE_HITBOX}px`; +const HANDLE_INNER_LEFT_PX = `${HANDLE_HITBOX / 2 - HANDLE_WIDTH / 2}px`; +const HANDLE_LEFT_INITIAL_PX = `calc(${INITIAL_POS} - ${HANDLE_HITBOX / 2}px)`; -export const ImageSliderComparison = memo(() => { - const [containerRef, containerDims] = useMeasure(); - const lastSelectedImage = useAppSelector(selectLastSelectedImage); - const imageToCompare = useAppSelector((s) => s.gallery.selection[0]); - // const imageToCompare = useStore($imageToCompare); - const { imageA, imageB } = useAppSelector((s) => { - const images = s.gallery.selection.slice(-2); - return { imageA: images[0] ?? null, imageB: images[1] ?? null }; - }); +type Props = { + /** + * The first image to compare + */ + firstImage: ImageDTO; + /** + * The second image to compare + */ + secondImage: ImageDTO; + /** + * The size of the container, required to fit the component correctly and manage aspect ratios. 
+ */ + containerSize: UseMeasureRect; +}; - if (!imageA || !imageB) { - return ( - - Select images to compare - - ); - } +export const ImageSliderComparison = memo(({ firstImage, secondImage, containerSize }: Props) => { + const { t } = useTranslation(); + // How far the handle is from the left - this will be a CSS calculation that takes into account the handle width + const [left, setLeft] = useState(HANDLE_LEFT_INITIAL_PX); + // How wide the first image is + const [width, setWidth] = useState(INITIAL_POS); + const handleRef = useRef(null); + // If the container size is not provided, use an internal ref and measure - can cause flicker on mount tho + const containerRef = useRef(null); + // To keep things smooth, we use RAF to update the handle position & gate it to 60fps + const rafRef = useRef(null); + const lastMoveTimeRef = useRef(0); + + const updateHandlePos = useCallback( + (clientX: number) => { + if (!handleRef.current || !containerRef.current) { + return; + } + lastMoveTimeRef.current = performance.now(); + const { x, width } = containerRef.current.getBoundingClientRect(); + const rawHandlePos = ((clientX - x) * 100) / width; + const handleWidthPct = (HANDLE_WIDTH * 100) / width; + const newHandlePos = Math.min(100 - handleWidthPct, Math.max(0, rawHandlePos)); + setWidth(`${newHandlePos}%`); + setLeft(`calc(${newHandlePos}% - ${HANDLE_HITBOX / 2}px)`); + }, + [containerRef] + ); + + const onMouseMove = useCallback( + (e: MouseEvent) => { + if (rafRef.current === null && performance.now() > lastMoveTimeRef.current + 1000 / 60) { + rafRef.current = window.requestAnimationFrame(() => { + updateHandlePos(e.clientX); + rafRef.current = null; + }); + } + }, + [updateHandlePos] + ); + + const onMouseUp = useCallback(() => { + window.removeEventListener('mousemove', onMouseMove); + }, [onMouseMove]); + + const onMouseDown = useCallback( + (e: React.MouseEvent) => { + // Update the handle position immediately on click + updateHandlePos(e.clientX); + window.addEventListener('mouseup', onMouseUp, { once: true }); + window.addEventListener('mousemove', onMouseMove); + }, + [onMouseMove, onMouseUp, updateHandlePos] + ); + + const fittedSize = useMemo(() => { + // Fit the first image to the container + if (containerSize.width === 0 || containerSize.height === 0) { + return { width: firstImage.width, height: firstImage.height }; + } + const targetAspectRatio = containerSize.width / containerSize.height; + const imageAspectRatio = firstImage.width / firstImage.height; + + if (firstImage.width <= containerSize.width && firstImage.height <= containerSize.height) { + return { width: firstImage.width, height: firstImage.height }; + } + + let width: number; + let height: number; + + if (imageAspectRatio > targetAspectRatio) { + // Image is wider than container's aspect ratio + width = containerSize.width; + height = width / imageAspectRatio; + } else { + // Image is taller than container's aspect ratio + height = containerSize.height; + width = height * imageAspectRatio; + } + return { width, height }; + }, [containerSize, firstImage.height, firstImage.width]); + + useEffect( + () => () => { + if (rafRef.current !== null) { + cancelAnimationFrame(rafRef.current); + } + }, + [] + ); return ( - - - - {imageA.image_name} + + + {imageB.image_name} - - - + + {t('gallery.secondImage')} + + + + + {t('gallery.firstImage')} + + + + + + + + - + + ); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison2.tsx 
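The requestAnimationFrame gating introduced above is the core of the smoothness fix: a mouse move schedules at most one pending frame, and a new frame is only scheduled if at least one 60 fps frame (about 16.7 ms) has elapsed since the last applied update. A framework-free sketch of the same pattern follows; applyMove is a placeholder for the component's actual handle-position update.

    const FRAME_MS = 1000 / 60;

    let rafId: number | null = null;
    let lastMoveTime = 0;

    const applyMove = (clientX: number) => {
      lastMoveTime = performance.now();
      // ...recompute and apply the handle position from clientX here...
    };

    const onMouseMove = (e: MouseEvent) => {
      // Schedule a frame only if none is pending and a full frame has elapsed.
      if (rafId === null && performance.now() > lastMoveTime + FRAME_MS) {
        rafId = window.requestAnimationFrame(() => {
          applyMove(e.clientX);
          rafId = null;
        });
      }
    };

    // On teardown (the React effect cleanup above does this), cancel any pending frame.
    const cancelPendingMove = () => {
      if (rafId !== null) {
        cancelAnimationFrame(rafId);
        rafId = null;
      }
    };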
b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison2.tsx deleted file mode 100644 index a6f441c7a4..0000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison2.tsx +++ /dev/null @@ -1,148 +0,0 @@ -import { Box, Flex, Icon, Image } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; -import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview'; -import { memo, useCallback, useRef } from 'react'; -import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; - -const INITIAL_POS = '50%'; -const HANDLE_WIDTH = 2; -const HANDLE_WIDTH_PX = `${HANDLE_WIDTH}px`; -const HANDLE_HITBOX = 20; -const HANDLE_HITBOX_PX = `${HANDLE_HITBOX}px`; -const HANDLE_LEFT_INITIAL_PX = `calc(${INITIAL_POS} - ${HANDLE_HITBOX / 2}px)`; -const HANDLE_INNER_LEFT_INITIAL_PX = `${HANDLE_HITBOX / 2 - HANDLE_WIDTH / 2}px`; - -export const ImageSliderComparison = memo(() => { - const containerRef = useRef(null); - const imageAContainerRef = useRef(null); - const handleRef = useRef(null); - - const updateHandlePos = useCallback((clientX: number) => { - if (!containerRef.current || !imageAContainerRef.current || !handleRef.current) { - return; - } - const { x, width } = containerRef.current.getBoundingClientRect(); - const rawHandlePos = ((clientX - x) * 100) / width; - const handleWidthPct = (HANDLE_WIDTH * 100) / width; - const newHandlePos = Math.min(100 - handleWidthPct, Math.max(0, rawHandlePos)); - imageAContainerRef.current.style.width = `${newHandlePos}%`; - handleRef.current.style.left = `calc(${newHandlePos}% - ${HANDLE_HITBOX / 2}px)`; - }, []); - - const onMouseMove = useCallback( - (e: MouseEvent) => { - updateHandlePos(e.clientX); - }, - [updateHandlePos] - ); - - const onMouseUp = useCallback(() => { - window.removeEventListener('mousemove', onMouseMove); - }, [onMouseMove]); - - const onMouseDown = useCallback( - (e: React.MouseEvent) => { - updateHandlePos(e.clientX); - window.addEventListener('mouseup', onMouseUp, { once: true }); - window.addEventListener('mousemove', onMouseMove); - }, - [onMouseMove, onMouseUp, updateHandlePos] - ); - - const { imageA, imageB } = useAppSelector((s) => { - const images = s.gallery.selection.slice(-2); - return { imageA: images[0] ?? null, imageB: images[1] ?? 
null }; - }); - - if (imageA && !imageB) { - return ; - } - - if (!imageA || !imageB) { - return null; - } - - return ( - - - - - - - - - - - - - - - - - ); -}); - -ImageSliderComparison.displayName = 'ImageSliderComparison'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx deleted file mode 100644 index fbb3cef3a7..0000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison3.tsx +++ /dev/null @@ -1,274 +0,0 @@ -import { Box, Flex, Icon, Image, Text } from '@invoke-ai/ui-library'; -import { useMeasure, type UseMeasureRect } from '@reactuses/core'; -import type { Dimensions } from 'features/canvas/store/canvasTypes'; -import { STAGE_BG_DATAURL } from 'features/controlLayers/util/renderers'; -import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; -import type { ImageDTO } from 'services/api/types'; - -const DROP_SHADOW = 'drop-shadow(0px 0px 4px rgb(0, 0, 0))'; -const INITIAL_POS = '50%'; -const HANDLE_WIDTH = 2; -const HANDLE_WIDTH_PX = `${HANDLE_WIDTH}px`; -const HANDLE_HITBOX = 20; -const HANDLE_HITBOX_PX = `${HANDLE_HITBOX}px`; -const HANDLE_INNER_LEFT_PX = `${HANDLE_HITBOX / 2 - HANDLE_WIDTH / 2}px`; -const HANDLE_LEFT_INITIAL_PX = `calc(${INITIAL_POS} - ${HANDLE_HITBOX / 2}px)`; - -type Props = { - /** - * The first image to compare - */ - firstImage: ImageDTO; - /** - * The second image to compare - */ - secondImage: ImageDTO; - /** - * The size of the container, used for sizing. - * If not provided, an internal container will be used, but this can cause a flicker effect as the component is first rendered. - */ - containerSize?: UseMeasureRect; - /** - * The ref of the container, used for sizing. - * If not provided, an internal container will be used, but this can cause a flicker effect as the component is first rendered. - */ - containerRef?: React.RefObject; -}; - -export const ImageSliderComparison = memo( - ({ firstImage, secondImage, containerSize: containerSizeProp, containerRef: containerRefProp }: Props) => { - const { t } = useTranslation(); - // How far the handle is from the left - this will be a CSS calculation that takes into account the handle width - const [left, setLeft] = useState(HANDLE_LEFT_INITIAL_PX); - // How wide the first image is - const [width, setWidth] = useState(INITIAL_POS); - const handleRef = useRef(null); - // If the container size is not provided, use an internal ref and measure - can cause flicker on mount tho - const _containerRef = useRef(null); - const [_containerSize] = useMeasure(_containerRef); - const containerRef = useMemo(() => containerRefProp ?? _containerRef, [containerRefProp, _containerRef]); - const containerSize = useMemo(() => containerSizeProp ?? 
_containerSize, [containerSizeProp, _containerSize]); - // To keep things smooth, we use RAF to update the handle position & gate it to 60fps - const rafRef = useRef(null); - const lastMoveTimeRef = useRef(0); - - const updateHandlePos = useCallback( - (clientX: number) => { - if (!handleRef.current || !containerRef.current) { - return; - } - lastMoveTimeRef.current = performance.now(); - const { x, width } = containerRef.current.getBoundingClientRect(); - const rawHandlePos = ((clientX - x) * 100) / width; - const handleWidthPct = (HANDLE_WIDTH * 100) / width; - const newHandlePos = Math.min(100 - handleWidthPct, Math.max(0, rawHandlePos)); - setWidth(`${newHandlePos}%`); - setLeft(`calc(${newHandlePos}% - ${HANDLE_HITBOX / 2}px)`); - }, - [containerRef] - ); - - const onMouseMove = useCallback( - (e: MouseEvent) => { - if (rafRef.current === null && performance.now() > lastMoveTimeRef.current + 1000 / 60) { - rafRef.current = window.requestAnimationFrame(() => { - updateHandlePos(e.clientX); - rafRef.current = null; - }); - } - }, - [updateHandlePos] - ); - - const onMouseUp = useCallback(() => { - window.removeEventListener('mousemove', onMouseMove); - }, [onMouseMove]); - - const onMouseDown = useCallback( - (e: React.MouseEvent) => { - // Update the handle position immediately on click - updateHandlePos(e.clientX); - window.addEventListener('mouseup', onMouseUp, { once: true }); - window.addEventListener('mousemove', onMouseMove); - }, - [onMouseMove, onMouseUp, updateHandlePos] - ); - - const fittedSize = useMemo(() => { - // Fit the first image to the container - if (containerSize.width === 0 || containerSize.height === 0) { - return { width: firstImage.width, height: firstImage.height }; - } - const targetAspectRatio = containerSize.width / containerSize.height; - const imageAspectRatio = firstImage.width / firstImage.height; - - if (firstImage.width <= containerSize.width && firstImage.height <= containerSize.height) { - return { width: firstImage.width, height: firstImage.height }; - } - - let width: number; - let height: number; - - if (imageAspectRatio > targetAspectRatio) { - // Image is wider than container's aspect ratio - width = containerSize.width; - height = width / imageAspectRatio; - } else { - // Image is taller than container's aspect ratio - height = containerSize.height; - width = height * imageAspectRatio; - } - return { width, height }; - }, [containerSize, firstImage.height, firstImage.width]); - - useEffect( - () => () => { - if (rafRef.current !== null) { - cancelAnimationFrame(rafRef.current); - } - }, - [] - ); - - return ( - - - - - - - {t('gallery.secondImage')} - - - - - {t('gallery.firstImage')} - - - - - - - - - - - - - - ); - } -); - -ImageSliderComparison.displayName = 'ImageSliderComparison'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx index 5669ec5550..6697f9fd1d 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx @@ -1,8 +1,8 @@ -import { Flex } from '@invoke-ai/ui-library'; +import { Box, Flex } from '@invoke-ai/ui-library'; import { useMeasure } from '@reactuses/core'; import { useAppSelector } from 'app/store/storeHooks'; import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview'; -import { ImageSliderComparison } from 
'features/gallery/components/ImageViewer/ImageSliderComparison3'; +import { ImageSliderComparison } from 'features/gallery/components/ImageViewer/ImageSliderComparison'; import { ToggleMetadataViewerButton } from 'features/gallery/components/ImageViewer/ToggleMetadataViewerButton'; import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton'; import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer'; @@ -43,7 +43,6 @@ export const ImageViewer = memo(() => { return ( { - {firstImage && !secondImage && } - {firstImage && secondImage && ( - - )} + + {firstImage && !secondImage && } + {firstImage && secondImage && ( + + )} + ); }); From 8f8ddd620bb4835a367939c002b9cc13639573b1 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 17:20:13 +1000 Subject: [PATCH 22/52] feat(ui): add comparison modes, side-by-side view --- invokeai/frontend/web/public/locales/en.json | 5 +- .../listeners/enqueueRequestedLinear.ts | 4 +- .../listeners/imageDropped.ts | 41 +++++------ .../socketio/socketInvocationComplete.ts | 4 +- .../web/src/features/dnd/types/index.ts | 7 +- .../web/src/features/dnd/util/isValidDrop.ts | 2 + .../SingleSelectionMenuItems.tsx | 9 +++ .../components/ImageGrid/GalleryImage.tsx | 4 +- .../ImageViewer/CurrentImagePreview.tsx | 39 +++------- .../ImageViewer/ImageComparison.tsx | 59 +++++++++++++++ .../ImageViewer/ImageComparisonSideBySide.tsx | 72 +++++++++++++++++++ ...mparison.tsx => ImageComparisonSlider.tsx} | 4 +- .../ImageComparisonToolbarButtons.tsx | 39 ++++++++++ .../components/ImageViewer/ImageViewer.tsx | 25 +++---- .../ImageViewer/ViewerToggleMenu.tsx | 52 +++++++++++--- .../components/ImageViewer/useImageViewer.tsx | 22 +++--- .../features/gallery/store/gallerySlice.ts | 25 +++++-- .../web/src/features/gallery/store/types.ts | 6 +- .../components/ParametersPanelTextToImage.tsx | 4 +- 19 files changed, 319 insertions(+), 104 deletions(-) create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx rename invokeai/frontend/web/src/features/gallery/components/ImageViewer/{ImageSliderComparison.tsx => ImageComparisonSlider.tsx} (98%) create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 43df1534e5..640f7c0958 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -148,6 +148,8 @@ "viewingDesc": "Review images in a large gallery view", "editing": "Editing", "editingDesc": "Edit on the Control Layers canvas", + "comparing": "Comparing", + "comparingDesc": "Comparing two images", "enabled": "Enabled", "disabled": "Disabled" }, @@ -377,7 +379,8 @@ "problemDeletingImages": "Problem Deleting Images", "problemDeletingImagesDesc": "One or more images could not be deleted", "firstImage": "First Image", - "secondImage": "Second Image" + "secondImage": "Second Image", + "selectForCompare": "Select for Compare" }, "hotkeys": { "searchHotkeys": "Search Hotkeys", diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts 
b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts index 6ca7ee7ffa..339b34d2be 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts @@ -1,6 +1,6 @@ import { enqueueRequested } from 'app/store/actions'; import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; -import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; +import { viewerModeChanged } from 'features/gallery/store/gallerySlice'; import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig'; import { buildGenerationTabGraph } from 'features/nodes/util/graph/generation/buildGenerationTabGraph'; import { buildGenerationTabSDXLGraph } from 'features/nodes/util/graph/generation/buildGenerationTabSDXLGraph'; @@ -34,7 +34,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening) try { await req.unwrap(); if (shouldShowProgressInViewer) { - dispatch(isImageViewerOpenChanged(true)); + dispatch(viewerModeChanged('view')); } } finally { req.reset(); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts index 9bc9635299..eb16b7912a 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts @@ -14,7 +14,7 @@ import { rgLayerIPAdapterImageChanged, } from 'features/controlLayers/store/controlLayersSlice'; import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types'; -import { imageSelected } from 'features/gallery/store/gallerySlice'; +import { imageSelected, imageToCompareChanged } from 'features/gallery/store/gallerySlice'; import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice'; import { selectOptimalDimension } from 'features/parameters/store/generationSlice'; import { imagesApi } from 'services/api/endpoints/images'; @@ -181,40 +181,31 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) => return; } - /** - * TODO - * Image selection dropped on node image collection field - */ - // if ( - // overData.actionType === 'SET_MULTI_NODES_IMAGE' && - // activeData.payloadType === 'IMAGE_DTO' && - // activeData.payload.imageDTO - // ) { - // const { fieldName, nodeId } = overData.context; - // dispatch( - // fieldValueChanged({ - // nodeId, - // fieldName, - // value: [activeData.payload.imageDTO], - // }) - // ); - // return; - // } - /** * Image dropped on user board */ if ( - overData.actionType === 'ADD_TO_BOARD' && + overData.actionType === 'SELECT_FOR_COMPARE' && + activeData.payloadType === 'IMAGE_DTO' && + activeData.payload.imageDTO + ) { + const { imageDTO } = activeData.payload; + dispatch(imageToCompareChanged(imageDTO)); + return; + } + + /** + * Image dropped on 'none' board + */ + if ( + overData.actionType === 'REMOVE_FROM_BOARD' && activeData.payloadType === 'IMAGE_DTO' && activeData.payload.imageDTO ) { const { imageDTO } = activeData.payload; - const { boardId } = overData.context; dispatch( - imagesApi.endpoints.addImageToBoard.initiate({ + imagesApi.endpoints.removeImageFromBoard.initiate({ imageDTO, - board_id: boardId, }) ); return; diff --git 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts index 2841493ca6..2d8e10bae9 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts @@ -7,7 +7,7 @@ import { boardIdSelected, galleryViewChanged, imageSelected, - isImageViewerOpenChanged, + viewerModeChanged, } from 'features/gallery/store/gallerySlice'; import { IMAGE_CATEGORIES } from 'features/gallery/store/types'; import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState'; @@ -108,7 +108,7 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi } dispatch(imageSelected(imageDTO)); - dispatch(isImageViewerOpenChanged(true)); + dispatch(viewerModeChanged('view')); } } } diff --git a/invokeai/frontend/web/src/features/dnd/types/index.ts b/invokeai/frontend/web/src/features/dnd/types/index.ts index 4d09c759eb..f61090b6bd 100644 --- a/invokeai/frontend/web/src/features/dnd/types/index.ts +++ b/invokeai/frontend/web/src/features/dnd/types/index.ts @@ -79,6 +79,10 @@ export type RemoveFromBoardDropData = BaseDropData & { actionType: 'REMOVE_FROM_BOARD'; }; +export type SelectForCompareDropData = BaseDropData & { + actionType: 'SELECT_FOR_COMPARE'; +}; + export type TypesafeDroppableData = | CurrentImageDropData | ControlAdapterDropData @@ -89,7 +93,8 @@ export type TypesafeDroppableData = | CALayerImageDropData | IPALayerImageDropData | RGLayerIPAdapterImageDropData - | IILayerImageDropData; + | IILayerImageDropData + | SelectForCompareDropData; type BaseDragData = { id: string; diff --git a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts index b701c72947..6c470c313f 100644 --- a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts +++ b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts @@ -29,6 +29,8 @@ export const isValidDrop = (overData: TypesafeDroppableData | undefined, active: return payloadType === 'IMAGE_DTO'; case 'SET_NODES_IMAGE': return payloadType === 'IMAGE_DTO'; + case 'SELECT_FOR_COMPARE': + return payloadType === 'IMAGE_DTO'; case 'ADD_TO_BOARD': { // If the board is the same, don't allow the drop diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx index b3119aa8fa..0c0e3da8bd 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx @@ -10,6 +10,7 @@ import { iiLayerAdded } from 'features/controlLayers/store/controlLayersSlice'; import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice'; import { useImageActions } from 'features/gallery/hooks/useImageActions'; import { sentImageToCanvas, sentImageToImg2Img } from 'features/gallery/store/actions'; +import { imageToCompareChanged } from 'features/gallery/store/gallerySlice'; import { $templates } from 'features/nodes/store/nodesSlice'; import { selectOptimalDimension } from 'features/parameters/store/generationSlice'; 
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; @@ -27,6 +28,7 @@ import { PiDownloadSimpleBold, PiFlowArrowBold, PiFoldersBold, + PiImagesBold, PiPlantBold, PiQuotesBold, PiShareFatBold, @@ -117,6 +119,10 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { downloadImage(imageDTO.image_url, imageDTO.image_name); }, [downloadImage, imageDTO.image_name, imageDTO.image_url]); + const handleSelectImageForCompare = useCallback(() => { + dispatch(imageToCompareChanged(imageDTO)); + }, [dispatch, imageDTO]); + return ( <> }> @@ -130,6 +136,9 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { } onClickCapture={handleDownloadImage}> {t('parameters.downloadImage')} + } onClickCapture={handleSelectImageForCompare}> + {t('gallery.selectForCompare')} + : } diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx index 2c53599ba3..a43d10e4ca 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx @@ -11,7 +11,7 @@ import type { GallerySelectionDraggableData, ImageDraggableData, TypesafeDraggab import { getGalleryImageDataTestId } from 'features/gallery/components/ImageGrid/getGalleryImageDataTestId'; import { useMultiselect } from 'features/gallery/hooks/useMultiselect'; import { useScrollIntoView } from 'features/gallery/hooks/useScrollIntoView'; -import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; +import { viewerModeChanged } from 'features/gallery/store/gallerySlice'; import type { MouseEvent } from 'react'; import { memo, useCallback, useMemo, useState } from 'react'; import { useTranslation } from 'react-i18next'; @@ -104,7 +104,7 @@ const GalleryImage = (props: HoverableImageProps) => { }, []); const onDoubleClick = useCallback(() => { - dispatch(isImageViewerOpenChanged(true)); + dispatch(viewerModeChanged('view')); }, [dispatch]); const handleMouseOut = useCallback(() => { diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx index f40ecfca32..fd8d3c5f31 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx @@ -3,8 +3,9 @@ import { createSelector } from '@reduxjs/toolkit'; import { skipToken } from '@reduxjs/toolkit/query'; import { useAppSelector } from 'app/store/storeHooks'; import IAIDndImage from 'common/components/IAIDndImage'; +import IAIDroppable from 'common/components/IAIDroppable'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; -import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types'; +import type { SelectForCompareDropData, TypesafeDraggableData } from 'features/dnd/types'; import ImageMetadataViewer from 'features/gallery/components/ImageMetadataViewer/ImageMetadataViewer'; import NextPrevImageButtons from 'features/gallery/components/NextPrevImageButtons'; import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors'; @@ -22,21 +23,12 @@ const selectLastSelectedImageName = createSelector( (lastSelectedImage) => lastSelectedImage?.image_name ); -type Props = { - 
isDragDisabled?: boolean; - isDropDisabled?: boolean; - withNextPrevButtons?: boolean; - withMetadata?: boolean; - alwaysShowProgress?: boolean; +const droppableData: SelectForCompareDropData = { + id: 'current-image', + actionType: 'SELECT_FOR_COMPARE', }; -const CurrentImagePreview = ({ - isDragDisabled = false, - isDropDisabled = false, - withNextPrevButtons = true, - withMetadata = true, - alwaysShowProgress = false, -}: Props) => { +const CurrentImagePreview = () => { const { t } = useTranslation(); const shouldShowImageDetails = useAppSelector((s) => s.ui.shouldShowImageDetails); const imageName = useAppSelector(selectLastSelectedImageName); @@ -55,14 +47,6 @@ const CurrentImagePreview = ({ } }, [imageDTO]); - const droppableData = useMemo( - () => ({ - id: 'current-image', - actionType: 'SET_CURRENT_IMAGE', - }), - [] - ); - // Show and hide the next/prev buttons on mouse move const [shouldShowNextPrevButtons, setShouldShowNextPrevButtons] = useState(false); const timeoutId = useRef(0); @@ -86,15 +70,13 @@ const CurrentImagePreview = ({ justifyContent="center" position="relative" > - {hasDenoiseProgress && (shouldShowProgressInViewer || alwaysShowProgress) ? ( + {hasDenoiseProgress && shouldShowProgressInViewer ? ( ) : ( )} - {shouldShowImageDetails && imageDTO && withMetadata && ( + + {shouldShowImageDetails && imageDTO && ( )} - {withNextPrevButtons && shouldShowNextPrevButtons && imageDTO && ( + {shouldShowNextPrevButtons && imageDTO && ( { + const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); + const { firstImage, secondImage } = useAppSelector((s) => { + const firstImage = s.gallery.selection.slice(-1)[0] ?? null; + const secondImage = s.gallery.imageToCompare; + return { firstImage, secondImage }; + }); + + if (!firstImage || !secondImage) { + return No images to compare; + } + + if (comparisonMode === 'slider') { + return ( + + + + ); + } + + if (comparisonMode === 'side-by-side') { + return ( + + + + ); + } +}); + +ImageComparison.displayName = 'ImageComparison'; + +const droppableData: SelectForCompareDropData = { + id: 'image-comparison', + actionType: 'SELECT_FOR_COMPARE', +}; + +const ImageComparisonWrapper = memo((props: PropsWithChildren) => { + return ( + <> + {props.children} + + + ); +}); + +ImageComparisonWrapper.displayName = 'ImageComparisonWrapper'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx new file mode 100644 index 0000000000..edc2199aed --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx @@ -0,0 +1,72 @@ +import { Flex, Image } from '@invoke-ai/ui-library'; +import ResizeHandle from 'features/ui/components/tabs/ResizeHandle'; +import { memo, useCallback, useRef } from 'react'; +import { useTranslation } from 'react-i18next'; +import type { ImperativePanelGroupHandle } from 'react-resizable-panels'; +import { Panel, PanelGroup } from 'react-resizable-panels'; +import type { ImageDTO } from 'services/api/types'; + +type Props = { + /** + * The first image to compare + */ + firstImage: ImageDTO; + /** + * The second image to compare + */ + secondImage: ImageDTO; +}; + +export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Props) => { + const { t } = useTranslation(); + const panelGroupRef = useRef(null); + const onDoubleClickHandle = useCallback(() => { + if (!panelGroupRef.current) { + return; + 
} + panelGroupRef.current.setLayout([50, 50]); + }, []); + + return ( + + + + + + + + + + + + + + + + + + + ); +}); + +ImageComparisonSideBySide.displayName = 'ImageComparisonSideBySide'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx similarity index 98% rename from invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx rename to invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index 3965cd5fd0..c9f169eeff 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageSliderComparison.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -31,7 +31,7 @@ type Props = { containerSize: UseMeasureRect; }; -export const ImageSliderComparison = memo(({ firstImage, secondImage, containerSize }: Props) => { +export const ImageComparisonSlider = memo(({ firstImage, secondImage, containerSize }: Props) => { const { t } = useTranslation(); // How far the handle is from the left - this will be a CSS calculation that takes into account the handle width const [left, setLeft] = useState(HANDLE_LEFT_INITIAL_PX); @@ -260,4 +260,4 @@ export const ImageSliderComparison = memo(({ firstImage, secondImage, containerS ); }); -ImageSliderComparison.displayName = 'ImageSliderComparison'; +ImageComparisonSlider.displayName = 'ImageComparisonSlider'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx new file mode 100644 index 0000000000..df0304d1f9 --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx @@ -0,0 +1,39 @@ +import { Button, ButtonGroup } from '@invoke-ai/ui-library'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { comparisonModeChanged } from 'features/gallery/store/gallerySlice'; +import { memo, useCallback } from 'react'; + +export const ImageComparisonToolbarButtons = memo(() => { + const dispatch = useAppDispatch(); + const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); + const setComparisonModeSlider = useCallback(() => { + dispatch(comparisonModeChanged('slider')); + }, [dispatch]); + const setComparisonModeSideBySide = useCallback(() => { + dispatch(comparisonModeChanged('side-by-side')); + }, [dispatch]); + const setComparisonModeOverlay = useCallback(() => { + dispatch(comparisonModeChanged('overlay')); + }, [dispatch]); + + return ( + <> + + + + + + + ); +}); + +ImageComparisonToolbarButtons.displayName = 'ImageComparisonToolbarButtons'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx index 6697f9fd1d..1f4ce59b6e 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx @@ -2,7 +2,8 @@ import { Box, Flex } from '@invoke-ai/ui-library'; import { useMeasure } from '@reactuses/core'; import { useAppSelector } from 'app/store/storeHooks'; import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview'; -import { ImageSliderComparison } from 
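The side-by-side view above uses react-resizable-panels and resets the split when the divider is double-clicked, via the panel group's imperative handle. A hedged sketch of that interaction follows; TwoUpComparison is an illustrative name, and it assumes the resize handle forwards onDoubleClick to its DOM element (InvokeAI routes it through its own ResizeHandle wrapper).

    import type { ReactNode } from 'react';
    import { useCallback, useRef } from 'react';
    import type { ImperativePanelGroupHandle } from 'react-resizable-panels';
    import { Panel, PanelGroup, PanelResizeHandle } from 'react-resizable-panels';

    const TwoUpComparison = ({ left, right }: { left: ReactNode; right: ReactNode }) => {
      const panelGroupRef = useRef<ImperativePanelGroupHandle>(null);
      const onDoubleClickHandle = useCallback(() => {
        // Snap the layout back to an even 50/50 split.
        panelGroupRef.current?.setLayout([50, 50]);
      }, []);
      return (
        <PanelGroup ref={panelGroupRef} direction="horizontal">
          <Panel minSize={20}>{left}</Panel>
          {/* Assumption: the handle passes onDoubleClick through to the underlying element. */}
          <PanelResizeHandle onDoubleClick={onDoubleClickHandle} />
          <Panel minSize={20}>{right}</Panel>
        </PanelGroup>
      );
    };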
'features/gallery/components/ImageViewer/ImageSliderComparison'; +import { ImageComparison } from 'features/gallery/components/ImageViewer/ImageComparison'; +import { ImageComparisonToolbarButtons } from 'features/gallery/components/ImageViewer/ImageComparisonToolbarButtons'; import { ToggleMetadataViewerButton } from 'features/gallery/components/ImageViewer/ToggleMetadataViewerButton'; import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton'; import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer'; @@ -17,7 +18,7 @@ import { ViewerToggleMenu } from './ViewerToggleMenu'; const VIEWER_ENABLED_TABS: InvokeTabName[] = ['canvas', 'generation', 'workflows']; export const ImageViewer = memo(() => { - const { isOpen, onToggle, onClose } = useImageViewer(); + const { viewerMode, onToggle, openEditor } = useImageViewer(); const activeTabName = useAppSelector(activeTabNameSelector); const isViewerEnabled = useMemo(() => VIEWER_ENABLED_TABS.includes(activeTabName), [activeTabName]); const containerRef = useRef(null); @@ -26,16 +27,11 @@ export const ImageViewer = memo(() => { if (!isViewerEnabled) { return false; } - return isOpen; - }, [isOpen, isViewerEnabled]); + return viewerMode === 'view' || viewerMode === 'compare'; + }, [viewerMode, isViewerEnabled]); useHotkeys('z', onToggle, { enabled: isViewerEnabled }, [isViewerEnabled, onToggle]); - useHotkeys('esc', onClose, { enabled: isViewerEnabled }, [isViewerEnabled, onClose]); - - const { firstImage, secondImage } = useAppSelector((s) => { - const images = s.gallery.selection.slice(-2); - return { firstImage: images[0] ?? null, secondImage: images[0] ? images[1] ?? null : null }; - }); + useHotkeys('esc', openEditor, { enabled: isViewerEnabled }, [isViewerEnabled, openEditor]); if (!shouldShowViewer) { return null; @@ -65,7 +61,8 @@ export const ImageViewer = memo(() => { - + {viewerMode === 'view' && } + {viewerMode === 'compare' && } @@ -74,10 +71,8 @@ export const ImageViewer = memo(() => { - {firstImage && !secondImage && } - {firstImage && secondImage && ( - - )} + {viewerMode === 'view' && } + {viewerMode === 'compare' && } ); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx index 3552c28a5b..db7b1f7b70 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx @@ -9,22 +9,45 @@ import { PopoverTrigger, Text, } from '@invoke-ai/ui-library'; +import { useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { PiCaretDownBold, PiCheckBold, PiEyeBold, PiPencilBold } from 'react-icons/pi'; +import { PiCaretDownBold, PiCheckBold, PiEyeBold, PiImagesBold, PiPencilBold } from 'react-icons/pi'; import { useImageViewer } from './useImageViewer'; export const ViewerToggleMenu = () => { const { t } = useTranslation(); - const { isOpen, onClose, onOpen } = useImageViewer(); + const { viewerMode, openEditor, openViewer, openCompare } = useImageViewer(); + const icon = useMemo(() => { + if (viewerMode === 'view') { + return ; + } + if (viewerMode === 'edit') { + return ; + } + if (viewerMode === 'compare') { + return ; + } + }, [viewerMode]); + const label = useMemo(() => { + if (viewerMode === 'view') { + return t('common.viewing'); + } + if (viewerMode === 'edit') { + return 
t('common.editing'); + } + if (viewerMode === 'compare') { + return t('common.comparing'); + } + }, [t, viewerMode]); return ( @@ -33,9 +56,9 @@ export const ViewerToggleMenu = () => { - - + diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx index 57b3697b7e..fe4dc47607 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx @@ -1,22 +1,26 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; +import { viewerModeChanged } from 'features/gallery/store/gallerySlice'; import { useCallback } from 'react'; export const useImageViewer = () => { const dispatch = useAppDispatch(); - const isOpen = useAppSelector((s) => s.gallery.isImageViewerOpen); + const viewerMode = useAppSelector((s) => s.gallery.viewerMode); - const onClose = useCallback(() => { - dispatch(isImageViewerOpenChanged(false)); + const openEditor = useCallback(() => { + dispatch(viewerModeChanged('edit')); }, [dispatch]); - const onOpen = useCallback(() => { - dispatch(isImageViewerOpenChanged(true)); + const openViewer = useCallback(() => { + dispatch(viewerModeChanged('view')); }, [dispatch]); const onToggle = useCallback(() => { - dispatch(isImageViewerOpenChanged(!isOpen)); - }, [dispatch, isOpen]); + dispatch(viewerModeChanged(viewerMode === 'view' ? 'edit' : 'view')); + }, [dispatch, viewerMode]); - return { isOpen, onOpen, onClose, onToggle }; + const openCompare = useCallback(() => { + dispatch(viewerModeChanged('compare')); + }, [dispatch]); + + return { viewerMode, openEditor, openViewer, openCompare, onToggle }; }; diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index af19017486..4419c10acd 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -6,7 +6,7 @@ import { boardsApi } from 'services/api/endpoints/boards'; import { imagesApi } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types'; -import type { BoardId, GalleryState, GalleryView } from './types'; +import type { BoardId, ComparisonMode, GalleryState, GalleryView, ViewerMode } from './types'; import { IMAGE_LIMIT, INITIAL_IMAGE_LIMIT } from './types'; const initialGalleryState: GalleryState = { @@ -21,7 +21,9 @@ const initialGalleryState: GalleryState = { boardSearchText: '', limit: INITIAL_IMAGE_LIMIT, offset: 0, - isImageViewerOpen: true, + viewerMode: 'view', + imageToCompare: null, + comparisonMode: 'slider', }; export const gallerySlice = createSlice({ @@ -34,6 +36,15 @@ export const gallerySlice = createSlice({ selectionChanged: (state, action: PayloadAction) => { state.selection = uniqBy(action.payload, (i) => i.image_name); }, + imageToCompareChanged: (state, action: PayloadAction) => { + state.imageToCompare = action.payload; + if (action.payload) { + state.viewerMode = 'compare'; + } + }, + comparisonModeChanged: (state, action: PayloadAction) => { + state.comparisonMode = action.payload; + }, shouldAutoSwitchChanged: (state, action: PayloadAction) => { state.shouldAutoSwitch = action.payload; }, @@ -76,8 +87,8 @@ export const gallerySlice = createSlice({ 
alwaysShowImageSizeBadgeChanged: (state, action: PayloadAction) => { state.alwaysShowImageSizeBadge = action.payload; }, - isImageViewerOpenChanged: (state, action: PayloadAction) => { - state.isImageViewerOpen = action.payload; + viewerModeChanged: (state, action: PayloadAction) => { + state.viewerMode = action.payload; }, }, extraReducers: (builder) => { @@ -116,7 +127,9 @@ export const { boardSearchTextChanged, moreImagesLoaded, alwaysShowImageSizeBadgeChanged, - isImageViewerOpenChanged, + viewerModeChanged, + imageToCompareChanged, + comparisonModeChanged, } = gallerySlice.actions; const isAnyBoardDeleted = isAnyOf( @@ -138,5 +151,5 @@ export const galleryPersistConfig: PersistConfig = { name: gallerySlice.name, initialState: initialGalleryState, migrate: migrateGalleryState, - persistDenylist: ['selection', 'selectedBoardId', 'galleryView', 'offset', 'limit', 'isImageViewerOpen'], + persistDenylist: ['selection', 'selectedBoardId', 'galleryView', 'offset', 'limit', 'viewerMode', 'imageToCompare'], }; diff --git a/invokeai/frontend/web/src/features/gallery/store/types.ts b/invokeai/frontend/web/src/features/gallery/store/types.ts index 0e86d2d4be..300586a8dc 100644 --- a/invokeai/frontend/web/src/features/gallery/store/types.ts +++ b/invokeai/frontend/web/src/features/gallery/store/types.ts @@ -7,6 +7,8 @@ export const IMAGE_LIMIT = 20; export type GalleryView = 'images' | 'assets'; export type BoardId = 'none' | (string & Record); +export type ComparisonMode = 'slider' | 'side-by-side' | 'overlay'; +export type ViewerMode = 'edit' | 'view' | 'compare'; export type GalleryState = { selection: ImageDTO[]; @@ -20,5 +22,7 @@ export type GalleryState = { offset: number; limit: number; alwaysShowImageSizeBadge: boolean; - isImageViewerOpen: boolean; + imageToCompare: ImageDTO | null; + comparisonMode: ComparisonMode; + viewerMode: ViewerMode; }; diff --git a/invokeai/frontend/web/src/features/ui/components/ParametersPanelTextToImage.tsx b/invokeai/frontend/web/src/features/ui/components/ParametersPanelTextToImage.tsx index b78d5dce9a..23a7837b20 100644 --- a/invokeai/frontend/web/src/features/ui/components/ParametersPanelTextToImage.tsx +++ b/invokeai/frontend/web/src/features/ui/components/ParametersPanelTextToImage.tsx @@ -3,7 +3,7 @@ import { Box, Flex, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/u import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants'; import { ControlLayersPanelContent } from 'features/controlLayers/components/ControlLayersPanelContent'; -import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; +import { viewerModeChanged } from 'features/gallery/store/gallerySlice'; import { Prompts } from 'features/parameters/components/Prompts/Prompts'; import QueueControls from 'features/queue/components/QueueControls'; import { SDXLPrompts } from 'features/sdxl/components/SDXLPrompts/SDXLPrompts'; @@ -51,7 +51,7 @@ const ParametersPanelTextToImage = () => { const onChangeTabs = useCallback( (i: number) => { if (i === 1) { - dispatch(isImageViewerOpenChanged(false)); + dispatch(viewerModeChanged('edit')); } }, [dispatch] From 4ef8cbd9d06ee5179d76f1cfafe35c1a56181206 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 17:37:30 +1000 Subject: [PATCH 23/52] fix(ui): use isValidDrop in imageDropped listener It was possible for a drop event to be invalid but still processed. 
Fixed by slightly changing the signature of isValidDrop. --- .../listeners/imageDropped.ts | 4 ++++ .../src/common/components/IAIDroppable.tsx | 2 +- .../web/src/features/dnd/util/isValidDrop.ts | 24 +++++++++++-------- .../ui/components/tabs/UnifiedCanvasTab.tsx | 2 +- 4 files changed, 20 insertions(+), 12 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts index eb16b7912a..c515a0d88e 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts @@ -14,6 +14,7 @@ import { rgLayerIPAdapterImageChanged, } from 'features/controlLayers/store/controlLayersSlice'; import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types'; +import { isValidDrop } from 'features/dnd/util/isValidDrop'; import { imageSelected, imageToCompareChanged } from 'features/gallery/store/gallerySlice'; import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice'; import { selectOptimalDimension } from 'features/parameters/store/generationSlice'; @@ -30,6 +31,9 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) => effect: async (action, { dispatch, getState }) => { const log = logger('dnd'); const { activeData, overData } = action.payload; + if (!isValidDrop(overData, activeData)) { + return; + } if (activeData.payloadType === 'IMAGE_DTO') { log.debug({ activeData, overData }, 'Image dropped'); diff --git a/invokeai/frontend/web/src/common/components/IAIDroppable.tsx b/invokeai/frontend/web/src/common/components/IAIDroppable.tsx index 258a6e9004..ef331c4377 100644 --- a/invokeai/frontend/web/src/common/components/IAIDroppable.tsx +++ b/invokeai/frontend/web/src/common/components/IAIDroppable.tsx @@ -36,7 +36,7 @@ const IAIDroppable = (props: IAIDroppableProps) => { pointerEvents={active ? 
'auto' : 'none'} > - {isValidDrop(data, active) && } + {isValidDrop(data, active?.data.current) && } ); diff --git a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts index 6c470c313f..d8e9d98e10 100644 --- a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts +++ b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts @@ -1,14 +1,14 @@ -import type { TypesafeActive, TypesafeDroppableData } from 'features/dnd/types'; +import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types'; -export const isValidDrop = (overData: TypesafeDroppableData | undefined, active: TypesafeActive | null) => { - if (!overData || !active?.data.current) { +export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData?: TypesafeDraggableData | null) => { + if (!overData || !activeData) { return false; } const { actionType } = overData; - const { payloadType } = active.data.current; + const { payloadType } = activeData; - if (overData.id === active.data.current.id) { + if (overData.id === activeData.id) { return false; } @@ -30,7 +30,11 @@ export const isValidDrop = (overData: TypesafeDroppableData | undefined, active: case 'SET_NODES_IMAGE': return payloadType === 'IMAGE_DTO'; case 'SELECT_FOR_COMPARE': - return payloadType === 'IMAGE_DTO'; + return ( + payloadType === 'IMAGE_DTO' && + activeData.id !== 'image-compare-first-image' && + activeData.id !== 'image-compare-second-image' + ); case 'ADD_TO_BOARD': { // If the board is the same, don't allow the drop @@ -42,7 +46,7 @@ export const isValidDrop = (overData: TypesafeDroppableData | undefined, active: // Check if the image's board is the board we are dragging onto if (payloadType === 'IMAGE_DTO') { - const { imageDTO } = active.data.current.payload; + const { imageDTO } = activeData.payload; const currentBoard = imageDTO.board_id ?? 'none'; const destinationBoard = overData.context.boardId; @@ -51,7 +55,7 @@ export const isValidDrop = (overData: TypesafeDroppableData | undefined, active: if (payloadType === 'GALLERY_SELECTION') { // Assume all images are on the same board - this is true for the moment - const currentBoard = active.data.current.payload.boardId; + const currentBoard = activeData.payload.boardId; const destinationBoard = overData.context.boardId; return currentBoard !== destinationBoard; } @@ -69,14 +73,14 @@ export const isValidDrop = (overData: TypesafeDroppableData | undefined, active: // Check if the image's board is the board we are dragging onto if (payloadType === 'IMAGE_DTO') { - const { imageDTO } = active.data.current.payload; + const { imageDTO } = activeData.payload; const currentBoard = imageDTO.board_id ?? 
'none'; return currentBoard !== 'none'; } if (payloadType === 'GALLERY_SELECTION') { - const currentBoard = active.data.current.payload.boardId; + const currentBoard = activeData.payload.boardId; return currentBoard !== 'none'; } diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvasTab.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvasTab.tsx index 3e0d9b35d4..db2156fbde 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvasTab.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvasTab.tsx @@ -41,7 +41,7 @@ const UnifiedCanvasTab = () => { > - {isValidDrop(droppableData, active) && ( + {isValidDrop(droppableData, active?.data.current) && ( )} From 0da36c12387098c7bebd288f70ece0b69d08219f Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 18:07:39 +1000 Subject: [PATCH 24/52] feat(ui): use IAIDndImage for compare mode --- .../ImageViewer/ImageComparison.tsx | 23 +++++----- .../ImageViewer/ImageComparisonSideBySide.tsx | 46 ++++++++++--------- .../ImageViewer/ImageComparisonSlider.tsx | 27 ++++++----- .../components/ImageViewer/ImageViewer.tsx | 9 ++-- 4 files changed, 54 insertions(+), 51 deletions(-) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx index ab17c9ef4e..4377658348 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx @@ -1,32 +1,31 @@ -import type { UseMeasureRect } from '@reactuses/core'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; import IAIDroppable from 'common/components/IAIDroppable'; import type { SelectForCompareDropData } from 'features/dnd/types'; import { ImageComparisonSideBySide } from 'features/gallery/components/ImageViewer/ImageComparisonSideBySide'; import { ImageComparisonSlider } from 'features/gallery/components/ImageViewer/ImageComparisonSlider'; +import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; import type { PropsWithChildren } from 'react'; import { memo } from 'react'; -type Props = { - containerSize: UseMeasureRect; -}; +const selector = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { + const firstImage = gallerySlice.selection.slice(-1)[0] ?? null; + const secondImage = gallerySlice.imageToCompare; + return { firstImage, secondImage }; +}); -export const ImageComparison = memo(({ containerSize }: Props) => { +export const ImageComparison = memo(() => { const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); - const { firstImage, secondImage } = useAppSelector((s) => { - const firstImage = s.gallery.selection.slice(-1)[0] ?? 
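The patch 23 fix above is a guard pattern: validate the droppable/draggable pair once at the top of the listener and bail out before any state is touched, using the same isValidDrop that the droppable components use to decide whether to render their drop overlays. A simplified sketch of that shape follows; the data types here are stand-ins for the app's TypesafeDroppableData and TypesafeDraggableData.

    type DroppableData = { id: string; actionType: string };
    type DraggableData = { id: string; payloadType: string };

    const isValidDrop = (overData?: DroppableData | null, activeData?: DraggableData | null): boolean => {
      if (!overData || !activeData) {
        return false;
      }
      if (overData.id === activeData.id) {
        // Never allow dropping an item onto itself.
        return false;
      }
      // ...per-actionType rules go here (board checks, compare-image checks, etc.)...
      return activeData.payloadType === 'IMAGE_DTO';
    };

    // In the drop listener, reject invalid pairs before doing any work:
    const onImageDropped = (overData: DroppableData, activeData: DraggableData) => {
      if (!isValidDrop(overData, activeData)) {
        return;
      }
      // ...dispatch the action appropriate to overData.actionType...
    };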
null; - const secondImage = s.gallery.imageToCompare; - return { firstImage, secondImage }; - }); + const { firstImage, secondImage } = useAppSelector(selector); if (!firstImage || !secondImage) { - return No images to compare; + return Select an image to compare; } if (comparisonMode === 'slider') { return ( - + ); } diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx index edc2199aed..0f9636a61c 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx @@ -1,7 +1,8 @@ -import { Flex, Image } from '@invoke-ai/ui-library'; +import { Flex } from '@invoke-ai/ui-library'; +import IAIDndImage from 'common/components/IAIDndImage'; +import type { ImageDraggableData } from 'features/dnd/types'; import ResizeHandle from 'features/ui/components/tabs/ResizeHandle'; -import { memo, useCallback, useRef } from 'react'; -import { useTranslation } from 'react-i18next'; +import { memo, useCallback, useMemo, useRef } from 'react'; import type { ImperativePanelGroupHandle } from 'react-resizable-panels'; import { Panel, PanelGroup } from 'react-resizable-panels'; import type { ImageDTO } from 'services/api/types'; @@ -18,7 +19,6 @@ type Props = { }; export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Props) => { - const { t } = useTranslation(); const panelGroupRef = useRef(null); const onDoubleClickHandle = useCallback(() => { if (!panelGroupRef.current) { @@ -27,21 +27,31 @@ export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Prop panelGroupRef.current.setLayout([50, 50]); }, []); + const firstImageDraggableData = useMemo( + () => ({ + id: 'image-compare-first-image', + payloadType: 'IMAGE_DTO', + payload: { imageDTO: firstImage }, + }), + [firstImage] + ); + + const secondImageDraggableData = useMemo( + () => ({ + id: 'image-compare-second-image', + payloadType: 'IMAGE_DTO', + payload: { imageDTO: secondImage }, + }), + [secondImage] + ); + return ( - + - + diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index c9f169eeff..3eacacf6e5 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -1,5 +1,5 @@ import { Box, Flex, Icon, Image, Text } from '@invoke-ai/ui-library'; -import type { UseMeasureRect } from '@reactuses/core'; +import { useMeasure } from '@reactuses/core'; import type { Dimensions } from 'features/canvas/store/canvasTypes'; import { STAGE_BG_DATAURL } from 'features/controlLayers/util/renderers'; import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react'; @@ -25,13 +25,9 @@ type Props = { * The second image to compare */ secondImage: ImageDTO; - /** - * The size of the container, required to fit the component correctly and manage aspect ratios. 
- */ - containerSize: UseMeasureRect; }; -export const ImageComparisonSlider = memo(({ firstImage, secondImage, containerSize }: Props) => { +export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) => { const { t } = useTranslation(); // How far the handle is from the left - this will be a CSS calculation that takes into account the handle width const [left, setLeft] = useState(HANDLE_LEFT_INITIAL_PX); @@ -40,6 +36,7 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage, containerS const handleRef = useRef(null); // If the container size is not provided, use an internal ref and measure - can cause flicker on mount tho const containerRef = useRef(null); + const [containerSize] = useMeasure(containerRef); // To keep things smooth, we use RAF to update the handle position & gate it to 60fps const rafRef = useRef(null); const lastMoveTimeRef = useRef(0); @@ -94,13 +91,13 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage, containerS const targetAspectRatio = containerSize.width / containerSize.height; const imageAspectRatio = firstImage.width / firstImage.height; + let width: number; + let height: number; + if (firstImage.width <= containerSize.width && firstImage.height <= containerSize.height) { return { width: firstImage.width, height: firstImage.height }; } - let width: number; - let height: number; - if (imageAspectRatio > targetAspectRatio) { // Image is wider than container's aspect ratio width = containerSize.width; @@ -123,7 +120,16 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage, containerS ); return ( - + { const { viewerMode, onToggle, openEditor } = useImageViewer(); const activeTabName = useAppSelector(activeTabNameSelector); const isViewerEnabled = useMemo(() => VIEWER_ENABLED_TABS.includes(activeTabName), [activeTabName]); - const containerRef = useRef(null); - const [containerSize] = useMeasure(containerRef); const shouldShowViewer = useMemo(() => { if (!isViewerEnabled) { return false; @@ -70,9 +67,9 @@ export const ImageViewer = memo(() => { - + {viewerMode === 'view' && } - {viewerMode === 'compare' && } + {viewerMode === 'compare' && } ); From e976571fba510744f6981e147936ccc06193fe07 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 18:47:17 +1000 Subject: [PATCH 25/52] build(ui): remove unused dep --- invokeai/frontend/web/package.json | 1 - invokeai/frontend/web/pnpm-lock.yaml | 13 ------------- 2 files changed, 14 deletions(-) diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index f5189f23df..0211994f22 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -59,7 +59,6 @@ "@dnd-kit/sortable": "^8.0.0", "@dnd-kit/utilities": "^3.2.2", "@fontsource-variable/inter": "^5.0.18", - "@img-comparison-slider/react": "^8.0.2", "@invoke-ai/ui-library": "^0.0.25", "@nanostores/react": "^0.7.2", "@reactuses/core": "^5.0.14", diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml index d805591721..f9a3da4e39 100644 --- a/invokeai/frontend/web/pnpm-lock.yaml +++ b/invokeai/frontend/web/pnpm-lock.yaml @@ -29,9 +29,6 @@ dependencies: '@fontsource-variable/inter': specifier: ^5.0.18 version: 5.0.18 - '@img-comparison-slider/react': - specifier: ^8.0.2 - version: 8.0.2 '@invoke-ai/ui-library': specifier: ^0.0.25 version: 
0.0.25(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.18)(@internationalized/date@3.5.3)(@types/react@18.3.1)(i18next@23.11.3)(react-dom@18.3.1)(react@18.3.1) @@ -3550,12 +3547,6 @@ packages: resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==} dev: true - /@img-comparison-slider/react@8.0.2: - resolution: {integrity: sha512-Him0yhbXpMXdnV6R3XE3LiXcMRhSXFMsbk6I7ct5HxO2YpK/BAGz3ub+7+akJRnK2XI7c3vQqvoIE507N1K4SA==} - dependencies: - img-comparison-slider: 8.0.6 - dev: false - /@internationalized/date@3.5.3: resolution: {integrity: sha512-X9bi8NAEHAjD8yzmPYT2pdJsbe+tYSEBAfowtlxJVJdZR3aK8Vg7ZUT1Fm5M47KLzp/M1p1VwAaeSma3RT7biw==} dependencies: @@ -9247,10 +9238,6 @@ packages: engines: {node: '>= 4'} dev: true - /img-comparison-slider@8.0.6: - resolution: {integrity: sha512-ej4de7mWyjcXZvDgHq8K2a/dG8Vv+qYTdUjZa3cVILf316rLtDrHyGbh9fPvixmAFgbs30zTLfmaRDa7abjtzw==} - dev: false - /immer@10.1.1: resolution: {integrity: sha512-s2MPrmjovJcoMaHtx6K11Ra7oD05NT97w1IC5zpMkT6Atjr7H8LjaDd81iIxUYpMKSRRNMJE703M1Fhr/TctHw==} dev: false From e4ce188500b67ac360456a1058d684aeb0373f3a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 19:20:30 +1000 Subject: [PATCH 26/52] feat(ui): image selection gallery state & tweaks --- invokeai/frontend/web/public/locales/en.json | 6 +- .../app/components/ThemeLocaleProvider.tsx | 7 ++ .../web/src/common/components/IAIDndImage.tsx | 8 +- .../common/components/SelectionOverlay.tsx | 13 ++- .../web/src/features/dnd/types/index.ts | 6 +- .../web/src/features/dnd/util/isValidDrop.ts | 4 +- .../Boards/BoardsList/GalleryBoard.tsx | 2 +- .../Boards/BoardsList/NoBoardBoard.tsx | 2 +- .../SingleSelectionMenuItems.tsx | 7 +- .../components/ImageGrid/GalleryImage.tsx | 4 + .../ImageViewer/CurrentImagePreview.tsx | 11 +-- .../ImageViewer/ImageComparison.tsx | 39 +++++++-- .../ImageViewer/ImageComparisonDroppable.tsx | 33 +++++++ .../ImageViewer/ImageComparisonSideBySide.tsx | 14 ++- .../ImageViewer/ImageComparisonSlider.tsx | 85 +++++++++---------- .../ImageComparisonToolbarButtons.tsx | 16 ++-- .../ImageViewer/ViewerToggleMenu.tsx | 24 ++++-- .../features/gallery/store/gallerySlice.ts | 8 ++ .../web/src/features/gallery/store/types.ts | 2 +- 19 files changed, 205 insertions(+), 86 deletions(-) create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 640f7c0958..c476411d3f 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -380,7 +380,11 @@ "problemDeletingImagesDesc": "One or more images could not be deleted", "firstImage": "First Image", "secondImage": "Second Image", - "selectForCompare": "Select for Compare" + "selectForCompare": "Select for Compare", + "selectAnImageToCompare": "Select an Image to Compare", + "slider": "Slider", + "sideBySide": "Side-by-Side", + "swapImages": "Swap" }, "hotkeys": { "searchHotkeys": "Search Hotkeys", diff --git a/invokeai/frontend/web/src/app/components/ThemeLocaleProvider.tsx b/invokeai/frontend/web/src/app/components/ThemeLocaleProvider.tsx index 0b4ca90933..aa3a24209c 100644 --- a/invokeai/frontend/web/src/app/components/ThemeLocaleProvider.tsx +++ 
b/invokeai/frontend/web/src/app/components/ThemeLocaleProvider.tsx @@ -19,6 +19,13 @@ function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) { return extendTheme({ ..._theme, direction, + shadows: { + ..._theme.shadows, + selectedForCompare: + '0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-400)', + hoverSelectedForCompare: + '0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-300)', + }, }); }, [direction]); diff --git a/invokeai/frontend/web/src/common/components/IAIDndImage.tsx b/invokeai/frontend/web/src/common/components/IAIDndImage.tsx index 2712334e1e..f16aa3d4b4 100644 --- a/invokeai/frontend/web/src/common/components/IAIDndImage.tsx +++ b/invokeai/frontend/web/src/common/components/IAIDndImage.tsx @@ -35,6 +35,7 @@ type IAIDndImageProps = FlexProps & { draggableData?: TypesafeDraggableData; dropLabel?: ReactNode; isSelected?: boolean; + isSelectedForCompare?: boolean; thumbnail?: boolean; noContentFallback?: ReactElement; useThumbailFallback?: boolean; @@ -61,6 +62,7 @@ const IAIDndImage = (props: IAIDndImageProps) => { draggableData, dropLabel, isSelected = false, + isSelectedForCompare = false, thumbnail = false, noContentFallback = defaultNoContentFallback, uploadElement = defaultUploadElement, @@ -165,7 +167,11 @@ const IAIDndImage = (props: IAIDndImageProps) => { data-testid={dataTestId} /> {withMetadataOverlay && } - + )} {!imageDTO && !isUploadDisabled && ( diff --git a/invokeai/frontend/web/src/common/components/SelectionOverlay.tsx b/invokeai/frontend/web/src/common/components/SelectionOverlay.tsx index eb50a6b9d4..3e2ecca4ae 100644 --- a/invokeai/frontend/web/src/common/components/SelectionOverlay.tsx +++ b/invokeai/frontend/web/src/common/components/SelectionOverlay.tsx @@ -3,10 +3,17 @@ import { memo, useMemo } from 'react'; type Props = { isSelected: boolean; + isSelectedForCompare: boolean; isHovered: boolean; }; -const SelectionOverlay = ({ isSelected, isHovered }: Props) => { +const SelectionOverlay = ({ isSelected, isSelectedForCompare, isHovered }: Props) => { const shadow = useMemo(() => { + if (isSelectedForCompare && isHovered) { + return 'hoverSelectedForCompare'; + } + if (isSelectedForCompare && !isHovered) { + return 'selectedForCompare'; + } if (isSelected && isHovered) { return 'hoverSelected'; } @@ -17,7 +24,7 @@ const SelectionOverlay = ({ isSelected, isHovered }: Props) => { return 'hoverUnselected'; } return undefined; - }, [isHovered, isSelected]); + }, [isHovered, isSelected, isSelectedForCompare]); return ( { bottom={0} insetInlineStart={0} borderRadius="base" - opacity={isSelected ? 1 : 0.7} + opacity={isSelected || isSelectedForCompare ? 
1 : 0.7} transitionProperty="common" transitionDuration="0.1s" pointerEvents="none" diff --git a/invokeai/frontend/web/src/features/dnd/types/index.ts b/invokeai/frontend/web/src/features/dnd/types/index.ts index f61090b6bd..f66fec0ea1 100644 --- a/invokeai/frontend/web/src/features/dnd/types/index.ts +++ b/invokeai/frontend/web/src/features/dnd/types/index.ts @@ -81,6 +81,10 @@ export type RemoveFromBoardDropData = BaseDropData & { export type SelectForCompareDropData = BaseDropData & { actionType: 'SELECT_FOR_COMPARE'; + context: { + firstImageName?: string | null; + secondImageName?: string | null; + }; }; export type TypesafeDroppableData = @@ -139,7 +143,7 @@ export type UseDraggableTypesafeReturnValue = Omit { +interface TypesafeActive extends Omit { data: React.MutableRefObject; } diff --git a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts index d8e9d98e10..ceca331725 100644 --- a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts +++ b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts @@ -33,7 +33,9 @@ export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData? return ( payloadType === 'IMAGE_DTO' && activeData.id !== 'image-compare-first-image' && - activeData.id !== 'image-compare-second-image' + activeData.id !== 'image-compare-second-image' && + activeData.payload.imageDTO.image_name !== overData.context.firstImageName && + activeData.payload.imageDTO.image_name !== overData.context.secondImageName ); case 'ADD_TO_BOARD': { // If the board is the same, don't allow the drop diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx index 0509305192..f8c4f5ebcf 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx @@ -162,7 +162,7 @@ const GalleryBoard = ({ board, isSelected, setBoardToDelete }: GalleryBoardProps )} {isSelectedForAutoAdd && } - + { > {boardName} - + {t('unifiedCanvas.move')}} /> diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx index 0c0e3da8bd..bc7e1bdb84 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx @@ -46,6 +46,11 @@ type SingleSelectionMenuItemsProps = { const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { const { imageDTO } = props; const optimalDimension = useAppSelector(selectOptimalDimension); + const maySelectForCompare = useAppSelector( + (s) => + s.gallery.imageToCompare?.image_name !== imageDTO.image_name && + s.gallery.selection.slice(-1)[0]?.image_name !== imageDTO.image_name + ); const dispatch = useAppDispatch(); const { t } = useTranslation(); const isCanvasEnabled = useFeatureStatus('canvas'); @@ -136,7 +141,7 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { } onClickCapture={handleDownloadImage}> {t('parameters.downloadImage')} - } onClickCapture={handleSelectImageForCompare}> + } isDisabled={!maySelectForCompare} onClickCapture={handleSelectImageForCompare}> 
{t('gallery.selectForCompare')} diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx index a43d10e4ca..812a042c8b 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx @@ -46,6 +46,9 @@ const GalleryImage = (props: HoverableImageProps) => { const { t } = useTranslation(); const selectedBoardId = useAppSelector((s) => s.gallery.selectedBoardId); const alwaysShowImageSizeBadge = useAppSelector((s) => s.gallery.alwaysShowImageSizeBadge); + const isSelectedForCompare = useAppSelector( + (s) => s.gallery.imageToCompare?.image_name === imageName && s.gallery.viewerMode === 'compare' + ); const { handleClick, isSelected, areMultiplesSelected } = useMultiselect(imageDTO); const customStarUi = useStore($customStarUI); @@ -152,6 +155,7 @@ const GalleryImage = (props: HoverableImageProps) => { imageDTO={imageDTO} draggableData={draggableData} isSelected={isSelected} + isSelectedForCompare={isSelectedForCompare} minSize={0} imageSx={imageSx} isDropDisabled={true} diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx index fd8d3c5f31..5de4f28d2a 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx @@ -3,10 +3,10 @@ import { createSelector } from '@reduxjs/toolkit'; import { skipToken } from '@reduxjs/toolkit/query'; import { useAppSelector } from 'app/store/storeHooks'; import IAIDndImage from 'common/components/IAIDndImage'; -import IAIDroppable from 'common/components/IAIDroppable'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; -import type { SelectForCompareDropData, TypesafeDraggableData } from 'features/dnd/types'; +import type { TypesafeDraggableData } from 'features/dnd/types'; import ImageMetadataViewer from 'features/gallery/components/ImageMetadataViewer/ImageMetadataViewer'; +import { ImageComparisonDroppable } from 'features/gallery/components/ImageViewer/ImageComparisonDroppable'; import NextPrevImageButtons from 'features/gallery/components/NextPrevImageButtons'; import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors'; import type { AnimationProps } from 'framer-motion'; @@ -23,11 +23,6 @@ const selectLastSelectedImageName = createSelector( (lastSelectedImage) => lastSelectedImage?.image_name ); -const droppableData: SelectForCompareDropData = { - id: 'current-image', - actionType: 'SELECT_FOR_COMPARE', -}; - const CurrentImagePreview = () => { const { t } = useTranslation(); const shouldShowImageDetails = useAppSelector((s) => s.ui.shouldShowImageDetails); @@ -85,7 +80,7 @@ const CurrentImagePreview = () => { dataTestId="image-preview" /> )} - + {shouldShowImageDetails && imageDTO && ( diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx index 4377658348..74acdfa13f 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx @@ -1,12 +1,16 @@ 
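Note: the hunk below wires the currently-compared image names into the drop target's context so that isValidDrop can reject dropping an image that is already part of the comparison. A minimal, self-contained sketch of that guard follows; the type and function names here are illustrative stand-ins, not the app's real imports.

// Sketch only: simplified local types standing in for the app's dnd context.
type SelectForCompareContext = {
  firstImageName?: string | null;
  secondImageName?: string | null;
};

// An image may be dropped onto the comparison target only if it is not
// already one of the two images being compared.
const canDropForCompare = (imageName: string, context: SelectForCompareContext): boolean =>
  imageName !== context.firstImageName && imageName !== context.secondImageName;

// e.g. canDropForCompare('a.png', { firstImageName: 'a.png', secondImageName: 'b.png' }) -> false
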
import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; import IAIDroppable from 'common/components/IAIDroppable'; +import { IAINoContentFallback } from 'common/components/IAIImageFallback'; import type { SelectForCompareDropData } from 'features/dnd/types'; import { ImageComparisonSideBySide } from 'features/gallery/components/ImageViewer/ImageComparisonSideBySide'; import { ImageComparisonSlider } from 'features/gallery/components/ImageViewer/ImageComparisonSlider'; import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; import type { PropsWithChildren } from 'react'; -import { memo } from 'react'; +import { memo, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiImagesBold } from 'react-icons/pi'; +import type { ImageDTO } from 'services/api/types'; const selector = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { const firstImage = gallerySlice.selection.slice(-1)[0] ?? null; @@ -15,16 +19,21 @@ const selector = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { }); export const ImageComparison = memo(() => { + const { t } = useTranslation(); const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); const { firstImage, secondImage } = useAppSelector(selector); if (!firstImage || !secondImage) { - return Select an image to compare; + return ( + + + + ); } if (comparisonMode === 'slider') { return ( - + ); @@ -32,7 +41,7 @@ export const ImageComparison = memo(() => { if (comparisonMode === 'side-by-side') { return ( - + ); @@ -41,12 +50,24 @@ export const ImageComparison = memo(() => { ImageComparison.displayName = 'ImageComparison'; -const droppableData: SelectForCompareDropData = { - id: 'image-comparison', - actionType: 'SELECT_FOR_COMPARE', -}; +type Props = PropsWithChildren<{ + firstImage: ImageDTO | null; + secondImage: ImageDTO | null; +}>; + +const ImageComparisonWrapper = memo((props: Props) => { + const droppableData = useMemo( + () => ({ + id: 'image-comparison', + actionType: 'SELECT_FOR_COMPARE', + context: { + firstImageName: props.firstImage?.image_name, + secondImageName: props.secondImage?.image_name, + }, + }), + [props.firstImage?.image_name, props.secondImage?.image_name] + ); -const ImageComparisonWrapper = memo((props: PropsWithChildren) => { return ( <> {props.children} diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx new file mode 100644 index 0000000000..6f163f63cf --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx @@ -0,0 +1,33 @@ +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { useAppSelector } from 'app/store/storeHooks'; +import IAIDroppable from 'common/components/IAIDroppable'; +import type { SelectForCompareDropData } from 'features/dnd/types'; +import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; +import { memo, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; + +const selector = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { + const firstImage = gallerySlice.selection.slice(-1)[0] ?? 
null; + const secondImage = gallerySlice.imageToCompare; + return { firstImage, secondImage }; +}); + +export const ImageComparisonDroppable = memo(() => { + const { t } = useTranslation(); + const { firstImage, secondImage } = useAppSelector(selector); + const droppableData = useMemo( + () => ({ + id: 'image-comparison', + actionType: 'SELECT_FOR_COMPARE', + context: { + firstImageName: firstImage?.image_name, + secondImageName: secondImage?.image_name, + }, + }), + [firstImage?.image_name, secondImage?.image_name] + ); + + return ; +}); + +ImageComparisonDroppable.displayName = 'ImageComparisonDroppable'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx index 0f9636a61c..6cddb175cd 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx @@ -51,7 +51,12 @@ export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Prop - + - + diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index 3eacacf6e5..c7a411c07c 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -9,7 +9,7 @@ import type { ImageDTO } from 'services/api/types'; const DROP_SHADOW = 'drop-shadow(0px 0px 4px rgb(0, 0, 0))'; const INITIAL_POS = '50%'; -const HANDLE_WIDTH = 2; +const HANDLE_WIDTH = 1; const HANDLE_WIDTH_PX = `${HANDLE_WIDTH}px`; const HANDLE_HITBOX = 20; const HANDLE_HITBOX_PX = `${HANDLE_HITBOX}px`; @@ -37,52 +37,11 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = // If the container size is not provided, use an internal ref and measure - can cause flicker on mount tho const containerRef = useRef(null); const [containerSize] = useMeasure(containerRef); + const imageContainerRef = useRef(null); // To keep things smooth, we use RAF to update the handle position & gate it to 60fps const rafRef = useRef(null); const lastMoveTimeRef = useRef(0); - const updateHandlePos = useCallback( - (clientX: number) => { - if (!handleRef.current || !containerRef.current) { - return; - } - lastMoveTimeRef.current = performance.now(); - const { x, width } = containerRef.current.getBoundingClientRect(); - const rawHandlePos = ((clientX - x) * 100) / width; - const handleWidthPct = (HANDLE_WIDTH * 100) / width; - const newHandlePos = Math.min(100 - handleWidthPct, Math.max(0, rawHandlePos)); - setWidth(`${newHandlePos}%`); - setLeft(`calc(${newHandlePos}% - ${HANDLE_HITBOX / 2}px)`); - }, - [containerRef] - ); - - const onMouseMove = useCallback( - (e: MouseEvent) => { - if (rafRef.current === null && performance.now() > lastMoveTimeRef.current + 1000 / 60) { - rafRef.current = window.requestAnimationFrame(() => { - updateHandlePos(e.clientX); - rafRef.current = null; - }); - } - }, - [updateHandlePos] - ); - - const onMouseUp = useCallback(() => { - window.removeEventListener('mousemove', onMouseMove); - }, [onMouseMove]); - - const onMouseDown = useCallback( - (e: React.MouseEvent) => { - // Update the handle position immediately on click - updateHandlePos(e.clientX); - 
window.addEventListener('mouseup', onMouseUp, { once: true }); - window.addEventListener('mousemove', onMouseMove); - }, - [onMouseMove, onMouseUp, updateHandlePos] - ); - const fittedSize = useMemo(() => { // Fit the first image to the container if (containerSize.width === 0 || containerSize.height === 0) { @@ -110,6 +69,45 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = return { width, height }; }, [containerSize, firstImage.height, firstImage.width]); + const updateHandlePos = useCallback((clientX: number) => { + if (!handleRef.current || !imageContainerRef.current) { + return; + } + lastMoveTimeRef.current = performance.now(); + const { x, width } = imageContainerRef.current.getBoundingClientRect(); + const rawHandlePos = ((clientX - x) * 100) / width; + const handleWidthPct = (HANDLE_WIDTH * 100) / width; + const newHandlePos = Math.min(100 - handleWidthPct, Math.max(0, rawHandlePos)); + setWidth(`${newHandlePos}%`); + setLeft(`calc(${newHandlePos}% - ${HANDLE_HITBOX / 2}px)`); + }, []); + + const onMouseMove = useCallback( + (e: MouseEvent) => { + if (rafRef.current === null && performance.now() > lastMoveTimeRef.current + 1000 / 60) { + rafRef.current = window.requestAnimationFrame(() => { + updateHandlePos(e.clientX); + rafRef.current = null; + }); + } + }, + [updateHandlePos] + ); + + const onMouseUp = useCallback(() => { + window.removeEventListener('mousemove', onMouseMove); + }, [onMouseMove]); + + const onMouseDown = useCallback( + (e: React.MouseEvent) => { + // Update the handle position immediately on click + updateHandlePos(e.clientX); + window.addEventListener('mouseup', onMouseUp, { once: true }); + window.addEventListener('mousemove', onMouseMove); + }, + [onMouseMove, onMouseUp, updateHandlePos] + ); + useEffect( () => () => { if (rafRef.current !== null) { @@ -141,6 +139,7 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = justifyContent="center" > { + const { t } = useTranslation(); const dispatch = useAppDispatch(); const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); const setComparisonModeSlider = useCallback(() => { @@ -12,26 +14,24 @@ export const ImageComparisonToolbarButtons = memo(() => { const setComparisonModeSideBySide = useCallback(() => { dispatch(comparisonModeChanged('side-by-side')); }, [dispatch]); - const setComparisonModeOverlay = useCallback(() => { - dispatch(comparisonModeChanged('overlay')); + const swapImages = useCallback(() => { + dispatch(comparedImagesSwapped()); }, [dispatch]); return ( <> - + ); }); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx index db7b1f7b70..f5b02db2fc 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx @@ -8,8 +8,9 @@ import { PopoverContent, PopoverTrigger, Text, + useDisclosure, } from '@invoke-ai/ui-library'; -import { useMemo } from 'react'; +import { useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiCaretDownBold, PiCheckBold, PiEyeBold, PiImagesBold, PiPencilBold } from 'react-icons/pi'; @@ -17,6 +18,7 @@ import { useImageViewer } from './useImageViewer'; export const ViewerToggleMenu = () => { const { t } = useTranslation(); + const { isOpen, onOpen, onClose } = useDisclosure(); const { viewerMode, 
openEditor, openViewer, openCompare } = useImageViewer(); const icon = useMemo(() => { if (viewerMode === 'view') { @@ -40,9 +42,21 @@ export const ViewerToggleMenu = () => { return t('common.comparing'); } }, [t, viewerMode]); + const _openEditor = useCallback(() => { + openEditor(); + onClose(); + }, [onClose, openEditor]); + const _openViewer = useCallback(() => { + openViewer(); + onClose(); + }, [onClose, openViewer]); + const _openCompare = useCallback(() => { + openCompare(); + onClose(); + }, [onClose, openCompare]); return ( - + - - + + + } + /> + + + + + + {t('gallery.sliderFitLabel')} + + + + + + + ); diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index d8618ea200..4a49acafc5 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -24,6 +24,7 @@ const initialGalleryState: GalleryState = { viewerMode: 'view', imageToCompare: null, comparisonMode: 'slider', + sliderFit: 'fill', }; export const gallerySlice = createSlice({ @@ -97,6 +98,9 @@ export const gallerySlice = createSlice({ state.imageToCompare = oldSelection[0] ?? null; } }, + sliderFitChanged: (state, action: PayloadAction<'contain' | 'fill'>) => { + state.sliderFit = action.payload; + }, }, extraReducers: (builder) => { builder.addMatcher(isAnyBoardDeleted, (state, action) => { @@ -138,6 +142,7 @@ export const { imageToCompareChanged, comparisonModeChanged, comparedImagesSwapped, + sliderFitChanged, } = gallerySlice.actions; const isAnyBoardDeleted = isAnyOf( diff --git a/invokeai/frontend/web/src/features/gallery/store/types.ts b/invokeai/frontend/web/src/features/gallery/store/types.ts index f406c37303..1388c792c3 100644 --- a/invokeai/frontend/web/src/features/gallery/store/types.ts +++ b/invokeai/frontend/web/src/features/gallery/store/types.ts @@ -24,5 +24,6 @@ export type GalleryState = { alwaysShowImageSizeBadge: boolean; imageToCompare: ImageDTO | null; comparisonMode: ComparisonMode; + sliderFit: 'contain' | 'fill'; viewerMode: ViewerMode; }; From 0e5336d8fa0ea2e9c3402cdc360b160d8afaf50e Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 20:26:34 +1000 Subject: [PATCH 28/52] feat(ui): rework comparison activation, add hotkeys --- invokeai/frontend/web/public/locales/en.json | 3 +- .../listeners/enqueueRequestedLinear.ts | 4 +- .../listeners/galleryImageClicked.ts | 9 ++- .../socketio/socketInvocationComplete.ts | 4 +- .../SingleSelectionMenuItems.tsx | 6 +- .../components/ImageGrid/GalleryImage.tsx | 9 ++- .../ImageComparisonToolbarButtons.tsx | 46 ++++++++----- .../components/ImageViewer/ImageViewer.tsx | 17 ++--- .../ImageViewer/ViewerToggleMenu.tsx | 68 +++---------------- .../components/ImageViewer/useImageViewer.tsx | 22 +++--- .../gallery/hooks/useGalleryHotkeys.ts | 24 +++---- .../gallery/hooks/useGalleryNavigation.ts | 68 +++++++++++++------ .../features/gallery/hooks/useMultiselect.ts | 1 + .../features/gallery/store/gallerySlice.ts | 26 +++++-- .../web/src/features/gallery/store/types.ts | 3 +- .../components/ParametersPanelTextToImage.tsx | 4 +- 16 files changed, 153 insertions(+), 161 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 6fd46aafcf..970c926500 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ 
-386,7 +386,8 @@ "sideBySide": "Side-by-Side", "swapImages": "Swap Images", "compareOptions": "Comparison Options", - "sliderFitLabel": "Stretch second image to fit" + "sliderFitLabel": "Stretch second image to fit", + "exitCompare": "Exit Compare" }, "hotkeys": { "searchHotkeys": "Search Hotkeys", diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts index 339b34d2be..6ca7ee7ffa 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts @@ -1,6 +1,6 @@ import { enqueueRequested } from 'app/store/actions'; import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; -import { viewerModeChanged } from 'features/gallery/store/gallerySlice'; +import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig'; import { buildGenerationTabGraph } from 'features/nodes/util/graph/generation/buildGenerationTabGraph'; import { buildGenerationTabSDXLGraph } from 'features/nodes/util/graph/generation/buildGenerationTabSDXLGraph'; @@ -34,7 +34,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening) try { await req.unwrap(); if (shouldShowProgressInViewer) { - dispatch(viewerModeChanged('view')); + dispatch(isImageViewerOpenChanged(true)); } } finally { req.reset(); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts index 67c6d076ee..de04202435 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts @@ -1,7 +1,7 @@ import { createAction } from '@reduxjs/toolkit'; import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors'; -import { selectionChanged } from 'features/gallery/store/gallerySlice'; +import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice'; import { imagesApi } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types'; import { imagesSelectors } from 'services/api/util'; @@ -11,6 +11,7 @@ export const galleryImageClicked = createAction<{ shiftKey: boolean; ctrlKey: boolean; metaKey: boolean; + altKey: boolean; }>('gallery/imageClicked'); /** @@ -28,7 +29,7 @@ export const addGalleryImageClickedListener = (startAppListening: AppStartListen startAppListening({ actionCreator: galleryImageClicked, effect: async (action, { dispatch, getState }) => { - const { imageDTO, shiftKey, ctrlKey, metaKey } = action.payload; + const { imageDTO, shiftKey, ctrlKey, metaKey, altKey } = action.payload; const state = getState(); const queryArgs = selectListImagesQueryArgs(state); const { data: listImagesData } = imagesApi.endpoints.listImages.select(queryArgs)(state); @@ -41,7 +42,9 @@ export const addGalleryImageClickedListener = (startAppListening: AppStartListen const imageDTOs = imagesSelectors.selectAll(listImagesData); const selection = 
state.gallery.selection; - if (shiftKey) { + if (altKey) { + dispatch(imageToCompareChanged(imageDTO)); + } else if (shiftKey) { const rangeEndImageName = imageDTO.image_name; const lastSelectedImage = selection[selection.length - 1]?.image_name; const lastClickedIndex = imageDTOs.findIndex((n) => n.image_name === lastSelectedImage); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts index 2d8e10bae9..2841493ca6 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts @@ -7,7 +7,7 @@ import { boardIdSelected, galleryViewChanged, imageSelected, - viewerModeChanged, + isImageViewerOpenChanged, } from 'features/gallery/store/gallerySlice'; import { IMAGE_CATEGORIES } from 'features/gallery/store/types'; import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState'; @@ -108,7 +108,7 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi } dispatch(imageSelected(imageDTO)); - dispatch(viewerModeChanged('view')); + dispatch(isImageViewerOpenChanged(true)); } } } diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx index bc7e1bdb84..2b29ba9ddf 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx @@ -46,11 +46,7 @@ type SingleSelectionMenuItemsProps = { const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { const { imageDTO } = props; const optimalDimension = useAppSelector(selectOptimalDimension); - const maySelectForCompare = useAppSelector( - (s) => - s.gallery.imageToCompare?.image_name !== imageDTO.image_name && - s.gallery.selection.slice(-1)[0]?.image_name !== imageDTO.image_name - ); + const maySelectForCompare = useAppSelector((s) => s.gallery.imageToCompare?.image_name !== imageDTO.image_name); const dispatch = useAppDispatch(); const { t } = useTranslation(); const isCanvasEnabled = useFeatureStatus('canvas'); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx index 812a042c8b..e5e216c97c 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx @@ -11,7 +11,7 @@ import type { GallerySelectionDraggableData, ImageDraggableData, TypesafeDraggab import { getGalleryImageDataTestId } from 'features/gallery/components/ImageGrid/getGalleryImageDataTestId'; import { useMultiselect } from 'features/gallery/hooks/useMultiselect'; import { useScrollIntoView } from 'features/gallery/hooks/useScrollIntoView'; -import { viewerModeChanged } from 'features/gallery/store/gallerySlice'; +import { imageToCompareChanged, isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; import type { MouseEvent } from 'react'; import { memo, useCallback, useMemo, useState } from 
'react'; import { useTranslation } from 'react-i18next'; @@ -46,9 +46,7 @@ const GalleryImage = (props: HoverableImageProps) => { const { t } = useTranslation(); const selectedBoardId = useAppSelector((s) => s.gallery.selectedBoardId); const alwaysShowImageSizeBadge = useAppSelector((s) => s.gallery.alwaysShowImageSizeBadge); - const isSelectedForCompare = useAppSelector( - (s) => s.gallery.imageToCompare?.image_name === imageName && s.gallery.viewerMode === 'compare' - ); + const isSelectedForCompare = useAppSelector((s) => s.gallery.imageToCompare?.image_name === imageName); const { handleClick, isSelected, areMultiplesSelected } = useMultiselect(imageDTO); const customStarUi = useStore($customStarUI); @@ -107,7 +105,8 @@ const GalleryImage = (props: HoverableImageProps) => { }, []); const onDoubleClick = useCallback(() => { - dispatch(viewerModeChanged('view')); + dispatch(isImageViewerOpenChanged(true)); + dispatch(imageToCompareChanged(null)); }, [dispatch]); const handleMouseOut = useCallback(() => { diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx index e9650445b5..2ee25d75a8 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx @@ -12,7 +12,12 @@ import { Switch, } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { comparedImagesSwapped, comparisonModeChanged, sliderFitChanged } from 'features/gallery/store/gallerySlice'; +import { + comparedImagesSwapped, + comparisonModeChanged, + imageToCompareChanged, + sliderFitChanged, +} from 'features/gallery/store/gallerySlice'; import type { ChangeEvent } from 'react'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; @@ -38,20 +43,12 @@ export const ImageComparisonToolbarButtons = memo(() => { }, [dispatch] ); + const exitCompare = useCallback(() => { + dispatch(imageToCompareChanged(null)); + }, [dispatch]); return ( <> - - - - { - + + + + + {t('gallery.sliderFitLabel')} - + @@ -77,6 +86,7 @@ export const ImageComparisonToolbarButtons = memo(() => { + ); }); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx index 32e9d606fc..f676a89f7e 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx @@ -17,18 +17,19 @@ import { ViewerToggleMenu } from './ViewerToggleMenu'; const VIEWER_ENABLED_TABS: InvokeTabName[] = ['canvas', 'generation', 'workflows']; export const ImageViewer = memo(() => { - const { viewerMode, onToggle, openEditor } = useImageViewer(); + const { isOpen, onToggle, onClose } = useImageViewer(); const activeTabName = useAppSelector(activeTabNameSelector); + const isComparing = useAppSelector((s) => s.gallery.imageToCompare !== null); const isViewerEnabled = useMemo(() => VIEWER_ENABLED_TABS.includes(activeTabName), [activeTabName]); const shouldShowViewer = useMemo(() => { if (!isViewerEnabled) { return false; } - return viewerMode === 'view' || viewerMode === 'compare'; - }, [viewerMode, isViewerEnabled]); + return isOpen; + }, [isOpen, 
isViewerEnabled]); useHotkeys('z', onToggle, { enabled: isViewerEnabled }, [isViewerEnabled, onToggle]); - useHotkeys('esc', openEditor, { enabled: isViewerEnabled }, [isViewerEnabled, openEditor]); + useHotkeys('esc', onClose, { enabled: isViewerEnabled }, [isViewerEnabled, onClose]); if (!shouldShowViewer) { return null; @@ -58,8 +59,8 @@ export const ImageViewer = memo(() => { - {viewerMode === 'view' && } - {viewerMode === 'compare' && } + {!isComparing && } + {isComparing && } @@ -68,8 +69,8 @@ export const ImageViewer = memo(() => { - {viewerMode === 'view' && } - {viewerMode === 'compare' && } + {!isComparing && } + {isComparing && } ); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx index f5b02db2fc..3552c28a5b 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx @@ -8,60 +8,23 @@ import { PopoverContent, PopoverTrigger, Text, - useDisclosure, } from '@invoke-ai/ui-library'; -import { useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { PiCaretDownBold, PiCheckBold, PiEyeBold, PiImagesBold, PiPencilBold } from 'react-icons/pi'; +import { PiCaretDownBold, PiCheckBold, PiEyeBold, PiPencilBold } from 'react-icons/pi'; import { useImageViewer } from './useImageViewer'; export const ViewerToggleMenu = () => { const { t } = useTranslation(); - const { isOpen, onOpen, onClose } = useDisclosure(); - const { viewerMode, openEditor, openViewer, openCompare } = useImageViewer(); - const icon = useMemo(() => { - if (viewerMode === 'view') { - return ; - } - if (viewerMode === 'edit') { - return ; - } - if (viewerMode === 'compare') { - return ; - } - }, [viewerMode]); - const label = useMemo(() => { - if (viewerMode === 'view') { - return t('common.viewing'); - } - if (viewerMode === 'edit') { - return t('common.editing'); - } - if (viewerMode === 'compare') { - return t('common.comparing'); - } - }, [t, viewerMode]); - const _openEditor = useCallback(() => { - openEditor(); - onClose(); - }, [onClose, openEditor]); - const _openViewer = useCallback(() => { - openViewer(); - onClose(); - }, [onClose, openViewer]); - const _openCompare = useCallback(() => { - openCompare(); - onClose(); - }, [onClose, openCompare]); + const { isOpen, onClose, onOpen } = useImageViewer(); return ( - + @@ -70,9 +33,9 @@ export const ViewerToggleMenu = () => { - - - diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx index fe4dc47607..57b3697b7e 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx @@ -1,26 +1,22 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { viewerModeChanged } from 'features/gallery/store/gallerySlice'; +import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; import { useCallback } from 'react'; export const useImageViewer = () => { const dispatch = useAppDispatch(); - const viewerMode = useAppSelector((s) => s.gallery.viewerMode); + const isOpen = useAppSelector((s) => s.gallery.isImageViewerOpen); - const openEditor = useCallback(() => { - 
dispatch(viewerModeChanged('edit')); + const onClose = useCallback(() => { + dispatch(isImageViewerOpenChanged(false)); }, [dispatch]); - const openViewer = useCallback(() => { - dispatch(viewerModeChanged('view')); + const onOpen = useCallback(() => { + dispatch(isImageViewerOpenChanged(true)); }, [dispatch]); const onToggle = useCallback(() => { - dispatch(viewerModeChanged(viewerMode === 'view' ? 'edit' : 'view')); - }, [dispatch, viewerMode]); + dispatch(isImageViewerOpenChanged(!isOpen)); + }, [dispatch, isOpen]); - const openCompare = useCallback(() => { - dispatch(viewerModeChanged('compare')); - }, [dispatch]); - - return { viewerMode, openEditor, openViewer, openCompare, onToggle }; + return { isOpen, onOpen, onClose, onToggle }; }; diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useGalleryHotkeys.ts b/invokeai/frontend/web/src/features/gallery/hooks/useGalleryHotkeys.ts index 1efc317e3a..931d93272b 100644 --- a/invokeai/frontend/web/src/features/gallery/hooks/useGalleryHotkeys.ts +++ b/invokeai/frontend/web/src/features/gallery/hooks/useGalleryHotkeys.ts @@ -27,16 +27,16 @@ export const useGalleryHotkeys = () => { useGalleryNavigation(); useHotkeys( - 'left', - () => { - canNavigateGallery && handleLeftImage(); + ['left', 'alt+left'], + (e) => { + canNavigateGallery && handleLeftImage(e.altKey); }, [handleLeftImage, canNavigateGallery] ); useHotkeys( - 'right', - () => { + ['right', 'alt+right'], + (e) => { if (!canNavigateGallery) { return; } @@ -45,29 +45,29 @@ export const useGalleryHotkeys = () => { return; } if (!isOnLastImage) { - handleRightImage(); + handleRightImage(e.altKey); } }, [isOnLastImage, areMoreImagesAvailable, handleLoadMoreImages, isFetching, handleRightImage, canNavigateGallery] ); useHotkeys( - 'up', - () => { - handleUpImage(); + ['up', 'alt+up'], + (e) => { + handleUpImage(e.altKey); }, { preventDefault: true }, [handleUpImage] ); useHotkeys( - 'down', - () => { + ['down', 'alt+down'], + (e) => { if (!areImagesBelowCurrent && areMoreImagesAvailable && !isFetching) { handleLoadMoreImages(); return; } - handleDownImage(); + handleDownImage(e.altKey); }, { preventDefault: true }, [areImagesBelowCurrent, areMoreImagesAvailable, handleLoadMoreImages, isFetching, handleDownImage] diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts b/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts index 1464c23285..177d7c7318 100644 --- a/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts +++ b/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts @@ -1,11 +1,11 @@ +import { useAltModifier } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { getGalleryImageDataTestId } from 'features/gallery/components/ImageGrid/getGalleryImageDataTestId'; import { imageItemContainerTestId } from 'features/gallery/components/ImageGrid/ImageGridItemContainer'; import { imageListContainerTestId } from 'features/gallery/components/ImageGrid/ImageGridListContainer'; import { virtuosoGridRefs } from 'features/gallery/components/ImageGrid/types'; import { useGalleryImages } from 'features/gallery/hooks/useGalleryImages'; -import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors'; -import { imageSelected } from 'features/gallery/store/gallerySlice'; +import { imageSelected, imageToCompareChanged } from 'features/gallery/store/gallerySlice'; import { getIsVisible } from 'features/gallery/util/getIsVisible'; 
import { getScrollToIndexAlign } from 'features/gallery/util/getScrollToIndexAlign'; import { clamp } from 'lodash-es'; @@ -106,10 +106,10 @@ const getImageFuncs = { }; type UseGalleryNavigationReturn = { - handleLeftImage: () => void; - handleRightImage: () => void; - handleUpImage: () => void; - handleDownImage: () => void; + handleLeftImage: (alt?: boolean) => void; + handleRightImage: (alt?: boolean) => void; + handleUpImage: (alt?: boolean) => void; + handleDownImage: (alt?: boolean) => void; isOnFirstImage: boolean; isOnLastImage: boolean; areImagesBelowCurrent: boolean; @@ -123,7 +123,15 @@ type UseGalleryNavigationReturn = { */ export const useGalleryNavigation = (): UseGalleryNavigationReturn => { const dispatch = useAppDispatch(); - const lastSelectedImage = useAppSelector(selectLastSelectedImage); + const alt = useAltModifier(); + const lastSelectedImage = useAppSelector((s) => { + const lastSelected = s.gallery.selection.slice(-1)[0] ?? null; + if (alt) { + return s.gallery.imageToCompare ?? lastSelected; + } else { + return lastSelected; + } + }); const { queryResult: { data }, } = useGalleryImages(); @@ -136,7 +144,7 @@ export const useGalleryNavigation = (): UseGalleryNavigationReturn => { }, [lastSelectedImage, data]); const handleNavigation = useCallback( - (direction: 'left' | 'right' | 'up' | 'down') => { + (direction: 'left' | 'right' | 'up' | 'down', alt?: boolean) => { if (!data) { return; } @@ -144,10 +152,14 @@ export const useGalleryNavigation = (): UseGalleryNavigationReturn => { if (!image || index === lastSelectedImageIndex) { return; } - dispatch(imageSelected(image)); + if (alt) { + dispatch(imageToCompareChanged(image)); + } else { + dispatch(imageSelected(image)); + } scrollToImage(image.image_name, index); }, - [dispatch, lastSelectedImageIndex, data] + [data, lastSelectedImageIndex, dispatch] ); const isOnFirstImage = useMemo(() => lastSelectedImageIndex === 0, [lastSelectedImageIndex]); @@ -162,21 +174,33 @@ export const useGalleryNavigation = (): UseGalleryNavigationReturn => { return lastSelectedImageIndex + imagesPerRow < loadedImagesCount; }, [lastSelectedImageIndex, loadedImagesCount]); - const handleLeftImage = useCallback(() => { - handleNavigation('left'); - }, [handleNavigation]); + const handleLeftImage = useCallback( + (alt?: boolean) => { + handleNavigation('left', alt); + }, + [handleNavigation] + ); - const handleRightImage = useCallback(() => { - handleNavigation('right'); - }, [handleNavigation]); + const handleRightImage = useCallback( + (alt?: boolean) => { + handleNavigation('right', alt); + }, + [handleNavigation] + ); - const handleUpImage = useCallback(() => { - handleNavigation('up'); - }, [handleNavigation]); + const handleUpImage = useCallback( + (alt?: boolean) => { + handleNavigation('up', alt); + }, + [handleNavigation] + ); - const handleDownImage = useCallback(() => { - handleNavigation('down'); - }, [handleNavigation]); + const handleDownImage = useCallback( + (alt?: boolean) => { + handleNavigation('down', alt); + }, + [handleNavigation] + ); return { handleLeftImage, diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useMultiselect.ts b/invokeai/frontend/web/src/features/gallery/hooks/useMultiselect.ts index f84a349d2a..5f7c5e4da8 100644 --- a/invokeai/frontend/web/src/features/gallery/hooks/useMultiselect.ts +++ b/invokeai/frontend/web/src/features/gallery/hooks/useMultiselect.ts @@ -36,6 +36,7 @@ export const useMultiselect = (imageDTO?: ImageDTO) => { shiftKey: e.shiftKey, ctrlKey: e.ctrlKey, metaKey: 
e.metaKey, + altKey: e.altKey, }) ); }, diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index 4a49acafc5..7861515eb5 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -6,7 +6,7 @@ import { boardsApi } from 'services/api/endpoints/boards'; import { imagesApi } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types'; -import type { BoardId, ComparisonMode, GalleryState, GalleryView, ViewerMode } from './types'; +import type { BoardId, ComparisonMode, GalleryState, GalleryView } from './types'; import { IMAGE_LIMIT, INITIAL_IMAGE_LIMIT } from './types'; const initialGalleryState: GalleryState = { @@ -21,7 +21,7 @@ const initialGalleryState: GalleryState = { boardSearchText: '', limit: INITIAL_IMAGE_LIMIT, offset: 0, - viewerMode: 'view', + isImageViewerOpen: true, imageToCompare: null, comparisonMode: 'slider', sliderFit: 'fill', @@ -40,7 +40,7 @@ export const gallerySlice = createSlice({ imageToCompareChanged: (state, action: PayloadAction) => { state.imageToCompare = action.payload; if (action.payload) { - state.viewerMode = 'compare'; + state.isImageViewerOpen = true; } }, comparisonModeChanged: (state, action: PayloadAction) => { @@ -88,8 +88,12 @@ export const gallerySlice = createSlice({ alwaysShowImageSizeBadgeChanged: (state, action: PayloadAction) => { state.alwaysShowImageSizeBadge = action.payload; }, - viewerModeChanged: (state, action: PayloadAction) => { - state.viewerMode = action.payload; + isImageViewerOpenChanged: (state, action: PayloadAction) => { + if (state.isImageViewerOpen && state.imageToCompare) { + state.imageToCompare = null; + return; + } + state.isImageViewerOpen = action.payload; }, comparedImagesSwapped: (state) => { if (state.imageToCompare) { @@ -138,7 +142,7 @@ export const { boardSearchTextChanged, moreImagesLoaded, alwaysShowImageSizeBadgeChanged, - viewerModeChanged, + isImageViewerOpenChanged, imageToCompareChanged, comparisonModeChanged, comparedImagesSwapped, @@ -164,5 +168,13 @@ export const galleryPersistConfig: PersistConfig = { name: gallerySlice.name, initialState: initialGalleryState, migrate: migrateGalleryState, - persistDenylist: ['selection', 'selectedBoardId', 'galleryView', 'offset', 'limit', 'viewerMode', 'imageToCompare'], + persistDenylist: [ + 'selection', + 'selectedBoardId', + 'galleryView', + 'offset', + 'limit', + 'isImageViewerOpen', + 'imageToCompare', + ], }; diff --git a/invokeai/frontend/web/src/features/gallery/store/types.ts b/invokeai/frontend/web/src/features/gallery/store/types.ts index 1388c792c3..1bdc91fc1e 100644 --- a/invokeai/frontend/web/src/features/gallery/store/types.ts +++ b/invokeai/frontend/web/src/features/gallery/store/types.ts @@ -8,7 +8,6 @@ export const IMAGE_LIMIT = 20; export type GalleryView = 'images' | 'assets'; export type BoardId = 'none' | (string & Record); export type ComparisonMode = 'slider' | 'side-by-side'; -export type ViewerMode = 'edit' | 'view' | 'compare'; export type GalleryState = { selection: ImageDTO[]; @@ -25,5 +24,5 @@ export type GalleryState = { imageToCompare: ImageDTO | null; comparisonMode: ComparisonMode; sliderFit: 'contain' | 'fill'; - viewerMode: ViewerMode; + isImageViewerOpen: boolean; }; diff --git a/invokeai/frontend/web/src/features/ui/components/ParametersPanelTextToImage.tsx 
b/invokeai/frontend/web/src/features/ui/components/ParametersPanelTextToImage.tsx index 23a7837b20..b78d5dce9a 100644 --- a/invokeai/frontend/web/src/features/ui/components/ParametersPanelTextToImage.tsx +++ b/invokeai/frontend/web/src/features/ui/components/ParametersPanelTextToImage.tsx @@ -3,7 +3,7 @@ import { Box, Flex, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/u import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants'; import { ControlLayersPanelContent } from 'features/controlLayers/components/ControlLayersPanelContent'; -import { viewerModeChanged } from 'features/gallery/store/gallerySlice'; +import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; import { Prompts } from 'features/parameters/components/Prompts/Prompts'; import QueueControls from 'features/queue/components/QueueControls'; import { SDXLPrompts } from 'features/sdxl/components/SDXLPrompts/SDXLPrompts'; @@ -51,7 +51,7 @@ const ParametersPanelTextToImage = () => { const onChangeTabs = useCallback( (i: number) => { if (i === 1) { - dispatch(viewerModeChanged('edit')); + dispatch(isImageViewerOpenChanged(false)); } }, [dispatch] From 76b1f241d70e66c43e52f52aeddf83970c0757ac Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 31 May 2024 20:45:10 +1000 Subject: [PATCH 29/52] fix(ui): useGalleryNavigation callback typing issue --- .../gallery/components/NextPrevImageButtons.tsx | 6 +++--- .../features/gallery/hooks/useGalleryNavigation.ts | 12 ++++++++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/src/features/gallery/components/NextPrevImageButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/NextPrevImageButtons.tsx index 9949fb5bd5..19368455e3 100644 --- a/invokeai/frontend/web/src/features/gallery/components/NextPrevImageButtons.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/NextPrevImageButtons.tsx @@ -14,7 +14,7 @@ const nextPrevButtonStyles: ChakraProps['sx'] = { const NextPrevImageButtons = () => { const { t } = useTranslation(); - const { handleLeftImage, handleRightImage, isOnFirstImage, isOnLastImage } = useGalleryNavigation(); + const { prevImage, nextImage, isOnFirstImage, isOnLastImage } = useGalleryNavigation(); const { areMoreImagesAvailable, @@ -30,7 +30,7 @@ const NextPrevImageButtons = () => { aria-label={t('accessibility.previousImage')} icon={} variant="unstyled" - onClick={handleLeftImage} + onClick={prevImage} boxSize={16} sx={nextPrevButtonStyles} /> @@ -42,7 +42,7 @@ const NextPrevImageButtons = () => { aria-label={t('accessibility.nextImage')} icon={} variant="unstyled" - onClick={handleRightImage} + onClick={nextImage} boxSize={16} sx={nextPrevButtonStyles} /> diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts b/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts index 177d7c7318..ce6b152577 100644 --- a/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts +++ b/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts @@ -110,6 +110,8 @@ type UseGalleryNavigationReturn = { handleRightImage: (alt?: boolean) => void; handleUpImage: (alt?: boolean) => void; handleDownImage: (alt?: boolean) => void; + prevImage: () => void; + nextImage: () => void; isOnFirstImage: boolean; isOnLastImage: boolean; areImagesBelowCurrent: boolean; @@ -202,6 
+204,14 @@ export const useGalleryNavigation = (): UseGalleryNavigationReturn => { [handleNavigation] ); + const nextImage = useCallback(() => { + handleRightImage(); + }, [handleRightImage]); + + const prevImage = useCallback(() => { + handleLeftImage(); + }, [handleLeftImage]); + return { handleLeftImage, handleRightImage, @@ -210,5 +220,7 @@ export const useGalleryNavigation = (): UseGalleryNavigationReturn => { isOnFirstImage, isOnLastImage, areImagesBelowCurrent, + nextImage, + prevImage, }; }; From 69da67e9200c157b82a1f62160e9517e2b3a4b92 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 08:42:16 +1000 Subject: [PATCH 30/52] fix(ui): dnd on board Copy-paste error broke this --- .../listenerMiddleware/listeners/imageDropped.ts | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts index c515a0d88e..84823407e9 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts @@ -186,7 +186,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) => } /** - * Image dropped on user board + * Image selected for compare */ if ( overData.actionType === 'SELECT_FOR_COMPARE' && @@ -199,17 +199,19 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) => } /** - * Image dropped on 'none' board + * Image dropped on user board */ if ( - overData.actionType === 'REMOVE_FROM_BOARD' && + overData.actionType === 'ADD_TO_BOARD' && activeData.payloadType === 'IMAGE_DTO' && activeData.payload.imageDTO ) { const { imageDTO } = activeData.payload; + const { boardId } = overData.context; dispatch( - imagesApi.endpoints.removeImageFromBoard.initiate({ + imagesApi.endpoints.addImageToBoard.initiate({ imageDTO, + board_id: boardId, }) ); return; From dd74e891275a163d6d37b3069b1b1a85db8d2350 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 08:55:49 +1000 Subject: [PATCH 31/52] fix(ui): close context menu on click select for compare --- .../components/ImageContextMenu/SingleSelectionMenuItems.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx index 2b29ba9ddf..31df113115 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx @@ -137,7 +137,7 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { } onClickCapture={handleDownloadImage}> {t('parameters.downloadImage')} - } isDisabled={!maySelectForCompare} onClickCapture={handleSelectImageForCompare}> + } isDisabled={!maySelectForCompare} onClick={handleSelectImageForCompare}> {t('gallery.selectForCompare')} From 940de6a5c55af26ed5d8bbe91fd90f6296bd9f87 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 09:41:54 +1000 Subject: [PATCH 32/52] fix(ui): allow drop of 
currently-selected image for compare --- invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts index ceca331725..d8e9d98e10 100644 --- a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts +++ b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts @@ -33,9 +33,7 @@ export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData? return ( payloadType === 'IMAGE_DTO' && activeData.id !== 'image-compare-first-image' && - activeData.id !== 'image-compare-second-image' && - activeData.payload.imageDTO.image_name !== overData.context.firstImageName && - activeData.payload.imageDTO.image_name !== overData.context.secondImageName + activeData.id !== 'image-compare-second-image' ); case 'ADD_TO_BOARD': { // If the board is the same, don't allow the drop From 8ea4067f8366e6f257f010ec0105f3a16433005c Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 09:42:04 +1000 Subject: [PATCH 33/52] feat(ui): rework compare toolbar --- invokeai/frontend/web/public/locales/en.json | 2 +- .../components/ImageViewer/CompareToolbar.tsx | 89 ++++++++++++++++++ .../ImageComparisonToolbarButtons.tsx | 94 ------------------- .../components/ImageViewer/ImageViewer.tsx | 27 +----- .../components/ImageViewer/ViewerToolbar.tsx | 30 ++++++ 5 files changed, 124 insertions(+), 118 deletions(-) create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToolbar.tsx diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 970c926500..aaa6c4e441 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -386,7 +386,7 @@ "sideBySide": "Side-by-Side", "swapImages": "Swap Images", "compareOptions": "Comparison Options", - "sliderFitLabel": "Stretch second image to fit", + "stretchToFit": "Stretch to Fit", "exitCompare": "Exit Compare" }, "hotkeys": { diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx new file mode 100644 index 0000000000..7ad9a9bf8f --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx @@ -0,0 +1,89 @@ +import { Button, ButtonGroup, Flex, IconButton } from '@invoke-ai/ui-library'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { + comparedImagesSwapped, + comparisonModeChanged, + imageToCompareChanged, + sliderFitChanged, +} from 'features/gallery/store/gallerySlice'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiArrowsOutBold, PiSwapBold, PiXBold } from 'react-icons/pi'; + +export const CompareToolbar = memo(() => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); + const sliderFit = useAppSelector((s) => s.gallery.sliderFit); + const setComparisonModeSlider = useCallback(() => { + 
dispatch(comparisonModeChanged('slider')); + }, [dispatch]); + const setComparisonModeSideBySide = useCallback(() => { + dispatch(comparisonModeChanged('side-by-side')); + }, [dispatch]); + const swapImages = useCallback(() => { + dispatch(comparedImagesSwapped()); + }, [dispatch]); + const toggleSliderFit = useCallback(() => { + dispatch(sliderFitChanged(sliderFit === 'contain' ? 'fill' : 'contain')); + }, [dispatch, sliderFit]); + const exitCompare = useCallback(() => { + dispatch(imageToCompareChanged(null)); + }, [dispatch]); + + return ( + + + + } + aria-label={t('gallery.swapImages')} + tooltip={t('gallery.swapImages')} + onClick={swapImages} + /> + {comparisonMode === 'slider' && ( + } + /> + )} + + + + + + + + + + + } + aria-label={t('gallery.exitCompare')} + tooltip={t('gallery.exitCompare')} + onClick={exitCompare} + /> + + + + ); +}); + +CompareToolbar.displayName = 'CompareToolbar'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx deleted file mode 100644 index 2ee25d75a8..0000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonToolbarButtons.tsx +++ /dev/null @@ -1,94 +0,0 @@ -import { - Button, - ButtonGroup, - Flex, - FormControl, - FormLabel, - IconButton, - Popover, - PopoverBody, - PopoverContent, - PopoverTrigger, - Switch, -} from '@invoke-ai/ui-library'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { - comparedImagesSwapped, - comparisonModeChanged, - imageToCompareChanged, - sliderFitChanged, -} from 'features/gallery/store/gallerySlice'; -import type { ChangeEvent } from 'react'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiGearBold } from 'react-icons/pi'; - -export const ImageComparisonToolbarButtons = memo(() => { - const { t } = useTranslation(); - const dispatch = useAppDispatch(); - const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); - const sliderFit = useAppSelector((s) => s.gallery.sliderFit); - const setComparisonModeSlider = useCallback(() => { - dispatch(comparisonModeChanged('slider')); - }, [dispatch]); - const setComparisonModeSideBySide = useCallback(() => { - dispatch(comparisonModeChanged('side-by-side')); - }, [dispatch]); - const swapImages = useCallback(() => { - dispatch(comparedImagesSwapped()); - }, [dispatch]); - const onSliderFitChanged = useCallback( - (e: ChangeEvent) => { - dispatch(sliderFitChanged(e.target.checked ? 
'fill' : 'contain')); - }, - [dispatch] - ); - const exitCompare = useCallback(() => { - dispatch(imageToCompareChanged(null)); - }, [dispatch]); - - return ( - <> - - - } - /> - - - - - - - - - - {t('gallery.sliderFitLabel')} - - - - - - - - - - - ); -}); - -ImageComparisonToolbarButtons.displayName = 'ImageComparisonToolbarButtons'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx index f676a89f7e..ce5936b1c2 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx @@ -1,19 +1,15 @@ import { Box, Flex } from '@invoke-ai/ui-library'; import { useAppSelector } from 'app/store/storeHooks'; +import { CompareToolbar } from 'features/gallery/components/ImageViewer/CompareToolbar'; import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview'; import { ImageComparison } from 'features/gallery/components/ImageViewer/ImageComparison'; -import { ImageComparisonToolbarButtons } from 'features/gallery/components/ImageViewer/ImageComparisonToolbarButtons'; -import { ToggleMetadataViewerButton } from 'features/gallery/components/ImageViewer/ToggleMetadataViewerButton'; -import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton'; import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer'; +import { ViewerToolbar } from 'features/gallery/components/ImageViewer/ViewerToolbar'; import type { InvokeTabName } from 'features/ui/store/tabMap'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; import { memo, useMemo } from 'react'; import { useHotkeys } from 'react-hotkeys-hook'; -import CurrentImageButtons from './CurrentImageButtons'; -import { ViewerToggleMenu } from './ViewerToggleMenu'; - const VIEWER_ENABLED_TABS: InvokeTabName[] = ['canvas', 'generation', 'workflows']; export const ImageViewer = memo(() => { @@ -51,23 +47,8 @@ export const ImageViewer = memo(() => { justifyContent="center" zIndex={10} // reactflow puts its minimap at 5, so we need to be above that > - - - - - - - - - {!isComparing && } - {isComparing && } - - - - - - - + {isComparing && } + {!isComparing && } {!isComparing && } {isComparing && } diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToolbar.tsx new file mode 100644 index 0000000000..6310874030 --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToolbar.tsx @@ -0,0 +1,30 @@ +import { Flex } from '@invoke-ai/ui-library'; +import { ToggleMetadataViewerButton } from 'features/gallery/components/ImageViewer/ToggleMetadataViewerButton'; +import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton'; +import { memo } from 'react'; + +import CurrentImageButtons from './CurrentImageButtons'; +import { ViewerToggleMenu } from './ViewerToggleMenu'; + +export const ViewerToolbar = memo(() => { + return ( + + + + + + + + + + + + + + + + + ); +}); + +ViewerToolbar.displayName = 'ViewerToolbar'; From 3cfd2755c2dbaa6be98a806cf00590a6a84ce8cb Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 09:44:19 +1000 Subject: [PATCH 34/52] fix(ui): when changing 
viewer state, always clear compare image --- .../frontend/web/src/features/gallery/store/gallerySlice.ts | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index 7861515eb5..fe17f45940 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -89,10 +89,7 @@ export const gallerySlice = createSlice({ state.alwaysShowImageSizeBadge = action.payload; }, isImageViewerOpenChanged: (state, action: PayloadAction) => { - if (state.isImageViewerOpen && state.imageToCompare) { - state.imageToCompare = null; - return; - } + state.imageToCompare = null; state.isImageViewerOpen = action.payload; }, comparedImagesSwapped: (state) => { From 1c9c982b636e79f31f8befa45dae18c34dce672e Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 09:46:15 +1000 Subject: [PATCH 35/52] feat(ui): use appropriate cursor on slider --- .../gallery/components/ImageViewer/ImageComparisonSlider.tsx | 1 + 1 file changed, 1 insertion(+) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index 43c97a69b5..d16fad69b7 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -259,6 +259,7 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = left={0} onMouseDown={onMouseDown} userSelect="none" + cursor="ew-resize" /> From ad9740d72d219d01a95ca8a85a03ecbb796d04a7 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 09:48:08 +1000 Subject: [PATCH 36/52] feat(ui): alt-click comparison image exits compare --- .../frontend/web/src/features/gallery/store/gallerySlice.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index fe17f45940..899e5db13d 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -38,7 +38,11 @@ export const gallerySlice = createSlice({ state.selection = uniqBy(action.payload, (i) => i.image_name); }, imageToCompareChanged: (state, action: PayloadAction) => { - state.imageToCompare = action.payload; + if (state.imageToCompare?.image_name === action.payload?.image_name) { + state.imageToCompare = null; + } else { + state.imageToCompare = action.payload; + } if (action.payload) { state.isImageViewerOpen = true; } From d0fca53e67e26a7cd006afd5b67913481bd3ba78 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 10:04:40 +1000 Subject: [PATCH 37/52] fix(ui): only clear comparison image on alt click of gallery image This logic can't be in the reducer else it applies to dnd events which isn't right --- .../listenerMiddleware/listeners/galleryImageClicked.ts | 6 +++++- .../frontend/web/src/features/gallery/store/gallerySlice.ts | 6 +----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts index de04202435..43f9355125 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts @@ -43,7 +43,11 @@ export const addGalleryImageClickedListener = (startAppListening: AppStartListen const selection = state.gallery.selection; if (altKey) { - dispatch(imageToCompareChanged(imageDTO)); + if (state.gallery.imageToCompare?.image_name === imageDTO.image_name) { + dispatch(imageToCompareChanged(null)); + } else { + dispatch(imageToCompareChanged(imageDTO)); + } } else if (shiftKey) { const rangeEndImageName = imageDTO.image_name; const lastSelectedImage = selection[selection.length - 1]?.image_name; diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index 899e5db13d..fe17f45940 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -38,11 +38,7 @@ export const gallerySlice = createSlice({ state.selection = uniqBy(action.payload, (i) => i.image_name); }, imageToCompareChanged: (state, action: PayloadAction) => { - if (state.imageToCompare?.image_name === action.payload?.image_name) { - state.imageToCompare = null; - } else { - state.imageToCompare = action.payload; - } + state.imageToCompare = action.payload; if (action.payload) { state.isImageViewerOpen = true; } From ca728ca29fe362beeee5cf5cc37c8f1bddbaa1d9 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 10:20:22 +1000 Subject: [PATCH 38/52] fix(ui): ignore context menu in slider view It doesn't make sense to allow context menu here, because the context menu will technically be on a div and not an image - there won't be any image options there. 
--- invokeai/frontend/web/src/common/util/stopPropagation.ts | 4 ++++ .../gallery/components/ImageViewer/ImageComparisonSlider.tsx | 2 ++ 2 files changed, 6 insertions(+) diff --git a/invokeai/frontend/web/src/common/util/stopPropagation.ts b/invokeai/frontend/web/src/common/util/stopPropagation.ts index b3481b7c0e..0c6a1fc507 100644 --- a/invokeai/frontend/web/src/common/util/stopPropagation.ts +++ b/invokeai/frontend/web/src/common/util/stopPropagation.ts @@ -1,3 +1,7 @@ export const stopPropagation = (e: React.MouseEvent) => { e.stopPropagation(); }; + +export const preventDefault = (e: React.MouseEvent) => { + e.preventDefault(); +}; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index d16fad69b7..e0d5e7fbd4 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -1,6 +1,7 @@ import { Box, Flex, Icon, Image, Text } from '@invoke-ai/ui-library'; import { useMeasure } from '@reactuses/core'; import { useAppSelector } from 'app/store/storeHooks'; +import { preventDefault } from 'common/util/stopPropagation'; import type { Dimensions } from 'features/canvas/store/canvasTypes'; import { STAGE_BG_DATAURL } from 'features/controlLayers/util/renderers'; import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react'; @@ -258,6 +259,7 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = bottom={0} left={0} onMouseDown={onMouseDown} + onContextMenu={preventDefault} userSelect="none" cursor="ew-resize" /> From 405fc46888b3f6e89d585fab89c1598845ad7a41 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 10:36:13 +1000 Subject: [PATCH 39/52] feat(ui): z/esc first exit compare before closing viewer --- .../components/ImageViewer/useImageViewer.tsx | 19 ++++++++++++++----- .../features/gallery/store/gallerySlice.ts | 1 - 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx index 57b3697b7e..978fbc0cef 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx @@ -1,22 +1,31 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; +import { imageToCompareChanged, isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; import { useCallback } from 'react'; export const useImageViewer = () => { const dispatch = useAppDispatch(); + const isComparing = useAppSelector((s) => s.gallery.imageToCompare !== null); const isOpen = useAppSelector((s) => s.gallery.isImageViewerOpen); const onClose = useCallback(() => { - dispatch(isImageViewerOpenChanged(false)); - }, [dispatch]); + if (isComparing && isOpen) { + dispatch(imageToCompareChanged(null)); + } else { + dispatch(isImageViewerOpenChanged(false)); + } + }, [dispatch, isComparing, isOpen]); const onOpen = useCallback(() => { dispatch(isImageViewerOpenChanged(true)); }, [dispatch]); const onToggle = useCallback(() => { - 
dispatch(isImageViewerOpenChanged(!isOpen)); - }, [dispatch, isOpen]); + if (isComparing && isOpen) { + dispatch(imageToCompareChanged(null)); + } else { + dispatch(isImageViewerOpenChanged(!isOpen)); + } + }, [dispatch, isComparing, isOpen]); return { isOpen, onOpen, onClose, onToggle }; }; diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index fe17f45940..ee97166081 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -89,7 +89,6 @@ export const gallerySlice = createSlice({ state.alwaysShowImageSizeBadge = action.payload; }, isImageViewerOpenChanged: (state, action: PayloadAction) => { - state.imageToCompare = null; state.isImageViewerOpen = action.payload; }, comparedImagesSwapped: (state) => { From 745140fa6bc12570885e1272c7e4c3cd8cba4709 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 10:40:56 +1000 Subject: [PATCH 40/52] feat(ui): "first image"/"second image" -> "viewer image"/"compare image" --- invokeai/frontend/web/public/locales/en.json | 4 ++-- .../gallery/components/ImageViewer/ImageComparisonSlider.tsx | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index aaa6c4e441..351e483fcc 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -378,8 +378,8 @@ "bulkDownloadFailed": "Download Failed", "problemDeletingImages": "Problem Deleting Images", "problemDeletingImagesDesc": "One or more images could not be deleted", - "firstImage": "First Image", - "secondImage": "Second Image", + "viewerImage": "Viewer Image", + "compareImage": "Compare Image", "selectForCompare": "Select for Compare", "selectAnImageToCompare": "Select an Image to Compare", "slider": "Slider", diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index e0d5e7fbd4..269c394498 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -184,7 +184,7 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = filter={DROP_SHADOW} color="base.50" > - {t('gallery.secondImage')} + {t('gallery.compareImage')} - {t('gallery.firstImage')} + {t('gallery.viewerImage')} Date: Sat, 1 Jun 2024 11:07:51 +1000 Subject: [PATCH 41/52] fix(ui): make compare image scale with first image when using contain fit --- .../gallery/components/ImageViewer/ImageComparisonSlider.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index 269c394498..39cb1d43ae 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -168,8 +168,8 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = Date: Sat, 1 Jun 2024 11:43:49 +1000 Subject: 
[PATCH 42/52] feat(ui): revise drop zones The main viewer area has two drop zones: - Select for Viewer - Select for Compare These do what you'd imagine they would do. --- invokeai/frontend/web/public/locales/en.json | 1 + .../listeners/imageDropped.ts | 4 +- .../web/src/features/dnd/types/index.ts | 2 +- .../web/src/features/dnd/util/isValidDrop.ts | 6 +-- .../ImageViewer/CurrentImagePreview.tsx | 3 -- .../ImageViewer/ImageComparison.tsx | 53 ++----------------- .../ImageViewer/ImageComparisonDroppable.tsx | 21 ++++++-- .../ImageViewer/ImageComparisonSlider.tsx | 8 +-- .../components/ImageViewer/ImageViewer.tsx | 7 ++- .../ImageViewer/ImageViewerWorkflows.tsx | 45 ---------------- .../components/ImageViewer/ViewerToolbar.tsx | 15 +++++- .../features/ui/components/tabs/NodesTab.tsx | 6 ++- .../ui/components/tabs/TextToImageTab.tsx | 2 + 13 files changed, 58 insertions(+), 115 deletions(-) delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerWorkflows.tsx diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 351e483fcc..9a10cde6ce 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -380,6 +380,7 @@ "problemDeletingImagesDesc": "One or more images could not be deleted", "viewerImage": "Viewer Image", "compareImage": "Compare Image", + "selectForViewer": "Select for Viewer", "selectForCompare": "Select for Compare", "selectAnImageToCompare": "Select an Image to Compare", "slider": "Slider", diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts index 84823407e9..7cb0703af8 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts @@ -15,7 +15,7 @@ import { } from 'features/controlLayers/store/controlLayersSlice'; import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types'; import { isValidDrop } from 'features/dnd/util/isValidDrop'; -import { imageSelected, imageToCompareChanged } from 'features/gallery/store/gallerySlice'; +import { imageSelected, imageToCompareChanged, isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice'; import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice'; import { selectOptimalDimension } from 'features/parameters/store/generationSlice'; import { imagesApi } from 'services/api/endpoints/images'; @@ -54,6 +54,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) => activeData.payload.imageDTO ) { dispatch(imageSelected(activeData.payload.imageDTO)); + dispatch(isImageViewerOpenChanged(true)); return; } @@ -195,6 +196,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) => ) { const { imageDTO } = activeData.payload; dispatch(imageToCompareChanged(imageDTO)); + dispatch(isImageViewerOpenChanged(true)); return; } diff --git a/invokeai/frontend/web/src/features/dnd/types/index.ts b/invokeai/frontend/web/src/features/dnd/types/index.ts index f66fec0ea1..6fcf18421e 100644 --- a/invokeai/frontend/web/src/features/dnd/types/index.ts +++ b/invokeai/frontend/web/src/features/dnd/types/index.ts @@ -18,7 +18,7 @@ type BaseDropData = { id: string; }; -type CurrentImageDropData = BaseDropData & { +export type 
CurrentImageDropData = BaseDropData & { actionType: 'SET_CURRENT_IMAGE'; }; diff --git a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts index d8e9d98e10..6dec862345 100644 --- a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts +++ b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts @@ -30,11 +30,7 @@ export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData? case 'SET_NODES_IMAGE': return payloadType === 'IMAGE_DTO'; case 'SELECT_FOR_COMPARE': - return ( - payloadType === 'IMAGE_DTO' && - activeData.id !== 'image-compare-first-image' && - activeData.id !== 'image-compare-second-image' - ); + return payloadType === 'IMAGE_DTO'; case 'ADD_TO_BOARD': { // If the board is the same, don't allow the drop diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx index 5de4f28d2a..a812391992 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImagePreview.tsx @@ -6,7 +6,6 @@ import IAIDndImage from 'common/components/IAIDndImage'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; import type { TypesafeDraggableData } from 'features/dnd/types'; import ImageMetadataViewer from 'features/gallery/components/ImageMetadataViewer/ImageMetadataViewer'; -import { ImageComparisonDroppable } from 'features/gallery/components/ImageViewer/ImageComparisonDroppable'; import NextPrevImageButtons from 'features/gallery/components/NextPrevImageButtons'; import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors'; import type { AnimationProps } from 'framer-motion'; @@ -75,12 +74,10 @@ const CurrentImagePreview = () => { isUploadDisabled={true} fitContainer useThumbailFallback - dropLabel={t('gallery.setCurrentImage')} noContentFallback={} dataTestId="image-preview" /> )} - {shouldShowImageDetails && imageDTO && ( diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx index 74acdfa13f..a0dc48bd5d 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx @@ -1,16 +1,12 @@ import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; -import IAIDroppable from 'common/components/IAIDroppable'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; -import type { SelectForCompareDropData } from 'features/dnd/types'; import { ImageComparisonSideBySide } from 'features/gallery/components/ImageViewer/ImageComparisonSideBySide'; import { ImageComparisonSlider } from 'features/gallery/components/ImageViewer/ImageComparisonSlider'; import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; -import type { PropsWithChildren } from 'react'; -import { memo, useMemo } from 'react'; +import { memo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiImagesBold } from 'react-icons/pi'; -import type { ImageDTO } from 'services/api/types'; const selector = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { const 
firstImage = gallerySlice.selection.slice(-1)[0] ?? null; @@ -24,56 +20,17 @@ export const ImageComparison = memo(() => { const { firstImage, secondImage } = useAppSelector(selector); if (!firstImage || !secondImage) { - return ( - - - - ); + // Should rarely/never happen - we don't render this component unless we have images to compare + return ; } if (comparisonMode === 'slider') { - return ( - - - - ); + return ; } if (comparisonMode === 'side-by-side') { - return ( - - - - ); + return ; } }); ImageComparison.displayName = 'ImageComparison'; - -type Props = PropsWithChildren<{ - firstImage: ImageDTO | null; - secondImage: ImageDTO | null; -}>; - -const ImageComparisonWrapper = memo((props: Props) => { - const droppableData = useMemo( - () => ({ - id: 'image-comparison', - actionType: 'SELECT_FOR_COMPARE', - context: { - firstImageName: props.firstImage?.image_name, - secondImageName: props.secondImage?.image_name, - }, - }), - [props.firstImage?.image_name, props.secondImage?.image_name] - ); - - return ( - <> - {props.children} - - - ); -}); - -ImageComparisonWrapper.displayName = 'ImageComparisonWrapper'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx index 6f163f63cf..9639daac10 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx @@ -1,7 +1,8 @@ +import { Flex } from '@invoke-ai/ui-library'; import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; import IAIDroppable from 'common/components/IAIDroppable'; -import type { SelectForCompareDropData } from 'features/dnd/types'; +import type { CurrentImageDropData, SelectForCompareDropData } from 'features/dnd/types'; import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; import { memo, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; @@ -12,10 +13,15 @@ const selector = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { return { firstImage, secondImage }; }); +const setCurrentImageDropData: CurrentImageDropData = { + id: 'current-image', + actionType: 'SET_CURRENT_IMAGE', +}; + export const ImageComparisonDroppable = memo(() => { const { t } = useTranslation(); const { firstImage, secondImage } = useAppSelector(selector); - const droppableData = useMemo( + const selectForCompareDropData = useMemo( () => ({ id: 'image-comparison', actionType: 'SELECT_FOR_COMPARE', @@ -27,7 +33,16 @@ export const ImageComparisonDroppable = memo(() => { [firstImage?.image_name, secondImage?.image_name] ); - return ; + return ( + + + + + + + + + ); }); ImageComparisonDroppable.displayName = 'ImageComparisonDroppable'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index 39cb1d43ae..3ced364b64 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -132,7 +132,7 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = justifyContent="center" > { const { isOpen, onToggle, onClose } = useImageViewer(); 
const activeTabName = useAppSelector(activeTabNameSelector); + const workflowsMode = useAppSelector((s) => s.workflow.mode); const isComparing = useAppSelector((s) => s.gallery.imageToCompare !== null); const isViewerEnabled = useMemo(() => VIEWER_ENABLED_TABS.includes(activeTabName), [activeTabName]); const shouldShowViewer = useMemo(() => { + if (activeTabName === 'workflows' && workflowsMode === 'view') { + return true; + } if (!isViewerEnabled) { return false; } return isOpen; - }, [isOpen, isViewerEnabled]); + }, [isOpen, isViewerEnabled, workflowsMode, activeTabName]); useHotkeys('z', onToggle, { enabled: isViewerEnabled }, [isViewerEnabled, onToggle]); useHotkeys('esc', onClose, { enabled: isViewerEnabled }, [isViewerEnabled, onClose]); @@ -45,7 +49,6 @@ export const ImageViewer = memo(() => { rowGap={4} alignItems="center" justifyContent="center" - zIndex={10} // reactflow puts its minimap at 5, so we need to be above that > {isComparing && } {!isComparing && } diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerWorkflows.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerWorkflows.tsx deleted file mode 100644 index fe09f11be6..0000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerWorkflows.tsx +++ /dev/null @@ -1,45 +0,0 @@ -import { Flex } from '@invoke-ai/ui-library'; -import { ToggleMetadataViewerButton } from 'features/gallery/components/ImageViewer/ToggleMetadataViewerButton'; -import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton'; -import { memo } from 'react'; - -import CurrentImageButtons from './CurrentImageButtons'; -import CurrentImagePreview from './CurrentImagePreview'; - -export const ImageViewerWorkflows = memo(() => { - return ( - - - - - - - - - - - - - - - - - - ); -}); - -ImageViewerWorkflows.displayName = 'ImageViewerWorkflows'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToolbar.tsx index 6310874030..21d3ba59d4 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToolbar.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToolbar.tsx @@ -1,12 +1,23 @@ import { Flex } from '@invoke-ai/ui-library'; +import { useAppSelector } from 'app/store/storeHooks'; import { ToggleMetadataViewerButton } from 'features/gallery/components/ImageViewer/ToggleMetadataViewerButton'; import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton'; -import { memo } from 'react'; +import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; +import { memo, useMemo } from 'react'; import CurrentImageButtons from './CurrentImageButtons'; import { ViewerToggleMenu } from './ViewerToggleMenu'; export const ViewerToolbar = memo(() => { + const workflowsMode = useAppSelector((s) => s.workflow.mode); + const activeTabName = useAppSelector(activeTabNameSelector); + const shouldShowToggleMenu = useMemo(() => { + if (activeTabName !== 'workflows') { + return true; + } + return workflowsMode === 'edit'; + }, [workflowsMode, activeTabName]); + return ( @@ -20,7 +31,7 @@ export const ViewerToolbar = memo(() => { - + {shouldShowToggleMenu && } diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/NodesTab.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/NodesTab.tsx index 
b4f473ae03..256a4331cd 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/NodesTab.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/NodesTab.tsx @@ -1,6 +1,7 @@ import { Box } from '@invoke-ai/ui-library'; import { useAppSelector } from 'app/store/storeHooks'; -import { ImageViewerWorkflows } from 'features/gallery/components/ImageViewer/ImageViewerWorkflows'; +import { ImageComparisonDroppable } from 'features/gallery/components/ImageViewer/ImageComparisonDroppable'; +import { ImageViewer } from 'features/gallery/components/ImageViewer/ImageViewer'; import NodeEditor from 'features/nodes/components/NodeEditor'; import { memo } from 'react'; import { ReactFlowProvider } from 'reactflow'; @@ -10,7 +11,8 @@ const NodesTab = () => { if (mode === 'view') { return ( - + + ); } diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/TextToImageTab.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/TextToImageTab.tsx index 1c1c9c24a4..5583624823 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/TextToImageTab.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/TextToImageTab.tsx @@ -1,5 +1,6 @@ import { Box } from '@invoke-ai/ui-library'; import { ControlLayersEditor } from 'features/controlLayers/components/ControlLayersEditor'; +import { ImageComparisonDroppable } from 'features/gallery/components/ImageViewer/ImageComparisonDroppable'; import { ImageViewer } from 'features/gallery/components/ImageViewer/ImageViewer'; import { memo } from 'react'; @@ -8,6 +9,7 @@ const TextToImageTab = () => { + ); }; From 08bcc71e991668582069de278b9b7d77aafaf906 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 13:20:04 +1000 Subject: [PATCH 43/52] fix(ui): workflows fit on load --- .../listeners/workflowLoadRequested.ts | 6 ++---- .../web/src/features/nodes/components/flow/Flow.tsx | 13 +++++++++++-- .../panels/BottomLeftPanel/ViewportControls.tsx | 13 +++---------- .../src/features/nodes/store/reactFlowInstance.ts | 1 + 4 files changed, 17 insertions(+), 16 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts index e6fc5a526a..9ccd967464 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts @@ -3,7 +3,7 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware' import { parseify } from 'common/util/serialize'; import { workflowLoaded, workflowLoadRequested } from 'features/nodes/store/actions'; import { $templates } from 'features/nodes/store/nodesSlice'; -import { $flow } from 'features/nodes/store/reactFlowInstance'; +import { $needsFit } from 'features/nodes/store/reactFlowInstance'; import type { Templates } from 'features/nodes/store/types'; import { WorkflowMigrationError, WorkflowVersionError } from 'features/nodes/types/error'; import { graphToWorkflow } from 'features/nodes/util/workflow/graphToWorkflow'; @@ -65,9 +65,7 @@ export const addWorkflowLoadRequestedListener = (startAppListening: AppStartList }); } - requestAnimationFrame(() => { - $flow.get()?.fitView(); - }); + $needsFit.set(true) } catch (e) { if (e instanceof WorkflowVersionError) { // The workflow version was not recognized in 
the valid list of versions diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx index 1748989394..727dad9617 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx @@ -19,7 +19,7 @@ import { redo, undo, } from 'features/nodes/store/nodesSlice'; -import { $flow } from 'features/nodes/store/reactFlowInstance'; +import { $flow, $needsFit } from 'features/nodes/store/reactFlowInstance'; import { connectionToEdge } from 'features/nodes/store/util/reactFlowUtil'; import type { CSSProperties, MouseEvent } from 'react'; import { memo, useCallback, useMemo, useRef } from 'react'; @@ -68,6 +68,7 @@ export const Flow = memo(() => { const nodes = useAppSelector((s) => s.nodes.present.nodes); const edges = useAppSelector((s) => s.nodes.present.edges); const viewport = useStore($viewport); + const needsFit = useStore($needsFit); const mayUndo = useAppSelector((s) => s.nodes.past.length > 0); const mayRedo = useAppSelector((s) => s.nodes.future.length > 0); const shouldSnapToGrid = useAppSelector((s) => s.workflowSettings.shouldSnapToGrid); @@ -92,8 +93,16 @@ export const Flow = memo(() => { const onNodesChange: OnNodesChange = useCallback( (nodeChanges) => { dispatch(nodesChanged(nodeChanges)); + const flow = $flow.get(); + if (!flow) { + return; + } + if (needsFit) { + $needsFit.set(false); + flow.fitView(); + } }, - [dispatch] + [dispatch, needsFit] ); const onEdgesChange: OnEdgesChange = useCallback( diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/panels/BottomLeftPanel/ViewportControls.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/panels/BottomLeftPanel/ViewportControls.tsx index f2624de58e..3d44e9d47b 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/panels/BottomLeftPanel/ViewportControls.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/panels/BottomLeftPanel/ViewportControls.tsx @@ -15,27 +15,20 @@ const ViewportControls = () => { const { t } = useTranslation(); const { zoomIn, zoomOut, fitView } = useReactFlow(); const dispatch = useAppDispatch(); - // const shouldShowFieldTypeLegend = useAppSelector( - // (s) => s.nodes.present.shouldShowFieldTypeLegend - // ); const shouldShowMinimapPanel = useAppSelector((s) => s.workflowSettings.shouldShowMinimapPanel); const handleClickedZoomIn = useCallback(() => { - zoomIn(); + zoomIn({ duration: 300 }); }, [zoomIn]); const handleClickedZoomOut = useCallback(() => { - zoomOut(); + zoomOut({ duration: 300 }); }, [zoomOut]); const handleClickedFitView = useCallback(() => { - fitView(); + fitView({ duration: 300 }); }, [fitView]); - // const handleClickedToggleFieldTypeLegend = useCallback(() => { - // dispatch(shouldShowFieldTypeLegendChanged(!shouldShowFieldTypeLegend)); - // }, [shouldShowFieldTypeLegend, dispatch]); - const handleClickedToggleMiniMapPanel = useCallback(() => { dispatch(shouldShowMinimapPanelChanged(!shouldShowMinimapPanel)); }, [shouldShowMinimapPanel, dispatch]); diff --git a/invokeai/frontend/web/src/features/nodes/store/reactFlowInstance.ts b/invokeai/frontend/web/src/features/nodes/store/reactFlowInstance.ts index 467c47de58..cfbadc6669 100644 --- a/invokeai/frontend/web/src/features/nodes/store/reactFlowInstance.ts +++ b/invokeai/frontend/web/src/features/nodes/store/reactFlowInstance.ts @@ -2,3 +2,4 @@ import { atom } from 'nanostores'; import type { ReactFlowInstance } 
from 'reactflow'; export const $flow = atom(null); +export const $needsFit = atom(true); From 8bb95714856f2bdf7524f70a6e9aa4a61b4f9ae3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 1 Jun 2024 19:17:18 +1000 Subject: [PATCH 44/52] feat(ui): tweak slider divider styling --- .../components/ImageViewer/ImageComparisonSlider.tsx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index 3ced364b64..f1c2cc6f33 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -11,7 +11,7 @@ import type { ImageDTO } from 'services/api/types'; const DROP_SHADOW = 'drop-shadow(0px 0px 4px rgb(0, 0, 0))'; const INITIAL_POS = '50%'; -const HANDLE_WIDTH = 1; +const HANDLE_WIDTH = 2; const HANDLE_WIDTH_PX = `${HANDLE_WIDTH}px`; const HANDLE_HITBOX = 20; const HANDLE_HITBOX_PX = `${HANDLE_HITBOX}px`; @@ -226,15 +226,16 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = bottom={0} left={left} w={HANDLE_HITBOX_PX} - tabIndex={-1} cursor="ew-resize" filter={DROP_SHADOW} + opacity={0.8} + color="base.50" > Date: Sun, 2 Jun 2024 08:52:32 +1000 Subject: [PATCH 45/52] feat(ui): hover comparison mode --- invokeai/frontend/web/public/locales/en.json | 1 + .../components/ImageViewer/CompareToolbar.tsx | 27 +++-- .../ImageViewer/ImageComparison.tsx | 5 + .../ImageViewer/ImageComparisonHover.tsx | 104 ++++++++++++++++++ .../ImageViewer/ImageComparisonSideBySide.tsx | 91 +++++++++------ .../ImageViewer/ImageComparisonSlider.tsx | 11 +- .../components/ImageViewer/useImageViewer.tsx | 2 + .../features/gallery/store/gallerySlice.ts | 8 +- .../web/src/features/gallery/store/types.ts | 4 +- 9 files changed, 197 insertions(+), 56 deletions(-) create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 9a10cde6ce..049f9ea979 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -385,6 +385,7 @@ "selectAnImageToCompare": "Select an Image to Compare", "slider": "Slider", "sideBySide": "Side-by-Side", + "hover": "Hover", "swapImages": "Swap Images", "compareOptions": "Comparison Options", "stretchToFit": "Stretch to Fit", diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx index 7ad9a9bf8f..7fb781edca 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx @@ -2,9 +2,9 @@ import { Button, ButtonGroup, Flex, IconButton } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { comparedImagesSwapped, + comparisonFitChanged, comparisonModeChanged, imageToCompareChanged, - sliderFitChanged, } from 'features/gallery/store/gallerySlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; @@ -14,19 +14,22 @@ export const 
CompareToolbar = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); - const sliderFit = useAppSelector((s) => s.gallery.sliderFit); + const comparisonFit = useAppSelector((s) => s.gallery.comparisonFit); const setComparisonModeSlider = useCallback(() => { dispatch(comparisonModeChanged('slider')); }, [dispatch]); const setComparisonModeSideBySide = useCallback(() => { dispatch(comparisonModeChanged('side-by-side')); }, [dispatch]); + const setComparisonModeHover = useCallback(() => { + dispatch(comparisonModeChanged('hover')); + }, [dispatch]); const swapImages = useCallback(() => { dispatch(comparedImagesSwapped()); }, [dispatch]); - const toggleSliderFit = useCallback(() => { - dispatch(sliderFitChanged(sliderFit === 'contain' ? 'fill' : 'contain')); - }, [dispatch, sliderFit]); + const togglecomparisonFit = useCallback(() => { + dispatch(comparisonFitChanged(comparisonFit === 'contain' ? 'fill' : 'contain')); + }, [dispatch, comparisonFit]); const exitCompare = useCallback(() => { dispatch(imageToCompareChanged(null)); }, [dispatch]); @@ -41,13 +44,12 @@ export const CompareToolbar = memo(() => { tooltip={t('gallery.swapImages')} onClick={swapImages} /> - {comparisonMode === 'slider' && ( + {comparisonMode !== 'side-by-side' && ( } /> @@ -70,6 +72,13 @@ export const CompareToolbar = memo(() => { > {t('gallery.sideBySide')} + diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx index a0dc48bd5d..73148851c3 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx @@ -1,6 +1,7 @@ import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; +import { ImageComparisonHover } from 'features/gallery/components/ImageViewer/ImageComparisonHover'; import { ImageComparisonSideBySide } from 'features/gallery/components/ImageViewer/ImageComparisonSideBySide'; import { ImageComparisonSlider } from 'features/gallery/components/ImageViewer/ImageComparisonSlider'; import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; @@ -31,6 +32,10 @@ export const ImageComparison = memo(() => { if (comparisonMode === 'side-by-side') { return ; } + + if (comparisonMode === 'hover') { + return ; + } }); ImageComparison.displayName = 'ImageComparison'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx new file mode 100644 index 0000000000..d00723a36e --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx @@ -0,0 +1,104 @@ +import { Flex, Image, Text } from '@invoke-ai/ui-library'; +import { useAppSelector } from 'app/store/storeHooks'; +import { preventDefault } from 'common/util/stopPropagation'; +import { DROP_SHADOW } from 'features/gallery/components/ImageViewer/useImageViewer'; +import { memo, useCallback, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import type { ImageDTO } from 'services/api/types'; + +type Props = { + /** + * The first image to compare + */ + 
firstImage: ImageDTO; + /** + * The second image to compare + */ + secondImage: ImageDTO; +}; + +export const ImageComparisonHover = memo(({ firstImage, secondImage }: Props) => { + const { t } = useTranslation(); + const comparisonFit = useAppSelector((s) => s.gallery.comparisonFit); + const [isMouseOver, setIsMouseOver] = useState(false); + const onMouseOver = useCallback(() => { + setIsMouseOver(true); + }, []); + const onMouseOut = useCallback(() => { + setIsMouseOver(false); + }, []); + return ( + + + + + {t('gallery.viewerImage')} + + + + + {t('gallery.compareImage')} + + + + + + ); +}); + +ImageComparisonHover.displayName = 'ImageComparisonHover'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx index 6cddb175cd..49f03fb9c8 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx @@ -1,8 +1,8 @@ -import { Flex } from '@invoke-ai/ui-library'; -import IAIDndImage from 'common/components/IAIDndImage'; -import type { ImageDraggableData } from 'features/dnd/types'; +import { Flex, Image, Text } from '@invoke-ai/ui-library'; +import { DROP_SHADOW } from 'features/gallery/components/ImageViewer/useImageViewer'; import ResizeHandle from 'features/ui/components/tabs/ResizeHandle'; -import { memo, useCallback, useMemo, useRef } from 'react'; +import { memo, useCallback, useRef } from 'react'; +import { useTranslation } from 'react-i18next'; import type { ImperativePanelGroupHandle } from 'react-resizable-panels'; import { Panel, PanelGroup } from 'react-resizable-panels'; import type { ImageDTO } from 'services/api/types'; @@ -19,6 +19,7 @@ type Props = { }; export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Props) => { + const { t } = useTranslation(); const panelGroupRef = useRef(null); const onDoubleClickHandle = useCallback(() => { if (!panelGroupRef.current) { @@ -27,36 +28,36 @@ export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Prop panelGroupRef.current.setLayout([50, 50]); }, []); - const firstImageDraggableData = useMemo( - () => ({ - id: 'image-compare-first-image', - payloadType: 'IMAGE_DTO', - payload: { imageDTO: firstImage }, - }), - [firstImage] - ); - - const secondImageDraggableData = useMemo( - () => ({ - id: 'image-compare-second-image', - payloadType: 'IMAGE_DTO', - payload: { imageDTO: secondImage }, - }), - [secondImage] - ); - return ( - - + + + + + {t('gallery.viewerImage')} + + - - + + + + + {t('gallery.compareImage')} + + diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index f1c2cc6f33..bda4c12eeb 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -9,7 +9,8 @@ import { useTranslation } from 'react-i18next'; import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; import type { ImageDTO } from 'services/api/types'; -const DROP_SHADOW = 'drop-shadow(0px 0px 4px rgb(0, 0, 0))'; +import { DROP_SHADOW } from './useImageViewer'; + const INITIAL_POS = '50%'; const HANDLE_WIDTH = 2; const HANDLE_WIDTH_PX = 
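The first version of `ImageComparisonHover` simply tracks whether the pointer is over the container and reveals the compare image while it is. A reduced sketch of that behavior using plain elements (no `@invoke-ai/ui-library` components, inline styles are illustrative):

```tsx
import { memo, useCallback, useState } from 'react';

type ImageLike = { image_url: string };

type Props = { firstImage: ImageLike; secondImage: ImageLike };

// Shows the first image by default and fades the second image in while hovered.
export const HoverCompareSketch = memo(({ firstImage, secondImage }: Props) => {
  const [isMouseOver, setIsMouseOver] = useState(false);
  const onMouseOver = useCallback(() => setIsMouseOver(true), []);
  const onMouseOut = useCallback(() => setIsMouseOver(false), []);

  return (
    <div
      onMouseOver={onMouseOver}
      onMouseOut={onMouseOut}
      style={{ position: 'relative', width: '100%', height: '100%' }}
    >
      <img src={firstImage.image_url} alt="viewer image" style={{ position: 'absolute', inset: 0 }} />
      <img
        src={secondImage.image_url}
        alt="compare image"
        // The real component also applies comparisonFit ('contain' | 'fill'); omitted here for brevity.
        style={{ position: 'absolute', inset: 0, opacity: isMouseOver ? 1 : 0, transition: 'opacity 0.1s' }}
      />
    </div>
  );
});
HoverCompareSketch.displayName = 'HoverCompareSketch';
```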
`${HANDLE_WIDTH}px`; @@ -31,7 +32,7 @@ type Props = { export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) => { const { t } = useTranslation(); - const sliderFit = useAppSelector((s) => s.gallery.sliderFit); + const comparisonFit = useAppSelector((s) => s.gallery.comparisonFit); // How far the handle is from the left - this will be a CSS calculation that takes into account the handle width const [left, setLeft] = useState(HANDLE_LEFT_INITIAL_PX); // How wide the first image is @@ -169,11 +170,11 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = id="image-comparison-second-image" src={secondImage.image_url} fallbackSrc={secondImage.thumbnail_url} - w={sliderFit === 'fill' ? fittedSize.width : (fittedSize.width * secondImage.width) / firstImage.width} - h={sliderFit === 'fill' ? fittedSize.height : (fittedSize.height * secondImage.height) / firstImage.height} + w={comparisonFit === 'fill' ? fittedSize.width : (fittedSize.width * secondImage.width) / firstImage.width} + h={comparisonFit === 'fill' ? fittedSize.height : (fittedSize.height * secondImage.height) / firstImage.height} maxW={fittedSize.width} maxH={fittedSize.height} - objectFit={sliderFit} + objectFit={comparisonFit} objectPosition="top left" /> { return { isOpen, onOpen, onClose, onToggle }; }; + +export const DROP_SHADOW = 'drop-shadow(0px 0px 4px rgb(0, 0, 0)) drop-shadow(0px 0px 4px rgba(0, 0, 0, 0.3))'; diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index ee97166081..d5928c52f8 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -24,7 +24,7 @@ const initialGalleryState: GalleryState = { isImageViewerOpen: true, imageToCompare: null, comparisonMode: 'slider', - sliderFit: 'fill', + comparisonFit: 'fill', }; export const gallerySlice = createSlice({ @@ -98,8 +98,8 @@ export const gallerySlice = createSlice({ state.imageToCompare = oldSelection[0] ?? 
null; } }, - sliderFitChanged: (state, action: PayloadAction<'contain' | 'fill'>) => { - state.sliderFit = action.payload; + comparisonFitChanged: (state, action: PayloadAction<'contain' | 'fill'>) => { + state.comparisonFit = action.payload; }, }, extraReducers: (builder) => { @@ -142,7 +142,7 @@ export const { imageToCompareChanged, comparisonModeChanged, comparedImagesSwapped, - sliderFitChanged, + comparisonFitChanged, } = gallerySlice.actions; const isAnyBoardDeleted = isAnyOf( diff --git a/invokeai/frontend/web/src/features/gallery/store/types.ts b/invokeai/frontend/web/src/features/gallery/store/types.ts index 1bdc91fc1e..0b2618be65 100644 --- a/invokeai/frontend/web/src/features/gallery/store/types.ts +++ b/invokeai/frontend/web/src/features/gallery/store/types.ts @@ -7,7 +7,7 @@ export const IMAGE_LIMIT = 20; export type GalleryView = 'images' | 'assets'; export type BoardId = 'none' | (string & Record); -export type ComparisonMode = 'slider' | 'side-by-side'; +export type ComparisonMode = 'slider' | 'side-by-side' | 'hover'; export type GalleryState = { selection: ImageDTO[]; @@ -23,6 +23,6 @@ export type GalleryState = { alwaysShowImageSizeBadge: boolean; imageToCompare: ImageDTO | null; comparisonMode: ComparisonMode; - sliderFit: 'contain' | 'fill'; + comparisonFit: 'contain' | 'fill'; isImageViewerOpen: boolean; }; From 449bc4dbe5caea7dd03b77b81fb01066dd59bd1c Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 2 Jun 2024 10:02:33 +1000 Subject: [PATCH 46/52] feat(ui): abstract out and share logic between comparisons --- invokeai/frontend/web/package.json | 1 - invokeai/frontend/web/pnpm-lock.yaml | 20 -- .../listeners/workflowLoadRequested.ts | 2 +- .../web/src/common/hooks/useBoolean.ts | 21 ++ .../ImageViewer/ImageComparison.tsx | 15 +- .../ImageViewer/ImageComparisonHover.tsx | 179 ++++++++++-------- .../ImageViewer/ImageComparisonLabel.tsx | 33 ++++ .../ImageViewer/ImageComparisonSideBySide.tsx | 49 +---- .../ImageViewer/ImageComparisonSlider.tsx | 111 +++-------- .../components/ImageViewer/ImageViewer.tsx | 9 +- .../gallery/components/ImageViewer/common.ts | 57 ++++++ .../{useImageViewer.tsx => useImageViewer.ts} | 2 - .../web/src/features/gallery/store/types.ts | 3 +- 13 files changed, 260 insertions(+), 242 deletions(-) create mode 100644 invokeai/frontend/web/src/common/hooks/useBoolean.ts create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonLabel.tsx create mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts rename invokeai/frontend/web/src/features/gallery/components/ImageViewer/{useImageViewer.tsx => useImageViewer.ts} (90%) diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index 0211994f22..f2210e4c68 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -61,7 +61,6 @@ "@fontsource-variable/inter": "^5.0.18", "@invoke-ai/ui-library": "^0.0.25", "@nanostores/react": "^0.7.2", - "@reactuses/core": "^5.0.14", "@reduxjs/toolkit": "2.2.3", "@roarr/browser-log-writer": "^1.3.0", "chakra-react-select": "^4.7.6", diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml index f9a3da4e39..64189f0d82 100644 --- a/invokeai/frontend/web/pnpm-lock.yaml +++ b/invokeai/frontend/web/pnpm-lock.yaml @@ -35,9 +35,6 @@ dependencies: '@nanostores/react': specifier: ^0.7.2 version: 0.7.2(nanostores@0.10.3)(react@18.3.1) - '@reactuses/core': - 
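Because hover mode shares the fit setting with slider mode, the slice-level `sliderFit` state and `sliderFitChanged` action become `comparisonFit`/`comparisonFitChanged`. A reduced sketch of the relevant slice fields and reducers, with the rest of the gallery state trimmed away:

```ts
import { createSlice } from '@reduxjs/toolkit';
import type { PayloadAction } from '@reduxjs/toolkit';

type ComparisonMode = 'slider' | 'side-by-side' | 'hover';
type ComparisonFit = 'contain' | 'fill';

type GallerySketchState = {
  comparisonMode: ComparisonMode;
  comparisonFit: ComparisonFit;
};

const initialState: GallerySketchState = {
  comparisonMode: 'slider',
  comparisonFit: 'fill',
};

const gallerySketchSlice = createSlice({
  name: 'gallerySketch',
  initialState,
  reducers: {
    comparisonModeChanged: (state, action: PayloadAction<ComparisonMode>) => {
      state.comparisonMode = action.payload;
    },
    comparisonFitChanged: (state, action: PayloadAction<ComparisonFit>) => {
      state.comparisonFit = action.payload;
    },
  },
});

export const { comparisonModeChanged, comparisonFitChanged } = gallerySketchSlice.actions;
export const gallerySketchReducer = gallerySketchSlice.reducer;
```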
specifier: ^5.0.14 - version: 5.0.14(react@18.3.1) '@reduxjs/toolkit': specifier: 2.2.3 version: 2.2.3(react-redux@9.1.2)(react@18.3.1) @@ -3985,18 +3982,6 @@ packages: - immer dev: false - /@reactuses/core@5.0.14(react@18.3.1): - resolution: {integrity: sha512-lg640pRPOPT0HZ8XQAA1VRZ47fLIvSd2JrUTtKpzm4t3MtZvza+w2RHBGgPsdmtiLV3GsJJC9x5ge7XOQmiJ/Q==} - peerDependencies: - react: ^16.8.0 || ^17.0.0 || ^18.0.0 - dependencies: - js-cookie: 3.0.5 - lodash-es: 4.17.21 - react: 18.3.1 - screenfull: 5.2.0 - use-sync-external-store: 1.2.2(react@18.3.1) - dev: false - /@reduxjs/toolkit@2.2.3(react-redux@9.1.2)(react@18.3.1): resolution: {integrity: sha512-76dll9EnJXg4EVcI5YNxZA/9hSAmZsFqzMmNRHvIlzw2WS/twfcVX3ysYrWGJMClwEmChQFC4yRq74tn6fdzRA==} peerDependencies: @@ -9683,11 +9668,6 @@ packages: resolution: {integrity: sha512-HvdH2LzI/EAZcUwA8+0nKNtWHqS+ZmijLA30RwZA0bo7ToCckjK5MkGhjED9KoRcXO6BaGI3I9UIzSA1FKFPOQ==} dev: false - /js-cookie@3.0.5: - resolution: {integrity: sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==} - engines: {node: '>=14'} - dev: false - /js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts index 9ccd967464..2c0caa0ec9 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts @@ -65,7 +65,7 @@ export const addWorkflowLoadRequestedListener = (startAppListening: AppStartList }); } - $needsFit.set(true) + $needsFit.set(true); } catch (e) { if (e instanceof WorkflowVersionError) { // The workflow version was not recognized in the valid list of versions diff --git a/invokeai/frontend/web/src/common/hooks/useBoolean.ts b/invokeai/frontend/web/src/common/hooks/useBoolean.ts new file mode 100644 index 0000000000..123e48cd75 --- /dev/null +++ b/invokeai/frontend/web/src/common/hooks/useBoolean.ts @@ -0,0 +1,21 @@ +import { useCallback, useMemo, useState } from 'react'; + +export const useBoolean = (initialValue: boolean) => { + const [isTrue, set] = useState(initialValue); + const setTrue = useCallback(() => set(true), []); + const setFalse = useCallback(() => set(false), []); + const toggle = useCallback(() => set((v) => !v), []); + + const api = useMemo( + () => ({ + isTrue, + set, + setTrue, + setFalse, + toggle, + }), + [isTrue, set, setTrue, setFalse, toggle] + ); + + return api; +}; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx index 73148851c3..ca740a5c16 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx @@ -1,6 +1,7 @@ import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; +import type { Dimensions } from 'features/canvas/store/canvasTypes'; import { ImageComparisonHover } from 'features/gallery/components/ImageViewer/ImageComparisonHover'; import { 
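Patch 46 also drops the `@reactuses/core` dependency in favour of a small local `useBoolean` hook. A short usage sketch, assuming the hook shown in the diff above (the component and labels are illustrative):

```tsx
import { useBoolean } from 'common/hooks/useBoolean';

// Minimal usage of the new hook: a disclosure-style toggle plus hover tracking.
export const UseBooleanSketch = () => {
  const expanded = useBoolean(false);
  return (
    <div>
      <button onClick={expanded.toggle}>{expanded.isTrue ? 'Hide details' : 'Show details'}</button>
      {expanded.isTrue && <p>Details go here.</p>}
      {/* setTrue / setFalse are stable callbacks, so they can be passed straight to event props. */}
      <button onMouseOver={expanded.setTrue} onMouseOut={expanded.setFalse}>
        Hover me
      </button>
    </div>
  );
};
```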
ImageComparisonSideBySide } from 'features/gallery/components/ImageViewer/ImageComparisonSideBySide'; import { ImageComparisonSlider } from 'features/gallery/components/ImageViewer/ImageComparisonSlider'; @@ -15,7 +16,11 @@ const selector = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { return { firstImage, secondImage }; }); -export const ImageComparison = memo(() => { +type Props = { + containerDims: Dimensions; +}; + +export const ImageComparison = memo(({ containerDims }: Props) => { const { t } = useTranslation(); const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); const { firstImage, secondImage } = useAppSelector(selector); @@ -26,15 +31,17 @@ export const ImageComparison = memo(() => { } if (comparisonMode === 'slider') { - return ; + return ; } if (comparisonMode === 'side-by-side') { - return ; + return ( + + ); } if (comparisonMode === 'hover') { - return ; + return ; } }); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx index d00723a36e..a02e94b547 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx @@ -1,101 +1,114 @@ -import { Flex, Image, Text } from '@invoke-ai/ui-library'; +import { Box, Flex, Image } from '@invoke-ai/ui-library'; import { useAppSelector } from 'app/store/storeHooks'; +import { useBoolean } from 'common/hooks/useBoolean'; import { preventDefault } from 'common/util/stopPropagation'; -import { DROP_SHADOW } from 'features/gallery/components/ImageViewer/useImageViewer'; -import { memo, useCallback, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import type { ImageDTO } from 'services/api/types'; +import type { Dimensions } from 'features/canvas/store/canvasTypes'; +import { STAGE_BG_DATAURL } from 'features/controlLayers/util/renderers'; +import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel'; +import { memo, useMemo, useRef } from 'react'; -type Props = { - /** - * The first image to compare - */ - firstImage: ImageDTO; - /** - * The second image to compare - */ - secondImage: ImageDTO; -}; +import type { ComparisonProps } from './common'; +import { fitDimsToContainer, getSecondImageDims } from './common'; -export const ImageComparisonHover = memo(({ firstImage, secondImage }: Props) => { - const { t } = useTranslation(); +export const ImageComparisonHover = memo(({ firstImage, secondImage, containerDims }: ComparisonProps) => { const comparisonFit = useAppSelector((s) => s.gallery.comparisonFit); - const [isMouseOver, setIsMouseOver] = useState(false); - const onMouseOver = useCallback(() => { - setIsMouseOver(true); - }, []); - const onMouseOut = useCallback(() => { - setIsMouseOver(false); - }, []); + const imageContainerRef = useRef(null); + const mouseOver = useBoolean(false); + const fittedDims = useMemo( + () => fitDimsToContainer(containerDims, firstImage), + [containerDims, firstImage] + ); + const compareImageDims = useMemo( + () => getSecondImageDims(comparisonFit, fittedDims, firstImage, secondImage), + [comparisonFit, fittedDims, firstImage, secondImage] + ); return ( - - + - - {t('gallery.viewerImage')} - - - + + - {t('gallery.compareImage')} - - - + + + + + + ); diff --git 
a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonLabel.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonLabel.tsx new file mode 100644 index 0000000000..a5a40dfc9c --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonLabel.tsx @@ -0,0 +1,33 @@ +import type { TextProps } from '@invoke-ai/ui-library'; +import { Text } from '@invoke-ai/ui-library'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; + +import { DROP_SHADOW } from './common'; + +type Props = TextProps & { + type: 'first' | 'second'; +}; + +export const ImageComparisonLabel = memo(({ type, ...rest }: Props) => { + const { t } = useTranslation(); + return ( + + {type === 'first' ? t('gallery.viewerImage') : t('gallery.compareImage')} + + ); +}); + +ImageComparisonLabel.displayName = 'ImageComparisonLabel'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx index 49f03fb9c8..8bac2bb45d 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx @@ -1,25 +1,12 @@ -import { Flex, Image, Text } from '@invoke-ai/ui-library'; -import { DROP_SHADOW } from 'features/gallery/components/ImageViewer/useImageViewer'; +import { Flex, Image } from '@invoke-ai/ui-library'; +import type { ComparisonProps } from 'features/gallery/components/ImageViewer/common'; +import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel'; import ResizeHandle from 'features/ui/components/tabs/ResizeHandle'; import { memo, useCallback, useRef } from 'react'; -import { useTranslation } from 'react-i18next'; import type { ImperativePanelGroupHandle } from 'react-resizable-panels'; import { Panel, PanelGroup } from 'react-resizable-panels'; -import type { ImageDTO } from 'services/api/types'; -type Props = { - /** - * The first image to compare - */ - firstImage: ImageDTO; - /** - * The second image to compare - */ - secondImage: ImageDTO; -}; - -export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Props) => { - const { t } = useTranslation(); +export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: ComparisonProps) => { const panelGroupRef = useRef(null); const onDoubleClickHandle = useCallback(() => { if (!panelGroupRef.current) { @@ -44,19 +31,9 @@ export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Prop src={firstImage.image_url} fallbackSrc={firstImage.thumbnail_url} objectFit="contain" + borderRadius="base" /> - - {t('gallery.viewerImage')} - + @@ -78,19 +55,9 @@ export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Prop src={secondImage.image_url} fallbackSrc={secondImage.thumbnail_url} objectFit="contain" + borderRadius="base" /> - - {t('gallery.compareImage')} - + diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index bda4c12eeb..8972af7d4f 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ 
b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -1,15 +1,14 @@ -import { Box, Flex, Icon, Image, Text } from '@invoke-ai/ui-library'; -import { useMeasure } from '@reactuses/core'; +import { Box, Flex, Icon, Image } from '@invoke-ai/ui-library'; import { useAppSelector } from 'app/store/storeHooks'; import { preventDefault } from 'common/util/stopPropagation'; import type { Dimensions } from 'features/canvas/store/canvasTypes'; import { STAGE_BG_DATAURL } from 'features/controlLayers/util/renderers'; +import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel'; import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react'; -import { useTranslation } from 'react-i18next'; import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; -import type { ImageDTO } from 'services/api/types'; -import { DROP_SHADOW } from './useImageViewer'; +import type { ComparisonProps } from './common'; +import { DROP_SHADOW, fitDimsToContainer, getSecondImageDims } from './common'; const INITIAL_POS = '50%'; const HANDLE_WIDTH = 2; @@ -19,59 +18,28 @@ const HANDLE_HITBOX_PX = `${HANDLE_HITBOX}px`; const HANDLE_INNER_LEFT_PX = `${HANDLE_HITBOX / 2 - HANDLE_WIDTH / 2}px`; const HANDLE_LEFT_INITIAL_PX = `calc(${INITIAL_POS} - ${HANDLE_HITBOX / 2}px)`; -type Props = { - /** - * The first image to compare - */ - firstImage: ImageDTO; - /** - * The second image to compare - */ - secondImage: ImageDTO; -}; - -export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) => { - const { t } = useTranslation(); +export const ImageComparisonSlider = memo(({ firstImage, secondImage, containerDims }: ComparisonProps) => { const comparisonFit = useAppSelector((s) => s.gallery.comparisonFit); // How far the handle is from the left - this will be a CSS calculation that takes into account the handle width const [left, setLeft] = useState(HANDLE_LEFT_INITIAL_PX); // How wide the first image is const [width, setWidth] = useState(INITIAL_POS); const handleRef = useRef(null); - // If the container size is not provided, use an internal ref and measure - can cause flicker on mount tho - const containerRef = useRef(null); - const [containerSize] = useMeasure(containerRef); + // To manage aspect ratios, we need to know the size of the container const imageContainerRef = useRef(null); // To keep things smooth, we use RAF to update the handle position & gate it to 60fps const rafRef = useRef(null); const lastMoveTimeRef = useRef(0); - const fittedSize = useMemo(() => { - // Fit the first image to the container - if (containerSize.width === 0 || containerSize.height === 0) { - return { width: firstImage.width, height: firstImage.height }; - } - const targetAspectRatio = containerSize.width / containerSize.height; - const imageAspectRatio = firstImage.width / firstImage.height; + const fittedDims = useMemo( + () => fitDimsToContainer(containerDims, firstImage), + [containerDims, firstImage] + ); - let width: number; - let height: number; - - if (firstImage.width <= containerSize.width && firstImage.height <= containerSize.height) { - return { width: firstImage.width, height: firstImage.height }; - } - - if (imageAspectRatio > targetAspectRatio) { - // Image is wider than container's aspect ratio - width = containerSize.width; - height = width / imageAspectRatio; - } else { - // Image is taller than container's aspect ratio - height = containerSize.height; - width = height * imageAspectRatio; - } - return { 
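Rather than each comparison component measuring itself (the source of the mount flicker noted in the old slider code), the refactor measures the container once in `ImageViewer` and passes `containerDims` down as a prop. A sketch of that pattern with `react-use`'s `useMeasure`; the component names and markup are illustrative:

```tsx
import { useMeasure } from 'react-use';

type Dimensions = { width: number; height: number };

// Illustrative child that only needs the measured size.
const ComparisonSketch = ({ containerDims }: { containerDims: Dimensions }) => (
  <div>{`container is ${Math.round(containerDims.width)} x ${Math.round(containerDims.height)}`}</div>
);

export const ViewerSketch = () => {
  // useMeasure returns a ref plus the current content rect of the element it is attached to.
  const [containerRef, containerDims] = useMeasure<HTMLDivElement>();
  return (
    <div ref={containerRef} style={{ width: '100%', height: '100%' }}>
      <ComparisonSketch containerDims={containerDims} />
    </div>
  );
};
```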
width, height }; - }, [containerSize, firstImage.height, firstImage.width]); + const compareImageDims = useMemo( + () => getSecondImageDims(comparisonFit, fittedDims, firstImage, secondImage), + [comparisonFit, fittedDims, firstImage, secondImage] + ); const updateHandlePos = useCallback((clientX: number) => { if (!handleRef.current || !imageContainerRef.current) { @@ -122,16 +90,7 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage }: Props) = ); return ( - + - - {t('gallery.compareImage')} - + - - {t('gallery.viewerImage')} - + { } return isOpen; }, [isOpen, isViewerEnabled, workflowsMode, activeTabName]); + const [containerRef, containerDims] = useMeasure(); useHotkeys('z', onToggle, { enabled: isViewerEnabled }, [isViewerEnabled, onToggle]); useHotkeys('esc', onClose, { enabled: isViewerEnabled }, [isViewerEnabled, onClose]); @@ -52,9 +55,9 @@ export const ImageViewer = memo(() => { > {isComparing && } {!isComparing && } - + {!isComparing && } - {isComparing && } + {isComparing && } ); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts new file mode 100644 index 0000000000..8d7f02c0fc --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts @@ -0,0 +1,57 @@ +import type { Dimensions } from 'features/canvas/store/canvasTypes'; +import type { ComparisonFit } from 'features/gallery/store/types'; +import type { ImageDTO } from 'services/api/types'; + +export const DROP_SHADOW = 'drop-shadow(0px 0px 4px rgb(0, 0, 0)) drop-shadow(0px 0px 4px rgba(0, 0, 0, 0.3))'; + +export type ComparisonProps = { + firstImage: ImageDTO; + secondImage: ImageDTO; + containerDims: Dimensions; +}; + +export const fitDimsToContainer = (containerDims: Dimensions, imageDims: Dimensions): Dimensions => { + // Fall back to the image's dimensions if the container has no dimensions + if (containerDims.width === 0 || containerDims.height === 0) { + return { width: imageDims.width, height: imageDims.height }; + } + + // Fall back to the image's dimensions if the image fits within the container + if (imageDims.width <= containerDims.width && imageDims.height <= containerDims.height) { + return { width: imageDims.width, height: imageDims.height }; + } + + const targetAspectRatio = containerDims.width / containerDims.height; + const imageAspectRatio = imageDims.width / imageDims.height; + + let width: number; + let height: number; + + if (imageAspectRatio > targetAspectRatio) { + // Image is wider than container's aspect ratio + width = containerDims.width; + height = width / imageAspectRatio; + } else { + // Image is taller than container's aspect ratio + height = containerDims.height; + width = height * imageAspectRatio; + } + return { width, height }; +}; + +/** + * Gets the dimensions of the second image in a comparison based on the comparison fit mode. + */ +export const getSecondImageDims = ( + comparisonFit: ComparisonFit, + fittedDims: Dimensions, + firstImageDims: Dimensions, + secondImageDims: Dimensions +): Dimensions => { + const width = + comparisonFit === 'fill' ? fittedDims.width : (fittedDims.width * secondImageDims.width) / firstImageDims.width; + const height = + comparisonFit === 'fill' ? 
fittedDims.height : (fittedDims.height * secondImageDims.height) / firstImageDims.height; + + return { width, height }; +}; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.ts similarity index 90% rename from invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx rename to invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.ts index 4232499c00..978fbc0cef 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/useImageViewer.ts @@ -29,5 +29,3 @@ export const useImageViewer = () => { return { isOpen, onOpen, onClose, onToggle }; }; - -export const DROP_SHADOW = 'drop-shadow(0px 0px 4px rgb(0, 0, 0)) drop-shadow(0px 0px 4px rgba(0, 0, 0, 0.3))'; diff --git a/invokeai/frontend/web/src/features/gallery/store/types.ts b/invokeai/frontend/web/src/features/gallery/store/types.ts index 0b2618be65..a88715b0bd 100644 --- a/invokeai/frontend/web/src/features/gallery/store/types.ts +++ b/invokeai/frontend/web/src/features/gallery/store/types.ts @@ -8,6 +8,7 @@ export const IMAGE_LIMIT = 20; export type GalleryView = 'images' | 'assets'; export type BoardId = 'none' | (string & Record); export type ComparisonMode = 'slider' | 'side-by-side' | 'hover'; +export type ComparisonFit = 'contain' | 'fill'; export type GalleryState = { selection: ImageDTO[]; @@ -23,6 +24,6 @@ export type GalleryState = { alwaysShowImageSizeBadge: boolean; imageToCompare: ImageDTO | null; comparisonMode: ComparisonMode; - comparisonFit: 'contain' | 'fill'; + comparisonFit: ComparisonFit; isImageViewerOpen: boolean; }; From c325ad34320bcbc4cc12a416fe51fa331ce358de Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 2 Jun 2024 12:58:37 +1000 Subject: [PATCH 47/52] feat(ui): add hotkey hint to exit compare button --- .../gallery/components/ImageViewer/CompareToolbar.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx index 7fb781edca..1b9849ac6e 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx @@ -85,8 +85,8 @@ export const CompareToolbar = memo(() => { } - aria-label={t('gallery.exitCompare')} - tooltip={t('gallery.exitCompare')} + aria-label={`${t('gallery.exitCompare')} (Esc)`} + tooltip={`${t('gallery.exitCompare')} (Esc)`} onClick={exitCompare} /> From 038a482ef0a5c0ec82e14c0464f45d25173ef1dd Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 2 Jun 2024 14:36:24 +1000 Subject: [PATCH 48/52] feat(ui): rework visibility conditions for image viewer --- invokeai/frontend/web/public/locales/en.json | 2 +- .../components/ImageViewer/CompareToolbar.tsx | 2 + .../ImageViewer/ImageComparison.tsx | 11 +----- .../ImageViewer/ImageComparisonDroppable.tsx | 27 +++++++------ .../components/ImageViewer/ImageViewer.tsx | 38 +++---------------- .../ImageViewer/ViewerToggleMenu.tsx | 24 ++++++------ .../components/ImageViewer/ViewerToolbar.tsx | 14 ++----- .../gallery/components/ImageViewer/common.ts | 7 
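`fitDimsToContainer` scales the first image down (never up) to fit the measured container while preserving aspect ratio, and `getSecondImageDims` then sizes the compare image either to fill the same box or to keep its own scale relative to the first image. A small worked example, assuming the helpers exported from `common.ts` above and made-up image sizes:

```ts
import { fitDimsToContainer, getSecondImageDims } from 'features/gallery/components/ImageViewer/common';

const containerDims = { width: 800, height: 600 };
const firstImage = { width: 1600, height: 900 }; // wider than the container's aspect ratio
const secondImage = { width: 800, height: 900 };

// Width-limited: width = 800, height = 800 / (1600 / 900) = 450.
const fitted = fitDimsToContainer(containerDims, firstImage);
// => { width: 800, height: 450 }

// 'fill' stretches the second image over exactly the same box as the first.
getSecondImageDims('fill', fitted, firstImage, secondImage);
// => { width: 800, height: 450 }

// 'contain' keeps the second image's size relative to the first:
// width = 800 * 800 / 1600 = 400, height = 450 * 900 / 900 = 450.
getSecondImageDims('contain', fitted, firstImage, secondImage);
// => { width: 400, height: 450 }
```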
++++ .../components/ImageViewer/useImageViewer.ts | 2 +- .../sidePanel/NodeEditorPanelGroup.tsx | 10 +---- .../components/sidePanel/WorkflowMenu.tsx | 10 +---- .../ui/components/tabs/TextToImageTab.tsx | 4 +- 12 files changed, 53 insertions(+), 98 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 049f9ea979..d127720bd5 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -380,7 +380,7 @@ "problemDeletingImagesDesc": "One or more images could not be deleted", "viewerImage": "Viewer Image", "compareImage": "Compare Image", - "selectForViewer": "Select for Viewer", + "openInViewer": "Open in Viewer", "selectForCompare": "Select for Compare", "selectAnImageToCompare": "Select an Image to Compare", "slider": "Slider", diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx index 1b9849ac6e..73316d1b1a 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CompareToolbar.tsx @@ -7,6 +7,7 @@ import { imageToCompareChanged, } from 'features/gallery/store/gallerySlice'; import { memo, useCallback } from 'react'; +import { useHotkeys } from 'react-hotkeys-hook'; import { useTranslation } from 'react-i18next'; import { PiArrowsOutBold, PiSwapBold, PiXBold } from 'react-icons/pi'; @@ -33,6 +34,7 @@ export const CompareToolbar = memo(() => { const exitCompare = useCallback(() => { dispatch(imageToCompareChanged(null)); }, [dispatch]); + useHotkeys('esc', exitCompare, [exitCompare]); return ( diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx index ca740a5c16..5607d7dd4f 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx @@ -1,21 +1,14 @@ -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; import type { Dimensions } from 'features/canvas/store/canvasTypes'; +import { selectComparisonImages } from 'features/gallery/components/ImageViewer/common'; import { ImageComparisonHover } from 'features/gallery/components/ImageViewer/ImageComparisonHover'; import { ImageComparisonSideBySide } from 'features/gallery/components/ImageViewer/ImageComparisonSideBySide'; import { ImageComparisonSlider } from 'features/gallery/components/ImageViewer/ImageComparisonSlider'; -import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiImagesBold } from 'react-icons/pi'; -const selector = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { - const firstImage = gallerySlice.selection.slice(-1)[0] ?? 
null; - const secondImage = gallerySlice.imageToCompare; - return { firstImage, secondImage }; -}); - type Props = { containerDims: Dimensions; }; @@ -23,7 +16,7 @@ type Props = { export const ImageComparison = memo(({ containerDims }: Props) => { const { t } = useTranslation(); const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode); - const { firstImage, secondImage } = useAppSelector(selector); + const { firstImage, secondImage } = useAppSelector(selectComparisonImages); if (!firstImage || !secondImage) { // Should rarely/never happen - we don't render this component unless we have images to compare diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx index 9639daac10..3678c920c0 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonDroppable.tsx @@ -1,17 +1,12 @@ import { Flex } from '@invoke-ai/ui-library'; -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; import IAIDroppable from 'common/components/IAIDroppable'; import type { CurrentImageDropData, SelectForCompareDropData } from 'features/dnd/types'; -import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; +import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer'; import { memo, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -const selector = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { - const firstImage = gallerySlice.selection.slice(-1)[0] ?? 
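The duplicated inline selectors in `ImageComparison` and `ImageComparisonDroppable` are replaced by a shared `selectComparisonImages`: the "first" image is the most recently selected gallery image and the "second" is `imageToCompare`. A sketch of the same idea using Reselect's `createSelector` directly; the app actually uses its own `createMemoizedSelector` wrapper, and the state shape here is trimmed down:

```ts
import { createSelector } from 'reselect';

type ImageLike = { image_name: string };

type GallerySliceSketch = {
  selection: ImageLike[];
  imageToCompare: ImageLike | null;
};

type RootStateSketch = { gallery: GallerySliceSketch };

const selectGallerySketch = (state: RootStateSketch) => state.gallery;

// Memoized: recomputes only when the gallery slice reference changes.
export const selectComparisonImagesSketch = createSelector(selectGallerySketch, (gallery) => ({
  // The last item in the selection is the image currently shown in the viewer.
  firstImage: gallery.selection.slice(-1)[0] ?? null,
  secondImage: gallery.imageToCompare,
}));
```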
null; - const secondImage = gallerySlice.imageToCompare; - return { firstImage, secondImage }; -}); +import { selectComparisonImages } from './common'; const setCurrentImageDropData: CurrentImageDropData = { id: 'current-image', @@ -20,7 +15,8 @@ const setCurrentImageDropData: CurrentImageDropData = { export const ImageComparisonDroppable = memo(() => { const { t } = useTranslation(); - const { firstImage, secondImage } = useAppSelector(selector); + const imageViewer = useImageViewer(); + const { firstImage, secondImage } = useAppSelector(selectComparisonImages); const selectForCompareDropData = useMemo( () => ({ id: 'image-comparison', @@ -33,14 +29,17 @@ export const ImageComparisonDroppable = memo(() => { [firstImage?.image_name, secondImage?.image_name] ); + if (!imageViewer.isOpen) { + return ( + + + + ); + } + return ( - - - - - - + ); }); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx index 33a70f973c..530431fc4c 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx @@ -1,43 +1,17 @@ import { Box, Flex } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { CompareToolbar } from 'features/gallery/components/ImageViewer/CompareToolbar'; import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview'; import { ImageComparison } from 'features/gallery/components/ImageViewer/ImageComparison'; import { ViewerToolbar } from 'features/gallery/components/ImageViewer/ViewerToolbar'; -import type { InvokeTabName } from 'features/ui/store/tabMap'; -import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; -import { memo, useMemo } from 'react'; -import { useHotkeys } from 'react-hotkeys-hook'; +import { memo } from 'react'; import { useMeasure } from 'react-use'; import { useImageViewer } from './useImageViewer'; -const VIEWER_ENABLED_TABS: InvokeTabName[] = ['canvas', 'generation', 'workflows']; - export const ImageViewer = memo(() => { - const { isOpen, onToggle, onClose } = useImageViewer(); - const activeTabName = useAppSelector(activeTabNameSelector); - const workflowsMode = useAppSelector((s) => s.workflow.mode); - const isComparing = useAppSelector((s) => s.gallery.imageToCompare !== null); - const isViewerEnabled = useMemo(() => VIEWER_ENABLED_TABS.includes(activeTabName), [activeTabName]); - const shouldShowViewer = useMemo(() => { - if (activeTabName === 'workflows' && workflowsMode === 'view') { - return true; - } - if (!isViewerEnabled) { - return false; - } - return isOpen; - }, [isOpen, isViewerEnabled, workflowsMode, activeTabName]); + const imageViewer = useImageViewer(); const [containerRef, containerDims] = useMeasure(); - useHotkeys('z', onToggle, { enabled: isViewerEnabled }, [isViewerEnabled, onToggle]); - useHotkeys('esc', onClose, { enabled: isViewerEnabled }, [isViewerEnabled, onClose]); - - if (!shouldShowViewer) { - return null; - } - return ( { alignItems="center" justifyContent="center" > - {isComparing && } - {!isComparing && } + {imageViewer.isComparing && } + {!imageViewer.isComparing && } - {!isComparing && } - {isComparing && } + {!imageViewer.isComparing && } + {imageViewer.isComparing && } ); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx 
b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx index 3552c28a5b..7dc13afb48 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ViewerToggleMenu.tsx @@ -9,33 +9,35 @@ import { PopoverTrigger, Text, } from '@invoke-ai/ui-library'; +import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer'; +import { useHotkeys } from 'react-hotkeys-hook'; import { useTranslation } from 'react-i18next'; import { PiCaretDownBold, PiCheckBold, PiEyeBold, PiPencilBold } from 'react-icons/pi'; -import { useImageViewer } from './useImageViewer'; - export const ViewerToggleMenu = () => { const { t } = useTranslation(); - const { isOpen, onClose, onOpen } = useImageViewer(); + const imageViewer = useImageViewer(); + useHotkeys('z', imageViewer.onToggle, [imageViewer]); + useHotkeys('esc', imageViewer.onClose, [imageViewer]); return ( - - + - -
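Patch 48 moves the viewer hotkeys out of `ImageViewer` and next to the controls they belong to: `Esc` exits compare mode in `CompareToolbar`, while `z` and `Esc` toggle or close the viewer from `ViewerToggleMenu`. A minimal sketch of that wiring with `react-hotkeys-hook`; the `ImageViewerApi` shape follows the `useImageViewer` hook earlier in the series, and the component itself is illustrative:

```tsx
import { useHotkeys } from 'react-hotkeys-hook';

type ImageViewerApi = {
  isOpen: boolean;
  onOpen: () => void;
  onClose: () => void;
  onToggle: () => void;
};

// Binds the viewer hotkeys where the toggle UI lives instead of in ImageViewer itself.
export const ViewerHotkeysSketch = ({ imageViewer }: { imageViewer: ImageViewerApi }) => {
  useHotkeys('z', imageViewer.onToggle, [imageViewer]);
  useHotkeys('esc', imageViewer.onClose, [imageViewer]);
  return null;
};
```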