From e4813f800a6b3ce58714878c8315bb913a798210 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Tue, 2 Jul 2024 21:14:12 -0400 Subject: [PATCH 01/38] Update calc_model_size_by_data(...) to handle all expected model types, and to log an error if an unexpected model type is received. --- invokeai/backend/ip_adapter/ip_adapter.py | 8 ++--- .../load/model_cache/model_cache_default.py | 2 +- .../backend/model_manager/load/model_util.py | 32 ++++++++++++++++--- invokeai/backend/textual_inversion.py | 8 +++++ 4 files changed, 40 insertions(+), 10 deletions(-) diff --git a/invokeai/backend/ip_adapter/ip_adapter.py b/invokeai/backend/ip_adapter/ip_adapter.py index c33cb3f4ab..abd6ca655a 100644 --- a/invokeai/backend/ip_adapter/ip_adapter.py +++ b/invokeai/backend/ip_adapter/ip_adapter.py @@ -136,11 +136,11 @@ class IPAdapter(RawModel): self._image_proj_model.to(device=self.device, dtype=self.dtype, non_blocking=non_blocking) self.attn_weights.to(device=self.device, dtype=self.dtype, non_blocking=non_blocking) - def calc_size(self): - # workaround for circular import - from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data + def calc_size(self) -> int: + # HACK(ryand): Fix this issue with circular imports. + from invokeai.backend.model_manager.load.model_util import calc_module_size - return calc_model_size_by_data(self._image_proj_model) + calc_model_size_by_data(self.attn_weights) + return calc_module_size(self._image_proj_model) + calc_module_size(self.attn_weights) def _init_image_proj_model( self, state_dict: dict[str, torch.Tensor] diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py index 697d3daf9b..c9e68a926a 100644 --- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py +++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py @@ -160,7 +160,7 @@ class ModelCache(ModelCacheBase[AnyModel]): key = self._make_cache_key(key, submodel_type) if key in self._cached_models: return - size = calc_model_size_by_data(model) + size = calc_model_size_by_data(self.logger, model) self.make_room(size) state_dict = model.state_dict() if isinstance(model, torch.nn.Module) else None diff --git a/invokeai/backend/model_manager/load/model_util.py b/invokeai/backend/model_manager/load/model_util.py index c55eee48fa..c798b92d8c 100644 --- a/invokeai/backend/model_manager/load/model_util.py +++ b/invokeai/backend/model_manager/load/model_util.py @@ -2,25 +2,46 @@ """Various utility functions needed by the loader and caching system.""" import json +import logging from pathlib import Path from typing import Optional import torch -from diffusers import DiffusionPipeline +from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from diffusers.schedulers.scheduling_utils import SchedulerMixin +from transformers import CLIPTokenizer +from invokeai.backend.ip_adapter.ip_adapter import IPAdapter +from invokeai.backend.lora import LoRAModelRaw from invokeai.backend.model_manager.config import AnyModel from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel +from invokeai.backend.textual_inversion import TextualInversionModelRaw -def calc_model_size_by_data(model: AnyModel) -> int: +def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int: """Get size of a model in memory in bytes.""" + # TODO(ryand): We should create a CacheableModel interface for all models, and move the size calculations down to + # the models 
themselves. if isinstance(model, DiffusionPipeline): return _calc_pipeline_by_data(model) elif isinstance(model, torch.nn.Module): - return _calc_model_by_data(model) + return calc_module_size(model) elif isinstance(model, IAIOnnxRuntimeModel): return _calc_onnx_model_by_data(model) + elif isinstance(model, SchedulerMixin): + return 0 + elif isinstance(model, CLIPTokenizer): + # TODO(ryand): Accurately calculate the tokenizer's size. It's small enough that it shouldn't matter for now. + return 0 + elif isinstance(model, (TextualInversionModelRaw, IPAdapter, LoRAModelRaw)): + return model.calc_size() else: + # TODO(ryand): Promote this from a log to an exception once we are confident that we are handling all of the + # supported model types. + logger.error( + f"Failed to calculate model size for unexpected model type: {type(model)}. The model will be treated as " + "having size 0." + ) return 0 @@ -30,11 +51,12 @@ def _calc_pipeline_by_data(pipeline: DiffusionPipeline) -> int: for submodel_key in pipeline.components.keys(): submodel = getattr(pipeline, submodel_key) if submodel is not None and isinstance(submodel, torch.nn.Module): - res += _calc_model_by_data(submodel) + res += calc_module_size(submodel) return res -def _calc_model_by_data(model: torch.nn.Module) -> int: +def calc_module_size(model: torch.nn.Module) -> int: + """Calculate the size (in bytes) of a torch.nn.Module.""" mem_params = sum([param.nelement() * param.element_size() for param in model.parameters()]) mem_bufs = sum([buf.nelement() * buf.element_size() for buf in model.buffers()]) mem: int = mem_params + mem_bufs # in bytes diff --git a/invokeai/backend/textual_inversion.py b/invokeai/backend/textual_inversion.py index 0408176edb..4c7625ea37 100644 --- a/invokeai/backend/textual_inversion.py +++ b/invokeai/backend/textual_inversion.py @@ -77,6 +77,14 @@ class TextualInversionModelRaw(RawModel): if emb is not None: emb.to(device=device, dtype=dtype, non_blocking=non_blocking) + def calc_size(self) -> int: + """Get the size of this model in bytes.""" + embedding_size = self.embedding.element_size() * self.embedding.nelement() + embedding_2_size = 0 + if self.embedding_2 is not None: + embedding_2_size = self.embedding_2.element_size() * self.embedding_2.nelement() + return embedding_size + embedding_2_size + class TextualInversionManager(BaseTextualInversionManager): """TextualInversionManager implements the BaseTextualInversionManager ABC from the compel library.""" From c1afe3570423da43077ec91c452a187bf055024a Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 27 Jun 2024 15:17:22 -0400 Subject: [PATCH 02/38] Add prototype invocation for running upscaling models with spandrel. 
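
For reference, the spandrel API that this prototype wraps comes down to the
sketch below (the checkpoint path is illustrative). spandrel's ModelLoader
auto-detects the architecture from the weights, and an image-to-image
descriptor is a plain callable over (N, C, H, W) tensors with values in
[0, 1]:

    import torch
    from spandrel import ImageModelDescriptor, ModelLoader

    model = ModelLoader().load_from_file("/path/to/4x_upscaler.pth")
    assert isinstance(model, ImageModelDescriptor)  # image-to-image models only

    with torch.no_grad():
        output = model(torch.rand(1, 3, 64, 64))  # -> (1, 3, 256, 256) for a 4x model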
--- invokeai/app/invocations/spandrel_upscale.py | 94 ++++++++++++++++++++ pyproject.toml | 1 + 2 files changed, 95 insertions(+) create mode 100644 invokeai/app/invocations/spandrel_upscale.py diff --git a/invokeai/app/invocations/spandrel_upscale.py b/invokeai/app/invocations/spandrel_upscale.py new file mode 100644 index 0000000000..babf399cd6 --- /dev/null +++ b/invokeai/app/invocations/spandrel_upscale.py @@ -0,0 +1,94 @@ +import numpy as np +import torch +from PIL import Image +from spandrel import ImageModelDescriptor, ModelLoader + +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.util.devices import TorchDevice + + +def pil_to_tensor(image: Image.Image) -> torch.Tensor: + """Convert PIL Image to torch.Tensor. + + Args: + image (Image.Image): A PIL Image with shape (H, W, C) and values in the range [0, 255]. + + Returns: + torch.Tensor: A torch.Tensor with shape (N, C, H, W) and values in the range [0, 1]. + """ + image_np = np.array(image) + # (H, W, C) -> (C, H, W) + image_np = np.transpose(image_np, (2, 0, 1)) + image_np = image_np / 255 + image_tensor = torch.from_numpy(image_np).float() + # (C, H, W) -> (N, C, H, W) + image_tensor = image_tensor.unsqueeze(0) + return image_tensor + + +def tensor_to_pil(tensor: torch.Tensor) -> Image.Image: + """Convert torch.Tensor to PIL Image. + + Args: + tensor (torch.Tensor): A torch.Tensor with shape (N, C, H, W) and values in the range [0, 1]. + + Returns: + Image.Image: A PIL Image with shape (H, W, C) and values in the range [0, 255]. + """ + # (N, C, H, W) -> (C, H, W) + tensor = tensor.squeeze(0) + # (C, H, W) -> (H, W, C) + tensor = tensor.permute(1, 2, 0) + tensor = tensor.clamp(0, 1) + tensor = (tensor * 255).cpu().detach().numpy().astype(np.uint8) + image = Image.fromarray(tensor) + return image + + +@invocation("upscale_spandrel", title="Upscale (spandrel)", tags=["upscale"], category="upscale", version="1.0.0") +class UpscaleSpandrelInvocation(BaseInvocation, WithMetadata, WithBoard): + """Upscales an image using any upscaler supported by spandrel (https://github.com/chaiNNer-org/spandrel).""" + + image: ImageField = InputField(description="The input image") + # TODO(ryand): Figure out how to handle all the spandrel models so that you don't have to enter a string. + model_path: str = InputField(description="The path to the upscaling model to use.") + + def invoke(self, context: InvocationContext) -> ImageOutput: + image = context.images.get_pil(self.image.image_name) + + # Load the model. + # TODO(ryand): Integrate with the model manager. + model = ModelLoader().load_from_file(self.model_path) + if not isinstance(model, ImageModelDescriptor): + raise ValueError( + f"Loaded a spandrel model of type '{type(model)}'. Only image-to-image models are supported " + "('ImageModelDescriptor')." + ) + + # Select model device and dtype. + torch_dtype = TorchDevice.choose_torch_dtype() + torch_device = TorchDevice.choose_torch_device() + if (torch_dtype == torch.float16 and not model.supports_half) or ( + torch_dtype == torch.bfloat16 and not model.supports_bfloat16 + ): + context.logger.warning( + f"The configured dtype ('{torch_dtype}') is not supported by the {type(model.model)} model. Falling " + "back to 'float32'." 
+ ) + torch_dtype = torch.float32 + model.to(device=torch_device, dtype=torch_dtype) + + # Prepare input image for inference. + image_tensor = pil_to_tensor(image) + image_tensor = image_tensor.to(device=torch_device, dtype=torch_dtype) + + # Run inference. + image_tensor = model(image_tensor) + + # Convert the output tensor to a PIL image. + pil_image = tensor_to_pil(image_tensor) + image_dto = context.images.save(image=pil_image) + return ImageOutput.build(image_dto) diff --git a/pyproject.toml b/pyproject.toml index fcc0aff60c..fa716254de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,7 @@ dependencies = [ "opencv-python==4.9.0.80", "pytorch-lightning==2.1.3", "safetensors==0.4.3", + "spandrel==0.3.4", "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 "torch==2.2.2", "torchmetrics==0.11.4", From c335f92345ec9e36d6ccccb3be230e0ce4c1b920 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 28 Jun 2024 14:28:36 -0400 Subject: [PATCH 03/38] (minor) simplify startswith(...) syntax. --- invokeai/backend/model_manager/probe.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py index 2f18f1a8a6..ce68b1e902 100644 --- a/invokeai/backend/model_manager/probe.py +++ b/invokeai/backend/model_manager/probe.py @@ -221,17 +221,17 @@ class ModelProbe(object): ckpt = ckpt.get("state_dict", ckpt) for key in [str(k) for k in ckpt.keys()]: - if any(key.startswith(v) for v in {"cond_stage_model.", "first_stage_model.", "model.diffusion_model."}): + if key.startswith(("cond_stage_model.", "first_stage_model.", "model.diffusion_model.")): return ModelType.Main - elif any(key.startswith(v) for v in {"encoder.conv_in", "decoder.conv_in"}): + elif key.startswith(("encoder.conv_in", "decoder.conv_in")): return ModelType.VAE - elif any(key.startswith(v) for v in {"lora_te_", "lora_unet_"}): + elif key.startswith(("lora_te_", "lora_unet_")): return ModelType.LoRA - elif any(key.endswith(v) for v in {"to_k_lora.up.weight", "to_q_lora.down.weight"}): + elif key.endswith(("to_k_lora.up.weight", "to_q_lora.down.weight")): return ModelType.LoRA - elif any(key.startswith(v) for v in {"controlnet", "control_model", "input_blocks"}): + elif key.startswith(("controlnet", "control_model", "input_blocks")): return ModelType.ControlNet - elif any(key.startswith(v) for v in {"image_proj.", "ip_adapter."}): + elif key.startswith(("image_proj.", "ip_adapter.")): return ModelType.IPAdapter elif key in {"emb_params", "string_to_param"}: return ModelType.TextualInversion From e6abea7bc5f58069a92e0f6f7110464381786e65 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 28 Jun 2024 14:30:14 -0400 Subject: [PATCH 04/38] (minor) Remove redundant else clause on a for-loop with no break statement. 
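
Context: Python runs a for-loop's else block only when the loop finishes
without hitting a `break`. With no `break` in the body, the else always runs,
so the clause can be dedented with no behavior change. Hypothetical sketch:

    for key in keys:
        handle(key)
    else:
        fallback()  # always runs: nothing in the loop can `break`

    # ...behaves identically to:
    for key in keys:
        handle(key)
    fallback()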
---
 invokeai/backend/model_manager/probe.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py
index ce68b1e902..28b42caa53 100644
--- a/invokeai/backend/model_manager/probe.py
+++ b/invokeai/backend/model_manager/probe.py
@@ -235,10 +235,10 @@ class ModelProbe(object):
             return ModelType.IPAdapter
         elif key in {"emb_params", "string_to_param"}:
             return ModelType.TextualInversion
-        else:
-            # diffusers-ti
-            if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()):
-                return ModelType.TextualInversion
+
+        # diffusers-ti
+        if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()):
+            return ModelType.TextualInversion
 
         raise InvalidModelConfigException(f"Unable to determine model type for {model_path}")
 

From 59ce9cf41ce02057ed45d018aae386fe4c625f07 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Fri, 28 Jun 2024 15:01:42 -0400
Subject: [PATCH 05/38] WIP - Begin to integrate SpandrelImageToImageModel type
 into the model manager.

---
 invokeai/backend/model_manager/config.py      |  1 +
 .../model_loaders/spandrel_image_to_image.py  | 34 ++++++++++
 invokeai/backend/model_manager/probe.py       | 14 +++++
 invokeai/backend/raw_model.py                 | 24 ++++---
 .../backend/spandrel_image_to_image_model.py  | 63 +++++++++++++++++++
 5 files changed, 123 insertions(+), 13 deletions(-)
 create mode 100644 invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py
 create mode 100644 invokeai/backend/spandrel_image_to_image_model.py

diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py
index d788012dc7..9a33cc502e 100644
--- a/invokeai/backend/model_manager/config.py
+++ b/invokeai/backend/model_manager/config.py
@@ -68,6 +68,7 @@ class ModelType(str, Enum):
     IPAdapter = "ip_adapter"
     CLIPVision = "clip_vision"
     T2IAdapter = "t2i_adapter"
+    SpandrelImageToImage = "spandrel_image_to_image"
 
 
 class SubModelType(str, Enum):
diff --git a/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py b/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py
new file mode 100644
index 0000000000..4241c21d24
--- /dev/null
+++ b/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py
@@ -0,0 +1,34 @@
+from pathlib import Path
+from typing import Optional
+
+from invokeai.backend.model_manager.config import (
+    AnyModel,
+    AnyModelConfig,
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+    SubModelType,
+)
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
+
+
+@ModelLoaderRegistry.register(
+    base=BaseModelType.Any, type=ModelType.SpandrelImageToImage, format=ModelFormat.Checkpoint
+)
+class SpandrelImageToImageModelLoader(ModelLoader):
+    """Class for loading Spandrel Image-to-Image models (i.e. models wrapped by spandrel.ImageModelDescriptor)."""
+
+    def _load_model(
+        self,
+        config: AnyModelConfig,
+        submodel_type: Optional[SubModelType] = None,
+    ) -> AnyModel:
+        if submodel_type is not None:
+            raise ValueError("Unexpected submodel requested for Spandrel model.")
+
+        model_path = Path(config.path)
+        model = SpandrelImageToImageModel.load_from_file(model_path)
+
+        return model
diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py
index 28b42caa53..8ba63f0db5 100644
--- a/invokeai/backend/model_manager/probe.py
+++ b/invokeai/backend/model_manager/probe.py
@@ -10,6 +10,7 @@ from picklescan.scanner import scan_file_path
 import invokeai.backend.util.logging as logger
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
+from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
 from invokeai.backend.util.silence_warnings import SilenceWarnings
 
 from .config import (
@@ -240,6 +241,14 @@ class ModelProbe(object):
         if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()):
             return ModelType.TextualInversion
 
+        # Check if the model can be loaded as a SpandrelImageToImageModel.
+        try:
+            _ = SpandrelImageToImageModel.load_from_state_dict(ckpt)
+            return ModelType.SpandrelImageToImage
+        except Exception:
+            # TODO(ryand): Catch a more specific exception type here if we can.
+            pass
+
         raise InvalidModelConfigException(f"Unable to determine model type for {model_path}")
 
     @classmethod
@@ -570,6 +579,11 @@ class T2IAdapterCheckpointProbe(CheckpointProbeBase):
         raise NotImplementedError()
 
 
+class SpandrelImageToImageModelProbe(CheckpointProbeBase):
+    def get_base_type(self) -> BaseModelType:
+        raise NotImplementedError()
+
+
 ########################################################
 # classes for probing folders
 #######################################################
diff --git a/invokeai/backend/raw_model.py b/invokeai/backend/raw_model.py
index 7bca6945d9..6cce354c45 100644
--- a/invokeai/backend/raw_model.py
+++ b/invokeai/backend/raw_model.py
@@ -1,15 +1,3 @@
-"""Base class for 'Raw' models.
-
-The RawModel class is the base class of LoRAModelRaw and TextualInversionModelRaw,
-and is used for type checking of calls to the model patcher. Its main purpose
-is to avoid a circular import issues when lora.py tries to import BaseModelType
-from invokeai.backend.model_manager.config, and the latter tries to import LoRAModelRaw
-from lora.py.
-
-The term 'raw' was introduced to describe a wrapper around a torch.nn.Module
-that adds additional methods and attributes.
-"""
-
 from abc import ABC, abstractmethod
 from typing import Optional
 
@@ -17,7 +5,17 @@ import torch
 
 
 class RawModel(ABC):
-    """Abstract base class for 'Raw' model wrappers."""
+    """Base class for 'Raw' models.
+
+    The RawModel class is the base class of LoRAModelRaw, TextualInversionModelRaw, etc.,
+    and is used for type checking of calls to the model patcher. Its main purpose
+    is to avoid circular import issues when lora.py tries to import BaseModelType
+    from invokeai.backend.model_manager.config, and the latter tries to import LoRAModelRaw
+    from lora.py.
+
+    The term 'raw' was introduced to describe a wrapper around a torch.nn.Module
+    that adds additional methods and attributes.
+ """ @abstractmethod def to( diff --git a/invokeai/backend/spandrel_image_to_image_model.py b/invokeai/backend/spandrel_image_to_image_model.py new file mode 100644 index 0000000000..270f521604 --- /dev/null +++ b/invokeai/backend/spandrel_image_to_image_model.py @@ -0,0 +1,63 @@ +from pathlib import Path +from typing import Any, Optional + +import torch +from spandrel import ImageModelDescriptor, ModelLoader + +from invokeai.backend.raw_model import RawModel + + +class SpandrelImageToImageModel(RawModel): + """A wrapper for a Spandrel Image-to-Image model. + + The main reason for having a wrapper class is to integrate with the type handling of RawModel. + """ + + def __init__(self, spandrel_model: ImageModelDescriptor[Any]): + self._spandrel_model = spandrel_model + + @classmethod + def load_from_file(cls, file_path: str | Path): + model = ModelLoader().load_from_file(file_path) + if not isinstance(model, ImageModelDescriptor): + raise ValueError( + f"Loaded a spandrel model of type '{type(model)}'. Only image-to-image models are supported " + "('ImageModelDescriptor')." + ) + + return cls(spandrel_model=model) + + @classmethod + def load_from_state_dict(cls, state_dict: dict[str, torch.Tensor]): + model = ModelLoader().load_from_state_dict(state_dict) + if not isinstance(model, ImageModelDescriptor): + raise ValueError( + f"Loaded a spandrel model of type '{type(model)}'. Only image-to-image models are supported " + "('ImageModelDescriptor')." + ) + + return cls(spandrel_model=model) + + def supports_dtype(self, dtype: torch.dtype) -> bool: + """Check if the model supports the given dtype.""" + if dtype == torch.float16: + return self._spandrel_model.supports_half + elif dtype == torch.bfloat16: + return self._spandrel_model.supports_bfloat16 + elif dtype == torch.float32: + # All models support float32. + return True + else: + raise ValueError(f"Unexpected dtype '{dtype}'.") + + def to( + self, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + non_blocking: bool = False, + ) -> None: + """Note: Some models have limited dtype support. Call supports_dtype(...) to check if the dtype is supported. + Note: The non_blocking parameter is currently ignored.""" + # TODO(ryand): spandrel.ImageModelDescriptor.to(...) does not support non_blocking. We will access the model + # directly if we want to apply this optimization. + self._spandrel_model.to(device=device, dtype=dtype) From 2a1514272f949a0feecc9f45fac564594550d0f9 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 28 Jun 2024 15:22:39 -0400 Subject: [PATCH 06/38] Set the dtype correctly for SpandrelImageToImageModels when they are loaded. 
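
The load-time behavior added here amounts to the following (names as in the
loader diff below; supports_dtype() is the new helper on the wrapper class):

    torch_dtype = self._torch_dtype
    if not model.supports_dtype(torch_dtype):
        # E.g. float16 was configured, but the architecture only runs in float32.
        torch_dtype = torch.float32
    model.to(dtype=torch_dtype)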
--- .../load/model_loaders/spandrel_image_to_image.py | 11 +++++++++++ invokeai/backend/spandrel_image_to_image_model.py | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py b/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py index 4241c21d24..7a57c5cf59 100644 --- a/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py +++ b/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py @@ -1,6 +1,8 @@ from pathlib import Path from typing import Optional +import torch + from invokeai.backend.model_manager.config import ( AnyModel, AnyModelConfig, @@ -31,4 +33,13 @@ class SpandrelImageToImageModelLoader(ModelLoader): model_path = Path(config.path) model = SpandrelImageToImageModel.load_from_file(model_path) + torch_dtype = self._torch_dtype + if not model.supports_dtype(torch_dtype): + self._logger.warning( + f"The configured dtype ('{self._torch_dtype}') is not supported by the {model.get_model_type_name()} " + "model. Falling back to 'float32'." + ) + torch_dtype = torch.float32 + model.to(dtype=torch_dtype) + return model diff --git a/invokeai/backend/spandrel_image_to_image_model.py b/invokeai/backend/spandrel_image_to_image_model.py index 270f521604..6413ebba6b 100644 --- a/invokeai/backend/spandrel_image_to_image_model.py +++ b/invokeai/backend/spandrel_image_to_image_model.py @@ -50,6 +50,12 @@ class SpandrelImageToImageModel(RawModel): else: raise ValueError(f"Unexpected dtype '{dtype}'.") + def get_model_type_name(self) -> str: + """The model type name. Intended for logging / debugging purposes. Do not rely on this field remaining + consistent over time. + """ + return str(type(self._spandrel_model.model)) + def to( self, device: Optional[torch.device] = None, From 95079dc7d4b499470dbcf43e9ea9f4f9a2db05c9 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 28 Jun 2024 15:30:35 -0400 Subject: [PATCH 07/38] Use a ModelIdentifierField to identify the spandrel model in the UpscaleSpandrelInvocation. 
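
Rather than loading from a raw `model_path` string with spandrel's ModelLoader,
the invocation now resolves a ModelIdentifierField through the model manager,
which handles caching and device placement. The access pattern, as in the diff
below:

    spandrel_model_info = context.models.load(self.spandrel_image_to_image_model)
    with spandrel_model_info as spandrel_model:
        # The loaded model is held by the model cache for the duration of the block.
        image_tensor = spandrel_model.run(image_tensor)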
--- invokeai/app/invocations/spandrel_upscale.py | 51 ++++++++----------- .../backend/spandrel_image_to_image_model.py | 14 +++++ 2 files changed, 36 insertions(+), 29 deletions(-) diff --git a/invokeai/app/invocations/spandrel_upscale.py b/invokeai/app/invocations/spandrel_upscale.py index babf399cd6..3e26457104 100644 --- a/invokeai/app/invocations/spandrel_upscale.py +++ b/invokeai/app/invocations/spandrel_upscale.py @@ -1,13 +1,20 @@ import numpy as np import torch from PIL import Image -from spandrel import ImageModelDescriptor, ModelLoader from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation -from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata +from invokeai.app.invocations.fields import ( + FieldDescriptions, + ImageField, + InputField, + UIType, + WithBoard, + WithMetadata, +) +from invokeai.app.invocations.model import ModelIdentifierField from invokeai.app.invocations.primitives import ImageOutput from invokeai.app.services.shared.invocation_context import InvocationContext -from invokeai.backend.util.devices import TorchDevice +from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel def pil_to_tensor(image: Image.Image) -> torch.Tensor: @@ -53,40 +60,26 @@ class UpscaleSpandrelInvocation(BaseInvocation, WithMetadata, WithBoard): """Upscales an image using any upscaler supported by spandrel (https://github.com/chaiNNer-org/spandrel).""" image: ImageField = InputField(description="The input image") - # TODO(ryand): Figure out how to handle all the spandrel models so that you don't have to enter a string. - model_path: str = InputField(description="The path to the upscaling model to use.") + spandrel_image_to_image_model: ModelIdentifierField = InputField( + description=FieldDescriptions.spandrel_image_to_image_model, ui_type=UIType.LoRAModel + ) + @torch.inference_mode() def invoke(self, context: InvocationContext) -> ImageOutput: image = context.images.get_pil(self.image.image_name) # Load the model. - # TODO(ryand): Integrate with the model manager. - model = ModelLoader().load_from_file(self.model_path) - if not isinstance(model, ImageModelDescriptor): - raise ValueError( - f"Loaded a spandrel model of type '{type(model)}'. Only image-to-image models are supported " - "('ImageModelDescriptor')." - ) + spandrel_model_info = context.models.load(self.spandrel_image_to_image_model) - # Select model device and dtype. - torch_dtype = TorchDevice.choose_torch_dtype() - torch_device = TorchDevice.choose_torch_device() - if (torch_dtype == torch.float16 and not model.supports_half) or ( - torch_dtype == torch.bfloat16 and not model.supports_bfloat16 - ): - context.logger.warning( - f"The configured dtype ('{torch_dtype}') is not supported by the {type(model.model)} model. Falling " - "back to 'float32'." - ) - torch_dtype = torch.float32 - model.to(device=torch_device, dtype=torch_dtype) + with spandrel_model_info as spandrel_model: + assert isinstance(spandrel_model, SpandrelImageToImageModel) - # Prepare input image for inference. - image_tensor = pil_to_tensor(image) - image_tensor = image_tensor.to(device=torch_device, dtype=torch_dtype) + # Prepare input image for inference. + image_tensor = pil_to_tensor(image) + image_tensor = image_tensor.to(device=spandrel_model.device, dtype=spandrel_model.dtype) - # Run inference. - image_tensor = model(image_tensor) + # Run inference. + image_tensor = spandrel_model.run(image_tensor) # Convert the output tensor to a PIL image. 
pil_image = tensor_to_pil(image_tensor) diff --git a/invokeai/backend/spandrel_image_to_image_model.py b/invokeai/backend/spandrel_image_to_image_model.py index 6413ebba6b..e5be24e80d 100644 --- a/invokeai/backend/spandrel_image_to_image_model.py +++ b/invokeai/backend/spandrel_image_to_image_model.py @@ -16,6 +16,10 @@ class SpandrelImageToImageModel(RawModel): def __init__(self, spandrel_model: ImageModelDescriptor[Any]): self._spandrel_model = spandrel_model + def run(self, image_tensor: torch.Tensor) -> torch.Tensor: + """Run the image-to-image model.""" + return self._spandrel_model(image_tensor) + @classmethod def load_from_file(cls, file_path: str | Path): model = ModelLoader().load_from_file(file_path) @@ -67,3 +71,13 @@ class SpandrelImageToImageModel(RawModel): # TODO(ryand): spandrel.ImageModelDescriptor.to(...) does not support non_blocking. We will access the model # directly if we want to apply this optimization. self._spandrel_model.to(device=device, dtype=dtype) + + @property + def device(self) -> torch.device: + """The device of the underlying model.""" + return self._spandrel_model.device + + @property + def dtype(self) -> torch.dtype: + """The dtype of the underlying model.""" + return self._spandrel_model.dtype From 29c8ddfb884e5ab9562b27145fe926b3feaff798 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 28 Jun 2024 18:03:09 -0400 Subject: [PATCH 08/38] WIP - A bunch of boilerplate to support Spandrel Image-to-Image models throughout the model manager and the frontend. --- invokeai/app/invocations/fields.py | 2 + invokeai/backend/model_manager/config.py | 12 ++ invokeai/backend/model_manager/probe.py | 19 ++- .../Invocation/fields/InputFieldRenderer.tsx | 8 + ...elImageToImageModelFieldInputComponent.tsx | 56 +++++++ .../src/features/nodes/store/nodesSlice.ts | 6 + .../web/src/features/nodes/types/common.ts | 1 + .../web/src/features/nodes/types/constants.ts | 2 + .../web/src/features/nodes/types/field.ts | 33 ++++ .../util/schema/buildFieldInputInstance.ts | 1 + .../util/schema/buildFieldInputTemplate.ts | 13 ++ .../nodes/util/workflow/validateWorkflow.ts | 1 + .../src/services/api/hooks/modelsByType.ts | 2 + .../frontend/web/src/services/api/schema.ts | 144 ++++++++++++++++-- .../frontend/web/src/services/api/types.ts | 6 + 15 files changed, 287 insertions(+), 19 deletions(-) create mode 100644 invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SpandrelImageToImageModelFieldInputComponent.tsx diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py index b792453b47..f341039fe0 100644 --- a/invokeai/app/invocations/fields.py +++ b/invokeai/app/invocations/fields.py @@ -48,6 +48,7 @@ class UIType(str, Enum, metaclass=MetaEnum): ControlNetModel = "ControlNetModelField" IPAdapterModel = "IPAdapterModelField" T2IAdapterModel = "T2IAdapterModelField" + SpandrelImageToImageModel = "SpandrelImageToImageModelField" # endregion # region Misc Field Types @@ -134,6 +135,7 @@ class FieldDescriptions: sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load" sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load" onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load" + spandrel_image_to_image_model = "Spandrel Image-to-Image model" lora_weight = "The weight at which the LoRA is applied to each model" compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor" raw_prompt = "Raw prompt text (no parsing)" diff --git a/invokeai/backend/model_manager/config.py 
b/invokeai/backend/model_manager/config.py index 9a33cc502e..3579a0c7b2 100644 --- a/invokeai/backend/model_manager/config.py +++ b/invokeai/backend/model_manager/config.py @@ -373,6 +373,17 @@ class T2IAdapterConfig(DiffusersConfigBase, ControlAdapterConfigBase): return Tag(f"{ModelType.T2IAdapter.value}.{ModelFormat.Diffusers.value}") +class SpandrelImageToImageConfig(ModelConfigBase): + """Model config for Spandrel Image to Image models.""" + + type: Literal[ModelType.SpandrelImageToImage] = ModelType.SpandrelImageToImage + format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint + + @staticmethod + def get_tag() -> Tag: + return Tag(f"{ModelType.SpandrelImageToImage.value}.{ModelFormat.Checkpoint.value}") + + def get_model_discriminator_value(v: Any) -> str: """ Computes the discriminator value for a model config. @@ -409,6 +420,7 @@ AnyModelConfig = Annotated[ Annotated[IPAdapterInvokeAIConfig, IPAdapterInvokeAIConfig.get_tag()], Annotated[IPAdapterCheckpointConfig, IPAdapterCheckpointConfig.get_tag()], Annotated[T2IAdapterConfig, T2IAdapterConfig.get_tag()], + Annotated[SpandrelImageToImageConfig, SpandrelImageToImageConfig.get_tag()], Annotated[CLIPVisionDiffusersConfig, CLIPVisionDiffusersConfig.get_tag()], ], Discriminator(get_model_discriminator_value), diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py index 8ba63f0db5..53da5fc152 100644 --- a/invokeai/backend/model_manager/probe.py +++ b/invokeai/backend/model_manager/probe.py @@ -243,10 +243,14 @@ class ModelProbe(object): # Check if the model can be loaded as a SpandrelImageToImageModel. try: - _ = SpandrelImageToImageModel.load_from_state_dict(ckpt) + # TODO(ryand): Figure out why load_from_state_dict() doesn't work as expected. + # _ = SpandrelImageToImageModel.load_from_state_dict(ckpt) + _ = SpandrelImageToImageModel.load_from_file(model_path) return ModelType.SpandrelImageToImage - except Exception: + except Exception as e: # TODO(ryand): Catch a more specific exception type here if we can. + # TODO(ryand): Delete this print statement. 
+ print(e) pass raise InvalidModelConfigException(f"Unable to determine model type for {model_path}") @@ -579,9 +583,9 @@ class T2IAdapterCheckpointProbe(CheckpointProbeBase): raise NotImplementedError() -class SpandrelImageToImageModelProbe(CheckpointProbeBase): +class SpandrelImageToImageCheckpointProbe(CheckpointProbeBase): def get_base_type(self) -> BaseModelType: - raise NotImplementedError() + return BaseModelType.Any ######################################################## @@ -791,6 +795,11 @@ class CLIPVisionFolderProbe(FolderProbeBase): return BaseModelType.Any +class SpandrelImageToImageFolderProbe(FolderProbeBase): + def get_base_type(self) -> BaseModelType: + raise NotImplementedError() + + class T2IAdapterFolderProbe(FolderProbeBase): def get_base_type(self) -> BaseModelType: config_file = self.model_path / "config.json" @@ -820,6 +829,7 @@ ModelProbe.register_probe("diffusers", ModelType.ControlNet, ControlNetFolderPro ModelProbe.register_probe("diffusers", ModelType.IPAdapter, IPAdapterFolderProbe) ModelProbe.register_probe("diffusers", ModelType.CLIPVision, CLIPVisionFolderProbe) ModelProbe.register_probe("diffusers", ModelType.T2IAdapter, T2IAdapterFolderProbe) +ModelProbe.register_probe("diffusers", ModelType.SpandrelImageToImage, SpandrelImageToImageFolderProbe) ModelProbe.register_probe("checkpoint", ModelType.Main, PipelineCheckpointProbe) ModelProbe.register_probe("checkpoint", ModelType.VAE, VaeCheckpointProbe) @@ -829,5 +839,6 @@ ModelProbe.register_probe("checkpoint", ModelType.ControlNet, ControlNetCheckpoi ModelProbe.register_probe("checkpoint", ModelType.IPAdapter, IPAdapterCheckpointProbe) ModelProbe.register_probe("checkpoint", ModelType.CLIPVision, CLIPVisionCheckpointProbe) ModelProbe.register_probe("checkpoint", ModelType.T2IAdapter, T2IAdapterCheckpointProbe) +ModelProbe.register_probe("checkpoint", ModelType.SpandrelImageToImage, SpandrelImageToImageCheckpointProbe) ModelProbe.register_probe("onnx", ModelType.ONNX, ONNXFolderProbe) diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx index 99937ceec4..b67439eb70 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx @@ -32,6 +32,8 @@ import { isSDXLMainModelFieldInputTemplate, isSDXLRefinerModelFieldInputInstance, isSDXLRefinerModelFieldInputTemplate, + isSpandrelImageToImageModelFieldInputInstance, + isSpandrelImageToImageModelFieldInputTemplate, isStringFieldInputInstance, isStringFieldInputTemplate, isT2IAdapterModelFieldInputInstance, @@ -54,6 +56,7 @@ import NumberFieldInputComponent from './inputs/NumberFieldInputComponent'; import RefinerModelFieldInputComponent from './inputs/RefinerModelFieldInputComponent'; import SchedulerFieldInputComponent from './inputs/SchedulerFieldInputComponent'; import SDXLMainModelFieldInputComponent from './inputs/SDXLMainModelFieldInputComponent'; +import SpandrelImageToImageModelFieldInputComponent from './inputs/SpandrelImageToImageModelFieldInputComponent'; import StringFieldInputComponent from './inputs/StringFieldInputComponent'; import T2IAdapterModelFieldInputComponent from './inputs/T2IAdapterModelFieldInputComponent'; import VAEModelFieldInputComponent from './inputs/VAEModelFieldInputComponent'; @@ -125,6 +128,11 @@ const 
InputFieldRenderer = ({ nodeId, fieldName }: InputFieldProps) => { if (isT2IAdapterModelFieldInputInstance(fieldInstance) && isT2IAdapterModelFieldInputTemplate(fieldTemplate)) { return ; } + + if (isSpandrelImageToImageModelFieldInputInstance(fieldInstance) && isSpandrelImageToImageModelFieldInputTemplate(fieldTemplate)) { + return ; + } + if (isColorFieldInputInstance(fieldInstance) && isColorFieldInputTemplate(fieldTemplate)) { return ; } diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SpandrelImageToImageModelFieldInputComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SpandrelImageToImageModelFieldInputComponent.tsx new file mode 100644 index 0000000000..fbb23caa90 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SpandrelImageToImageModelFieldInputComponent.tsx @@ -0,0 +1,56 @@ +import { Combobox, FormControl, Tooltip } from '@invoke-ai/ui-library'; +import { useAppDispatch } from 'app/store/storeHooks'; +import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox'; +import { fieldSpandrelImageToImageModelValueChanged, } from 'features/nodes/store/nodesSlice'; +import type { + SpandrelImageToImageModelFieldInputInstance, + SpandrelImageToImageModelFieldInputTemplate, +} from 'features/nodes/types/field'; +import { memo, useCallback } from 'react'; +import { useSpandrelImageToImageModels } from 'services/api/hooks/modelsByType'; +import type { SpandrelImageToImageModelConfig } from 'services/api/types'; + +import type { FieldComponentProps } from './types'; + +const SpandrelImageToImageModelFieldInputComponent = ( + props: FieldComponentProps +) => { + const { nodeId, field } = props; + const dispatch = useAppDispatch(); + + const [modelConfigs, { isLoading }] = useSpandrelImageToImageModels(); + + const _onChange = useCallback( + (value: SpandrelImageToImageModelConfig | null) => { + if (!value) { + return; + } + dispatch( + + fieldSpandrelImageToImageModelValueChanged({ + nodeId, + fieldName: field.name, + value, + }) + ); + }, + [dispatch, field.name, nodeId] + ); + + const { options, value, onChange } = useGroupedModelCombobox({ + modelConfigs, + onChange: _onChange, + selectedModel: field.value, + isLoading, + }); + + return ( + + + + + + ); +}; + +export default memo(SpandrelImageToImageModelFieldInputComponent); diff --git a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts index 5ebc5de147..e1a74b947d 100644 --- a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts +++ b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts @@ -19,6 +19,7 @@ import type { ModelIdentifierFieldValue, SchedulerFieldValue, SDXLRefinerModelFieldValue, + SpandrelImageToImageModelFieldValue, StatefulFieldValue, StringFieldValue, T2IAdapterModelFieldValue, @@ -39,6 +40,7 @@ import { zModelIdentifierFieldValue, zSchedulerFieldValue, zSDXLRefinerModelFieldValue, + zSpandrelImageToImageModelFieldValue, zStatefulFieldValue, zStringFieldValue, zT2IAdapterModelFieldValue, @@ -333,6 +335,9 @@ export const nodesSlice = createSlice({ fieldT2IAdapterModelValueChanged: (state, action: FieldValueAction) => { fieldValueReducer(state, action, zT2IAdapterModelFieldValue); }, + fieldSpandrelImageToImageModelValueChanged: (state, action: FieldValueAction) => { + fieldValueReducer(state, action, zSpandrelImageToImageModelFieldValue); + }, 
fieldEnumModelValueChanged: (state, action: FieldValueAction) => { fieldValueReducer(state, action, zEnumFieldValue); }, @@ -384,6 +389,7 @@ export const { fieldImageValueChanged, fieldIPAdapterModelValueChanged, fieldT2IAdapterModelValueChanged, + fieldSpandrelImageToImageModelValueChanged, fieldLabelChanged, fieldLoRAModelValueChanged, fieldModelIdentifierValueChanged, diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts index 54e126af3a..2ea8900281 100644 --- a/invokeai/frontend/web/src/features/nodes/types/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/common.ts @@ -66,6 +66,7 @@ const zModelType = z.enum([ 'embedding', 'onnx', 'clip_vision', + 'spandrel_image_to_image', ]); const zSubModelType = z.enum([ 'unet', diff --git a/invokeai/frontend/web/src/features/nodes/types/constants.ts b/invokeai/frontend/web/src/features/nodes/types/constants.ts index 4ede5cd479..05697c384c 100644 --- a/invokeai/frontend/web/src/features/nodes/types/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/types/constants.ts @@ -38,6 +38,7 @@ export const MODEL_TYPES = [ 'VAEField', 'CLIPField', 'T2IAdapterModelField', + 'SpandrelImageToImageModelField', ]; /** @@ -62,6 +63,7 @@ export const FIELD_COLORS: { [key: string]: string } = { MainModelField: 'teal.500', SDXLMainModelField: 'teal.500', SDXLRefinerModelField: 'teal.500', + SpandrelImageToImageModelField: 'teal.500', StringField: 'yellow.500', T2IAdapterField: 'teal.500', T2IAdapterModelField: 'teal.500', diff --git a/invokeai/frontend/web/src/features/nodes/types/field.ts b/invokeai/frontend/web/src/features/nodes/types/field.ts index e2a84e3390..ba9078bec2 100644 --- a/invokeai/frontend/web/src/features/nodes/types/field.ts +++ b/invokeai/frontend/web/src/features/nodes/types/field.ts @@ -139,6 +139,10 @@ const zT2IAdapterModelFieldType = zFieldTypeBase.extend({ name: z.literal('T2IAdapterModelField'), originalType: zStatelessFieldType.optional(), }); +const zSpandrelImageToImageModelFieldType = zFieldTypeBase.extend({ + name: z.literal('SpandrelImageToImageModelField'), + originalType: zStatelessFieldType.optional(), +}); const zSchedulerFieldType = zFieldTypeBase.extend({ name: z.literal('SchedulerField'), originalType: zStatelessFieldType.optional(), @@ -160,6 +164,7 @@ const zStatefulFieldType = z.union([ zControlNetModelFieldType, zIPAdapterModelFieldType, zT2IAdapterModelFieldType, + zSpandrelImageToImageModelFieldType, zColorFieldType, zSchedulerFieldType, ]); @@ -581,6 +586,30 @@ export const isT2IAdapterModelFieldInputTemplate = (val: unknown): val is T2IAda zT2IAdapterModelFieldInputTemplate.safeParse(val).success; // #endregion +// #region SpandrelModelToModelField + +export const zSpandrelImageToImageModelFieldValue = zModelIdentifierField.optional(); +const zSpandrelImageToImageModelFieldInputInstance = zFieldInputInstanceBase.extend({ + value: zSpandrelImageToImageModelFieldValue, +}); +const zSpandrelImageToImageModelFieldInputTemplate = zFieldInputTemplateBase.extend({ + type: zSpandrelImageToImageModelFieldType, + originalType: zFieldType.optional(), + default: zSpandrelImageToImageModelFieldValue, +}); +const zSpandrelImageToImageModelFieldOutputTemplate = zFieldOutputTemplateBase.extend({ + type: zSpandrelImageToImageModelFieldType, +}); +export type SpandrelImageToImageModelFieldValue = z.infer; +export type SpandrelImageToImageModelFieldInputInstance = z.infer; +export type SpandrelImageToImageModelFieldInputTemplate = z.infer; 
+export const isSpandrelImageToImageModelFieldInputInstance = (val: unknown): val is SpandrelImageToImageModelFieldInputInstance => + zSpandrelImageToImageModelFieldInputInstance.safeParse(val).success; +export const isSpandrelImageToImageModelFieldInputTemplate = (val: unknown): val is SpandrelImageToImageModelFieldInputTemplate => + zSpandrelImageToImageModelFieldInputTemplate.safeParse(val).success; +// #endregion + + // #region SchedulerField export const zSchedulerFieldValue = zSchedulerField.optional(); @@ -667,6 +696,7 @@ export const zStatefulFieldValue = z.union([ zControlNetModelFieldValue, zIPAdapterModelFieldValue, zT2IAdapterModelFieldValue, + zSpandrelImageToImageModelFieldValue, zColorFieldValue, zSchedulerFieldValue, ]); @@ -694,6 +724,7 @@ const zStatefulFieldInputInstance = z.union([ zControlNetModelFieldInputInstance, zIPAdapterModelFieldInputInstance, zT2IAdapterModelFieldInputInstance, + zSpandrelImageToImageModelFieldInputInstance, zColorFieldInputInstance, zSchedulerFieldInputInstance, ]); @@ -722,6 +753,7 @@ const zStatefulFieldInputTemplate = z.union([ zControlNetModelFieldInputTemplate, zIPAdapterModelFieldInputTemplate, zT2IAdapterModelFieldInputTemplate, + zSpandrelImageToImageModelFieldInputTemplate, zColorFieldInputTemplate, zSchedulerFieldInputTemplate, zStatelessFieldInputTemplate, @@ -751,6 +783,7 @@ const zStatefulFieldOutputTemplate = z.union([ zControlNetModelFieldOutputTemplate, zIPAdapterModelFieldOutputTemplate, zT2IAdapterModelFieldOutputTemplate, + zSpandrelImageToImageModelFieldOutputTemplate, zColorFieldOutputTemplate, zSchedulerFieldOutputTemplate, ]); diff --git a/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputInstance.ts b/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputInstance.ts index 597779fd61..a5a2d89f03 100644 --- a/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputInstance.ts +++ b/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputInstance.ts @@ -18,6 +18,7 @@ const FIELD_VALUE_FALLBACK_MAP: Record = SDXLRefinerModelField: undefined, StringField: '', T2IAdapterModelField: undefined, + SpandrelImageToImageModelField: undefined, VAEModelField: undefined, ControlNetModelField: undefined, }; diff --git a/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts b/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts index 2b77274526..8478415cd1 100644 --- a/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts +++ b/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts @@ -17,6 +17,7 @@ import type { SchedulerFieldInputTemplate, SDXLMainModelFieldInputTemplate, SDXLRefinerModelFieldInputTemplate, + SpandrelImageToImageModelFieldInputTemplate, StatefulFieldType, StatelessFieldInputTemplate, StringFieldInputTemplate, @@ -263,6 +264,17 @@ const buildT2IAdapterModelFieldInputTemplate: FieldInputTemplateBuilder = ({ schemaObject, baseField, fieldType }) => { + const template: SpandrelImageToImageModelFieldInputTemplate = { + ...baseField, + type: fieldType, + default: schemaObject.default ?? 
undefined, + }; + + return template; +}; const buildBoardFieldInputTemplate: FieldInputTemplateBuilder = ({ schemaObject, baseField, @@ -377,6 +389,7 @@ export const TEMPLATE_BUILDER_MAP: Record { + return config.type === 'spandrel_image_to_image'; +} + export const isControlAdapterModelConfig = ( config: AnyModelConfig ): config is ControlNetModelConfig | T2IAdapterModelConfig | IPAdapterModelConfig => { From 504a42fe6133ea9c949492d4a96eff909b500a82 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 28 Jun 2024 18:18:45 -0400 Subject: [PATCH 09/38] typo: fix UIType on Spandrel Upscaling node. --- invokeai/app/invocations/spandrel_upscale.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/invocations/spandrel_upscale.py b/invokeai/app/invocations/spandrel_upscale.py index 3e26457104..455a466c4f 100644 --- a/invokeai/app/invocations/spandrel_upscale.py +++ b/invokeai/app/invocations/spandrel_upscale.py @@ -61,7 +61,7 @@ class UpscaleSpandrelInvocation(BaseInvocation, WithMetadata, WithBoard): image: ImageField = InputField(description="The input image") spandrel_image_to_image_model: ModelIdentifierField = InputField( - description=FieldDescriptions.spandrel_image_to_image_model, ui_type=UIType.LoRAModel + description=FieldDescriptions.spandrel_image_to_image_model, ui_type=UIType.SpandrelImageToImageModel ) @torch.inference_mode() From c1c8e55e8eb5bf5ca03b6aac35187626f1ce0c52 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Tue, 2 Jul 2024 09:05:02 -0400 Subject: [PATCH 10/38] Fix static check errors. --- .../subpanels/ModelManagerPanel/ModelTypeFilter.tsx | 1 + .../nodes/Invocation/fields/InputFieldRenderer.tsx | 13 +++++++++++-- ...SpandrelImageToImageModelFieldInputComponent.tsx | 3 +-- .../web/src/features/nodes/store/nodesSlice.ts | 5 ++++- .../frontend/web/src/features/nodes/types/field.ts | 9 ++++++--- invokeai/frontend/web/src/services/api/types.ts | 6 ++++-- 6 files changed, 27 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelTypeFilter.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelTypeFilter.tsx index 76802b36e7..85f3fd5bf6 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelTypeFilter.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelTypeFilter.tsx @@ -21,6 +21,7 @@ export const ModelTypeFilter = () => { t2i_adapter: t('common.t2iAdapter'), ip_adapter: t('common.ipAdapter'), clip_vision: 'Clip Vision', + spandrel_image_to_image: 'Spandrel Image to Image', }), [t] ); diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx index b67439eb70..d863def973 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx @@ -129,8 +129,17 @@ const InputFieldRenderer = ({ nodeId, fieldName }: InputFieldProps) => { return ; } - if (isSpandrelImageToImageModelFieldInputInstance(fieldInstance) && isSpandrelImageToImageModelFieldInputTemplate(fieldTemplate)) { - return ; + if ( + isSpandrelImageToImageModelFieldInputInstance(fieldInstance) && + isSpandrelImageToImageModelFieldInputTemplate(fieldTemplate) + ) { + return ( + + ); } if 
(isColorFieldInputInstance(fieldInstance) && isColorFieldInputTemplate(fieldTemplate)) { diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SpandrelImageToImageModelFieldInputComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SpandrelImageToImageModelFieldInputComponent.tsx index fbb23caa90..ccd4eaa797 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SpandrelImageToImageModelFieldInputComponent.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SpandrelImageToImageModelFieldInputComponent.tsx @@ -1,7 +1,7 @@ import { Combobox, FormControl, Tooltip } from '@invoke-ai/ui-library'; import { useAppDispatch } from 'app/store/storeHooks'; import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox'; -import { fieldSpandrelImageToImageModelValueChanged, } from 'features/nodes/store/nodesSlice'; +import { fieldSpandrelImageToImageModelValueChanged } from 'features/nodes/store/nodesSlice'; import type { SpandrelImageToImageModelFieldInputInstance, SpandrelImageToImageModelFieldInputTemplate, @@ -26,7 +26,6 @@ const SpandrelImageToImageModelFieldInputComponent = ( return; } dispatch( - fieldSpandrelImageToImageModelValueChanged({ nodeId, fieldName: field.name, diff --git a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts index e1a74b947d..f9214c1572 100644 --- a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts +++ b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts @@ -335,7 +335,10 @@ export const nodesSlice = createSlice({ fieldT2IAdapterModelValueChanged: (state, action: FieldValueAction) => { fieldValueReducer(state, action, zT2IAdapterModelFieldValue); }, - fieldSpandrelImageToImageModelValueChanged: (state, action: FieldValueAction) => { + fieldSpandrelImageToImageModelValueChanged: ( + state, + action: FieldValueAction + ) => { fieldValueReducer(state, action, zSpandrelImageToImageModelFieldValue); }, fieldEnumModelValueChanged: (state, action: FieldValueAction) => { diff --git a/invokeai/frontend/web/src/features/nodes/types/field.ts b/invokeai/frontend/web/src/features/nodes/types/field.ts index ba9078bec2..925bd40b9d 100644 --- a/invokeai/frontend/web/src/features/nodes/types/field.ts +++ b/invokeai/frontend/web/src/features/nodes/types/field.ts @@ -603,13 +603,16 @@ const zSpandrelImageToImageModelFieldOutputTemplate = zFieldOutputTemplateBase.e export type SpandrelImageToImageModelFieldValue = z.infer; export type SpandrelImageToImageModelFieldInputInstance = z.infer; export type SpandrelImageToImageModelFieldInputTemplate = z.infer; -export const isSpandrelImageToImageModelFieldInputInstance = (val: unknown): val is SpandrelImageToImageModelFieldInputInstance => +export const isSpandrelImageToImageModelFieldInputInstance = ( + val: unknown +): val is SpandrelImageToImageModelFieldInputInstance => zSpandrelImageToImageModelFieldInputInstance.safeParse(val).success; -export const isSpandrelImageToImageModelFieldInputTemplate = (val: unknown): val is SpandrelImageToImageModelFieldInputTemplate => +export const isSpandrelImageToImageModelFieldInputTemplate = ( + val: unknown +): val is SpandrelImageToImageModelFieldInputTemplate => zSpandrelImageToImageModelFieldInputTemplate.safeParse(val).success; // #endregion - // #region SchedulerField export const zSchedulerFieldValue = 
zSchedulerField.optional(); diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 7784d3f0e5..fdfa62342d 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -86,9 +86,11 @@ export const isT2IAdapterModelConfig = (config: AnyModelConfig): config is T2IAd return config.type === 't2i_adapter'; }; -export const isSpandrelImageToImageModelConfig = (config: AnyModelConfig): config is SpandrelImageToImageModelConfig => { +export const isSpandrelImageToImageModelConfig = ( + config: AnyModelConfig +): config is SpandrelImageToImageModelConfig => { return config.type === 'spandrel_image_to_image'; -} +}; export const isControlAdapterModelConfig = ( config: AnyModelConfig From 9328c17ded3d9f36e98a3e2cb97fba42d884e728 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Tue, 2 Jul 2024 09:21:25 -0400 Subject: [PATCH 11/38] Add Spandrel models to the list of models in the Model Manager tab. --- .../subpanels/ModelManagerPanel/ModelList.tsx | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx index 67e65dbfb6..7e9ec2cad6 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx @@ -11,6 +11,7 @@ import { useLoRAModels, useMainModels, useRefinerModels, + useSpandrelImageToImageModels, useT2IAdapterModels, useVAEModels, } from 'services/api/hooks/modelsByType'; @@ -71,6 +72,12 @@ const ModelList = () => { [vaeModels, searchTerm, filteredModelType] ); + const [spandrelImageToImageModels, { isLoading: isLoadingSpandrelImageToImageModels }] = useSpandrelImageToImageModels(); + const filteredSpandrelImageToImageModels = useMemo( + () => modelsFilter(spandrelImageToImageModels, searchTerm, filteredModelType), + [spandrelImageToImageModels, searchTerm, filteredModelType] + ); + const totalFilteredModels = useMemo(() => { return ( filteredMainModels.length + @@ -80,7 +87,8 @@ const ModelList = () => { filteredControlNetModels.length + filteredT2IAdapterModels.length + filteredIPAdapterModels.length + - filteredVAEModels.length + filteredVAEModels.length + + filteredSpandrelImageToImageModels.length ); }, [ filteredControlNetModels.length, @@ -91,6 +99,7 @@ const ModelList = () => { filteredRefinerModels.length, filteredT2IAdapterModels.length, filteredVAEModels.length, + filteredSpandrelImageToImageModels.length, ]); return ( @@ -143,6 +152,11 @@ const ModelList = () => { {!isLoadingT2IAdapterModels && filteredT2IAdapterModels.length > 0 && ( )} + {/* Spandrel Image to Image List */} + {isLoadingSpandrelImageToImageModels && } + {!isLoadingSpandrelImageToImageModels && filteredSpandrelImageToImageModels.length > 0 && ( + + )} {totalFilteredModels === 0 && ( {t('modelManager.noMatchingModels')} From 1ab20f43c858c6cc788c2d0fb18c63c79e5caef9 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Tue, 2 Jul 2024 09:51:51 -0400 Subject: [PATCH 12/38] Tidy spandrel model probe logic, and document the reasons behind the current implementation. 
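
For context, the cheaper detection strategies that were ruled out are spelled
out in the new comment in probe.py below. Option 1 would have looked roughly
like this sketch (hypothetical; it fails because some spandrel architectures
perform real tensor operations during __init__, which meta tensors do not
support):

    meta_state_dict = {k: v.to("meta") for k, v in ckpt.items()}
    _ = SpandrelImageToImageModel.load_from_state_dict(meta_state_dict)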
--- invokeai/backend/model_manager/probe.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py index 53da5fc152..c7267e9f1e 100644 --- a/invokeai/backend/model_manager/probe.py +++ b/invokeai/backend/model_manager/probe.py @@ -4,6 +4,7 @@ from pathlib import Path from typing import Any, Dict, Literal, Optional, Union import safetensors.torch +import spandrel import torch from picklescan.scanner import scan_file_path @@ -242,15 +243,19 @@ class ModelProbe(object): return ModelType.TextualInversion # Check if the model can be loaded as a SpandrelImageToImageModel. + # This check is intentionally performed last, as it can be expensive (it requires loading the model from disk). try: - # TODO(ryand): Figure out why load_from_state_dict() doesn't work as expected. - # _ = SpandrelImageToImageModel.load_from_state_dict(ckpt) + # It would be nice to avoid having to load the Spandrel model from disk here. A couple of options were + # explored to avoid this: + # 1. Call `SpandrelImageToImageModel.load_from_state_dict(ckpt)`, where `ckpt` is a state_dict on the meta + # device. Unfortunately, some Spandrel models perform operations during initialization that are not + # supported on meta tensors. + # 2. Spandrel has internal logic to determine a model's type from its state_dict before loading the model. + # This logic is not exposed in spandrel's public API. We could copy the logic here, but then we have to + # maintain it, and the risk of false positive detections is higher. _ = SpandrelImageToImageModel.load_from_file(model_path) return ModelType.SpandrelImageToImage - except Exception as e: - # TODO(ryand): Catch a more specific exception type here if we can. - # TODO(ryand): Delete this print statement. - print(e) + except spandrel.UnsupportedModelError: pass raise InvalidModelConfigException(f"Unable to determine model type for {model_path}") From 6161aa73afb511729fb3a7ca6b78c02e27fe3b89 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Tue, 2 Jul 2024 10:11:25 -0400 Subject: [PATCH 13/38] Move pil_to_tensor() and tensor_to_pil() utilities to the SpandrelImageToImage class. --- invokeai/app/invocations/spandrel_upscale.py | 44 +----------------- .../backend/spandrel_image_to_image_model.py | 46 ++++++++++++++++++- 2 files changed, 47 insertions(+), 43 deletions(-) diff --git a/invokeai/app/invocations/spandrel_upscale.py b/invokeai/app/invocations/spandrel_upscale.py index 455a466c4f..8123691668 100644 --- a/invokeai/app/invocations/spandrel_upscale.py +++ b/invokeai/app/invocations/spandrel_upscale.py @@ -1,6 +1,4 @@ -import numpy as np import torch -from PIL import Image from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation from invokeai.app.invocations.fields import ( @@ -17,44 +15,6 @@ from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel -def pil_to_tensor(image: Image.Image) -> torch.Tensor: - """Convert PIL Image to torch.Tensor. - - Args: - image (Image.Image): A PIL Image with shape (H, W, C) and values in the range [0, 255]. - - Returns: - torch.Tensor: A torch.Tensor with shape (N, C, H, W) and values in the range [0, 1]. 
- """ - image_np = np.array(image) - # (H, W, C) -> (C, H, W) - image_np = np.transpose(image_np, (2, 0, 1)) - image_np = image_np / 255 - image_tensor = torch.from_numpy(image_np).float() - # (C, H, W) -> (N, C, H, W) - image_tensor = image_tensor.unsqueeze(0) - return image_tensor - - -def tensor_to_pil(tensor: torch.Tensor) -> Image.Image: - """Convert torch.Tensor to PIL Image. - - Args: - tensor (torch.Tensor): A torch.Tensor with shape (N, C, H, W) and values in the range [0, 1]. - - Returns: - Image.Image: A PIL Image with shape (H, W, C) and values in the range [0, 255]. - """ - # (N, C, H, W) -> (C, H, W) - tensor = tensor.squeeze(0) - # (C, H, W) -> (H, W, C) - tensor = tensor.permute(1, 2, 0) - tensor = tensor.clamp(0, 1) - tensor = (tensor * 255).cpu().detach().numpy().astype(np.uint8) - image = Image.fromarray(tensor) - return image - - @invocation("upscale_spandrel", title="Upscale (spandrel)", tags=["upscale"], category="upscale", version="1.0.0") class UpscaleSpandrelInvocation(BaseInvocation, WithMetadata, WithBoard): """Upscales an image using any upscaler supported by spandrel (https://github.com/chaiNNer-org/spandrel).""" @@ -75,13 +35,13 @@ class UpscaleSpandrelInvocation(BaseInvocation, WithMetadata, WithBoard): assert isinstance(spandrel_model, SpandrelImageToImageModel) # Prepare input image for inference. - image_tensor = pil_to_tensor(image) + image_tensor = SpandrelImageToImageModel.pil_to_tensor(image) image_tensor = image_tensor.to(device=spandrel_model.device, dtype=spandrel_model.dtype) # Run inference. image_tensor = spandrel_model.run(image_tensor) # Convert the output tensor to a PIL image. - pil_image = tensor_to_pil(image_tensor) + pil_image = SpandrelImageToImageModel.tensor_to_pil(image_tensor) image_dto = context.images.save(image=pil_image) return ImageOutput.build(image_dto) diff --git a/invokeai/backend/spandrel_image_to_image_model.py b/invokeai/backend/spandrel_image_to_image_model.py index e5be24e80d..d6afcc8a04 100644 --- a/invokeai/backend/spandrel_image_to_image_model.py +++ b/invokeai/backend/spandrel_image_to_image_model.py @@ -1,7 +1,9 @@ from pathlib import Path from typing import Any, Optional +import numpy as np import torch +from PIL import Image from spandrel import ImageModelDescriptor, ModelLoader from invokeai.backend.raw_model import RawModel @@ -16,8 +18,50 @@ class SpandrelImageToImageModel(RawModel): def __init__(self, spandrel_model: ImageModelDescriptor[Any]): self._spandrel_model = spandrel_model + @staticmethod + def pil_to_tensor(image: Image.Image) -> torch.Tensor: + """Convert PIL Image to the torch.Tensor format expected by SpandrelImageToImageModel.run(). + + Args: + image (Image.Image): A PIL Image with shape (H, W, C) and values in the range [0, 255]. + + Returns: + torch.Tensor: A torch.Tensor with shape (N, C, H, W) and values in the range [0, 1]. + """ + image_np = np.array(image) + # (H, W, C) -> (C, H, W) + image_np = np.transpose(image_np, (2, 0, 1)) + image_np = image_np / 255 + image_tensor = torch.from_numpy(image_np).float() + # (C, H, W) -> (N, C, H, W) + image_tensor = image_tensor.unsqueeze(0) + return image_tensor + + @staticmethod + def tensor_to_pil(tensor: torch.Tensor) -> Image.Image: + """Convert a torch.Tensor produced by SpandrelImageToImageModel.run() to a PIL Image. + + Args: + tensor (torch.Tensor): A torch.Tensor with shape (N, C, H, W) and values in the range [0, 1]. + + Returns: + Image.Image: A PIL Image with shape (H, W, C) and values in the range [0, 255]. 
+ """ + # (N, C, H, W) -> (C, H, W) + tensor = tensor.squeeze(0) + # (C, H, W) -> (H, W, C) + tensor = tensor.permute(1, 2, 0) + tensor = tensor.clamp(0, 1) + tensor = (tensor * 255).cpu().detach().numpy().astype(np.uint8) + image = Image.fromarray(tensor) + return image + def run(self, image_tensor: torch.Tensor) -> torch.Tensor: - """Run the image-to-image model.""" + """Run the image-to-image model. + + Args: + image_tensor (torch.Tensor): A torch.Tensor with shape (N, C, H, W) and values in the range [0, 1]. + """ return self._spandrel_model(image_tensor) @classmethod From 114320ee69515ce10cb42e719faa4001c6f81c77 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Tue, 2 Jul 2024 10:14:20 -0400 Subject: [PATCH 14/38] (minor) typo --- invokeai/backend/spandrel_image_to_image_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/backend/spandrel_image_to_image_model.py b/invokeai/backend/spandrel_image_to_image_model.py index d6afcc8a04..1b5a032d17 100644 --- a/invokeai/backend/spandrel_image_to_image_model.py +++ b/invokeai/backend/spandrel_image_to_image_model.py @@ -112,8 +112,8 @@ class SpandrelImageToImageModel(RawModel): ) -> None: """Note: Some models have limited dtype support. Call supports_dtype(...) to check if the dtype is supported. Note: The non_blocking parameter is currently ignored.""" - # TODO(ryand): spandrel.ImageModelDescriptor.to(...) does not support non_blocking. We will access the model - # directly if we want to apply this optimization. + # TODO(ryand): spandrel.ImageModelDescriptor.to(...) does not support non_blocking. We will have to access the + # model directly if we want to apply this optimization. self._spandrel_model.to(device=device, dtype=dtype) @property From 534528b85a82225286f109a869aa52f195a9cbd0 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Tue, 2 Jul 2024 10:18:29 -0400 Subject: [PATCH 15/38] Re-generate schema.ts --- .../frontend/web/src/services/api/schema.ts | 277 +++++++++--------- 1 file changed, 145 insertions(+), 132 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 154f9ab5a5..4c1a9fdbe9 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -6561,6 +6561,12 @@ export type components = { * @default false */ tiled?: boolean; + /** + * Tile Size + * @description The tile size for VAE tiling in pixels (image space). 
If set to 0, the default tile size for the + * @default 0 + */ + tile_size?: number; /** * Fp32 * @description Whether or not to use full float32 precision @@ -7293,145 +7299,146 @@ export type components = { project_id: string | null; }; InvocationOutputMap: { - save_image: components["schemas"]["ImageOutput"]; - integer_math: components["schemas"]["IntegerOutput"]; - segment_anything_processor: components["schemas"]["ImageOutput"]; - sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; - zoe_depth_image_processor: components["schemas"]["ImageOutput"]; - collect: components["schemas"]["CollectInvocationOutput"]; - range: components["schemas"]["IntegerCollectionOutput"]; - unsharp_mask: components["schemas"]["ImageOutput"]; - string_replace: components["schemas"]["StringOutput"]; - face_identifier: components["schemas"]["ImageOutput"]; - heuristic_resize: components["schemas"]["ImageOutput"]; - range_of_size: components["schemas"]["IntegerCollectionOutput"]; - latents_collection: components["schemas"]["LatentsCollectionOutput"]; - color_map_image_processor: components["schemas"]["ImageOutput"]; - img_ilerp: components["schemas"]["ImageOutput"]; - infill_patchmatch: components["schemas"]["ImageOutput"]; - face_off: components["schemas"]["FaceOffOutput"]; - string_collection: components["schemas"]["StringCollectionOutput"]; - sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; - sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - string_join: components["schemas"]["StringOutput"]; - lblend: components["schemas"]["LatentsOutput"]; - conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; - string_split_neg: components["schemas"]["StringPosNegOutput"]; - img_watermark: components["schemas"]["ImageOutput"]; - infill_lama: components["schemas"]["ImageOutput"]; - div: components["schemas"]["IntegerOutput"]; - show_image: components["schemas"]["ImageOutput"]; - tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; - sub: components["schemas"]["IntegerOutput"]; normalbae_image_processor: components["schemas"]["ImageOutput"]; - invert_tensor_mask: components["schemas"]["MaskOutput"]; - create_gradient_mask: components["schemas"]["GradientMaskOutput"]; - string_split: components["schemas"]["String2Output"]; - step_param_easing: components["schemas"]["FloatCollectionOutput"]; - metadata: components["schemas"]["MetadataOutput"]; - img_pad_crop: components["schemas"]["ImageOutput"]; - integer: components["schemas"]["IntegerOutput"]; - img_mul: components["schemas"]["ImageOutput"]; - calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; - color: components["schemas"]["ColorOutput"]; - infill_rgba: components["schemas"]["ImageOutput"]; - t2i_adapter: components["schemas"]["T2IAdapterOutput"]; - denoise_latents: components["schemas"]["LatentsOutput"]; - img_lerp: components["schemas"]["ImageOutput"]; - img_channel_offset: components["schemas"]["ImageOutput"]; - img_crop: components["schemas"]["ImageOutput"]; - alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; - color_correct: components["schemas"]["ImageOutput"]; - calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; - img_hue_adjust: components["schemas"]["ImageOutput"]; - lresize: components["schemas"]["LatentsOutput"]; - img_blur: components["schemas"]["ImageOutput"]; - compel: components["schemas"]["ConditioningOutput"]; - sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - 
float_to_int: components["schemas"]["IntegerOutput"]; - boolean: components["schemas"]["BooleanOutput"]; - string_join_three: components["schemas"]["StringOutput"]; - add: components["schemas"]["IntegerOutput"]; - merge_tiles_to_image: components["schemas"]["ImageOutput"]; - core_metadata: components["schemas"]["MetadataOutput"]; - lscale: components["schemas"]["LatentsOutput"]; - mlsd_image_processor: components["schemas"]["ImageOutput"]; - image_collection: components["schemas"]["ImageCollectionOutput"]; - crop_latents: components["schemas"]["LatentsOutput"]; - image_mask_to_tensor: components["schemas"]["MaskOutput"]; - lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; - ip_adapter: components["schemas"]["IPAdapterOutput"]; - pidi_image_processor: components["schemas"]["ImageOutput"]; - rand_int: components["schemas"]["IntegerOutput"]; - img_conv: components["schemas"]["ImageOutput"]; - scheduler: components["schemas"]["SchedulerOutput"]; - img_paste: components["schemas"]["ImageOutput"]; - noise: components["schemas"]["NoiseOutput"]; - img_scale: components["schemas"]["ImageOutput"]; - i2l: components["schemas"]["LatentsOutput"]; - main_model_loader: components["schemas"]["ModelLoaderOutput"]; - blank_image: components["schemas"]["ImageOutput"]; - mask_edge: components["schemas"]["ImageOutput"]; - seamless: components["schemas"]["SeamlessModeOutput"]; - esrgan: components["schemas"]["ImageOutput"]; - canvas_paste_back: components["schemas"]["ImageOutput"]; - mul: components["schemas"]["IntegerOutput"]; - dynamic_prompt: components["schemas"]["StringCollectionOutput"]; - controlnet: components["schemas"]["ControlOutput"]; - l2i: components["schemas"]["ImageOutput"]; - ideal_size: components["schemas"]["IdealSizeOutput"]; - latents: components["schemas"]["LatentsOutput"]; - midas_depth_image_processor: components["schemas"]["ImageOutput"]; - tomask: components["schemas"]["ImageOutput"]; - float_math: components["schemas"]["FloatOutput"]; - round_float: components["schemas"]["FloatOutput"]; - cv_inpaint: components["schemas"]["ImageOutput"]; - create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; - model_identifier: components["schemas"]["ModelIdentifierOutput"]; - pair_tile_image: components["schemas"]["PairTileImageOutput"]; - lineart_image_processor: components["schemas"]["ImageOutput"]; - img_nsfw: components["schemas"]["ImageOutput"]; - infill_cv2: components["schemas"]["ImageOutput"]; - clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; - dw_openpose_image_processor: components["schemas"]["ImageOutput"]; - img_resize: components["schemas"]["ImageOutput"]; - iterate: components["schemas"]["IterateInvocationOutput"]; - rectangle_mask: components["schemas"]["MaskOutput"]; - canny_image_processor: components["schemas"]["ImageOutput"]; - calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; - mask_from_id: components["schemas"]["ImageOutput"]; - metadata_item: components["schemas"]["MetadataItemOutput"]; - infill_tile: components["schemas"]["ImageOutput"]; - tiled_multi_diffusion_denoise_latents: components["schemas"]["LatentsOutput"]; - img_channel_multiply: components["schemas"]["ImageOutput"]; - boolean_collection: components["schemas"]["BooleanCollectionOutput"]; - lora_loader: components["schemas"]["LoRALoaderOutput"]; - float_collection: components["schemas"]["FloatCollectionOutput"]; - string: components["schemas"]["StringOutput"]; - freeu: components["schemas"]["UNetOutput"]; - lineart_anime_image_processor: 
components["schemas"]["ImageOutput"]; - depth_anything_image_processor: components["schemas"]["ImageOutput"]; - image: components["schemas"]["ImageOutput"]; - face_mask_detection: components["schemas"]["FaceMaskOutput"]; - rand_float: components["schemas"]["FloatOutput"]; - float: components["schemas"]["FloatOutput"]; - random_range: components["schemas"]["IntegerCollectionOutput"]; - integer_collection: components["schemas"]["IntegerCollectionOutput"]; - sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; mask_combine: components["schemas"]["ImageOutput"]; - tile_image_processor: components["schemas"]["ImageOutput"]; - img_chan: components["schemas"]["ImageOutput"]; - vae_loader: components["schemas"]["VAEOutput"]; + save_image: components["schemas"]["ImageOutput"]; + lineart_anime_image_processor: components["schemas"]["ImageOutput"]; + latents: components["schemas"]["LatentsOutput"]; + add: components["schemas"]["IntegerOutput"]; + sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + t2i_adapter: components["schemas"]["T2IAdapterOutput"]; + lblend: components["schemas"]["LatentsOutput"]; + sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; + lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; + esrgan: components["schemas"]["ImageOutput"]; + noise: components["schemas"]["NoiseOutput"]; + conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; + mlsd_image_processor: components["schemas"]["ImageOutput"]; + canvas_paste_back: components["schemas"]["ImageOutput"]; + cv_inpaint: components["schemas"]["ImageOutput"]; prompt_from_file: components["schemas"]["StringCollectionOutput"]; - float_range: components["schemas"]["FloatCollectionOutput"]; - merge_metadata: components["schemas"]["MetadataOutput"]; - sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; - hed_image_processor: components["schemas"]["ImageOutput"]; + unsharp_mask: components["schemas"]["ImageOutput"]; lora_selector: components["schemas"]["LoRASelectorOutput"]; + img_scale: components["schemas"]["ImageOutput"]; + img_resize: components["schemas"]["ImageOutput"]; + integer_collection: components["schemas"]["IntegerCollectionOutput"]; + latents_collection: components["schemas"]["LatentsCollectionOutput"]; conditioning: components["schemas"]["ConditioningOutput"]; - leres_image_processor: components["schemas"]["ImageOutput"]; + segment_anything_processor: components["schemas"]["ImageOutput"]; + calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; + sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; + lscale: components["schemas"]["LatentsOutput"]; + crop_latents: components["schemas"]["LatentsOutput"]; + sub: components["schemas"]["IntegerOutput"]; + infill_patchmatch: components["schemas"]["ImageOutput"]; + create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; + freeu: components["schemas"]["UNetOutput"]; + heuristic_resize: components["schemas"]["ImageOutput"]; + calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; + image: components["schemas"]["ImageOutput"]; + controlnet: components["schemas"]["ControlOutput"]; + img_pad_crop: components["schemas"]["ImageOutput"]; + img_paste: components["schemas"]["ImageOutput"]; + string_split_neg: components["schemas"]["StringPosNegOutput"]; + face_identifier: components["schemas"]["ImageOutput"]; + model_identifier: components["schemas"]["ModelIdentifierOutput"]; + 
integer: components["schemas"]["IntegerOutput"]; + merge_tiles_to_image: components["schemas"]["ImageOutput"]; + scheduler: components["schemas"]["SchedulerOutput"]; + mul: components["schemas"]["IntegerOutput"]; + metadata: components["schemas"]["MetadataOutput"]; + boolean: components["schemas"]["BooleanOutput"]; + lineart_image_processor: components["schemas"]["ImageOutput"]; + clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; + img_ilerp: components["schemas"]["ImageOutput"]; + dw_openpose_image_processor: components["schemas"]["ImageOutput"]; + float_range: components["schemas"]["FloatCollectionOutput"]; + rectangle_mask: components["schemas"]["MaskOutput"]; + metadata_item: components["schemas"]["MetadataItemOutput"]; + seamless: components["schemas"]["SeamlessModeOutput"]; + float_to_int: components["schemas"]["IntegerOutput"]; + calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; + string_replace: components["schemas"]["StringOutput"]; + infill_rgba: components["schemas"]["ImageOutput"]; + depth_anything_image_processor: components["schemas"]["ImageOutput"]; mediapipe_face_processor: components["schemas"]["ImageOutput"]; + img_blur: components["schemas"]["ImageOutput"]; + face_mask_detection: components["schemas"]["FaceMaskOutput"]; + color: components["schemas"]["ColorOutput"]; + string_join: components["schemas"]["StringOutput"]; content_shuffle_image_processor: components["schemas"]["ImageOutput"]; + invert_tensor_mask: components["schemas"]["MaskOutput"]; + float_collection: components["schemas"]["FloatCollectionOutput"]; + create_gradient_mask: components["schemas"]["GradientMaskOutput"]; + integer_math: components["schemas"]["IntegerOutput"]; + ip_adapter: components["schemas"]["IPAdapterOutput"]; + img_conv: components["schemas"]["ImageOutput"]; + img_crop: components["schemas"]["ImageOutput"]; + pair_tile_image: components["schemas"]["PairTileImageOutput"]; + string_join_three: components["schemas"]["StringOutput"]; + merge_metadata: components["schemas"]["MetadataOutput"]; + upscale_spandrel: components["schemas"]["ImageOutput"]; + denoise_latents: components["schemas"]["LatentsOutput"]; + float: components["schemas"]["FloatOutput"]; + collect: components["schemas"]["CollectInvocationOutput"]; + boolean_collection: components["schemas"]["BooleanCollectionOutput"]; + infill_tile: components["schemas"]["ImageOutput"]; + zoe_depth_image_processor: components["schemas"]["ImageOutput"]; + core_metadata: components["schemas"]["MetadataOutput"]; + string_split: components["schemas"]["String2Output"]; + random_range: components["schemas"]["IntegerCollectionOutput"]; + range: components["schemas"]["IntegerCollectionOutput"]; + div: components["schemas"]["IntegerOutput"]; + float_math: components["schemas"]["FloatOutput"]; + l2i: components["schemas"]["ImageOutput"]; + vae_loader: components["schemas"]["VAEOutput"]; + img_channel_offset: components["schemas"]["ImageOutput"]; + img_chan: components["schemas"]["ImageOutput"]; + img_nsfw: components["schemas"]["ImageOutput"]; + canny_image_processor: components["schemas"]["ImageOutput"]; + show_image: components["schemas"]["ImageOutput"]; + lresize: components["schemas"]["LatentsOutput"]; + blank_image: components["schemas"]["ImageOutput"]; + color_correct: components["schemas"]["ImageOutput"]; + tile_image_processor: components["schemas"]["ImageOutput"]; + string: components["schemas"]["StringOutput"]; + pidi_image_processor: components["schemas"]["ImageOutput"]; + dynamic_prompt: 
components["schemas"]["StringCollectionOutput"]; + mask_edge: components["schemas"]["ImageOutput"]; + compel: components["schemas"]["ConditioningOutput"]; + hed_image_processor: components["schemas"]["ImageOutput"]; + img_watermark: components["schemas"]["ImageOutput"]; + alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; + mask_from_id: components["schemas"]["ImageOutput"]; + tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; + tomask: components["schemas"]["ImageOutput"]; + sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + tiled_multi_diffusion_denoise_latents: components["schemas"]["LatentsOutput"]; + sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; + img_hue_adjust: components["schemas"]["ImageOutput"]; + face_off: components["schemas"]["FaceOffOutput"]; + img_mul: components["schemas"]["ImageOutput"]; + rand_int: components["schemas"]["IntegerOutput"]; + image_mask_to_tensor: components["schemas"]["MaskOutput"]; + rand_float: components["schemas"]["FloatOutput"]; + img_lerp: components["schemas"]["ImageOutput"]; + i2l: components["schemas"]["LatentsOutput"]; + image_collection: components["schemas"]["ImageCollectionOutput"]; + round_float: components["schemas"]["FloatOutput"]; + leres_image_processor: components["schemas"]["ImageOutput"]; + lora_loader: components["schemas"]["LoRALoaderOutput"]; + infill_cv2: components["schemas"]["ImageOutput"]; + infill_lama: components["schemas"]["ImageOutput"]; + ideal_size: components["schemas"]["IdealSizeOutput"]; + sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; + midas_depth_image_processor: components["schemas"]["ImageOutput"]; + color_map_image_processor: components["schemas"]["ImageOutput"]; + img_channel_multiply: components["schemas"]["ImageOutput"]; + iterate: components["schemas"]["IterateInvocationOutput"]; + string_collection: components["schemas"]["StringCollectionOutput"]; + step_param_easing: components["schemas"]["FloatCollectionOutput"]; + main_model_loader: components["schemas"]["ModelLoaderOutput"]; + range_of_size: components["schemas"]["IntegerCollectionOutput"]; }; /** * InvocationStartedEvent @@ -7769,6 +7776,12 @@ export type components = { * @default false */ tiled?: boolean; + /** + * Tile Size + * @description The tile size for VAE tiling in pixels (image space). If set to 0, the default tile size for the + * @default 0 + */ + tile_size?: number; /** * Fp32 * @description Whether or not to use full float32 precision From 9d3739244f037302da7c302a59242fc06c798ff7 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 3 Jul 2024 15:57:02 -0400 Subject: [PATCH 16/38] Prettier formatting. 
--- .../subpanels/ModelManagerPanel/ModelList.tsx | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx index 7e9ec2cad6..f531fc2f9a 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx @@ -72,7 +72,8 @@ const ModelList = () => { [vaeModels, searchTerm, filteredModelType] ); - const [spandrelImageToImageModels, { isLoading: isLoadingSpandrelImageToImageModels }] = useSpandrelImageToImageModels(); + const [spandrelImageToImageModels, { isLoading: isLoadingSpandrelImageToImageModels }] = + useSpandrelImageToImageModels(); const filteredSpandrelImageToImageModels = useMemo( () => modelsFilter(spandrelImageToImageModels, searchTerm, filteredModelType), [spandrelImageToImageModels, searchTerm, filteredModelType] @@ -153,9 +154,15 @@ const ModelList = () => { )} {/* Spandrel Image to Image List */} - {isLoadingSpandrelImageToImageModels && } + {isLoadingSpandrelImageToImageModels && ( + + )} {!isLoadingSpandrelImageToImageModels && filteredSpandrelImageToImageModels.length > 0 && ( - + )} {totalFilteredModels === 0 && ( From a405f14ea2853a3a5cf10076511f2d918fceaef6 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 3 Jul 2024 16:38:16 -0400 Subject: [PATCH 17/38] Fix SpandrelImageToImageModel size calculation for the model cache. --- invokeai/backend/model_manager/load/model_util.py | 3 ++- invokeai/backend/spandrel_image_to_image_model.py | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/model_manager/load/model_util.py b/invokeai/backend/model_manager/load/model_util.py index c798b92d8c..57ff81c2ef 100644 --- a/invokeai/backend/model_manager/load/model_util.py +++ b/invokeai/backend/model_manager/load/model_util.py @@ -15,6 +15,7 @@ from invokeai.backend.ip_adapter.ip_adapter import IPAdapter from invokeai.backend.lora import LoRAModelRaw from invokeai.backend.model_manager.config import AnyModel from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel +from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel from invokeai.backend.textual_inversion import TextualInversionModelRaw @@ -33,7 +34,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int: elif isinstance(model, CLIPTokenizer): # TODO(ryand): Accurately calculate the tokenizer's size. It's small enough that it shouldn't matter for now. 
return 0 - elif isinstance(model, (TextualInversionModelRaw, IPAdapter, LoRAModelRaw)): + elif isinstance(model, (TextualInversionModelRaw, IPAdapter, LoRAModelRaw, SpandrelImageToImageModel)): return model.calc_size() else: # TODO(ryand): Promote this from a log to an exception once we are confident that we are handling all of the diff --git a/invokeai/backend/spandrel_image_to_image_model.py b/invokeai/backend/spandrel_image_to_image_model.py index 1b5a032d17..adb78d0d71 100644 --- a/invokeai/backend/spandrel_image_to_image_model.py +++ b/invokeai/backend/spandrel_image_to_image_model.py @@ -125,3 +125,10 @@ class SpandrelImageToImageModel(RawModel): def dtype(self) -> torch.dtype: """The dtype of the underlying model.""" return self._spandrel_model.dtype + + def calc_size(self) -> int: + """Get size of the model in memory in bytes.""" + # HACK(ryand): Fix this issue with circular imports. + from invokeai.backend.model_manager.load.model_util import calc_module_size + + return calc_module_size(self._spandrel_model.model) From d09999736ca416e46d1d7e4c252f7afe81f9e368 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 5 Jul 2024 14:04:08 -0400 Subject: [PATCH 18/38] Rename spandrel models to 'Image-to-Image Model' throughout the UI. --- invokeai/app/invocations/fields.py | 2 +- invokeai/app/invocations/spandrel_upscale.py | 47 --- .../subpanels/ModelManagerPanel/ModelList.tsx | 4 +- .../ModelManagerPanel/ModelTypeFilter.tsx | 2 +- .../frontend/web/src/services/api/schema.ts | 389 +++++++++--------- 5 files changed, 199 insertions(+), 245 deletions(-) delete mode 100644 invokeai/app/invocations/spandrel_upscale.py diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py index f341039fe0..dff2084de7 100644 --- a/invokeai/app/invocations/fields.py +++ b/invokeai/app/invocations/fields.py @@ -135,7 +135,7 @@ class FieldDescriptions: sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load" sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load" onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load" - spandrel_image_to_image_model = "Spandrel Image-to-Image model" + spandrel_image_to_image_model = "Image-to-Image model" lora_weight = "The weight at which the LoRA is applied to each model" compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor" raw_prompt = "Raw prompt text (no parsing)" diff --git a/invokeai/app/invocations/spandrel_upscale.py b/invokeai/app/invocations/spandrel_upscale.py deleted file mode 100644 index 8123691668..0000000000 --- a/invokeai/app/invocations/spandrel_upscale.py +++ /dev/null @@ -1,47 +0,0 @@ -import torch - -from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation -from invokeai.app.invocations.fields import ( - FieldDescriptions, - ImageField, - InputField, - UIType, - WithBoard, - WithMetadata, -) -from invokeai.app.invocations.model import ModelIdentifierField -from invokeai.app.invocations.primitives import ImageOutput -from invokeai.app.services.shared.invocation_context import InvocationContext -from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel - - -@invocation("upscale_spandrel", title="Upscale (spandrel)", tags=["upscale"], category="upscale", version="1.0.0") -class UpscaleSpandrelInvocation(BaseInvocation, WithMetadata, WithBoard): - """Upscales an image using any upscaler supported by spandrel (https://github.com/chaiNNer-org/spandrel).""" - - image: ImageField = InputField(description="The input image") - 
spandrel_image_to_image_model: ModelIdentifierField = InputField( - description=FieldDescriptions.spandrel_image_to_image_model, ui_type=UIType.SpandrelImageToImageModel - ) - - @torch.inference_mode() - def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.images.get_pil(self.image.image_name) - - # Load the model. - spandrel_model_info = context.models.load(self.spandrel_image_to_image_model) - - with spandrel_model_info as spandrel_model: - assert isinstance(spandrel_model, SpandrelImageToImageModel) - - # Prepare input image for inference. - image_tensor = SpandrelImageToImageModel.pil_to_tensor(image) - image_tensor = image_tensor.to(device=spandrel_model.device, dtype=spandrel_model.dtype) - - # Run inference. - image_tensor = spandrel_model.run(image_tensor) - - # Convert the output tensor to a PIL image. - pil_image = SpandrelImageToImageModel.tensor_to_pil(image_tensor) - image_dto = context.images.save(image=pil_image) - return ImageOutput.build(image_dto) diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx index f531fc2f9a..b82917221e 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelList.tsx @@ -155,11 +155,11 @@ const ModelList = () => { )} {/* Spandrel Image to Image List */} {isLoadingSpandrelImageToImageModels && ( - + )} {!isLoadingSpandrelImageToImageModels && filteredSpandrelImageToImageModels.length > 0 && ( diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelTypeFilter.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelTypeFilter.tsx index 85f3fd5bf6..1a2444870b 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelTypeFilter.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelTypeFilter.tsx @@ -21,7 +21,7 @@ export const ModelTypeFilter = () => { t2i_adapter: t('common.t2iAdapter'), ip_adapter: t('common.ipAdapter'), clip_vision: 'Clip Vision', - spandrel_image_to_image: 'Spandrel Image to Image', + spandrel_image_to_image: 'Image-to-Image', }), [t] ); diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 4c1a9fdbe9..3bd322278b 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -4784,7 +4784,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | 
components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | 
components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["UpscaleSpandrelInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | 
components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | 
components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; }; /** * Edges @@ -7155,7 +7155,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | 
components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | 
components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["UpscaleSpandrelInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | 
components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | 
components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node 
@@ -7201,7 +7201,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | 
components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | 
components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["UpscaleSpandrelInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | 
components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | 
components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -7264,7 +7264,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | 
components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | 
components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["UpscaleSpandrelInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | 
components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | 
components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -7299,146 +7299,146 @@ export type components = { project_id: string | null; }; InvocationOutputMap: { - normalbae_image_processor: components["schemas"]["ImageOutput"]; - mask_combine: components["schemas"]["ImageOutput"]; - save_image: components["schemas"]["ImageOutput"]; - lineart_anime_image_processor: components["schemas"]["ImageOutput"]; - latents: components["schemas"]["LatentsOutput"]; - add: components["schemas"]["IntegerOutput"]; - sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - t2i_adapter: components["schemas"]["T2IAdapterOutput"]; - lblend: components["schemas"]["LatentsOutput"]; - sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; - lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; - esrgan: components["schemas"]["ImageOutput"]; - noise: components["schemas"]["NoiseOutput"]; - conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; - mlsd_image_processor: components["schemas"]["ImageOutput"]; - canvas_paste_back: components["schemas"]["ImageOutput"]; - cv_inpaint: components["schemas"]["ImageOutput"]; - prompt_from_file: components["schemas"]["StringCollectionOutput"]; - unsharp_mask: components["schemas"]["ImageOutput"]; - lora_selector: components["schemas"]["LoRASelectorOutput"]; - img_scale: components["schemas"]["ImageOutput"]; - img_resize: components["schemas"]["ImageOutput"]; - integer_collection: components["schemas"]["IntegerCollectionOutput"]; - latents_collection: components["schemas"]["LatentsCollectionOutput"]; - conditioning: components["schemas"]["ConditioningOutput"]; - segment_anything_processor: components["schemas"]["ImageOutput"]; - calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; - sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; - lscale: components["schemas"]["LatentsOutput"]; - crop_latents: components["schemas"]["LatentsOutput"]; - sub: components["schemas"]["IntegerOutput"]; - infill_patchmatch: components["schemas"]["ImageOutput"]; - create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; - freeu: components["schemas"]["UNetOutput"]; - heuristic_resize: components["schemas"]["ImageOutput"]; - calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; - image: components["schemas"]["ImageOutput"]; - controlnet: components["schemas"]["ControlOutput"]; - img_pad_crop: components["schemas"]["ImageOutput"]; - img_paste: components["schemas"]["ImageOutput"]; - 
string_split_neg: components["schemas"]["StringPosNegOutput"]; - face_identifier: components["schemas"]["ImageOutput"]; - model_identifier: components["schemas"]["ModelIdentifierOutput"]; - integer: components["schemas"]["IntegerOutput"]; - merge_tiles_to_image: components["schemas"]["ImageOutput"]; - scheduler: components["schemas"]["SchedulerOutput"]; - mul: components["schemas"]["IntegerOutput"]; - metadata: components["schemas"]["MetadataOutput"]; - boolean: components["schemas"]["BooleanOutput"]; - lineart_image_processor: components["schemas"]["ImageOutput"]; - clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; - img_ilerp: components["schemas"]["ImageOutput"]; - dw_openpose_image_processor: components["schemas"]["ImageOutput"]; - float_range: components["schemas"]["FloatCollectionOutput"]; - rectangle_mask: components["schemas"]["MaskOutput"]; - metadata_item: components["schemas"]["MetadataItemOutput"]; - seamless: components["schemas"]["SeamlessModeOutput"]; - float_to_int: components["schemas"]["IntegerOutput"]; - calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; - string_replace: components["schemas"]["StringOutput"]; - infill_rgba: components["schemas"]["ImageOutput"]; - depth_anything_image_processor: components["schemas"]["ImageOutput"]; - mediapipe_face_processor: components["schemas"]["ImageOutput"]; - img_blur: components["schemas"]["ImageOutput"]; - face_mask_detection: components["schemas"]["FaceMaskOutput"]; - color: components["schemas"]["ColorOutput"]; - string_join: components["schemas"]["StringOutput"]; - content_shuffle_image_processor: components["schemas"]["ImageOutput"]; - invert_tensor_mask: components["schemas"]["MaskOutput"]; - float_collection: components["schemas"]["FloatCollectionOutput"]; - create_gradient_mask: components["schemas"]["GradientMaskOutput"]; - integer_math: components["schemas"]["IntegerOutput"]; - ip_adapter: components["schemas"]["IPAdapterOutput"]; - img_conv: components["schemas"]["ImageOutput"]; - img_crop: components["schemas"]["ImageOutput"]; - pair_tile_image: components["schemas"]["PairTileImageOutput"]; - string_join_three: components["schemas"]["StringOutput"]; - merge_metadata: components["schemas"]["MetadataOutput"]; - upscale_spandrel: components["schemas"]["ImageOutput"]; - denoise_latents: components["schemas"]["LatentsOutput"]; - float: components["schemas"]["FloatOutput"]; - collect: components["schemas"]["CollectInvocationOutput"]; - boolean_collection: components["schemas"]["BooleanCollectionOutput"]; - infill_tile: components["schemas"]["ImageOutput"]; - zoe_depth_image_processor: components["schemas"]["ImageOutput"]; - core_metadata: components["schemas"]["MetadataOutput"]; - string_split: components["schemas"]["String2Output"]; - random_range: components["schemas"]["IntegerCollectionOutput"]; - range: components["schemas"]["IntegerCollectionOutput"]; - div: components["schemas"]["IntegerOutput"]; - float_math: components["schemas"]["FloatOutput"]; - l2i: components["schemas"]["ImageOutput"]; - vae_loader: components["schemas"]["VAEOutput"]; - img_channel_offset: components["schemas"]["ImageOutput"]; - img_chan: components["schemas"]["ImageOutput"]; - img_nsfw: components["schemas"]["ImageOutput"]; - canny_image_processor: components["schemas"]["ImageOutput"]; - show_image: components["schemas"]["ImageOutput"]; - lresize: components["schemas"]["LatentsOutput"]; - blank_image: components["schemas"]["ImageOutput"]; - color_correct: components["schemas"]["ImageOutput"]; - 
tile_image_processor: components["schemas"]["ImageOutput"]; - string: components["schemas"]["StringOutput"]; - pidi_image_processor: components["schemas"]["ImageOutput"]; - dynamic_prompt: components["schemas"]["StringCollectionOutput"]; - mask_edge: components["schemas"]["ImageOutput"]; - compel: components["schemas"]["ConditioningOutput"]; - hed_image_processor: components["schemas"]["ImageOutput"]; - img_watermark: components["schemas"]["ImageOutput"]; - alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; - mask_from_id: components["schemas"]["ImageOutput"]; - tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; - tomask: components["schemas"]["ImageOutput"]; - sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - tiled_multi_diffusion_denoise_latents: components["schemas"]["LatentsOutput"]; - sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; - img_hue_adjust: components["schemas"]["ImageOutput"]; - face_off: components["schemas"]["FaceOffOutput"]; - img_mul: components["schemas"]["ImageOutput"]; - rand_int: components["schemas"]["IntegerOutput"]; - image_mask_to_tensor: components["schemas"]["MaskOutput"]; - rand_float: components["schemas"]["FloatOutput"]; - img_lerp: components["schemas"]["ImageOutput"]; - i2l: components["schemas"]["LatentsOutput"]; - image_collection: components["schemas"]["ImageCollectionOutput"]; - round_float: components["schemas"]["FloatOutput"]; - leres_image_processor: components["schemas"]["ImageOutput"]; - lora_loader: components["schemas"]["LoRALoaderOutput"]; - infill_cv2: components["schemas"]["ImageOutput"]; - infill_lama: components["schemas"]["ImageOutput"]; - ideal_size: components["schemas"]["IdealSizeOutput"]; - sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; - midas_depth_image_processor: components["schemas"]["ImageOutput"]; - color_map_image_processor: components["schemas"]["ImageOutput"]; - img_channel_multiply: components["schemas"]["ImageOutput"]; - iterate: components["schemas"]["IterateInvocationOutput"]; - string_collection: components["schemas"]["StringCollectionOutput"]; - step_param_easing: components["schemas"]["FloatCollectionOutput"]; - main_model_loader: components["schemas"]["ModelLoaderOutput"]; range_of_size: components["schemas"]["IntegerCollectionOutput"]; + color_correct: components["schemas"]["ImageOutput"]; + image: components["schemas"]["ImageOutput"]; + latents_collection: components["schemas"]["LatentsCollectionOutput"]; + img_blur: components["schemas"]["ImageOutput"]; + lineart_anime_image_processor: components["schemas"]["ImageOutput"]; + sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + boolean: components["schemas"]["BooleanOutput"]; + infill_cv2: components["schemas"]["ImageOutput"]; + i2l: components["schemas"]["LatentsOutput"]; + tile_image_processor: components["schemas"]["ImageOutput"]; + dw_openpose_image_processor: components["schemas"]["ImageOutput"]; + canvas_paste_back: components["schemas"]["ImageOutput"]; + heuristic_resize: components["schemas"]["ImageOutput"]; + mediapipe_face_processor: components["schemas"]["ImageOutput"]; + conditioning: components["schemas"]["ConditioningOutput"]; + mask_from_id: components["schemas"]["ImageOutput"]; + img_nsfw: components["schemas"]["ImageOutput"]; + conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; + invert_tensor_mask: components["schemas"]["MaskOutput"]; + sub: components["schemas"]["IntegerOutput"]; + infill_lama: 
components["schemas"]["ImageOutput"]; + calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; + leres_image_processor: components["schemas"]["ImageOutput"]; + img_scale: components["schemas"]["ImageOutput"]; + mask_edge: components["schemas"]["ImageOutput"]; + esrgan: components["schemas"]["ImageOutput"]; + sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + pair_tile_image: components["schemas"]["PairTileImageOutput"]; + string: components["schemas"]["StringOutput"]; + calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; + t2i_adapter: components["schemas"]["T2IAdapterOutput"]; + img_paste: components["schemas"]["ImageOutput"]; + midas_depth_image_processor: components["schemas"]["ImageOutput"]; + pidi_image_processor: components["schemas"]["ImageOutput"]; + sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; + tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; + clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; + blank_image: components["schemas"]["ImageOutput"]; + img_channel_multiply: components["schemas"]["ImageOutput"]; + canny_image_processor: components["schemas"]["ImageOutput"]; + l2i: components["schemas"]["ImageOutput"]; + img_mul: components["schemas"]["ImageOutput"]; + vae_loader: components["schemas"]["VAEOutput"]; + string_collection: components["schemas"]["StringCollectionOutput"]; + tomask: components["schemas"]["ImageOutput"]; + infill_patchmatch: components["schemas"]["ImageOutput"]; + compel: components["schemas"]["ConditioningOutput"]; + calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; + tiled_multi_diffusion_denoise_latents: components["schemas"]["LatentsOutput"]; + string_join_three: components["schemas"]["StringOutput"]; + rectangle_mask: components["schemas"]["MaskOutput"]; + crop_latents: components["schemas"]["LatentsOutput"]; + mul: components["schemas"]["IntegerOutput"]; + merge_tiles_to_image: components["schemas"]["ImageOutput"]; + integer_math: components["schemas"]["IntegerOutput"]; + iterate: components["schemas"]["IterateInvocationOutput"]; + range: components["schemas"]["IntegerCollectionOutput"]; + collect: components["schemas"]["CollectInvocationOutput"]; + img_ilerp: components["schemas"]["ImageOutput"]; + rand_float: components["schemas"]["FloatOutput"]; + latents: components["schemas"]["LatentsOutput"]; + face_identifier: components["schemas"]["ImageOutput"]; + depth_anything_image_processor: components["schemas"]["ImageOutput"]; + round_float: components["schemas"]["FloatOutput"]; + create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; + core_metadata: components["schemas"]["MetadataOutput"]; + float_range: components["schemas"]["FloatCollectionOutput"]; + face_mask_detection: components["schemas"]["FaceMaskOutput"]; + ip_adapter: components["schemas"]["IPAdapterOutput"]; + lscale: components["schemas"]["LatentsOutput"]; + lineart_image_processor: components["schemas"]["ImageOutput"]; + integer_collection: components["schemas"]["IntegerCollectionOutput"]; + cv_inpaint: components["schemas"]["ImageOutput"]; + mlsd_image_processor: components["schemas"]["ImageOutput"]; + ideal_size: components["schemas"]["IdealSizeOutput"]; + segment_anything_processor: components["schemas"]["ImageOutput"]; + img_watermark: components["schemas"]["ImageOutput"]; + hed_image_processor: components["schemas"]["ImageOutput"]; + normalbae_image_processor: components["schemas"]["ImageOutput"]; + infill_tile: 
components["schemas"]["ImageOutput"]; + sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; + denoise_latents: components["schemas"]["LatentsOutput"]; + unsharp_mask: components["schemas"]["ImageOutput"]; + float_collection: components["schemas"]["FloatCollectionOutput"]; + show_image: components["schemas"]["ImageOutput"]; + img_conv: components["schemas"]["ImageOutput"]; + model_identifier: components["schemas"]["ModelIdentifierOutput"]; + step_param_easing: components["schemas"]["FloatCollectionOutput"]; + float_math: components["schemas"]["FloatOutput"]; + color_map_image_processor: components["schemas"]["ImageOutput"]; + spandrel_image_to_image: components["schemas"]["ImageOutput"]; + img_crop: components["schemas"]["ImageOutput"]; + lblend: components["schemas"]["LatentsOutput"]; + random_range: components["schemas"]["IntegerCollectionOutput"]; + float: components["schemas"]["FloatOutput"]; + merge_metadata: components["schemas"]["MetadataOutput"]; + alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; + image_collection: components["schemas"]["ImageCollectionOutput"]; + image_mask_to_tensor: components["schemas"]["MaskOutput"]; + color: components["schemas"]["ColorOutput"]; + img_hue_adjust: components["schemas"]["ImageOutput"]; + string_split: components["schemas"]["String2Output"]; + prompt_from_file: components["schemas"]["StringCollectionOutput"]; + metadata: components["schemas"]["MetadataOutput"]; + freeu: components["schemas"]["UNetOutput"]; + create_gradient_mask: components["schemas"]["GradientMaskOutput"]; + img_chan: components["schemas"]["ImageOutput"]; + div: components["schemas"]["IntegerOutput"]; + save_image: components["schemas"]["ImageOutput"]; + img_pad_crop: components["schemas"]["ImageOutput"]; + lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; + scheduler: components["schemas"]["SchedulerOutput"]; + face_off: components["schemas"]["FaceOffOutput"]; + img_lerp: components["schemas"]["ImageOutput"]; + zoe_depth_image_processor: components["schemas"]["ImageOutput"]; + add: components["schemas"]["IntegerOutput"]; + string_split_neg: components["schemas"]["StringPosNegOutput"]; + img_channel_offset: components["schemas"]["ImageOutput"]; + main_model_loader: components["schemas"]["ModelLoaderOutput"]; + rand_int: components["schemas"]["IntegerOutput"]; + float_to_int: components["schemas"]["IntegerOutput"]; + controlnet: components["schemas"]["ControlOutput"]; + metadata_item: components["schemas"]["MetadataItemOutput"]; + integer: components["schemas"]["IntegerOutput"]; + dynamic_prompt: components["schemas"]["StringCollectionOutput"]; + sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; + lora_selector: components["schemas"]["LoRASelectorOutput"]; + lresize: components["schemas"]["LatentsOutput"]; + noise: components["schemas"]["NoiseOutput"]; + lora_loader: components["schemas"]["LoRALoaderOutput"]; + seamless: components["schemas"]["SeamlessModeOutput"]; + mask_combine: components["schemas"]["ImageOutput"]; + string_join: components["schemas"]["StringOutput"]; + string_replace: components["schemas"]["StringOutput"]; + sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; + img_resize: components["schemas"]["ImageOutput"]; + boolean_collection: components["schemas"]["BooleanCollectionOutput"]; + infill_rgba: components["schemas"]["ImageOutput"]; + content_shuffle_image_processor: components["schemas"]["ImageOutput"]; }; /** * InvocationStartedEvent @@ -7474,7 +7474,7 @@ export type 
components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | 
components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] 
| components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["UpscaleSpandrelInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | 
components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | 
components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -11934,6 +11934,57 @@ export type components = { */ format: "checkpoint"; }; + /** + * Image-to-Image + * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel). + */ + SpandrelImageToImageInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The input image + * @default null + */ + image?: components["schemas"]["ImageField"]; + /** + * Image-to-Image Model + * @description Image-to-Image model + * @default null + */ + image_to_image_model?: components["schemas"]["ModelIdentifierField"]; + /** + * type + * @default spandrel_image_to_image + * @constant + * @enum {string} + */ + type: "spandrel_image_to_image"; + }; /** StarterModel */ StarterModel: { /** Description */ @@ -13323,56 +13374,6 @@ export type components = { */ type: "unsharp_mask"; }; - /** - * Upscale (spandrel) - * @description Upscales an image using any upscaler supported by spandrel (https://github.com/chaiNNer-org/spandrel). - */ - UpscaleSpandrelInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * @description The input image - * @default null - */ - image?: components["schemas"]["ImageField"]; - /** - * @description Spandrel Image-to-Image model - * @default null - */ - spandrel_image_to_image_model?: components["schemas"]["ModelIdentifierField"]; - /** - * type - * @default upscale_spandrel - * @constant - * @enum {string} - */ - type: "upscale_spandrel"; - }; /** Upscaler */ Upscaler: { /** From 0ce6ec634d26a63e0df022391616e61c53a55d03 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 5 Jul 2024 14:05:12 -0400 Subject: [PATCH 19/38] Do not assign the result of SpandrelImageToImageModel.load_from_file(...) during probe to ensure that the model is immediately gc'd. 
--- invokeai/backend/model_manager/probe.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py index c7267e9f1e..3b36e2f5af 100644 --- a/invokeai/backend/model_manager/probe.py +++ b/invokeai/backend/model_manager/probe.py @@ -253,7 +253,7 @@ class ModelProbe(object): # 2. Spandrel has internal logic to determine a model's type from its state_dict before loading the model. # This logic is not exposed in spandrel's public API. We could copy the logic here, but then we have to # maintain it, and the risk of false positive detections is higher. - _ = SpandrelImageToImageModel.load_from_file(model_path) + SpandrelImageToImageModel.load_from_file(model_path) return ModelType.SpandrelImageToImage except spandrel.UnsupportedModelError: pass From ecbff2aa44e346bc70baef7c1dab9431c4185130 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 5 Jul 2024 14:57:05 -0400 Subject: [PATCH 20/38] Whoops... forgot to commit this file. --- .../invocations/spandrel_image_to_image.py | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 invokeai/app/invocations/spandrel_image_to_image.py diff --git a/invokeai/app/invocations/spandrel_image_to_image.py b/invokeai/app/invocations/spandrel_image_to_image.py new file mode 100644 index 0000000000..76cf31480c --- /dev/null +++ b/invokeai/app/invocations/spandrel_image_to_image.py @@ -0,0 +1,49 @@ +import torch + +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import ( + FieldDescriptions, + ImageField, + InputField, + UIType, + WithBoard, + WithMetadata, +) +from invokeai.app.invocations.model import ModelIdentifierField +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel + + +@invocation("spandrel_image_to_image", title="Image-to-Image", tags=["upscale"], category="upscale", version="1.0.0") +class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard): + """Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel).""" + + image: ImageField = InputField(description="The input image") + image_to_image_model: ModelIdentifierField = InputField( + title="Image-to-Image Model", + description=FieldDescriptions.spandrel_image_to_image_model, + ui_type=UIType.SpandrelImageToImageModel, + ) + + @torch.inference_mode() + def invoke(self, context: InvocationContext) -> ImageOutput: + image = context.images.get_pil(self.image.image_name) + + # Load the model. + spandrel_model_info = context.models.load(self.image_to_image_model) + + with spandrel_model_info as spandrel_model: + assert isinstance(spandrel_model, SpandrelImageToImageModel) + + # Prepare input image for inference. + image_tensor = SpandrelImageToImageModel.pil_to_tensor(image) + image_tensor = image_tensor.to(device=spandrel_model.device, dtype=spandrel_model.dtype) + + # Run inference. + image_tensor = spandrel_model.run(image_tensor) + + # Convert the output tensor to a PIL image. 
+ pil_image = SpandrelImageToImageModel.tensor_to_pil(image_tensor)
+ image_dto = context.images.save(image=pil_image)
+ return ImageOutput.build(image_dto)

From 650902dc2924f7aced87fb74906e8a444b5ae3c1 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Wed, 10 Jul 2024 13:59:17 -0400
Subject: [PATCH 21/38] Fix broken unit test caused by non-existent model path.

---
 invokeai/backend/model_manager/probe.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py
index 42727a31d7..1929b3f4fd 100644
--- a/invokeai/backend/model_manager/probe.py
+++ b/invokeai/backend/model_manager/probe.py
@@ -256,6 +256,12 @@ class ModelProbe(object):
 return ModelType.SpandrelImageToImage
 except spandrel.UnsupportedModelError:
 pass
+ except RuntimeError as e:
+ if "No such file or directory" in str(e):
+ # This error is expected if the model_path does not exist (which is the case in some unit tests).
+ pass
+ else:
+ raise e

 raise InvalidModelConfigException(f"Unable to determine model type for {model_path}")

From ab775726b7b61ce06142a1e9f2546d5528829ba5 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Tue, 9 Jul 2024 17:52:28 -0400
Subject: [PATCH 22/38] Add tiling support to the SpandrelImageToImage node.

---
 .../invocations/spandrel_image_to_image.py | 79 +++++++++++++++++--
 .../backend/spandrel_image_to_image_model.py | 5 ++
 2 files changed, 77 insertions(+), 7 deletions(-)

diff --git a/invokeai/app/invocations/spandrel_image_to_image.py b/invokeai/app/invocations/spandrel_image_to_image.py
index 76cf31480c..1591f51bec 100644
--- a/invokeai/app/invocations/spandrel_image_to_image.py
+++ b/invokeai/app/invocations/spandrel_image_to_image.py
@@ -1,4 +1,7 @@
+import numpy as np
 import torch
+from PIL import Image
+from tqdm import tqdm

 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
@@ -13,9 +16,11 @@ from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
+from invokeai.backend.tiles.tiles import calc_tiles_min_overlap, merge_tiles_with_linear_blending
+from invokeai.backend.tiles.utils import TBLR, Tile


-@invocation("spandrel_image_to_image", title="Image-to-Image", tags=["upscale"], category="upscale", version="1.0.0")
+@invocation("spandrel_image_to_image", title="Image-to-Image", tags=["upscale"], category="upscale", version="1.1.0")
 class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
 """Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel)."""

@@ -25,25 +30,85 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
 description=FieldDescriptions.spandrel_image_to_image_model,
 ui_type=UIType.SpandrelImageToImageModel,
 )
+ tile_size: int = InputField(
+ default=512, description="The tile size for tiled image-to-image. Set to 0 to disable tiling."
+ ) + + def _scale_tile(self, tile: Tile, scale: int) -> Tile: + return Tile( + coords=TBLR( + top=tile.coords.top * scale, + bottom=tile.coords.bottom * scale, + left=tile.coords.left * scale, + right=tile.coords.right * scale, + ), + overlap=TBLR( + top=tile.overlap.top * scale, + bottom=tile.overlap.bottom * scale, + left=tile.overlap.left * scale, + right=tile.overlap.right * scale, + ), + ) @torch.inference_mode() def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.images.get_pil(self.image.image_name) + # Images are converted to RGB, because most models don't support an alpha channel. In the future, we may want to + # revisit this. + image = context.images.get_pil(self.image.image_name, mode="RGB") + + # Compute the image tiles. + if self.tile_size > 0: + min_overlap = 20 + tiles = calc_tiles_min_overlap( + image_height=image.height, + image_width=image.width, + tile_height=self.tile_size, + tile_width=self.tile_size, + min_overlap=min_overlap, + ) + else: + # No tiling. Generate a single tile that covers the entire image. + min_overlap = 0 + tiles = [ + Tile( + coords=TBLR(top=0, bottom=image.height, left=0, right=image.width), + overlap=TBLR(top=0, bottom=0, left=0, right=0), + ) + ] + + # Prepare input image for inference. + image_tensor = SpandrelImageToImageModel.pil_to_tensor(image) # Load the model. spandrel_model_info = context.models.load(self.image_to_image_model) + # Run the model on each tile. + output_tiles: list[torch.Tensor] = [] + scale: int = 1 with spandrel_model_info as spandrel_model: assert isinstance(spandrel_model, SpandrelImageToImageModel) - # Prepare input image for inference. - image_tensor = SpandrelImageToImageModel.pil_to_tensor(image) + # Scale the tiles for re-assembling the final image. + scale = spandrel_model.scale + scaled_tiles = [self._scale_tile(tile, scale=scale) for tile in tiles] + image_tensor = image_tensor.to(device=spandrel_model.device, dtype=spandrel_model.dtype) - # Run inference. - image_tensor = spandrel_model.run(image_tensor) + for tile in tqdm(tiles, desc="Upscaling Tiles"): + output_tile = spandrel_model.run( + image_tensor[:, :, tile.coords.top : tile.coords.bottom, tile.coords.left : tile.coords.right] + ) + output_tiles.append(output_tile) + + # Merge tiles into output image. + np_output_tiles = [np.array(SpandrelImageToImageModel.tensor_to_pil(tile)) for tile in output_tiles] + _, channels, height, width = image_tensor.shape + np_out_image = np.zeros((height * scale, width * scale, channels), dtype=np.uint8) + merge_tiles_with_linear_blending( + dst_image=np_out_image, tiles=scaled_tiles, tile_images=np_output_tiles, blend_amount=min_overlap // 2 + ) # Convert the output tensor to a PIL image. - pil_image = SpandrelImageToImageModel.tensor_to_pil(image_tensor) + pil_image = Image.fromarray(np_out_image) image_dto = context.images.save(image=pil_image) return ImageOutput.build(image_dto) diff --git a/invokeai/backend/spandrel_image_to_image_model.py b/invokeai/backend/spandrel_image_to_image_model.py index adb78d0d71..ccf02c57ac 100644 --- a/invokeai/backend/spandrel_image_to_image_model.py +++ b/invokeai/backend/spandrel_image_to_image_model.py @@ -126,6 +126,11 @@ class SpandrelImageToImageModel(RawModel): """The dtype of the underlying model.""" return self._spandrel_model.dtype + @property + def scale(self) -> int: + """The scale of the model (e.g. 
1x, 2x, 4x, etc.).""" + return self._spandrel_model.scale + def calc_size(self) -> int: """Get size of the model in memory in bytes.""" # HACK(ryand): Fix this issue with circular imports. From d868d5d584a2f50efffb004da2a15bdfb50f0166 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 10 Jul 2024 12:25:00 -0400 Subject: [PATCH 23/38] Make SpandrelImageToImage tiling much faster. --- .../invocations/spandrel_image_to_image.py | 45 ++++++++++++++----- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/invokeai/app/invocations/spandrel_image_to_image.py b/invokeai/app/invocations/spandrel_image_to_image.py index 1591f51bec..788a59f36b 100644 --- a/invokeai/app/invocations/spandrel_image_to_image.py +++ b/invokeai/app/invocations/spandrel_image_to_image.py @@ -1,6 +1,4 @@ -import numpy as np import torch -from PIL import Image from tqdm import tqdm from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation @@ -16,7 +14,7 @@ from invokeai.app.invocations.model import ModelIdentifierField from invokeai.app.invocations.primitives import ImageOutput from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel -from invokeai.backend.tiles.tiles import calc_tiles_min_overlap, merge_tiles_with_linear_blending +from invokeai.backend.tiles.tiles import calc_tiles_min_overlap from invokeai.backend.tiles.utils import TBLR, Tile @@ -50,6 +48,29 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard): ), ) + def _merge_tiles(self, tiles: list[Tile], tile_tensors: list[torch.Tensor], out_tensor: torch.Tensor): + """A simple tile merging algorithm. tile_tensors are merged into out_tensor. When adjacent tiles overlap, we + split the overlap in half. No 'blending' is applied. + """ + # Sort tiles and images first by left x coordinate, then by top y coordinate. During tile processing, we want to + # iterate over tiles left-to-right, top-to-bottom. + tiles_and_tensors = list(zip(tiles, tile_tensors, strict=True)) + tiles_and_tensors = sorted(tiles_and_tensors, key=lambda x: x[0].coords.left) + tiles_and_tensors = sorted(tiles_and_tensors, key=lambda x: x[0].coords.top) + + for tile, tile_tensor in tiles_and_tensors: + # We only keep half of the overlap on the top and left side of the tile. We do this in case there are edge + # artifacts. We don't bother with any 'blending' in the current implementation - for most upscalers it seems + # unnecessary, but we may find a need in the future. + top_overlap = tile.overlap.top // 2 + left_overlap = tile.overlap.left // 2 + out_tensor[ + :, + :, + tile.coords.top + top_overlap : tile.coords.bottom, + tile.coords.left + left_overlap : tile.coords.right, + ] = tile_tensor[:, :, top_overlap:, left_overlap:] + @torch.inference_mode() def invoke(self, context: InvocationContext) -> ImageOutput: # Images are converted to RGB, because most models don't support an alpha channel. In the future, we may want to @@ -100,15 +121,19 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard): ) output_tiles.append(output_tile) - # Merge tiles into output image. 
- np_output_tiles = [np.array(SpandrelImageToImageModel.tensor_to_pil(tile)) for tile in output_tiles] - _, channels, height, width = image_tensor.shape - np_out_image = np.zeros((height * scale, width * scale, channels), dtype=np.uint8) - merge_tiles_with_linear_blending( - dst_image=np_out_image, tiles=scaled_tiles, tile_images=np_output_tiles, blend_amount=min_overlap // 2 + # TODO(ryand): There are opportunities to reduce peak VRAM utilization here if it becomes an issue: + # - Keep the input tensor on the CPU. + # - Move each tile to the GPU as it is processed. + # - Move output tensors back to the CPU as they are produced, and merge them into the output tensor. + + # Merge the tiles to an output tensor. + batch_size, channels, height, width = image_tensor.shape + output_tensor = torch.zeros( + (batch_size, channels, height * scale, width * scale), dtype=image_tensor.dtype, device=image_tensor.device ) + self._merge_tiles(scaled_tiles, output_tiles, output_tensor) # Convert the output tensor to a PIL image. - pil_image = Image.fromarray(np_out_image) + pil_image = SpandrelImageToImageModel.tensor_to_pil(output_tensor) image_dto = context.images.save(image=pil_image) return ImageOutput.build(image_dto) From d0d295599215fcb7eb87981a79ad10450595679b Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 10 Jul 2024 12:56:09 -0400 Subject: [PATCH 24/38] Reduce peak VRAM utilization of SpandrelImageToImageInvocation. --- .../invocations/spandrel_image_to_image.py | 82 +++++++++---------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/invokeai/app/invocations/spandrel_image_to_image.py b/invokeai/app/invocations/spandrel_image_to_image.py index 788a59f36b..650c9bb547 100644 --- a/invokeai/app/invocations/spandrel_image_to_image.py +++ b/invokeai/app/invocations/spandrel_image_to_image.py @@ -1,4 +1,6 @@ +import numpy as np import torch +from PIL import Image from tqdm import tqdm from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation @@ -48,29 +50,6 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard): ), ) - def _merge_tiles(self, tiles: list[Tile], tile_tensors: list[torch.Tensor], out_tensor: torch.Tensor): - """A simple tile merging algorithm. tile_tensors are merged into out_tensor. When adjacent tiles overlap, we - split the overlap in half. No 'blending' is applied. - """ - # Sort tiles and images first by left x coordinate, then by top y coordinate. During tile processing, we want to - # iterate over tiles left-to-right, top-to-bottom. - tiles_and_tensors = list(zip(tiles, tile_tensors, strict=True)) - tiles_and_tensors = sorted(tiles_and_tensors, key=lambda x: x[0].coords.left) - tiles_and_tensors = sorted(tiles_and_tensors, key=lambda x: x[0].coords.top) - - for tile, tile_tensor in tiles_and_tensors: - # We only keep half of the overlap on the top and left side of the tile. We do this in case there are edge - # artifacts. We don't bother with any 'blending' in the current implementation - for most upscalers it seems - # unnecessary, but we may find a need in the future. - top_overlap = tile.overlap.top // 2 - left_overlap = tile.overlap.left // 2 - out_tensor[ - :, - :, - tile.coords.top + top_overlap : tile.coords.bottom, - tile.coords.left + left_overlap : tile.coords.right, - ] = tile_tensor[:, :, top_overlap:, left_overlap:] - @torch.inference_mode() def invoke(self, context: InvocationContext) -> ImageOutput: # Images are converted to RGB, because most models don't support an alpha channel. 
In the future, we may want to @@ -97,6 +76,11 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard): ) ] + # Sort tiles first by left x coordinate, then by top y coordinate. During tile processing, we want to iterate + # over tiles left-to-right, top-to-bottom. + tiles = sorted(tiles, key=lambda x: x.coords.left) + tiles = sorted(tiles, key=lambda x: x.coords.top) + # Prepare input image for inference. image_tensor = SpandrelImageToImageModel.pil_to_tensor(image) @@ -104,8 +88,6 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard): spandrel_model_info = context.models.load(self.image_to_image_model) # Run the model on each tile. - output_tiles: list[torch.Tensor] = [] - scale: int = 1 with spandrel_model_info as spandrel_model: assert isinstance(spandrel_model, SpandrelImageToImageModel) @@ -113,27 +95,45 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard): scale = spandrel_model.scale scaled_tiles = [self._scale_tile(tile, scale=scale) for tile in tiles] + # Prepare the output tensor. + _, channels, height, width = image_tensor.shape + output_tensor = torch.zeros( + (height * scale, width * scale, channels), dtype=torch.uint8, device=torch.device("cpu") + ) + image_tensor = image_tensor.to(device=spandrel_model.device, dtype=spandrel_model.dtype) - for tile in tqdm(tiles, desc="Upscaling Tiles"): - output_tile = spandrel_model.run( - image_tensor[:, :, tile.coords.top : tile.coords.bottom, tile.coords.left : tile.coords.right] - ) - output_tiles.append(output_tile) + for tile, scaled_tile in tqdm(list(zip(tiles, scaled_tiles, strict=True)), desc="Upscaling Tiles"): + # Extract the current tile from the input tensor. + input_tile = image_tensor[ + :, :, tile.coords.top : tile.coords.bottom, tile.coords.left : tile.coords.right + ].to(device=spandrel_model.device, dtype=spandrel_model.dtype) - # TODO(ryand): There are opportunities to reduce peak VRAM utilization here if it becomes an issue: - # - Keep the input tensor on the CPU. - # - Move each tile to the GPU as it is processed. - # - Move output tensors back to the CPU as they are produced, and merge them into the output tensor. + # Run the model on the tile. + output_tile = spandrel_model.run(input_tile) - # Merge the tiles to an output tensor. - batch_size, channels, height, width = image_tensor.shape - output_tensor = torch.zeros( - (batch_size, channels, height * scale, width * scale), dtype=image_tensor.dtype, device=image_tensor.device - ) - self._merge_tiles(scaled_tiles, output_tiles, output_tensor) + # Convert the output tile into the output tensor's format. + # (N, C, H, W) -> (C, H, W) + output_tile = output_tile.squeeze(0) + # (C, H, W) -> (H, W, C) + output_tile = output_tile.permute(1, 2, 0) + output_tile = output_tile.clamp(0, 1) + output_tile = (output_tile * 255).to(dtype=torch.uint8, device=torch.device("cpu")) + + # Merge the output tile into the output tensor. + # We only keep half of the overlap on the top and left side of the tile. We do this in case there are + # edge artifacts. We don't bother with any 'blending' in the current implementation - for most upscalers + # it seems unnecessary, but we may find a need in the future. 
+ top_overlap = scaled_tile.overlap.top // 2 + left_overlap = scaled_tile.overlap.left // 2 + output_tensor[ + scaled_tile.coords.top + top_overlap : scaled_tile.coords.bottom, + scaled_tile.coords.left + left_overlap : scaled_tile.coords.right, + :, + ] = output_tile[top_overlap:, left_overlap:, :] # Convert the output tensor to a PIL image. - pil_image = SpandrelImageToImageModel.tensor_to_pil(output_tensor) + np_image = output_tensor.detach().numpy().astype(np.uint8) + pil_image = Image.fromarray(np_image) image_dto = context.images.save(image=pil_image) return ImageOutput.build(image_dto) From 0428ce73a9c10a1fe449e361b6957ecf4dc5c71d Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 11 Jul 2024 15:42:33 -0400 Subject: [PATCH 25/38] Add early cancellation to SpandrelImageToImageInvocation. --- invokeai/app/invocations/spandrel_image_to_image.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/invokeai/app/invocations/spandrel_image_to_image.py b/invokeai/app/invocations/spandrel_image_to_image.py index 650c9bb547..bbe31af644 100644 --- a/invokeai/app/invocations/spandrel_image_to_image.py +++ b/invokeai/app/invocations/spandrel_image_to_image.py @@ -14,6 +14,7 @@ from invokeai.app.invocations.fields import ( ) from invokeai.app.invocations.model import ModelIdentifierField from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.session_processor.session_processor_common import CanceledException from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel from invokeai.backend.tiles.tiles import calc_tiles_min_overlap @@ -104,6 +105,10 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard): image_tensor = image_tensor.to(device=spandrel_model.device, dtype=spandrel_model.dtype) for tile, scaled_tile in tqdm(list(zip(tiles, scaled_tiles, strict=True)), desc="Upscaling Tiles"): + # Exit early if the invocation has been canceled. + if context.util.is_canceled(): + raise CanceledException + # Extract the current tile from the input tensor. 
input_tile = image_tensor[ :, :, tile.coords.top : tile.coords.bottom, tile.coords.left : tile.coords.right From 84f136e7374e9f24e5099ea53c0124c573b6eb80 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Fri, 12 Jul 2024 17:09:27 +0200 Subject: [PATCH 26/38] translationBot(ui): update translation (Italian) Currently translated at 98.4% (1262 of 1282 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 25c2e5b9a8..3704ba66a6 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -962,8 +962,8 @@ "selectedForAutoAdd": "Selezionato per l'aggiunta automatica", "addSharedBoard": "Aggiungi una Bacheca Condivisa", "boards": "Bacheche", - "private": "Privata", - "shared": "Condivisa", + "private": "Bacheche private", + "shared": "Bacheche condivise", "addPrivateBoard": "Aggiungi una Bacheca Privata" }, "controlnet": { From 2c1a91241e2afe2cb30287ebcfb0ee04c7fd37a5 Mon Sep 17 00:00:00 2001 From: "psychedelicious@windows" <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 13 Jul 2024 14:11:44 +1000 Subject: [PATCH 27/38] fix(app): windows indefinite hang while finding port For some reason, I started getting this indefinite hang when the app checks if port 9090 is available. After some fiddling around, I found that adding a timeout resolves the issue. I confirmed that the util still works by starting the app on 9090, then starting a second instance. The second instance correctly saw 9090 in use and moved to 9091. --- invokeai/app/api_app.py | 1 + 1 file changed, 1 insertion(+) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index dca0bc139d..88820a0c4c 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -161,6 +161,7 @@ def invoke_api() -> None: # Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon! # https://github.com/WaylonWalker with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.settimeout(1) if s.connect_ex(("localhost", port)) == 0: return find_port(port=port + 1) else: From 7c0dfd74a5f6159ec4671eef47dec1b70c226e33 Mon Sep 17 00:00:00 2001 From: "psychedelicious@windows" <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 13 Jul 2024 14:28:03 +1000 Subject: [PATCH 28/38] fix(api): deleting large images fails This issue is caused by a race condition. When a large image is served to the client, it is done using a streaming `FileResponse`. This concurrently serves the image straight from disk. The file is kept open by FastAPI until the image is fully served. When a user deletes an image before the file is done serving, the delete fails because the file is still held by FastAPI. To reproduce the issue: - Create a very large image (8k reliably creates the issue). - Create a smaller image, so that the first image in the gallery is not the large image. - Refresh the app. The small image should be selected. - Select the large image and immediately delete it. You have to be fast, to delete it before it finishes loading. - In the terminal, we expect to see an error saying `Failed to delete image file`, and the image does not disappear from the UI. - After a short wait, once the image has fully loaded, try deleting it again. 
We expect this to work. The workaround is to instead serve the image from memory. Loading the image to memory is very fast, so there is only a tiny window in which we could create the race condition, but it technically could still occur, because FastAPI is asynchronous and handles requests concurrently. Once we load the image into memory, deletions of that image will work. Then we return a normal `Response` object with the image bytes. This is essentially what `FileResponse` does - except it uses `anyio.open_file`, which is async. The tradeoff is that the server thread is blocked while opening the file. I think this is a fair tradeoff. A future enhancement could be to implement soft deletion of images (db is already set up for this), and then clean up deleted image files on startup/shutdown. We could move back to using the async `FileResponse` for best responsiveness in the server without any risk of race conditions. --- invokeai/app/api/routers/images.py | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 8e3824ce93..2bc0b48251 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -233,21 +233,14 @@ async def get_image_workflow( ) async def get_image_full( image_name: str = Path(description="The name of full-resolution image file to get"), -) -> FileResponse: +) -> Response: """Gets a full-resolution image file""" try: path = ApiDependencies.invoker.services.images.get_path(image_name) - - if not ApiDependencies.invoker.services.images.validate_path(path): - raise HTTPException(status_code=404) - - response = FileResponse( - path, - media_type="image/png", - filename=image_name, - content_disposition_type="inline", - ) + with open(path, "rb") as f: + content = f.read() + response = Response(content, media_type="image/png") response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}" return response except Exception: @@ -268,15 +261,14 @@ async def get_image_full( ) async def get_image_thumbnail( image_name: str = Path(description="The name of thumbnail image file to get"), -) -> FileResponse: +) -> Response: """Gets a thumbnail image file""" try: path = ApiDependencies.invoker.services.images.get_path(image_name, thumbnail=True) - if not ApiDependencies.invoker.services.images.validate_path(path): - raise HTTPException(status_code=404) - - response = FileResponse(path, media_type="image/webp", content_disposition_type="inline") + with open(path, "rb") as f: + content = f.read() + response = Response(content, media_type="image/webp") response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}" return response except Exception: From 3ecd14f39424c4a51e83ea4eb65e128a78c7fee8 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 13 Jul 2024 14:31:00 +1000 Subject: [PATCH 29/38] chore: bump version to 4.2.6rc1 --- invokeai/version/invokeai_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/version/invokeai_version.py b/invokeai/version/invokeai_version.py index da1546b0a0..021e20d448 100644 --- a/invokeai/version/invokeai_version.py +++ b/invokeai/version/invokeai_version.py @@ -1 +1 @@ -__version__ = "4.2.6a1" +__version__ = "4.2.6rc1" From 5cbe9fafb2ae82a27f6cd9d9e5371e074f8af761 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 14 Jul 2024 21:08:33 +1000 Subject: [PATCH 30/38] fix(ui): 
clear selection when deleting last image in board

---
 .../listeners/imageDeletionListeners.ts | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners.ts
index 056346cb68..489adb7476 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners.ts
@@ -136,7 +136,12 @@ export const addImageDeletionListeners = (startAppListening: AppStartListening)
 if (data) {
 const deletedImageIndex = data.items.findIndex((i) => i.image_name === imageDTO.image_name);
 const nextImage = data.items[deletedImageIndex + 1] ?? data.items[0] ?? null;
- dispatch(imageSelected(nextImage));
+ if (nextImage?.image_name === imageDTO.image_name) {
+ // If the next image is the same as the deleted one, it means it was the last image, reset selection
+ dispatch(imageSelected(null));
+ } else {
+ dispatch(imageSelected(nextImage));
+ }
 }
 }

@@ -176,6 +181,8 @@ export const addImageDeletionListeners = (startAppListening: AppStartListening)
 const queryArgs = selectListImagesQueryArgs(state);
 const { data } = imagesApi.endpoints.listImages.select(queryArgs)(state);
 if (data) {
+ // When we delete multiple images, we clear the selection. Then, the next time we load images, we will
+ // select the first one. This is handled below in the listener for `imagesApi.endpoints.listImages.matchFulfilled`.
 dispatch(imageSelected(null));
 }
 }

From 8539c601e6cd7dee2a438cf6db03f9ae903baa80 Mon Sep 17 00:00:00 2001
From: Riccardo Giovanetti
Date: Sun, 14 Jul 2024 18:09:17 +0000
Subject: [PATCH 31/38] translationBot(ui): update translation (Italian)

Currently translated at 98.4% (1262 of 1282 strings)

Co-authored-by: Riccardo Giovanetti
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
---
 invokeai/frontend/web/public/locales/it.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json
index 3704ba66a6..eced64a1e3 100644
--- a/invokeai/frontend/web/public/locales/it.json
+++ b/invokeai/frontend/web/public/locales/it.json
@@ -1028,7 +1028,7 @@
 "minConfidence": "Confidenza minima",
 "scribble": "Scarabocchio",
 "amult": "Angolo di illuminazione",
- "coarse": "Approssimativo",
+ "coarse": "Grossolano",
 "resizeSimple": "Ridimensiona (semplice)",
 "large": "Grande",
 "small": "Piccolo",
@@ -1353,7 +1353,7 @@
 "lora": {
 "heading": "LoRA",
 "paragraphs": [
- "Modelli leggeri utilizzati insieme ai modelli base."
+ "Modelli concettuali utilizzati insieme ai modelli di base."
] }, "controlNet": { From d7d59d704b4e1aa436b487dfdb55918c5dd1cdfd Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 15 Jul 2024 12:54:17 +1000 Subject: [PATCH 32/38] chore: update default workflows - Update all existing defaults - Add Tiled MultiDiffusion workflow --- ...SRGAN Upscaling with Canny ControlNet.json | 470 ++-- ...Adapter & Canny (See Note in Details).json | 654 ++--- .../Multi ControlNet (Canny & Depth).json | 551 +++-- .../MultiDiffusion SDXL (Beta).json | 2181 +++++++++++++++++ .../default_workflows/Prompt from File.json | 229 +- .../Text to Image - SD1.5.json | 225 +- .../Text to Image - SDXL.json | 515 ++-- .../Text to Image with LoRA.json | 185 +- .../Tiled Upscaling (Beta).json | 590 ++--- 9 files changed, 3962 insertions(+), 1638 deletions(-) create mode 100644 invokeai/app/services/workflow_records/default_workflows/MultiDiffusion SDXL (Beta).json diff --git a/invokeai/app/services/workflow_records/default_workflows/ESRGAN Upscaling with Canny ControlNet.json b/invokeai/app/services/workflow_records/default_workflows/ESRGAN Upscaling with Canny ControlNet.json index dd98eca18f..2cadcae961 100644 --- a/invokeai/app/services/workflow_records/default_workflows/ESRGAN Upscaling with Canny ControlNet.json +++ b/invokeai/app/services/workflow_records/default_workflows/ESRGAN Upscaling with Canny ControlNet.json @@ -2,7 +2,7 @@ "name": "ESRGAN Upscaling with Canny ControlNet", "author": "InvokeAI", "description": "Sample workflow for using Upscaling with ControlNet with SD1.5", - "version": "2.0.0", + "version": "2.1.0", "contact": "invoke@invoke.ai", "tags": "upscale, controlnet, default", "notes": "", @@ -36,14 +36,13 @@ "version": "3.0.0", "category": "default" }, - "id": "0e71a27e-a22b-4a9b-b20a-6d789abff2bc", "nodes": [ { - "id": "e8bf67fe-67de-4227-87eb-79e86afdfc74", + "id": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b", "type": "invocation", "data": { - "id": "e8bf67fe-67de-4227-87eb-79e86afdfc74", - "version": "1.1.1", + "id": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b", + "version": "1.2.0", "nodePack": "invokeai", "label": "", "notes": "", @@ -57,6 +56,10 @@ "clip": { "name": "clip", "label": "" + }, + "mask": { + "name": "mask", + "label": "" } }, "isOpen": true, @@ -65,122 +68,63 @@ }, "position": { "x": 1250, - "y": 1500 + "y": 1200 } }, { - "id": "d8ace142-c05f-4f1d-8982-88dc7473958d", + "id": "5ca498a4-c8c8-4580-a396-0c984317205d", "type": "invocation", "data": { - "id": "d8ace142-c05f-4f1d-8982-88dc7473958d", - "version": "1.0.2", + "id": "5ca498a4-c8c8-4580-a396-0c984317205d", + "version": "1.1.0", "nodePack": "invokeai", "label": "", "notes": "", - "type": "main_model_loader", + "type": "i2l", "inputs": { - "model": { - "name": "model", + "image": { + "name": "image", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", "label": "", - "value": { - "key": "5cd43ca0-dd0a-418d-9f7e-35b2b9d5e106", - "hash": "blake3:6987f323017f597213cc3264250edf57056d21a40a0a85d83a1a33a7d44dc41a", - "name": "Deliberate_v5", - "base": "sd-1", - "type": "main" - } - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 700, - "y": 1375 - } - }, - { - "id": "771bdf6a-0813-4099-a5d8-921a138754d4", - "type": "invocation", - "data": { - "id": "771bdf6a-0813-4099-a5d8-921a138754d4", - "version": "1.0.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "image", - "inputs": { - "image": { - "name": "image", - "label": "Image To 
Upscale", - "value": { - "image_name": "d2e42ba6-d420-496b-82db-91c9b75956c1.png" - } - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 344.5593065887157, - "y": 1698.161491368619 - } - }, - { - "id": "f7564dd2-9539-47f2-ac13-190804461f4e", - "type": "invocation", - "data": { - "id": "f7564dd2-9539-47f2-ac13-190804461f4e", - "version": "1.3.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "esrgan", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "image": { - "name": "image", - "label": "" - }, - "model_name": { - "name": "model_name", - "label": "Upscaler Model", - "value": "RealESRGAN_x2plus.pth" + "value": false }, "tile_size": { "name": "tile_size", "label": "", - "value": 400 + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": false } }, - "isOpen": true, + "isOpen": false, "isIntermediate": true, "useCache": true }, "position": { - "x": 717.3863693661265, - "y": 1721.9215053134815 + "x": 1650, + "y": 1675 } }, { - "id": "1d887701-df21-4966-ae6e-a7d82307d7bd", + "id": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0", "type": "invocation", "data": { - "id": "1d887701-df21-4966-ae6e-a7d82307d7bd", - "version": "1.3.2", + "id": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0", + "version": "1.3.0", "nodePack": "invokeai", "label": "", "notes": "", - "type": "canny_image_processor", + "type": "l2i", "inputs": { "board": { "name": "board", @@ -190,38 +134,37 @@ "name": "metadata", "label": "" }, - "image": { - "name": "image", + "latents": { + "name": "latents", "label": "" }, - "detect_resolution": { - "name": "detect_resolution", - "label": "", - "value": 512 + "vae": { + "name": "vae", + "label": "" }, - "image_resolution": { - "name": "image_resolution", + "tiled": { + "name": "tiled", "label": "", - "value": 512 + "value": false }, - "low_threshold": { - "name": "low_threshold", + "tile_size": { + "name": "tile_size", "label": "", - "value": 100 + "value": 0 }, - "high_threshold": { - "name": "high_threshold", + "fp32": { + "name": "fp32", "label": "", - "value": 200 + "value": false } }, "isOpen": true, - "isIntermediate": true, + "isIntermediate": false, "useCache": true }, "position": { - "x": 1200, - "y": 1900 + "x": 2559.4751127537957, + "y": 1246.6000376741406 } }, { @@ -229,7 +172,7 @@ "type": "invocation", "data": { "id": "ca1d020c-89a8-4958-880a-016d28775cfa", - "version": "1.1.1", + "version": "1.1.2", "nodePack": "invokeai", "label": "", "notes": "", @@ -285,6 +228,193 @@ "y": 1902.9649340196056 } }, + { + "id": "1d887701-df21-4966-ae6e-a7d82307d7bd", + "type": "invocation", + "data": { + "id": "1d887701-df21-4966-ae6e-a7d82307d7bd", + "version": "1.3.3", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "canny_image_processor", + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "image": { + "name": "image", + "label": "" + }, + "detect_resolution": { + "name": "detect_resolution", + "label": "", + "value": 512 + }, + "image_resolution": { + "name": "image_resolution", + "label": "", + "value": 512 + }, + "low_threshold": { + "name": "low_threshold", + "label": "", + "value": 100 + }, + "high_threshold": { + "name": "high_threshold", + "label": "", + "value": 200 + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 1200, + "y": 1900 + } + }, + { + "id": "d8ace142-c05f-4f1d-8982-88dc7473958d", + 
"type": "invocation", + "data": { + "id": "d8ace142-c05f-4f1d-8982-88dc7473958d", + "version": "1.0.3", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "main_model_loader", + "inputs": { + "model": { + "name": "model", + "label": "", + "value": { + "key": "5cd43ca0-dd0a-418d-9f7e-35b2b9d5e106", + "hash": "blake3:6987f323017f597213cc3264250edf57056d21a40a0a85d83a1a33a7d44dc41a", + "name": "Deliberate_v5", + "base": "sd-1", + "type": "main" + } + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 700, + "y": 1375 + } + }, + { + "id": "e8bf67fe-67de-4227-87eb-79e86afdfc74", + "type": "invocation", + "data": { + "id": "e8bf67fe-67de-4227-87eb-79e86afdfc74", + "version": "1.2.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "compel", + "inputs": { + "prompt": { + "name": "prompt", + "label": "", + "value": "" + }, + "clip": { + "name": "clip", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 1250, + "y": 1500 + } + }, + { + "id": "771bdf6a-0813-4099-a5d8-921a138754d4", + "type": "invocation", + "data": { + "id": "771bdf6a-0813-4099-a5d8-921a138754d4", + "version": "1.0.2", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "image", + "inputs": { + "image": { + "name": "image", + "label": "Image To Upscale" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 344.5593065887157, + "y": 1698.161491368619 + } + }, + { + "id": "f7564dd2-9539-47f2-ac13-190804461f4e", + "type": "invocation", + "data": { + "id": "f7564dd2-9539-47f2-ac13-190804461f4e", + "version": "1.3.2", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "esrgan", + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "image": { + "name": "image", + "label": "" + }, + "model_name": { + "name": "model_name", + "label": "Upscaler Model", + "value": "RealESRGAN_x2plus.pth" + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 400 + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 717.3863693661265, + "y": 1721.9215053134815 + } + }, { "id": "f50624ce-82bf-41d0-bdf7-8aab11a80d48", "type": "invocation", @@ -413,122 +543,6 @@ "y": 1232.6219060454753 } }, - { - "id": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0", - "type": "invocation", - "data": { - "id": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0", - "version": "1.2.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "l2i", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "latents": { - "name": "latents", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": false - } - }, - "isOpen": true, - "isIntermediate": false, - "useCache": true - }, - "position": { - "x": 2559.4751127537957, - "y": 1246.6000376741406 - } - }, - { - "id": "5ca498a4-c8c8-4580-a396-0c984317205d", - "type": "invocation", - "data": { - "id": "5ca498a4-c8c8-4580-a396-0c984317205d", - "version": "1.0.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "i2l", - "inputs": { - "image": { - "name": "image", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - 
"name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": false - } - }, - "isOpen": false, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 1650, - "y": 1675 - } - }, - { - "id": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b", - "type": "invocation", - "data": { - "id": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b", - "version": "1.1.1", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "compel", - "inputs": { - "prompt": { - "name": "prompt", - "label": "", - "value": "" - }, - "clip": { - "name": "clip", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 1250, - "y": 1200 - } - }, { "id": "eb8f6f8a-c7b1-4914-806e-045ee2717a35", "type": "invocation", diff --git a/invokeai/app/services/workflow_records/default_workflows/Face Detailer with IP-Adapter & Canny (See Note in Details).json b/invokeai/app/services/workflow_records/default_workflows/Face Detailer with IP-Adapter & Canny (See Note in Details).json index 8c7dcee30c..481ba85e64 100644 --- a/invokeai/app/services/workflow_records/default_workflows/Face Detailer with IP-Adapter & Canny (See Note in Details).json +++ b/invokeai/app/services/workflow_records/default_workflows/Face Detailer with IP-Adapter & Canny (See Note in Details).json @@ -2,7 +2,7 @@ "name": "Face Detailer with IP-Adapter & Canny (See Note in Details)", "author": "kosmoskatten", "description": "A workflow to add detail to and improve faces. This workflow is most effective when used with a model that creates realistic outputs. ", - "version": "2.0.0", + "version": "2.1.0", "contact": "invoke@invoke.ai", "tags": "face detailer, IP-Adapter, Canny", "notes": "Set this image as the blur mask: https://i.imgur.com/Gxi61zP.png", @@ -37,16 +37,219 @@ } ], "meta": { - "category": "default", - "version": "3.0.0" + "version": "3.0.0", + "category": "default" }, "nodes": [ { - "id": "44f2c190-eb03-460d-8d11-a94d13b33f19", + "id": "c6359181-6479-40ec-bf3a-b7e8451683b8", "type": "invocation", "data": { - "id": "44f2c190-eb03-460d-8d11-a94d13b33f19", - "version": "1.1.1", + "id": "c6359181-6479-40ec-bf3a-b7e8451683b8", + "version": "1.0.3", + "label": "", + "notes": "", + "type": "main_model_loader", + "inputs": { + "model": { + "name": "model", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 2031.5518710051792, + "y": -492.1742944307074 + } + }, + { + "id": "8fe598c6-d447-44fa-a165-4975af77d080", + "type": "invocation", + "data": { + "id": "8fe598c6-d447-44fa-a165-4975af77d080", + "version": "1.3.3", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "canny_image_processor", + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "image": { + "name": "image", + "label": "" + }, + "detect_resolution": { + "name": "detect_resolution", + "label": "", + "value": 512 + }, + "image_resolution": { + "name": "image_resolution", + "label": "", + "value": 512 + }, + "low_threshold": { + "name": "low_threshold", + "label": "", + "value": 100 + }, + "high_threshold": { + "name": "high_threshold", + "label": "", + "value": 200 + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 3519.4131037388597, + "y": 576.7946795840575 + } + }, + { + "id": "f60b6161-8f26-42f6-89ff-545e6011e501", + "type": "invocation", + "data": { + "id": 
"f60b6161-8f26-42f6-89ff-545e6011e501", + "version": "1.1.2", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "controlnet", + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "control_model": { + "name": "control_model", + "label": "Control Model (select canny)", + "value": { + "key": "5bdaacf7-a7a3-4fb8-b394-cc0ffbb8941d", + "hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e", + "name": "sd-controlnet-canny", + "base": "sd-1", + "type": "controlnet" + } + }, + "control_weight": { + "name": "control_weight", + "label": "", + "value": 0.5 + }, + "begin_step_percent": { + "name": "begin_step_percent", + "label": "", + "value": 0 + }, + "end_step_percent": { + "name": "end_step_percent", + "label": "", + "value": 0.5 + }, + "control_mode": { + "name": "control_mode", + "label": "", + "value": "balanced" + }, + "resize_mode": { + "name": "resize_mode", + "label": "", + "value": "just_resize" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 3950, + "y": 150 + } + }, + { + "id": "22b750db-b85e-486b-b278-ac983e329813", + "type": "invocation", + "data": { + "id": "22b750db-b85e-486b-b278-ac983e329813", + "version": "1.4.1", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "ip_adapter", + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "ip_adapter_model": { + "name": "ip_adapter_model", + "label": "IP-Adapter Model (select IP Adapter Face)", + "value": { + "key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e", + "hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5", + "name": "ip_adapter_sd15", + "base": "sd-1", + "type": "ip_adapter" + } + }, + "clip_vision_model": { + "name": "clip_vision_model", + "label": "", + "value": "ViT-H" + }, + "weight": { + "name": "weight", + "label": "", + "value": 0.5 + }, + "method": { + "name": "method", + "label": "", + "value": "full" + }, + "begin_step_percent": { + "name": "begin_step_percent", + "label": "", + "value": 0 + }, + "end_step_percent": { + "name": "end_step_percent", + "label": "", + "value": 0.8 + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 3575, + "y": -200 + } + }, + { + "id": "f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65", + "type": "invocation", + "data": { + "id": "f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65", + "version": "1.2.0", "nodePack": "invokeai", "label": "", "notes": "", @@ -60,6 +263,140 @@ "clip": { "name": "clip", "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 2550, + "y": -525 + } + }, + { + "id": "2224ed72-2453-4252-bd89-3085240e0b6f", + "type": "invocation", + "data": { + "id": "2224ed72-2453-4252-bd89-3085240e0b6f", + "version": "1.3.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "l2i", + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "latents": { + "name": "latents", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": true + } + }, + "isOpen": true, + "isIntermediate": false, + "useCache": true + }, + "position": { + "x": 4980.1395106966565, + "y": 
-255.9158921745602 + } + }, + { + "id": "de8b1a48-a2e4-42ca-90bb-66058bffd534", + "type": "invocation", + "data": { + "id": "de8b1a48-a2e4-42ca-90bb-66058bffd534", + "version": "1.1.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "i2l", + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": true + } + }, + "isOpen": false, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 3100, + "y": -275 + } + }, + { + "id": "44f2c190-eb03-460d-8d11-a94d13b33f19", + "type": "invocation", + "data": { + "id": "44f2c190-eb03-460d-8d11-a94d13b33f19", + "version": "1.2.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "compel", + "inputs": { + "prompt": { + "name": "prompt", + "label": "", + "value": "" + }, + "clip": { + "name": "clip", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" } }, "isOpen": true, @@ -251,45 +588,6 @@ "y": 0 } }, - { - "id": "de8b1a48-a2e4-42ca-90bb-66058bffd534", - "type": "invocation", - "data": { - "id": "de8b1a48-a2e4-42ca-90bb-66058bffd534", - "version": "1.0.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "i2l", - "inputs": { - "image": { - "name": "image", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": true - } - }, - "isOpen": false, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 3100, - "y": -275 - } - }, { "id": "bd06261d-a74a-4d1f-8374-745ed6194bc2", "type": "invocation", @@ -418,53 +716,6 @@ "y": -175 } }, - { - "id": "2224ed72-2453-4252-bd89-3085240e0b6f", - "type": "invocation", - "data": { - "id": "2224ed72-2453-4252-bd89-3085240e0b6f", - "version": "1.2.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "l2i", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "latents": { - "name": "latents", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": true - } - }, - "isOpen": true, - "isIntermediate": false, - "useCache": true - }, - "position": { - "x": 4980.1395106966565, - "y": -255.9158921745602 - } - }, { "id": "2974e5b3-3d41-4b6f-9953-cd21e8f3a323", "type": "invocation", @@ -692,201 +943,6 @@ "y": -275 } }, - { - "id": "f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65", - "type": "invocation", - "data": { - "id": "f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65", - "version": "1.1.1", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "compel", - "inputs": { - "prompt": { - "name": "prompt", - "label": "", - "value": "" - }, - "clip": { - "name": "clip", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 2550, - "y": -525 - } - }, - { - "id": "22b750db-b85e-486b-b278-ac983e329813", - "type": "invocation", - "data": { - "id": "22b750db-b85e-486b-b278-ac983e329813", - "version": "1.2.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "ip_adapter", - "inputs": { - "image": { - "name": "image", - "label": "" - }, - "ip_adapter_model": { - "name": 
"ip_adapter_model", - "label": "IP-Adapter Model (select IP Adapter Face)", - "value": { - "key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e", - "hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5", - "name": "ip_adapter_sd15", - "base": "sd-1", - "type": "ip_adapter" - } - }, - "weight": { - "name": "weight", - "label": "", - "value": 0.5 - }, - "begin_step_percent": { - "name": "begin_step_percent", - "label": "", - "value": 0 - }, - "end_step_percent": { - "name": "end_step_percent", - "label": "", - "value": 0.8 - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 3575, - "y": -200 - } - }, - { - "id": "f60b6161-8f26-42f6-89ff-545e6011e501", - "type": "invocation", - "data": { - "id": "f60b6161-8f26-42f6-89ff-545e6011e501", - "version": "1.1.1", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "controlnet", - "inputs": { - "image": { - "name": "image", - "label": "" - }, - "control_model": { - "name": "control_model", - "label": "Control Model (select canny)", - "value": { - "key": "5bdaacf7-a7a3-4fb8-b394-cc0ffbb8941d", - "hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e", - "name": "sd-controlnet-canny", - "base": "sd-1", - "type": "controlnet" - } - }, - "control_weight": { - "name": "control_weight", - "label": "", - "value": 0.5 - }, - "begin_step_percent": { - "name": "begin_step_percent", - "label": "", - "value": 0 - }, - "end_step_percent": { - "name": "end_step_percent", - "label": "", - "value": 0.5 - }, - "control_mode": { - "name": "control_mode", - "label": "", - "value": "balanced" - }, - "resize_mode": { - "name": "resize_mode", - "label": "", - "value": "just_resize" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 3950, - "y": 150 - } - }, - { - "id": "8fe598c6-d447-44fa-a165-4975af77d080", - "type": "invocation", - "data": { - "id": "8fe598c6-d447-44fa-a165-4975af77d080", - "version": "1.3.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "canny_image_processor", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "image": { - "name": "image", - "label": "" - }, - "detect_resolution": { - "name": "detect_resolution", - "label": "", - "value": 512 - }, - "image_resolution": { - "name": "image_resolution", - "label": "", - "value": 512 - }, - "low_threshold": { - "name": "low_threshold", - "label": "", - "value": 100 - }, - "high_threshold": { - "name": "high_threshold", - "label": "", - "value": 200 - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 3519.4131037388597, - "y": 576.7946795840575 - } - }, { "id": "4bd4ae80-567f-4366-b8c6-3bb06f4fb46a", "type": "invocation", @@ -1035,30 +1091,6 @@ "x": 2578.2364832140506, "y": 78.7948456497351 } - }, - { - "id": "c6359181-6479-40ec-bf3a-b7e8451683b8", - "type": "invocation", - "data": { - "id": "c6359181-6479-40ec-bf3a-b7e8451683b8", - "version": "1.0.2", - "label": "", - "notes": "", - "type": "main_model_loader", - "inputs": { - "model": { - "name": "model", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 2031.5518710051792, - "y": -492.1742944307074 - } } ], "edges": [ diff --git a/invokeai/app/services/workflow_records/default_workflows/Multi ControlNet (Canny & Depth).json b/invokeai/app/services/workflow_records/default_workflows/Multi ControlNet 
(Canny & Depth).json index d859094216..3ff99b5eb3 100644 --- a/invokeai/app/services/workflow_records/default_workflows/Multi ControlNet (Canny & Depth).json +++ b/invokeai/app/services/workflow_records/default_workflows/Multi ControlNet (Canny & Depth).json @@ -2,7 +2,7 @@ "name": "Multi ControlNet (Canny & Depth)", "author": "InvokeAI", "description": "A sample workflow using canny & depth ControlNets to guide the generation process. ", - "version": "2.0.0", + "version": "2.1.0", "contact": "invoke@invoke.ai", "tags": "ControlNet, canny, depth", "notes": "", @@ -37,140 +37,104 @@ } ], "meta": { - "category": "default", - "version": "3.0.0" + "version": "3.0.0", + "category": "default" }, "nodes": [ { - "id": "8e860e51-5045-456e-bf04-9a62a2a5c49e", + "id": "9db25398-c869-4a63-8815-c6559341ef12", "type": "invocation", "data": { - "id": "8e860e51-5045-456e-bf04-9a62a2a5c49e", - "version": "1.0.2", + "id": "9db25398-c869-4a63-8815-c6559341ef12", + "version": "1.3.0", "nodePack": "invokeai", "label": "", "notes": "", - "type": "image", + "type": "l2i", "inputs": { - "image": { - "name": "image", - "label": "Depth Input Image" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 3666.135718057363, - "y": 186.66887319822808 - } - }, - { - "id": "a33199c2-8340-401e-b8a2-42ffa875fc1c", - "type": "invocation", - "data": { - "id": "a33199c2-8340-401e-b8a2-42ffa875fc1c", - "version": "1.1.1", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "controlnet", - "inputs": { - "image": { - "name": "image", + "board": { + "name": "board", "label": "" }, - "control_model": { - "name": "control_model", - "label": "Control Model (select depth)", - "value": { - "key": "87e8855c-671f-4c9e-bbbb-8ed47ccb4aac", - "hash": "blake3:2550bf22a53942dfa28ab2fed9d10d80851112531f44d977168992edf9d0534c", - "name": "control_v11f1p_sd15_depth", - "base": "sd-1", - "type": "controlnet" - } + "metadata": { + "name": "metadata", + "label": "" }, - "control_weight": { - "name": "control_weight", + "latents": { + "name": "latents", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", "label": "", - "value": 1 + "value": false }, - "begin_step_percent": { - "name": "begin_step_percent", + "tile_size": { + "name": "tile_size", "label": "", "value": 0 }, - "end_step_percent": { - "name": "end_step_percent", + "fp32": { + "name": "fp32", "label": "", - "value": 1 - }, - "control_mode": { - "name": "control_mode", - "label": "", - "value": "balanced" - }, - "resize_mode": { - "name": "resize_mode", - "label": "", - "value": "just_resize" + "value": false } }, "isOpen": true, - "isIntermediate": true, + "isIntermediate": false, "useCache": true }, "position": { - "x": 4477.604342844504, - "y": -49.39005411272677 - } - }, - { - "id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1", - "type": "invocation", - "data": { - "id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1", - "version": "1.1.1", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "compel", - "inputs": { - "prompt": { - "name": "prompt", - "label": "Negative Prompt", - "value": "" - }, - "clip": { - "name": "clip", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 4075, + "x": 5675, "y": -825 } }, { - "id": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "id": "c826ba5e-9676-4475-b260-07b85e88753c", "type": "invocation", "data": { - "id": "54486974-835b-4d81-8f82-05f9f32ce9e9", - "version": "1.0.2", + "id": 
"c826ba5e-9676-4475-b260-07b85e88753c", + "version": "1.3.3", "nodePack": "invokeai", "label": "", "notes": "", - "type": "main_model_loader", + "type": "canny_image_processor", "inputs": { - "model": { - "name": "model", + "board": { + "name": "board", "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "image": { + "name": "image", + "label": "" + }, + "detect_resolution": { + "name": "detect_resolution", + "label": "", + "value": 512 + }, + "image_resolution": { + "name": "image_resolution", + "label": "", + "value": 512 + }, + "low_threshold": { + "name": "low_threshold", + "label": "", + "value": 100 + }, + "high_threshold": { + "name": "high_threshold", + "label": "", + "value": 200 } }, "isOpen": true, @@ -178,29 +142,52 @@ "useCache": true }, "position": { - "x": 3600, - "y": -1000 + "x": 4095.757337055795, + "y": -455.63440891935863 } }, { - "id": "7ce68934-3419-42d4-ac70-82cfc9397306", + "id": "018b1214-c2af-43a7-9910-fb687c6726d7", "type": "invocation", "data": { - "id": "7ce68934-3419-42d4-ac70-82cfc9397306", - "version": "1.1.1", + "id": "018b1214-c2af-43a7-9910-fb687c6726d7", + "version": "1.2.4", "nodePack": "invokeai", "label": "", "notes": "", - "type": "compel", + "type": "midas_depth_image_processor", "inputs": { - "prompt": { - "name": "prompt", - "label": "Positive Prompt", - "value": "" - }, - "clip": { - "name": "clip", + "board": { + "name": "board", "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "image": { + "name": "image", + "label": "" + }, + "a_mult": { + "name": "a_mult", + "label": "", + "value": 2 + }, + "bg_th": { + "name": "bg_th", + "label": "", + "value": 0.1 + }, + "detect_resolution": { + "name": "detect_resolution", + "label": "", + "value": 512 + }, + "image_resolution": { + "name": "image_resolution", + "label": "", + "value": 512 } }, "isOpen": true, @@ -208,8 +195,8 @@ "useCache": true }, "position": { - "x": 4075, - "y": -1125 + "x": 4082.783145980783, + "y": 0.01629251229994111 } }, { @@ -217,7 +204,7 @@ "type": "invocation", "data": { "id": "d204d184-f209-4fae-a0a1-d152800844e1", - "version": "1.1.1", + "version": "1.1.2", "nodePack": "invokeai", "label": "", "notes": "", @@ -273,6 +260,185 @@ "y": -618.4221638099414 } }, + { + "id": "7ce68934-3419-42d4-ac70-82cfc9397306", + "type": "invocation", + "data": { + "id": "7ce68934-3419-42d4-ac70-82cfc9397306", + "version": "1.2.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "compel", + "inputs": { + "prompt": { + "name": "prompt", + "label": "Positive Prompt", + "value": "" + }, + "clip": { + "name": "clip", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 4075, + "y": -1125 + } + }, + { + "id": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "type": "invocation", + "data": { + "id": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "version": "1.0.3", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "main_model_loader", + "inputs": { + "model": { + "name": "model", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 3600, + "y": -1000 + } + }, + { + "id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1", + "type": "invocation", + "data": { + "id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1", + "version": "1.2.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "compel", + "inputs": { + "prompt": { + "name": "prompt", + "label": "Negative 
Prompt", + "value": "" + }, + "clip": { + "name": "clip", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 4075, + "y": -825 + } + }, + { + "id": "a33199c2-8340-401e-b8a2-42ffa875fc1c", + "type": "invocation", + "data": { + "id": "a33199c2-8340-401e-b8a2-42ffa875fc1c", + "version": "1.1.2", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "controlnet", + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "control_model": { + "name": "control_model", + "label": "Control Model (select depth)", + "value": { + "key": "87e8855c-671f-4c9e-bbbb-8ed47ccb4aac", + "hash": "blake3:2550bf22a53942dfa28ab2fed9d10d80851112531f44d977168992edf9d0534c", + "name": "control_v11f1p_sd15_depth", + "base": "sd-1", + "type": "controlnet" + } + }, + "control_weight": { + "name": "control_weight", + "label": "", + "value": 1 + }, + "begin_step_percent": { + "name": "begin_step_percent", + "label": "", + "value": 0 + }, + "end_step_percent": { + "name": "end_step_percent", + "label": "", + "value": 1 + }, + "control_mode": { + "name": "control_mode", + "label": "", + "value": "balanced" + }, + "resize_mode": { + "name": "resize_mode", + "label": "", + "value": "just_resize" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 4477.604342844504, + "y": -49.39005411272677 + } + }, + { + "id": "8e860e51-5045-456e-bf04-9a62a2a5c49e", + "type": "invocation", + "data": { + "id": "8e860e51-5045-456e-bf04-9a62a2a5c49e", + "version": "1.0.2", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "image", + "inputs": { + "image": { + "name": "image", + "label": "Depth Input Image" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 3666.135718057363, + "y": 186.66887319822808 + } + }, { "id": "c4b23e64-7986-40c4-9cad-46327b12e204", "type": "invocation", @@ -322,159 +488,6 @@ "y": -575 } }, - { - "id": "018b1214-c2af-43a7-9910-fb687c6726d7", - "type": "invocation", - "data": { - "id": "018b1214-c2af-43a7-9910-fb687c6726d7", - "version": "1.2.3", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "midas_depth_image_processor", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "image": { - "name": "image", - "label": "" - }, - "a_mult": { - "name": "a_mult", - "label": "", - "value": 2 - }, - "bg_th": { - "name": "bg_th", - "label": "", - "value": 0.1 - }, - "detect_resolution": { - "name": "detect_resolution", - "label": "", - "value": 512 - }, - "image_resolution": { - "name": "image_resolution", - "label": "", - "value": 512 - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 4082.783145980783, - "y": 0.01629251229994111 - } - }, - { - "id": "c826ba5e-9676-4475-b260-07b85e88753c", - "type": "invocation", - "data": { - "id": "c826ba5e-9676-4475-b260-07b85e88753c", - "version": "1.3.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "canny_image_processor", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "image": { - "name": "image", - "label": "" - }, - "detect_resolution": { - "name": "detect_resolution", - "label": "", - "value": 512 - }, - "image_resolution": { - "name": "image_resolution", - "label": "", - "value": 512 - }, - "low_threshold": { - 
"name": "low_threshold", - "label": "", - "value": 100 - }, - "high_threshold": { - "name": "high_threshold", - "label": "", - "value": 200 - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 4095.757337055795, - "y": -455.63440891935863 - } - }, - { - "id": "9db25398-c869-4a63-8815-c6559341ef12", - "type": "invocation", - "data": { - "id": "9db25398-c869-4a63-8815-c6559341ef12", - "version": "1.2.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "l2i", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "latents": { - "name": "latents", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": false - } - }, - "isOpen": true, - "isIntermediate": false, - "useCache": true - }, - "position": { - "x": 5675, - "y": -825 - } - }, { "id": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce", "type": "invocation", diff --git a/invokeai/app/services/workflow_records/default_workflows/MultiDiffusion SDXL (Beta).json b/invokeai/app/services/workflow_records/default_workflows/MultiDiffusion SDXL (Beta).json new file mode 100644 index 0000000000..b2842315c4 --- /dev/null +++ b/invokeai/app/services/workflow_records/default_workflows/MultiDiffusion SDXL (Beta).json @@ -0,0 +1,2181 @@ +{ + "name": "MultiDiffusion SDXL (Beta)", + "author": "Invoke", + "description": "A workflow to upscale an input image with tiled upscaling, using SDXL based models.", + "version": "1.0.0", + "contact": "invoke@invoke.ai", + "tags": "tiled, upscaling, sdxl", + "notes": "", + "exposedFields": [ + { + "nodeId": "1ba845a6-eb88-49a1-a490-5fe6754f3ec9", + "fieldName": "value" + }, + { + "nodeId": "c3b60a50-8039-4924-90e3-8c608e1fecb5", + "fieldName": "board" + }, + { + "nodeId": "5ca87ace-edf9-49c7-a424-cd42416b86a7", + "fieldName": "image" + }, + { + "nodeId": "1dd915a3-6756-48ed-b68b-ee3b4bd06c1d", + "fieldName": "a" + }, + { + "nodeId": "696de0e1-cdd2-42e8-abeb-57a926bc6df6", + "fieldName": "a" + }, + { + "nodeId": "bd094e2f-41e5-4b61-9f7b-56cf337d53fa", + "fieldName": "a" + }, + { + "nodeId": "e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fd", + "fieldName": "model" + }, + { + "nodeId": "f0cd0d2f-9614-43f7-9944-a75b8d5ccd65", + "fieldName": "model_name" + }, + { + "nodeId": "c26bff37-4f12-482f-ba45-3a5d729b4c4f", + "fieldName": "value" + }, + { + "nodeId": "f5ca24ee-21c5-4c8c-8d3c-371b5079b086", + "fieldName": "value" + }, + { + "nodeId": "094bc4ed-5c68-4342-84f4-51056c755796", + "fieldName": "value" + }, + { + "nodeId": "100b3143-b3fb-4ff3-bb3c-8d4d3f89ae3a", + "fieldName": "vae_model" + }, + { + "nodeId": "f936ebb3-6902-4df9-a775-6a68bac2da70", + "fieldName": "model" + } + ], + "meta": { + "version": "3.0.0", + "category": "default" + }, + "nodes": [ + { + "id": "f936ebb3-6902-4df9-a775-6a68bac2da70", + "type": "invocation", + "data": { + "id": "f936ebb3-6902-4df9-a775-6a68bac2da70", + "type": "model_identifier", + "version": "1.0.0", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "model": { + "name": "model", + "label": "ControlNet Model - choose xinsir's tile ControlNet", + "value": { + "key": "845b6959-1657-4164-be33-fe0f63ad1752", + "hash": "random:3b602344599a53b4e4c80a2259362e122543e6f9e8e428be76ab910f9368704b", + "name": "controlnet-tile-sdxl-1.0", + "base": "sdxl", + "type": "controlnet" + } + } + } + }, + 
"position": { + "x": -3983.6167650620723, + "y": -1329.1431151846386 + } + }, + { + "id": "00239057-20d4-4cd2-a010-28727b256ea2", + "type": "invocation", + "data": { + "id": "00239057-20d4-4cd2-a010-28727b256ea2", + "type": "rand_int", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": false, + "inputs": { + "low": { + "name": "low", + "label": "", + "value": 0 + }, + "high": { + "name": "high", + "label": "", + "value": 2147483647 + } + } + }, + "position": { + "x": -4000, + "y": -1800 + } + }, + { + "id": "094bc4ed-5c68-4342-84f4-51056c755796", + "type": "invocation", + "data": { + "id": "094bc4ed-5c68-4342-84f4-51056c755796", + "type": "boolean", + "version": "1.0.1", + "label": "Tiled Option", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "value": { + "name": "value", + "label": "Tiled VAE (Saves VRAM, Color Inconsistency)", + "value": false + } + } + }, + "position": { + "x": -2746.0467136971292, + "y": -2219.070070545694 + } + }, + { + "id": "f5ca24ee-21c5-4c8c-8d3c-371b5079b086", + "type": "invocation", + "data": { + "id": "f5ca24ee-21c5-4c8c-8d3c-371b5079b086", + "type": "string", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "value": { + "name": "value", + "label": "Negative Prompt (Optional)", + "value": "" + } + } + }, + "position": { + "x": -3525, + "y": -2525 + } + }, + { + "id": "c26bff37-4f12-482f-ba45-3a5d729b4c4f", + "type": "invocation", + "data": { + "id": "c26bff37-4f12-482f-ba45-3a5d729b4c4f", + "type": "string", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "value": { + "name": "value", + "label": "Positive Prompt (Optional)", + "value": "" + } + } + }, + "position": { + "x": -3525, + "y": -2825 + } + }, + { + "id": "6daa9526-382b-491d-964f-f53fc308664f", + "type": "invocation", + "data": { + "id": "6daa9526-382b-491d-964f-f53fc308664f", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "ADD" + }, + "a": { + "name": "a", + "label": "", + "value": 0.35 + }, + "b": { + "name": "b", + "label": "", + "value": 100 + } + } + }, + "position": { + "x": -3500, + "y": -1450 + } + }, + { + "id": "f1afd295-860f-48b6-a76a-90609bf2cc11", + "type": "invocation", + "data": { + "id": "f1afd295-860f-48b6-a76a-90609bf2cc11", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "MUL" + }, + "a": { + "name": "a", + "label": "", + "value": 1 + }, + "b": { + "name": "b", + "label": "", + "value": 0.013 + } + } + }, + "position": { + "x": -3500, + "y": -1550 + } + }, + { + "id": "88ae723e-4933-4371-b52d-3ada52a59d36", + "type": "invocation", + "data": { + "id": "88ae723e-4933-4371-b52d-3ada52a59d36", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "ADD" + }, + "a": { + "name": "a", + "label": "", + "value": 0 + }, + "b": { + "name": "b", + "label": "", + "value": 100 + } + } + }, + "position": { + "x": -3500, + 
"y": -1500 + } + }, + { + "id": "1dd915a3-6756-48ed-b68b-ee3b4bd06c1d", + "type": "invocation", + "data": { + "id": "1dd915a3-6756-48ed-b68b-ee3b4bd06c1d", + "type": "float_math", + "version": "1.0.1", + "label": "Creativity Input", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "MUL" + }, + "a": { + "name": "a", + "label": "Creativity Control (-10 to 10)", + "value": 5 + }, + "b": { + "name": "b", + "label": "", + "value": -1 + } + } + }, + "position": { + "x": -3500, + "y": -2125 + } + }, + { + "id": "c8f5c671-8c87-4d96-a75e-a9937ac6bc03", + "type": "invocation", + "data": { + "id": "c8f5c671-8c87-4d96-a75e-a9937ac6bc03", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "DIV" + }, + "a": { + "name": "a", + "label": "", + "value": 1 + }, + "b": { + "name": "b", + "label": "", + "value": 100 + } + } + }, + "position": { + "x": -3500, + "y": -1975 + } + }, + { + "id": "14e65dbe-4249-4b25-9a63-3a10cfaeb61c", + "type": "invocation", + "data": { + "id": "14e65dbe-4249-4b25-9a63-3a10cfaeb61c", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "ADD" + }, + "a": { + "name": "a", + "label": "A", + "value": 0 + }, + "b": { + "name": "b", + "label": "", + "value": 10 + } + } + }, + "position": { + "x": -3500, + "y": -2075 + } + }, + { + "id": "49a8cc12-aa19-48c5-b6b3-04e0b603b384", + "type": "invocation", + "data": { + "id": "49a8cc12-aa19-48c5-b6b3-04e0b603b384", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "MUL" + }, + "a": { + "name": "a", + "label": "", + "value": 1 + }, + "b": { + "name": "b", + "label": "", + "value": 4.99 + } + } + }, + "position": { + "x": -3500, + "y": -2025 + } + }, + { + "id": "e4d5ca7c-8fcf-4c59-9c58-67194c80dc73", + "type": "invocation", + "data": { + "id": "e4d5ca7c-8fcf-4c59-9c58-67194c80dc73", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "ADD" + }, + "a": { + "name": "a", + "label": "", + "value": 0 + }, + "b": { + "name": "b", + "label": "", + "value": 1 + } + } + }, + "position": { + "x": -3500, + "y": -1925 + } + }, + { + "id": "696de0e1-cdd2-42e8-abeb-57a926bc6df6", + "type": "invocation", + "data": { + "id": "696de0e1-cdd2-42e8-abeb-57a926bc6df6", + "type": "float_math", + "version": "1.0.1", + "label": "Sharpness Input", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "ADD" + }, + "a": { + "name": "a", + "label": "Sharpness Control (-10 to 10)", + "value": 0 + }, + "b": { + "name": "b", + "label": "", + "value": 10 + } + } + }, + "position": { + "x": -4750, + "y": -2275 + } + }, + { + "id": "79390b60-4077-4f94-ad0a-4229cc73ddb2", + "type": "invocation", + "data": { + "id": "79390b60-4077-4f94-ad0a-4229cc73ddb2", + "type": "float_math", + "version": "1.0.1", + 
"label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "MUL" + }, + "a": { + "name": "a", + "label": "", + "value": 1 + }, + "b": { + "name": "b", + "label": "", + "value": 3.75 + } + } + }, + "position": { + "x": -4750, + "y": -2000 + } + }, + { + "id": "4950132a-2d06-4571-b2c0-55cb37a31e9b", + "type": "invocation", + "data": { + "id": "4950132a-2d06-4571-b2c0-55cb37a31e9b", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "ADD" + }, + "a": { + "name": "a", + "label": "", + "value": 25 + }, + "b": { + "name": "b", + "label": "", + "value": 1 + } + } + }, + "position": { + "x": -4750, + "y": -1950 + } + }, + { + "id": "bd094e2f-41e5-4b61-9f7b-56cf337d53fa", + "type": "invocation", + "data": { + "id": "bd094e2f-41e5-4b61-9f7b-56cf337d53fa", + "type": "float_math", + "version": "1.0.1", + "label": "Structural Input", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "ADD" + }, + "a": { + "name": "a", + "label": "Structural Control (-10 to 10)", + "value": 0 + }, + "b": { + "name": "b", + "label": "", + "value": 10 + } + } + }, + "position": { + "x": -3500, + "y": -1700 + } + }, + { + "id": "bc53651f-208b-440c-be30-f93f72ae700e", + "type": "invocation", + "data": { + "id": "bc53651f-208b-440c-be30-f93f72ae700e", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "MUL" + }, + "a": { + "name": "a", + "label": "", + "value": 1 + }, + "b": { + "name": "b", + "label": "", + "value": 0.025 + } + } + }, + "position": { + "x": -3500, + "y": -1650 + } + }, + { + "id": "67346654-cac0-446a-8cde-9af4b5a029a6", + "type": "invocation", + "data": { + "id": "67346654-cac0-446a-8cde-9af4b5a029a6", + "type": "float_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "ADD" + }, + "a": { + "name": "a", + "label": "", + "value": 0.3 + }, + "b": { + "name": "b", + "label": "", + "value": 1 + } + } + }, + "position": { + "x": -3500, + "y": -1600 + } + }, + { + "id": "6636a27a-f130-4a13-b3e5-50b44e4a566f", + "type": "invocation", + "data": { + "id": "6636a27a-f130-4a13-b3e5-50b44e4a566f", + "type": "collect", + "version": "1.0.0", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "item": { + "name": "item", + "label": "" + } + } + }, + "position": { + "x": -3125, + "y": -1500 + } + }, + { + "id": "b78f53b6-2eae-4956-97b4-7e73768d1491", + "type": "invocation", + "data": { + "id": "b78f53b6-2eae-4956-97b4-7e73768d1491", + "type": "controlnet", + "version": "1.1.2", + "label": "ControlNet (use xinsir's tile ControlNet)", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "control_model": { + "name": "control_model", + "label": "" + }, + "control_weight": { + "name": "control_weight", + "label": "", + "value": 0.6 + }, + "begin_step_percent": { + "name": 
"begin_step_percent", + "label": "", + "value": 0 + }, + "end_step_percent": { + "name": "end_step_percent", + "label": "", + "value": 0.5 + }, + "control_mode": { + "name": "control_mode", + "label": "", + "value": "balanced" + }, + "resize_mode": { + "name": "resize_mode", + "label": "", + "value": "just_resize" + } + } + }, + "position": { + "x": -3493.4229674963885, + "y": -1359.2223984776113 + } + }, + { + "id": "27215391-b20e-412a-b854-7fa5927f5437", + "type": "invocation", + "data": { + "id": "27215391-b20e-412a-b854-7fa5927f5437", + "type": "sdxl_compel_prompt", + "version": "1.2.0", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "prompt": { + "name": "prompt", + "label": "", + "value": "" + }, + "style": { + "name": "style", + "label": "", + "value": "" + }, + "original_width": { + "name": "original_width", + "label": "", + "value": 4096 + }, + "original_height": { + "name": "original_height", + "label": "", + "value": 4096 + }, + "crop_top": { + "name": "crop_top", + "label": "", + "value": 0 + }, + "crop_left": { + "name": "crop_left", + "label": "", + "value": 0 + }, + "target_width": { + "name": "target_width", + "label": "", + "value": 1024 + }, + "target_height": { + "name": "target_height", + "label": "", + "value": 1024 + }, + "clip": { + "name": "clip", + "label": "" + }, + "clip2": { + "name": "clip2", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + } + }, + "position": { + "x": -3525, + "y": -2300 + } + }, + { + "id": "100b3143-b3fb-4ff3-bb3c-8d4d3f89ae3a", + "type": "invocation", + "data": { + "id": "100b3143-b3fb-4ff3-bb3c-8d4d3f89ae3a", + "type": "vae_loader", + "version": "1.0.3", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "vae_model": { + "name": "vae_model", + "label": "", + "value": { + "key": "4bc2bddf-94d9-4efe-a8e2-5eda28710f4c", + "hash": "random:67e47a77a1fcef9c0f5cd5d889d71c191f07383a0bf587f1849b2bc3f359440a", + "name": "sdxl-vae-fp16-fix", + "base": "sdxl", + "type": "vae" + } + } + } + }, + "position": { + "x": -4000, + "y": -2575 + } + }, + { + "id": "e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fd", + "type": "invocation", + "data": { + "id": "e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fd", + "type": "sdxl_model_loader", + "version": "1.0.3", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "model": { + "name": "model", + "label": "SDXL Model" + } + } + }, + "position": { + "x": -4000, + "y": -2825 + } + }, + { + "id": "6142b69a-323f-4ecd-a7e5-67dc61349c51", + "type": "invocation", + "data": { + "id": "6142b69a-323f-4ecd-a7e5-67dc61349c51", + "type": "sdxl_compel_prompt", + "version": "1.2.0", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "prompt": { + "name": "prompt", + "label": "", + "value": "" + }, + "style": { + "name": "style", + "label": "", + "value": "" + }, + "original_width": { + "name": "original_width", + "label": "", + "value": 4096 + }, + "original_height": { + "name": "original_height", + "label": "", + "value": 4096 + }, + "crop_top": { + "name": "crop_top", + "label": "", + "value": 0 + }, + "crop_left": { + "name": "crop_left", + "label": "", + "value": 0 + }, + "target_width": { + "name": "target_width", + "label": "", + "value": 1024 + }, + "target_height": { + "name": "target_height", + "label": "", + "value": 1024 + }, + "clip": { + "name": "clip", + "label": "" + }, + 
"clip2": { + "name": "clip2", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + } + }, + "position": { + "x": -3525, + "y": -2600 + } + }, + { + "id": "041c59cc-f9e4-4dc9-8b31-84648c5f3ebe", + "type": "invocation", + "data": { + "id": "041c59cc-f9e4-4dc9-8b31-84648c5f3ebe", + "type": "unsharp_mask", + "version": "1.2.2", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "image": { + "name": "image", + "label": "" + }, + "radius": { + "name": "radius", + "label": "", + "value": 2 + }, + "strength": { + "name": "strength", + "label": "", + "value": 50 + } + } + }, + "position": { + "x": -4400, + "y": -1875 + } + }, + { + "id": "53c2d5fd-863d-4950-93e0-628f3d61b493", + "type": "invocation", + "data": { + "id": "53c2d5fd-863d-4950-93e0-628f3d61b493", + "type": "unsharp_mask", + "version": "1.2.2", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "image": { + "name": "image", + "label": "" + }, + "radius": { + "name": "radius", + "label": "", + "value": 2 + }, + "strength": { + "name": "strength", + "label": "", + "value": 50 + } + } + }, + "position": { + "x": -4750, + "y": -1875 + } + }, + { + "id": "117f982a-03da-49b1-bf9f-29711160ac02", + "type": "invocation", + "data": { + "id": "117f982a-03da-49b1-bf9f-29711160ac02", + "type": "i2l", + "version": "1.1.0", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": false + } + } + }, + "position": { + "x": -4000, + "y": -1875 + } + }, + { + "id": "c3b60a50-8039-4924-90e3-8c608e1fecb5", + "type": "invocation", + "data": { + "id": "c3b60a50-8039-4924-90e3-8c608e1fecb5", + "type": "l2i", + "version": "1.3.0", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": false, + "useCache": true, + "inputs": { + "board": { + "name": "board", + "label": "Output Board" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "latents": { + "name": "latents", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": false + } + } + }, + "position": { + "x": -2750, + "y": -2575 + } + }, + { + "id": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "type": "invocation", + "data": { + "id": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "type": "tiled_multi_diffusion_denoise_latents", + "version": "1.0.0", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "positive_conditioning": { + "name": "positive_conditioning", + "label": "" + }, + "negative_conditioning": { + "name": "negative_conditioning", + "label": "" + }, + "noise": { + "name": "noise", + "label": "" + }, + "latents": { + "name": "latents", + "label": "" + }, + "tile_height": { + "name": "tile_height", + "label": "", + "value": 1024 + }, 
+ "tile_width": { + "name": "tile_width", + "label": "", + "value": 1024 + }, + "tile_overlap": { + "name": "tile_overlap", + "label": "", + "value": 128 + }, + "steps": { + "name": "steps", + "label": "", + "value": 25 + }, + "cfg_scale": { + "name": "cfg_scale", + "label": "", + "value": 5 + }, + "denoising_start": { + "name": "denoising_start", + "label": "", + "value": 0.6 + }, + "denoising_end": { + "name": "denoising_end", + "label": "", + "value": 1 + }, + "scheduler": { + "name": "scheduler", + "label": "", + "value": "kdpm_2" + }, + "unet": { + "name": "unet", + "label": "" + }, + "cfg_rescale_multiplier": { + "name": "cfg_rescale_multiplier", + "label": "", + "value": 0 + }, + "control": { + "name": "control", + "label": "" + } + } + }, + "position": { + "x": -3125, + "y": -2575 + } + }, + { + "id": "1ba845a6-eb88-49a1-a490-5fe6754f3ec9", + "type": "invocation", + "data": { + "id": "1ba845a6-eb88-49a1-a490-5fe6754f3ec9", + "type": "integer", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "value": { + "name": "value", + "label": "Scale (2x, 4x)", + "value": 2 + } + } + }, + "position": { + "x": -4400, + "y": -2175 + } + }, + { + "id": "d350feac-9686-4e0d-bd46-a96bd2630818", + "type": "invocation", + "data": { + "id": "d350feac-9686-4e0d-bd46-a96bd2630818", + "type": "integer_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "MUL" + }, + "a": { + "name": "a", + "label": "", + "value": 1 + }, + "b": { + "name": "b", + "label": "", + "value": 1 + } + } + }, + "position": { + "x": -4400, + "y": -1950 + } + }, + { + "id": "5b256f14-caab-40ff-b8f0-9679cd542163", + "type": "invocation", + "data": { + "id": "5b256f14-caab-40ff-b8f0-9679cd542163", + "type": "integer_math", + "version": "1.0.1", + "label": "", + "notes": "", + "isOpen": false, + "isIntermediate": true, + "useCache": true, + "inputs": { + "operation": { + "name": "operation", + "label": "", + "value": "MUL" + }, + "a": { + "name": "a", + "label": "", + "value": 1 + }, + "b": { + "name": "b", + "label": "", + "value": 1 + } + } + }, + "position": { + "x": -4400, + "y": -2000 + } + }, + { + "id": "7671553a-cd4b-4e25-8332-9d5667e64493", + "type": "invocation", + "data": { + "id": "7671553a-cd4b-4e25-8332-9d5667e64493", + "type": "img_resize", + "version": "1.2.2", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "image": { + "name": "image", + "label": "" + }, + "width": { + "name": "width", + "label": "", + "value": 512 + }, + "height": { + "name": "height", + "label": "", + "value": 512 + }, + "resample_mode": { + "name": "resample_mode", + "label": "", + "value": "lanczos" + } + } + }, + "position": { + "x": -4375, + "y": -1825 + } + }, + { + "id": "be4082d6-e238-40ea-a9df-fc0d725e8895", + "type": "invocation", + "data": { + "id": "be4082d6-e238-40ea-a9df-fc0d725e8895", + "type": "controlnet", + "version": "1.1.2", + "label": "ControlNet (use xinsir's tile ControlNet)", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "control_model": { + "name": "control_model", + "label": "" + }, + "control_weight": { + "name": 
"control_weight", + "label": "", + "value": 0.25 + }, + "begin_step_percent": { + "name": "begin_step_percent", + "label": "", + "value": 0.5 + }, + "end_step_percent": { + "name": "end_step_percent", + "label": "", + "value": 0.8 + }, + "control_mode": { + "name": "control_mode", + "label": "Control Mode", + "value": "balanced" + }, + "resize_mode": { + "name": "resize_mode", + "label": "", + "value": "just_resize" + } + } + }, + "position": { + "x": -3131.577032503611, + "y": -1392.1075609956667 + } + }, + { + "id": "8923451b-5a27-4395-b7f2-dce875fca6f5", + "type": "invocation", + "data": { + "id": "8923451b-5a27-4395-b7f2-dce875fca6f5", + "type": "noise", + "version": "1.0.2", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "seed": { + "name": "seed", + "label": "", + "value": 3 + }, + "width": { + "name": "width", + "label": "", + "value": 512 + }, + "height": { + "name": "height", + "label": "", + "value": 512 + }, + "use_cpu": { + "name": "use_cpu", + "label": "", + "value": true + } + } + }, + "position": { + "x": -4000, + "y": -1750 + } + }, + { + "id": "f0cd0d2f-9614-43f7-9944-a75b8d5ccd65", + "type": "invocation", + "data": { + "id": "f0cd0d2f-9614-43f7-9944-a75b8d5ccd65", + "type": "esrgan", + "version": "1.3.2", + "label": "", + "notes": "", + "isOpen": true, + "isIntermediate": true, + "useCache": true, + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "image": { + "name": "image", + "label": "" + }, + "model_name": { + "name": "model_name", + "label": "Upscaling Model", + "value": "RealESRGAN_x4plus.pth" + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 500 + } + } + }, + "position": { + "x": -4750, + "y": -1825 + } + }, + { + "id": "7dbb756b-7d79-431c-a46d-d8f7b082c127", + "type": "invocation", + "data": { + "id": "7dbb756b-7d79-431c-a46d-d8f7b082c127", + "version": "1.0.1", + "label": "", + "notes": "", + "type": "float_to_int", + "inputs": { + "value": { + "name": "value", + "label": "", + "value": 0 + }, + "multiple": { + "name": "multiple", + "label": "", + "value": 8 + }, + "method": { + "name": "method", + "label": "", + "value": "Floor" + } + }, + "isOpen": false, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -4000, + "y": -1950 + } + }, + { + "id": "5ca87ace-edf9-49c7-a424-cd42416b86a7", + "type": "invocation", + "data": { + "id": "5ca87ace-edf9-49c7-a424-cd42416b86a7", + "version": "1.0.2", + "label": "", + "notes": "", + "type": "image", + "inputs": { + "image": { + "name": "image", + "label": "Image to Upscale" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -4750, + "y": -2850 + } + }, + { + "id": "f5d9bf3b-2646-4b17-9894-20fd2b4218ea", + "type": "invocation", + "data": { + "id": "f5d9bf3b-2646-4b17-9894-20fd2b4218ea", + "version": "1.0.1", + "label": "", + "notes": "", + "type": "float_to_int", + "inputs": { + "value": { + "name": "value", + "label": "", + "value": 8 + }, + "multiple": { + "name": "multiple", + "label": "", + "value": 8 + }, + "method": { + "name": "method", + "label": "", + "value": "Floor" + } + }, + "isOpen": false, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -4000, + "y": -2000 + } + } + ], + "edges": [ + { + "id": "reactflow__edge-f936ebb3-6902-4df9-a775-6a68bac2da70model-be4082d6-e238-40ea-a9df-fc0d725e8895control_model", + "type": "default", + "source": 
"f936ebb3-6902-4df9-a775-6a68bac2da70", + "target": "be4082d6-e238-40ea-a9df-fc0d725e8895", + "sourceHandle": "model", + "targetHandle": "control_model" + }, + { + "id": "reactflow__edge-f936ebb3-6902-4df9-a775-6a68bac2da70model-b78f53b6-2eae-4956-97b4-7e73768d1491control_model", + "type": "default", + "source": "f936ebb3-6902-4df9-a775-6a68bac2da70", + "target": "b78f53b6-2eae-4956-97b4-7e73768d1491", + "sourceHandle": "model", + "targetHandle": "control_model" + }, + { + "id": "49a8cc12-aa19-48c5-b6b3-04e0b603b384-c8f5c671-8c87-4d96-a75e-a9937ac6bc03-collapsed", + "type": "collapsed", + "source": "49a8cc12-aa19-48c5-b6b3-04e0b603b384", + "target": "c8f5c671-8c87-4d96-a75e-a9937ac6bc03" + }, + { + "id": "14e65dbe-4249-4b25-9a63-3a10cfaeb61c-49a8cc12-aa19-48c5-b6b3-04e0b603b384-collapsed", + "type": "collapsed", + "source": "14e65dbe-4249-4b25-9a63-3a10cfaeb61c", + "target": "49a8cc12-aa19-48c5-b6b3-04e0b603b384" + }, + { + "id": "1dd915a3-6756-48ed-b68b-ee3b4bd06c1d-14e65dbe-4249-4b25-9a63-3a10cfaeb61c-collapsed", + "type": "collapsed", + "source": "1dd915a3-6756-48ed-b68b-ee3b4bd06c1d", + "target": "14e65dbe-4249-4b25-9a63-3a10cfaeb61c" + }, + { + "id": "reactflow__edge-00239057-20d4-4cd2-a010-28727b256ea2value-8923451b-5a27-4395-b7f2-dce875fca6f5seed", + "type": "default", + "source": "00239057-20d4-4cd2-a010-28727b256ea2", + "target": "8923451b-5a27-4395-b7f2-dce875fca6f5", + "sourceHandle": "value", + "targetHandle": "seed" + }, + { + "id": "reactflow__edge-094bc4ed-5c68-4342-84f4-51056c755796value-c3b60a50-8039-4924-90e3-8c608e1fecb5tiled", + "type": "default", + "source": "094bc4ed-5c68-4342-84f4-51056c755796", + "target": "c3b60a50-8039-4924-90e3-8c608e1fecb5", + "sourceHandle": "value", + "targetHandle": "tiled" + }, + { + "id": "reactflow__edge-094bc4ed-5c68-4342-84f4-51056c755796value-117f982a-03da-49b1-bf9f-29711160ac02tiled", + "type": "default", + "source": "094bc4ed-5c68-4342-84f4-51056c755796", + "target": "117f982a-03da-49b1-bf9f-29711160ac02", + "sourceHandle": "value", + "targetHandle": "tiled" + }, + { + "id": "c8f5c671-8c87-4d96-a75e-a9937ac6bc03-e4d5ca7c-8fcf-4c59-9c58-67194c80dc73-collapsed", + "type": "collapsed", + "source": "c8f5c671-8c87-4d96-a75e-a9937ac6bc03", + "target": "e4d5ca7c-8fcf-4c59-9c58-67194c80dc73" + }, + { + "id": "d350feac-9686-4e0d-bd46-a96bd2630818-7dbb756b-7d79-431c-a46d-d8f7b082c127-collapsed", + "type": "collapsed", + "source": "d350feac-9686-4e0d-bd46-a96bd2630818", + "target": "7dbb756b-7d79-431c-a46d-d8f7b082c127" + }, + { + "id": "5b256f14-caab-40ff-b8f0-9679cd542163-f5d9bf3b-2646-4b17-9894-20fd2b4218ea-collapsed", + "type": "collapsed", + "source": "5b256f14-caab-40ff-b8f0-9679cd542163", + "target": "f5d9bf3b-2646-4b17-9894-20fd2b4218ea" + }, + { + "id": "4950132a-2d06-4571-b2c0-55cb37a31e9b-041c59cc-f9e4-4dc9-8b31-84648c5f3ebe-collapsed", + "type": "collapsed", + "source": "4950132a-2d06-4571-b2c0-55cb37a31e9b", + "target": "041c59cc-f9e4-4dc9-8b31-84648c5f3ebe" + }, + { + "id": "4950132a-2d06-4571-b2c0-55cb37a31e9b-53c2d5fd-863d-4950-93e0-628f3d61b493-collapsed", + "type": "collapsed", + "source": "4950132a-2d06-4571-b2c0-55cb37a31e9b", + "target": "53c2d5fd-863d-4950-93e0-628f3d61b493" + }, + { + "id": "reactflow__edge-f5ca24ee-21c5-4c8c-8d3c-371b5079b086value-27215391-b20e-412a-b854-7fa5927f5437style", + "type": "default", + "source": "f5ca24ee-21c5-4c8c-8d3c-371b5079b086", + "target": "27215391-b20e-412a-b854-7fa5927f5437", + "sourceHandle": "value", + "targetHandle": "style" + }, + { + "id": 
"reactflow__edge-f5ca24ee-21c5-4c8c-8d3c-371b5079b086value-27215391-b20e-412a-b854-7fa5927f5437prompt", + "type": "default", + "source": "f5ca24ee-21c5-4c8c-8d3c-371b5079b086", + "target": "27215391-b20e-412a-b854-7fa5927f5437", + "sourceHandle": "value", + "targetHandle": "prompt" + }, + { + "id": "reactflow__edge-c26bff37-4f12-482f-ba45-3a5d729b4c4fvalue-6142b69a-323f-4ecd-a7e5-67dc61349c51style", + "type": "default", + "source": "c26bff37-4f12-482f-ba45-3a5d729b4c4f", + "target": "6142b69a-323f-4ecd-a7e5-67dc61349c51", + "sourceHandle": "value", + "targetHandle": "style" + }, + { + "id": "reactflow__edge-c26bff37-4f12-482f-ba45-3a5d729b4c4fvalue-6142b69a-323f-4ecd-a7e5-67dc61349c51prompt", + "type": "default", + "source": "c26bff37-4f12-482f-ba45-3a5d729b4c4f", + "target": "6142b69a-323f-4ecd-a7e5-67dc61349c51", + "sourceHandle": "value", + "targetHandle": "prompt" + }, + { + "id": "88ae723e-4933-4371-b52d-3ada52a59d36-6daa9526-382b-491d-964f-f53fc308664f-collapsed", + "type": "collapsed", + "source": "88ae723e-4933-4371-b52d-3ada52a59d36", + "target": "6daa9526-382b-491d-964f-f53fc308664f" + }, + { + "id": "f1afd295-860f-48b6-a76a-90609bf2cc11-88ae723e-4933-4371-b52d-3ada52a59d36-collapsed", + "type": "collapsed", + "source": "f1afd295-860f-48b6-a76a-90609bf2cc11", + "target": "88ae723e-4933-4371-b52d-3ada52a59d36" + }, + { + "id": "bc53651f-208b-440c-be30-f93f72ae700e-67346654-cac0-446a-8cde-9af4b5a029a6-collapsed", + "type": "collapsed", + "source": "bc53651f-208b-440c-be30-f93f72ae700e", + "target": "67346654-cac0-446a-8cde-9af4b5a029a6" + }, + { + "id": "reactflow__edge-67346654-cac0-446a-8cde-9af4b5a029a6value-be4082d6-e238-40ea-a9df-fc0d725e8895begin_step_percent", + "type": "default", + "source": "67346654-cac0-446a-8cde-9af4b5a029a6", + "target": "be4082d6-e238-40ea-a9df-fc0d725e8895", + "sourceHandle": "value", + "targetHandle": "begin_step_percent" + }, + { + "id": "reactflow__edge-67346654-cac0-446a-8cde-9af4b5a029a6value-b78f53b6-2eae-4956-97b4-7e73768d1491end_step_percent", + "type": "default", + "source": "67346654-cac0-446a-8cde-9af4b5a029a6", + "target": "b78f53b6-2eae-4956-97b4-7e73768d1491", + "sourceHandle": "value", + "targetHandle": "end_step_percent" + }, + { + "id": "bd094e2f-41e5-4b61-9f7b-56cf337d53fa-f1afd295-860f-48b6-a76a-90609bf2cc11-collapsed", + "type": "collapsed", + "source": "bd094e2f-41e5-4b61-9f7b-56cf337d53fa", + "target": "f1afd295-860f-48b6-a76a-90609bf2cc11" + }, + { + "id": "bd094e2f-41e5-4b61-9f7b-56cf337d53fa-bc53651f-208b-440c-be30-f93f72ae700e-collapsed", + "type": "collapsed", + "source": "bd094e2f-41e5-4b61-9f7b-56cf337d53fa", + "target": "bc53651f-208b-440c-be30-f93f72ae700e" + }, + { + "id": "reactflow__edge-bc53651f-208b-440c-be30-f93f72ae700evalue-67346654-cac0-446a-8cde-9af4b5a029a6b", + "type": "default", + "source": "bc53651f-208b-440c-be30-f93f72ae700e", + "target": "67346654-cac0-446a-8cde-9af4b5a029a6", + "sourceHandle": "value", + "targetHandle": "b", + "hidden": true + }, + { + "id": "reactflow__edge-6daa9526-382b-491d-964f-f53fc308664fvalue-b78f53b6-2eae-4956-97b4-7e73768d1491control_weight", + "type": "default", + "source": "6daa9526-382b-491d-964f-f53fc308664f", + "target": "b78f53b6-2eae-4956-97b4-7e73768d1491", + "sourceHandle": "value", + "targetHandle": "control_weight" + }, + { + "id": "reactflow__edge-88ae723e-4933-4371-b52d-3ada52a59d36value-6daa9526-382b-491d-964f-f53fc308664fb", + "type": "default", + "source": "88ae723e-4933-4371-b52d-3ada52a59d36", + "target": "6daa9526-382b-491d-964f-f53fc308664f", + 
"sourceHandle": "value", + "targetHandle": "b", + "hidden": true + }, + { + "id": "reactflow__edge-88ae723e-4933-4371-b52d-3ada52a59d36value-be4082d6-e238-40ea-a9df-fc0d725e8895control_weight", + "type": "default", + "source": "88ae723e-4933-4371-b52d-3ada52a59d36", + "target": "be4082d6-e238-40ea-a9df-fc0d725e8895", + "sourceHandle": "value", + "targetHandle": "control_weight" + }, + { + "id": "reactflow__edge-f1afd295-860f-48b6-a76a-90609bf2cc11value-88ae723e-4933-4371-b52d-3ada52a59d36b", + "type": "default", + "source": "f1afd295-860f-48b6-a76a-90609bf2cc11", + "target": "88ae723e-4933-4371-b52d-3ada52a59d36", + "sourceHandle": "value", + "targetHandle": "b", + "hidden": true + }, + { + "id": "reactflow__edge-bd094e2f-41e5-4b61-9f7b-56cf337d53favalue-f1afd295-860f-48b6-a76a-90609bf2cc11a", + "type": "default", + "source": "bd094e2f-41e5-4b61-9f7b-56cf337d53fa", + "target": "f1afd295-860f-48b6-a76a-90609bf2cc11", + "sourceHandle": "value", + "targetHandle": "a", + "hidden": true + }, + { + "id": "reactflow__edge-1dd915a3-6756-48ed-b68b-ee3b4bd06c1dvalue-14e65dbe-4249-4b25-9a63-3a10cfaeb61ca", + "type": "default", + "source": "1dd915a3-6756-48ed-b68b-ee3b4bd06c1d", + "target": "14e65dbe-4249-4b25-9a63-3a10cfaeb61c", + "sourceHandle": "value", + "targetHandle": "a", + "hidden": true + }, + { + "id": "reactflow__edge-e4d5ca7c-8fcf-4c59-9c58-67194c80dc73value-8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7adenoising_start", + "type": "default", + "source": "e4d5ca7c-8fcf-4c59-9c58-67194c80dc73", + "target": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "sourceHandle": "value", + "targetHandle": "denoising_start" + }, + { + "id": "reactflow__edge-c8f5c671-8c87-4d96-a75e-a9937ac6bc03value-e4d5ca7c-8fcf-4c59-9c58-67194c80dc73b", + "type": "default", + "source": "c8f5c671-8c87-4d96-a75e-a9937ac6bc03", + "target": "e4d5ca7c-8fcf-4c59-9c58-67194c80dc73", + "sourceHandle": "value", + "targetHandle": "b", + "hidden": true + }, + { + "id": "reactflow__edge-49a8cc12-aa19-48c5-b6b3-04e0b603b384value-c8f5c671-8c87-4d96-a75e-a9937ac6bc03a", + "type": "default", + "source": "49a8cc12-aa19-48c5-b6b3-04e0b603b384", + "target": "c8f5c671-8c87-4d96-a75e-a9937ac6bc03", + "sourceHandle": "value", + "targetHandle": "a", + "hidden": true + }, + { + "id": "reactflow__edge-14e65dbe-4249-4b25-9a63-3a10cfaeb61cvalue-49a8cc12-aa19-48c5-b6b3-04e0b603b384a", + "type": "default", + "source": "14e65dbe-4249-4b25-9a63-3a10cfaeb61c", + "target": "49a8cc12-aa19-48c5-b6b3-04e0b603b384", + "sourceHandle": "value", + "targetHandle": "a", + "hidden": true + }, + { + "id": "79390b60-4077-4f94-ad0a-4229cc73ddb2-4950132a-2d06-4571-b2c0-55cb37a31e9b-collapsed", + "type": "collapsed", + "source": "79390b60-4077-4f94-ad0a-4229cc73ddb2", + "target": "4950132a-2d06-4571-b2c0-55cb37a31e9b" + }, + { + "id": "reactflow__edge-4950132a-2d06-4571-b2c0-55cb37a31e9bvalue-041c59cc-f9e4-4dc9-8b31-84648c5f3ebestrength", + "type": "default", + "source": "4950132a-2d06-4571-b2c0-55cb37a31e9b", + "target": "041c59cc-f9e4-4dc9-8b31-84648c5f3ebe", + "sourceHandle": "value", + "targetHandle": "strength", + "hidden": true + }, + { + "id": "reactflow__edge-4950132a-2d06-4571-b2c0-55cb37a31e9bvalue-53c2d5fd-863d-4950-93e0-628f3d61b493strength", + "type": "default", + "source": "4950132a-2d06-4571-b2c0-55cb37a31e9b", + "target": "53c2d5fd-863d-4950-93e0-628f3d61b493", + "sourceHandle": "value", + "targetHandle": "strength", + "hidden": true + }, + { + "id": "reactflow__edge-79390b60-4077-4f94-ad0a-4229cc73ddb2value-4950132a-2d06-4571-b2c0-55cb37a31e9bb", + "type": 
"default", + "source": "79390b60-4077-4f94-ad0a-4229cc73ddb2", + "target": "4950132a-2d06-4571-b2c0-55cb37a31e9b", + "sourceHandle": "value", + "targetHandle": "b", + "hidden": true + }, + { + "id": "reactflow__edge-696de0e1-cdd2-42e8-abeb-57a926bc6df6value-79390b60-4077-4f94-ad0a-4229cc73ddb2a", + "type": "default", + "source": "696de0e1-cdd2-42e8-abeb-57a926bc6df6", + "target": "79390b60-4077-4f94-ad0a-4229cc73ddb2", + "sourceHandle": "value", + "targetHandle": "a" + }, + { + "id": "reactflow__edge-bd094e2f-41e5-4b61-9f7b-56cf337d53favalue-bc53651f-208b-440c-be30-f93f72ae700ea", + "type": "default", + "source": "bd094e2f-41e5-4b61-9f7b-56cf337d53fa", + "target": "bc53651f-208b-440c-be30-f93f72ae700e", + "sourceHandle": "value", + "targetHandle": "a", + "hidden": true + }, + { + "id": "reactflow__edge-6636a27a-f130-4a13-b3e5-50b44e4a566fcollection-8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7acontrol", + "type": "default", + "source": "6636a27a-f130-4a13-b3e5-50b44e4a566f", + "target": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "sourceHandle": "collection", + "targetHandle": "control" + }, + { + "id": "reactflow__edge-b78f53b6-2eae-4956-97b4-7e73768d1491control-6636a27a-f130-4a13-b3e5-50b44e4a566fitem", + "type": "default", + "source": "b78f53b6-2eae-4956-97b4-7e73768d1491", + "target": "6636a27a-f130-4a13-b3e5-50b44e4a566f", + "sourceHandle": "control", + "targetHandle": "item" + }, + { + "id": "reactflow__edge-be4082d6-e238-40ea-a9df-fc0d725e8895control-6636a27a-f130-4a13-b3e5-50b44e4a566fitem", + "type": "default", + "source": "be4082d6-e238-40ea-a9df-fc0d725e8895", + "target": "6636a27a-f130-4a13-b3e5-50b44e4a566f", + "sourceHandle": "control", + "targetHandle": "item" + }, + { + "id": "reactflow__edge-7671553a-cd4b-4e25-8332-9d5667e64493image-b78f53b6-2eae-4956-97b4-7e73768d1491image", + "type": "default", + "source": "7671553a-cd4b-4e25-8332-9d5667e64493", + "target": "b78f53b6-2eae-4956-97b4-7e73768d1491", + "sourceHandle": "image", + "targetHandle": "image" + }, + { + "id": "reactflow__edge-e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fdclip2-27215391-b20e-412a-b854-7fa5927f5437clip2", + "type": "default", + "source": "e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fd", + "target": "27215391-b20e-412a-b854-7fa5927f5437", + "sourceHandle": "clip2", + "targetHandle": "clip2" + }, + { + "id": "reactflow__edge-e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fdclip-27215391-b20e-412a-b854-7fa5927f5437clip", + "type": "default", + "source": "e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fd", + "target": "27215391-b20e-412a-b854-7fa5927f5437", + "sourceHandle": "clip", + "targetHandle": "clip" + }, + { + "id": "reactflow__edge-e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fdclip2-6142b69a-323f-4ecd-a7e5-67dc61349c51clip2", + "type": "default", + "source": "e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fd", + "target": "6142b69a-323f-4ecd-a7e5-67dc61349c51", + "sourceHandle": "clip2", + "targetHandle": "clip2" + }, + { + "id": "reactflow__edge-6142b69a-323f-4ecd-a7e5-67dc61349c51conditioning-8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7apositive_conditioning", + "type": "default", + "source": "6142b69a-323f-4ecd-a7e5-67dc61349c51", + "target": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "sourceHandle": "conditioning", + "targetHandle": "positive_conditioning" + }, + { + "id": "reactflow__edge-27215391-b20e-412a-b854-7fa5927f5437conditioning-8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7anegative_conditioning", + "type": "default", + "source": "27215391-b20e-412a-b854-7fa5927f5437", + "target": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "sourceHandle": "conditioning", + "targetHandle": 
"negative_conditioning" + }, + { + "id": "reactflow__edge-e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fdunet-8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7aunet", + "type": "default", + "source": "e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fd", + "target": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "sourceHandle": "unet", + "targetHandle": "unet" + }, + { + "id": "reactflow__edge-100b3143-b3fb-4ff3-bb3c-8d4d3f89ae3avae-117f982a-03da-49b1-bf9f-29711160ac02vae", + "type": "default", + "source": "100b3143-b3fb-4ff3-bb3c-8d4d3f89ae3a", + "target": "117f982a-03da-49b1-bf9f-29711160ac02", + "sourceHandle": "vae", + "targetHandle": "vae" + }, + { + "id": "reactflow__edge-100b3143-b3fb-4ff3-bb3c-8d4d3f89ae3avae-c3b60a50-8039-4924-90e3-8c608e1fecb5vae", + "type": "default", + "source": "100b3143-b3fb-4ff3-bb3c-8d4d3f89ae3a", + "target": "c3b60a50-8039-4924-90e3-8c608e1fecb5", + "sourceHandle": "vae", + "targetHandle": "vae" + }, + { + "id": "reactflow__edge-e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fdclip-6142b69a-323f-4ecd-a7e5-67dc61349c51clip", + "type": "default", + "source": "e277e4b7-01cd-4daa-86ab-7bfa3cdcd9fd", + "target": "6142b69a-323f-4ecd-a7e5-67dc61349c51", + "sourceHandle": "clip", + "targetHandle": "clip" + }, + { + "id": "reactflow__edge-041c59cc-f9e4-4dc9-8b31-84648c5f3ebeimage-7671553a-cd4b-4e25-8332-9d5667e64493image", + "type": "default", + "source": "041c59cc-f9e4-4dc9-8b31-84648c5f3ebe", + "target": "7671553a-cd4b-4e25-8332-9d5667e64493", + "sourceHandle": "image", + "targetHandle": "image" + }, + { + "id": "reactflow__edge-f0cd0d2f-9614-43f7-9944-a75b8d5ccd65image-041c59cc-f9e4-4dc9-8b31-84648c5f3ebeimage", + "type": "default", + "source": "f0cd0d2f-9614-43f7-9944-a75b8d5ccd65", + "target": "041c59cc-f9e4-4dc9-8b31-84648c5f3ebe", + "sourceHandle": "image", + "targetHandle": "image" + }, + { + "id": "reactflow__edge-53c2d5fd-863d-4950-93e0-628f3d61b493image-f0cd0d2f-9614-43f7-9944-a75b8d5ccd65image", + "type": "default", + "source": "53c2d5fd-863d-4950-93e0-628f3d61b493", + "target": "f0cd0d2f-9614-43f7-9944-a75b8d5ccd65", + "sourceHandle": "image", + "targetHandle": "image" + }, + { + "id": "reactflow__edge-5ca87ace-edf9-49c7-a424-cd42416b86a7image-53c2d5fd-863d-4950-93e0-628f3d61b493image", + "type": "default", + "source": "5ca87ace-edf9-49c7-a424-cd42416b86a7", + "target": "53c2d5fd-863d-4950-93e0-628f3d61b493", + "sourceHandle": "image", + "targetHandle": "image" + }, + { + "id": "reactflow__edge-8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7alatents-c3b60a50-8039-4924-90e3-8c608e1fecb5latents", + "type": "default", + "source": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "target": "c3b60a50-8039-4924-90e3-8c608e1fecb5", + "sourceHandle": "latents", + "targetHandle": "latents" + }, + { + "id": "reactflow__edge-117f982a-03da-49b1-bf9f-29711160ac02latents-8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7alatents", + "type": "default", + "source": "117f982a-03da-49b1-bf9f-29711160ac02", + "target": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "sourceHandle": "latents", + "targetHandle": "latents" + }, + { + "id": "reactflow__edge-8923451b-5a27-4395-b7f2-dce875fca6f5noise-8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7anoise", + "type": "default", + "source": "8923451b-5a27-4395-b7f2-dce875fca6f5", + "target": "8dba0d37-cd2e-4fe5-ae9f-5464b85a8a7a", + "sourceHandle": "noise", + "targetHandle": "noise" + }, + { + "id": "reactflow__edge-d350feac-9686-4e0d-bd46-a96bd2630818value-7dbb756b-7d79-431c-a46d-d8f7b082c127value", + "type": "default", + "source": "d350feac-9686-4e0d-bd46-a96bd2630818", + "target": "7dbb756b-7d79-431c-a46d-d8f7b082c127", + 
"sourceHandle": "value", + "targetHandle": "value", + "hidden": true + }, + { + "id": "reactflow__edge-5b256f14-caab-40ff-b8f0-9679cd542163value-f5d9bf3b-2646-4b17-9894-20fd2b4218eavalue", + "type": "default", + "source": "5b256f14-caab-40ff-b8f0-9679cd542163", + "target": "f5d9bf3b-2646-4b17-9894-20fd2b4218ea", + "sourceHandle": "value", + "targetHandle": "value", + "hidden": true + }, + { + "id": "reactflow__edge-7671553a-cd4b-4e25-8332-9d5667e64493height-8923451b-5a27-4395-b7f2-dce875fca6f5height", + "type": "default", + "source": "7671553a-cd4b-4e25-8332-9d5667e64493", + "target": "8923451b-5a27-4395-b7f2-dce875fca6f5", + "sourceHandle": "height", + "targetHandle": "height" + }, + { + "id": "reactflow__edge-7671553a-cd4b-4e25-8332-9d5667e64493width-8923451b-5a27-4395-b7f2-dce875fca6f5width", + "type": "default", + "source": "7671553a-cd4b-4e25-8332-9d5667e64493", + "target": "8923451b-5a27-4395-b7f2-dce875fca6f5", + "sourceHandle": "width", + "targetHandle": "width" + }, + { + "id": "reactflow__edge-7671553a-cd4b-4e25-8332-9d5667e64493image-117f982a-03da-49b1-bf9f-29711160ac02image", + "type": "default", + "source": "7671553a-cd4b-4e25-8332-9d5667e64493", + "target": "117f982a-03da-49b1-bf9f-29711160ac02", + "sourceHandle": "image", + "targetHandle": "image" + }, + { + "id": "reactflow__edge-7671553a-cd4b-4e25-8332-9d5667e64493image-be4082d6-e238-40ea-a9df-fc0d725e8895image", + "type": "default", + "source": "7671553a-cd4b-4e25-8332-9d5667e64493", + "target": "be4082d6-e238-40ea-a9df-fc0d725e8895", + "sourceHandle": "image", + "targetHandle": "image" + }, + { + "id": "reactflow__edge-7dbb756b-7d79-431c-a46d-d8f7b082c127value-7671553a-cd4b-4e25-8332-9d5667e64493height", + "type": "default", + "source": "7dbb756b-7d79-431c-a46d-d8f7b082c127", + "target": "7671553a-cd4b-4e25-8332-9d5667e64493", + "sourceHandle": "value", + "targetHandle": "height" + }, + { + "id": "reactflow__edge-f5d9bf3b-2646-4b17-9894-20fd2b4218eavalue-7671553a-cd4b-4e25-8332-9d5667e64493width", + "type": "default", + "source": "f5d9bf3b-2646-4b17-9894-20fd2b4218ea", + "target": "7671553a-cd4b-4e25-8332-9d5667e64493", + "sourceHandle": "value", + "targetHandle": "width" + }, + { + "id": "reactflow__edge-5ca87ace-edf9-49c7-a424-cd42416b86a7height-d350feac-9686-4e0d-bd46-a96bd2630818a", + "type": "default", + "source": "5ca87ace-edf9-49c7-a424-cd42416b86a7", + "target": "d350feac-9686-4e0d-bd46-a96bd2630818", + "sourceHandle": "height", + "targetHandle": "a" + }, + { + "id": "reactflow__edge-1ba845a6-eb88-49a1-a490-5fe6754f3ec9value-d350feac-9686-4e0d-bd46-a96bd2630818b", + "type": "default", + "source": "1ba845a6-eb88-49a1-a490-5fe6754f3ec9", + "target": "d350feac-9686-4e0d-bd46-a96bd2630818", + "sourceHandle": "value", + "targetHandle": "b" + }, + { + "id": "reactflow__edge-1ba845a6-eb88-49a1-a490-5fe6754f3ec9value-5b256f14-caab-40ff-b8f0-9679cd542163b", + "type": "default", + "source": "1ba845a6-eb88-49a1-a490-5fe6754f3ec9", + "target": "5b256f14-caab-40ff-b8f0-9679cd542163", + "sourceHandle": "value", + "targetHandle": "b" + }, + { + "id": "reactflow__edge-5ca87ace-edf9-49c7-a424-cd42416b86a7width-5b256f14-caab-40ff-b8f0-9679cd542163a", + "type": "default", + "source": "5ca87ace-edf9-49c7-a424-cd42416b86a7", + "target": "5b256f14-caab-40ff-b8f0-9679cd542163", + "sourceHandle": "width", + "targetHandle": "a" + } + ] +} diff --git a/invokeai/app/services/workflow_records/default_workflows/Prompt from File.json b/invokeai/app/services/workflow_records/default_workflows/Prompt from File.json index 765b236714..de902bc77e 
100644 --- a/invokeai/app/services/workflow_records/default_workflows/Prompt from File.json +++ b/invokeai/app/services/workflow_records/default_workflows/Prompt from File.json @@ -2,7 +2,7 @@ "name": "Prompt from File", "author": "InvokeAI", "description": "Sample workflow using Prompt from File node", - "version": "2.0.0", + "version": "2.1.0", "contact": "invoke@invoke.ai", "tags": "text2image, prompt from file, default", "notes": "", @@ -37,16 +37,68 @@ } ], "meta": { - "category": "default", - "version": "3.0.0" + "version": "3.0.0", + "category": "default" }, "nodes": [ { - "id": "c2eaf1ba-5708-4679-9e15-945b8b432692", + "id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", "type": "invocation", "data": { - "id": "c2eaf1ba-5708-4679-9e15-945b8b432692", - "version": "1.1.1", + "id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", + "version": "1.3.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "l2i", + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "latents": { + "name": "latents", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": false + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 2037.861329274915, + "y": -329.8393457509562 + } + }, + { + "id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", + "type": "invocation", + "data": { + "id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", + "version": "1.2.0", "nodePack": "invokeai", "label": "", "notes": "", @@ -60,6 +112,69 @@ "clip": { "name": "clip", "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": false, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 925, + "y": -275 + } + }, + { + "id": "d6353b7f-b447-4e17-8f2e-80a88c91d426", + "type": "invocation", + "data": { + "id": "d6353b7f-b447-4e17-8f2e-80a88c91d426", + "version": "1.0.3", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "main_model_loader", + "inputs": { + "model": { + "name": "model", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 0, + "y": -375 + } + }, + { + "id": "c2eaf1ba-5708-4679-9e15-945b8b432692", + "type": "invocation", + "data": { + "id": "c2eaf1ba-5708-4679-9e15-945b8b432692", + "version": "1.2.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "compel", + "inputs": { + "prompt": { + "name": "prompt", + "label": "", + "value": "" + }, + "clip": { + "name": "clip", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" } }, "isOpen": false, @@ -141,61 +256,6 @@ "y": -400 } }, - { - "id": "d6353b7f-b447-4e17-8f2e-80a88c91d426", - "type": "invocation", - "data": { - "id": "d6353b7f-b447-4e17-8f2e-80a88c91d426", - "version": "1.0.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "main_model_loader", - "inputs": { - "model": { - "name": "model", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 0, - "y": -375 - } - }, - { - "id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", - "type": "invocation", - "data": { - "id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", - "version": "1.1.1", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "compel", - "inputs": { - "prompt": { - "name": 
"prompt", - "label": "", - "value": "" - }, - "clip": { - "name": "clip", - "label": "" - } - }, - "isOpen": false, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 925, - "y": -275 - } - }, { "id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77", "type": "invocation", @@ -268,53 +328,6 @@ "y": -50 } }, - { - "id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", - "type": "invocation", - "data": { - "id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", - "version": "1.2.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "l2i", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "latents": { - "name": "latents", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": false - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 2037.861329274915, - "y": -329.8393457509562 - } - }, { "id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", "type": "invocation", diff --git a/invokeai/app/services/workflow_records/default_workflows/Text to Image - SD1.5.json b/invokeai/app/services/workflow_records/default_workflows/Text to Image - SD1.5.json index d3d52150bc..65f894724c 100644 --- a/invokeai/app/services/workflow_records/default_workflows/Text to Image - SD1.5.json +++ b/invokeai/app/services/workflow_records/default_workflows/Text to Image - SD1.5.json @@ -2,7 +2,7 @@ "name": "Text to Image - SD1.5", "author": "InvokeAI", "description": "Sample text to image workflow for Stable Diffusion 1.5/2", - "version": "2.0.0", + "version": "2.1.0", "contact": "invoke@invoke.ai", "tags": "text2image, SD1.5, SD2, default", "notes": "", @@ -33,16 +33,127 @@ } ], "meta": { - "category": "default", - "version": "3.0.0" + "version": "3.0.0", + "category": "default" }, "nodes": [ + { + "id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f", + "type": "invocation", + "data": { + "id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f", + "version": "1.3.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "l2i", + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "latents": { + "name": "latents", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": true + } + }, + "isOpen": true, + "isIntermediate": false, + "useCache": true + }, + "position": { + "x": 1800, + "y": 25 + } + }, + { + "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c", + "type": "invocation", + "data": { + "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c", + "version": "1.2.0", + "nodePack": "invokeai", + "label": "Positive Compel Prompt", + "notes": "", + "type": "compel", + "inputs": { + "prompt": { + "name": "prompt", + "label": "Positive Prompt", + "value": "Super cute tiger cub, national geographic award-winning photograph" + }, + "clip": { + "name": "clip", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 1000, + "y": 25 + } + }, + { + "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", + "type": "invocation", + "data": { + "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", + "version": "1.0.3", + "nodePack": "invokeai", + 
"label": "", + "notes": "", + "type": "main_model_loader", + "inputs": { + "model": { + "name": "model", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 600, + "y": 25 + } + }, { "id": "93dc02a4-d05b-48ed-b99c-c9b616af3402", "type": "invocation", "data": { "id": "93dc02a4-d05b-48ed-b99c-c9b616af3402", - "version": "1.1.1", + "version": "1.2.0", "nodePack": "invokeai", "label": "Negative Compel Prompt", "notes": "", @@ -56,6 +167,10 @@ "clip": { "name": "clip", "label": "" + }, + "mask": { + "name": "mask", + "label": "" } }, "isOpen": true, @@ -108,61 +223,6 @@ "y": 325 } }, - { - "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", - "type": "invocation", - "data": { - "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", - "version": "1.0.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "main_model_loader", - "inputs": { - "model": { - "name": "model", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 600, - "y": 25 - } - }, - { - "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c", - "type": "invocation", - "data": { - "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c", - "version": "1.1.1", - "nodePack": "invokeai", - "label": "Positive Compel Prompt", - "notes": "", - "type": "compel", - "inputs": { - "prompt": { - "name": "prompt", - "label": "Positive Prompt", - "value": "Super cute tiger cub, national geographic award-winning photograph" - }, - "clip": { - "name": "clip", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 1000, - "y": 25 - } - }, { "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "type": "invocation", @@ -280,53 +340,6 @@ "x": 1400, "y": 25 } - }, - { - "id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f", - "type": "invocation", - "data": { - "id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f", - "version": "1.2.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "l2i", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "latents": { - "name": "latents", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": true - } - }, - "isOpen": true, - "isIntermediate": false, - "useCache": true - }, - "position": { - "x": 1800, - "y": 25 - } } ], "edges": [ diff --git a/invokeai/app/services/workflow_records/default_workflows/Text to Image - SDXL.json b/invokeai/app/services/workflow_records/default_workflows/Text to Image - SDXL.json index 1527bbceb1..0f4777169e 100644 --- a/invokeai/app/services/workflow_records/default_workflows/Text to Image - SDXL.json +++ b/invokeai/app/services/workflow_records/default_workflows/Text to Image - SDXL.json @@ -2,7 +2,7 @@ "name": "Text to Image - SDXL", "author": "InvokeAI", "description": "Sample text to image workflow for SDXL", - "version": "2.0.0", + "version": "2.1.0", "contact": "invoke@invoke.ai", "tags": "text2image, SDXL, default", "notes": "", @@ -29,10 +29,271 @@ } ], "meta": { - "category": "default", - "version": "3.0.0" + "version": "3.0.0", + "category": "default" }, "nodes": [ + { + "id": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8", + "type": "invocation", + "data": { + "id": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8", + "version": "1.0.3", + "label": "", + "notes": "", + "type": "vae_loader", + "inputs": { + "vae_model": { + "name": 
"vae_model", + "label": "VAE (use the FP16 model)", + "value": { + "key": "f20f9e5c-1bce-4c46-a84d-34ebfa7df069", + "hash": "blake3:9705ab1c31fa96b308734214fb7571a958621c7a9247eed82b7d277145f8d9fa", + "name": "sdxl-vae-fp16-fix", + "base": "sdxl", + "type": "vae" + } + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 375, + "y": -225 + } + }, + { + "id": "63e91020-83b2-4f35-b174-ad9692aabb48", + "type": "invocation", + "data": { + "id": "63e91020-83b2-4f35-b174-ad9692aabb48", + "version": "1.3.0", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "l2i", + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "latents": { + "name": "latents", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": false + } + }, + "isOpen": true, + "isIntermediate": false, + "useCache": false + }, + "position": { + "x": 1475, + "y": -500 + } + }, + { + "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", + "type": "invocation", + "data": { + "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", + "version": "1.2.0", + "nodePack": "invokeai", + "label": "SDXL Positive Compel Prompt", + "notes": "", + "type": "sdxl_compel_prompt", + "inputs": { + "prompt": { + "name": "prompt", + "label": "Positive Prompt", + "value": "" + }, + "style": { + "name": "style", + "label": "Positive Style", + "value": "" + }, + "original_width": { + "name": "original_width", + "label": "", + "value": 1024 + }, + "original_height": { + "name": "original_height", + "label": "", + "value": 1024 + }, + "crop_top": { + "name": "crop_top", + "label": "", + "value": 0 + }, + "crop_left": { + "name": "crop_left", + "label": "", + "value": 0 + }, + "target_width": { + "name": "target_width", + "label": "", + "value": 1024 + }, + "target_height": { + "name": "target_height", + "label": "", + "value": 1024 + }, + "clip": { + "name": "clip", + "label": "" + }, + "clip2": { + "name": "clip2", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": false, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 750, + "y": -175 + } + }, + { + "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", + "type": "invocation", + "data": { + "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", + "version": "1.0.3", + "nodePack": "invokeai", + "label": "", + "notes": "", + "type": "sdxl_model_loader", + "inputs": { + "model": { + "name": "model", + "label": "", + "value": { + "key": "4a63b226-e8ff-4da4-854e-0b9f04b562ba", + "hash": "blake3:d279309ea6e5ee6e8fd52504275865cc280dac71cbf528c5b07c98b888bddaba", + "name": "dreamshaper-xl-v2-turbo", + "base": "sdxl", + "type": "main" + } + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 375, + "y": -500 + } + }, + { + "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", + "type": "invocation", + "data": { + "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", + "version": "1.2.0", + "nodePack": "invokeai", + "label": "SDXL Negative Compel Prompt", + "notes": "", + "type": "sdxl_compel_prompt", + "inputs": { + "prompt": { + "name": "prompt", + "label": "Negative Prompt", + "value": "" + }, + "style": { + "name": "style", + "label": "Negative Style", + "value": "" + }, + "original_width": { + "name": "original_width", + "label": 
"", + "value": 1024 + }, + "original_height": { + "name": "original_height", + "label": "", + "value": 1024 + }, + "crop_top": { + "name": "crop_top", + "label": "", + "value": 0 + }, + "crop_left": { + "name": "crop_left", + "label": "", + "value": 0 + }, + "target_width": { + "name": "target_width", + "label": "", + "value": 1024 + }, + "target_height": { + "name": "target_height", + "label": "", + "value": 1024 + }, + "clip": { + "name": "clip", + "label": "" + }, + "clip2": { + "name": "clip2", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": false, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 750, + "y": 200 + } + }, { "id": "3774ec24-a69e-4254-864c-097d07a6256f", "type": "invocation", @@ -88,75 +349,6 @@ "y": -125 } }, - { - "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", - "type": "invocation", - "data": { - "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", - "version": "1.1.1", - "nodePack": "invokeai", - "label": "SDXL Negative Compel Prompt", - "notes": "", - "type": "sdxl_compel_prompt", - "inputs": { - "prompt": { - "name": "prompt", - "label": "Negative Prompt", - "value": "" - }, - "style": { - "name": "style", - "label": "Negative Style", - "value": "" - }, - "original_width": { - "name": "original_width", - "label": "", - "value": 1024 - }, - "original_height": { - "name": "original_height", - "label": "", - "value": 1024 - }, - "crop_top": { - "name": "crop_top", - "label": "", - "value": 0 - }, - "crop_left": { - "name": "crop_left", - "label": "", - "value": 0 - }, - "target_width": { - "name": "target_width", - "label": "", - "value": 1024 - }, - "target_height": { - "name": "target_height", - "label": "", - "value": 1024 - }, - "clip": { - "name": "clip", - "label": "" - }, - "clip2": { - "name": "clip2", - "label": "" - } - }, - "isOpen": false, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 750, - "y": 200 - } - }, { "id": "55705012-79b9-4aac-9f26-c0b10309785b", "type": "invocation", @@ -229,154 +421,6 @@ "y": -50 } }, - { - "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", - "type": "invocation", - "data": { - "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", - "version": "1.0.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "sdxl_model_loader", - "inputs": { - "model": { - "name": "model", - "label": "", - "value": { - "key": "4a63b226-e8ff-4da4-854e-0b9f04b562ba", - "hash": "blake3:d279309ea6e5ee6e8fd52504275865cc280dac71cbf528c5b07c98b888bddaba", - "name": "dreamshaper-xl-v2-turbo", - "base": "sdxl", - "type": "main" - } - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 375, - "y": -500 - } - }, - { - "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", - "type": "invocation", - "data": { - "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", - "version": "1.1.1", - "nodePack": "invokeai", - "label": "SDXL Positive Compel Prompt", - "notes": "", - "type": "sdxl_compel_prompt", - "inputs": { - "prompt": { - "name": "prompt", - "label": "Positive Prompt", - "value": "" - }, - "style": { - "name": "style", - "label": "Positive Style", - "value": "" - }, - "original_width": { - "name": "original_width", - "label": "", - "value": 1024 - }, - "original_height": { - "name": "original_height", - "label": "", - "value": 1024 - }, - "crop_top": { - "name": "crop_top", - "label": "", - "value": 0 - }, - "crop_left": { - "name": "crop_left", - "label": "", - "value": 0 - }, - "target_width": { - "name": "target_width", - "label": "", - "value": 
1024 - }, - "target_height": { - "name": "target_height", - "label": "", - "value": 1024 - }, - "clip": { - "name": "clip", - "label": "" - }, - "clip2": { - "name": "clip2", - "label": "" - } - }, - "isOpen": false, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 750, - "y": -175 - } - }, - { - "id": "63e91020-83b2-4f35-b174-ad9692aabb48", - "type": "invocation", - "data": { - "id": "63e91020-83b2-4f35-b174-ad9692aabb48", - "version": "1.2.2", - "nodePack": "invokeai", - "label": "", - "notes": "", - "type": "l2i", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "latents": { - "name": "latents", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": false - } - }, - "isOpen": true, - "isIntermediate": false, - "useCache": false - }, - "position": { - "x": 1475, - "y": -500 - } - }, { "id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", "type": "invocation", @@ -464,37 +508,6 @@ "y": -500 } }, - { - "id": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8", - "type": "invocation", - "data": { - "id": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8", - "version": "1.0.2", - "label": "", - "notes": "", - "type": "vae_loader", - "inputs": { - "vae_model": { - "name": "vae_model", - "label": "VAE (use the FP16 model)", - "value": { - "key": "f20f9e5c-1bce-4c46-a84d-34ebfa7df069", - "hash": "blake3:9705ab1c31fa96b308734214fb7571a958621c7a9247eed82b7d277145f8d9fa", - "name": "sdxl-vae-fp16-fix", - "base": "sdxl", - "type": "vae" - } - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 375, - "y": -225 - } - }, { "id": "ade2c0d3-0384-4157-b39b-29ce429cfa15", "type": "invocation", diff --git a/invokeai/app/services/workflow_records/default_workflows/Text to Image with LoRA.json b/invokeai/app/services/workflow_records/default_workflows/Text to Image with LoRA.json index 6df02b675d..b4df4b921c 100644 --- a/invokeai/app/services/workflow_records/default_workflows/Text to Image with LoRA.json +++ b/invokeai/app/services/workflow_records/default_workflows/Text to Image with LoRA.json @@ -2,7 +2,7 @@ "name": "Text to Image with LoRA", "author": "InvokeAI", "description": "Simple text to image workflow with a LoRA", - "version": "2.0.0", + "version": "2.1.0", "contact": "invoke@invoke.ai", "tags": "text to image, lora, default", "notes": "", @@ -37,28 +37,83 @@ } ], "meta": { - "category": "default", - "version": "3.0.0" + "version": "3.0.0", + "category": "default" }, "nodes": [ { - "id": "85b77bb2-c67a-416a-b3e8-291abe746c44", + "id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400", "type": "invocation", "data": { - "id": "85b77bb2-c67a-416a-b3e8-291abe746c44", - "version": "1.1.1", + "id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400", + "version": "1.3.0", + "label": "", + "notes": "", + "type": "l2i", + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "latents": { + "name": "latents", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": false + } + }, + "isOpen": true, + "isIntermediate": false, + "useCache": true + }, + "position": { + "x": 4450, + "y": -550 + } + }, + { + "id": 
"c3fa6872-2599-4a82-a596-b3446a66cf8b", + "type": "invocation", + "data": { + "id": "c3fa6872-2599-4a82-a596-b3446a66cf8b", + "version": "1.2.0", "label": "", "notes": "", "type": "compel", "inputs": { "prompt": { "name": "prompt", - "label": "Negative Prompt", - "value": "" + "label": "Positive Prompt", + "value": "super cute tiger cub" }, "clip": { "name": "clip", "label": "" + }, + "mask": { + "name": "mask", + "label": "" } }, "isOpen": true, @@ -67,31 +122,7 @@ }, "position": { "x": 3425, - "y": -300 - } - }, - { - "id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818", - "type": "invocation", - "data": { - "id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818", - "version": "1.0.2", - "label": "", - "notes": "", - "type": "main_model_loader", - "inputs": { - "model": { - "name": "model", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": 2500, - "y": -600 + "y": -575 } }, { @@ -99,7 +130,7 @@ "type": "invocation", "data": { "id": "c41e705b-f2e3-4d1a-83c4-e34bb9344966", - "version": "1.0.2", + "version": "1.0.3", "label": "", "notes": "", "type": "lora_loader", @@ -132,23 +163,51 @@ } }, { - "id": "c3fa6872-2599-4a82-a596-b3446a66cf8b", + "id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818", "type": "invocation", "data": { - "id": "c3fa6872-2599-4a82-a596-b3446a66cf8b", - "version": "1.1.1", + "id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818", + "version": "1.0.3", + "label": "", + "notes": "", + "type": "main_model_loader", + "inputs": { + "model": { + "name": "model", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": 2500, + "y": -600 + } + }, + { + "id": "85b77bb2-c67a-416a-b3e8-291abe746c44", + "type": "invocation", + "data": { + "id": "85b77bb2-c67a-416a-b3e8-291abe746c44", + "version": "1.2.0", "label": "", "notes": "", "type": "compel", "inputs": { "prompt": { "name": "prompt", - "label": "Positive Prompt", - "value": "super cute tiger cub" + "label": "Negative Prompt", + "value": "" }, "clip": { "name": "clip", "label": "" + }, + "mask": { + "name": "mask", + "label": "" } }, "isOpen": true, @@ -157,7 +216,7 @@ }, "position": { "x": 3425, - "y": -575 + "y": -300 } }, { @@ -315,52 +374,6 @@ "x": 3425, "y": 0 } - }, - { - "id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400", - "type": "invocation", - "data": { - "id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400", - "version": "1.2.2", - "label": "", - "notes": "", - "type": "l2i", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "latents": { - "name": "latents", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": false - } - }, - "isOpen": true, - "isIntermediate": false, - "useCache": true - }, - "position": { - "x": 4450, - "y": -550 - } } ], "edges": [ diff --git a/invokeai/app/services/workflow_records/default_workflows/Tiled Upscaling (Beta).json b/invokeai/app/services/workflow_records/default_workflows/Tiled Upscaling (Beta).json index bb0e9062e4..426fe49c41 100644 --- a/invokeai/app/services/workflow_records/default_workflows/Tiled Upscaling (Beta).json +++ b/invokeai/app/services/workflow_records/default_workflows/Tiled Upscaling (Beta).json @@ -2,7 +2,7 @@ "name": "Tiled Upscaling (Beta)", "author": "Invoke", "description": "A workflow to upscale an input image with tiled upscaling. 
", - "version": "2.0.0", + "version": "2.1.0", "contact": "invoke@invoke.ai", "tags": "tiled, upscaling, sd1.5", "notes": "", @@ -41,10 +41,318 @@ } ], "meta": { - "category": "default", - "version": "3.0.0" + "version": "3.0.0", + "category": "default" }, "nodes": [ + { + "id": "2ff466b8-5e2a-4d8f-923a-a3884c7ecbc5", + "type": "invocation", + "data": { + "id": "2ff466b8-5e2a-4d8f-923a-a3884c7ecbc5", + "version": "1.0.3", + "label": "", + "notes": "", + "type": "main_model_loader", + "inputs": { + "model": { + "name": "model", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -4514.466823162653, + "y": -1235.7908800002283 + } + }, + { + "id": "287f134f-da8d-41d1-884e-5940e8f7b816", + "type": "invocation", + "data": { + "id": "287f134f-da8d-41d1-884e-5940e8f7b816", + "version": "1.4.1", + "label": "", + "notes": "", + "type": "ip_adapter", + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "ip_adapter_model": { + "name": "ip_adapter_model", + "label": "IP-Adapter Model (select ip_adapter_sd15)", + "value": { + "key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e", + "hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5", + "name": "ip_adapter_sd15", + "base": "sd-1", + "type": "ip_adapter" + } + }, + "clip_vision_model": { + "name": "clip_vision_model", + "label": "", + "value": "ViT-H" + }, + "weight": { + "name": "weight", + "label": "", + "value": 0.2 + }, + "method": { + "name": "method", + "label": "", + "value": "full" + }, + "begin_step_percent": { + "name": "begin_step_percent", + "label": "", + "value": 0 + }, + "end_step_percent": { + "name": "end_step_percent", + "label": "", + "value": 1 + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -2855.8555540799207, + "y": -183.58854843775742 + } + }, + { + "id": "b76fe66f-7884-43ad-b72c-fadc81d7a73c", + "type": "invocation", + "data": { + "id": "b76fe66f-7884-43ad-b72c-fadc81d7a73c", + "version": "1.3.0", + "label": "", + "notes": "", + "type": "l2i", + "inputs": { + "board": { + "name": "board", + "label": "" + }, + "metadata": { + "name": "metadata", + "label": "" + }, + "latents": { + "name": "latents", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": false + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -1999.770193862987, + "y": -1075 + } + }, + { + "id": "d334f2da-016a-4524-9911-bdab85546888", + "type": "invocation", + "data": { + "id": "d334f2da-016a-4524-9911-bdab85546888", + "version": "1.1.2", + "label": "", + "notes": "", + "type": "controlnet", + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "control_model": { + "name": "control_model", + "label": "Control Model (select contro_v11f1e_sd15_tile)", + "value": { + "key": "773843c8-db1f-4502-8f65-59782efa7960", + "hash": "blake3:f0812e13758f91baf4e54b7dbb707b70642937d3b2098cd2b94cc36d3eba308e", + "name": "control_v11f1e_sd15_tile", + "base": "sd-1", + "type": "controlnet" + } + }, + "control_weight": { + "name": "control_weight", + "label": "", + "value": 1 + }, + "begin_step_percent": { + "name": "begin_step_percent", + "label": "", + "value": 0 + }, + "end_step_percent": { + "name": "end_step_percent", 
+ "label": "Structural Control", + "value": 1 + }, + "control_mode": { + "name": "control_mode", + "label": "", + "value": "more_control" + }, + "resize_mode": { + "name": "resize_mode", + "label": "", + "value": "just_resize" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -2481.9569385477016, + "y": -181.06590482739782 + } + }, + { + "id": "338b883c-3728-4f18-b3a6-6e7190c2f850", + "type": "invocation", + "data": { + "id": "338b883c-3728-4f18-b3a6-6e7190c2f850", + "version": "1.1.0", + "label": "", + "notes": "", + "type": "i2l", + "inputs": { + "image": { + "name": "image", + "label": "" + }, + "vae": { + "name": "vae", + "label": "" + }, + "tiled": { + "name": "tiled", + "label": "", + "value": false + }, + "tile_size": { + "name": "tile_size", + "label": "", + "value": 0 + }, + "fp32": { + "name": "fp32", + "label": "", + "value": false + } + }, + "isOpen": false, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -2908.4791167517287, + "y": -408.87504820159086 + } + }, + { + "id": "947c3f88-0305-4695-8355-df4abac64b1c", + "type": "invocation", + "data": { + "id": "947c3f88-0305-4695-8355-df4abac64b1c", + "version": "1.2.0", + "label": "", + "notes": "", + "type": "compel", + "inputs": { + "prompt": { + "name": "prompt", + "label": "", + "value": "" + }, + "clip": { + "name": "clip", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -4014.4136788915944, + "y": -968.5677253775948 + } + }, + { + "id": "9b2d8c58-ce8f-4162-a5a1-48de854040d6", + "type": "invocation", + "data": { + "id": "9b2d8c58-ce8f-4162-a5a1-48de854040d6", + "version": "1.2.0", + "label": "", + "notes": "", + "type": "compel", + "inputs": { + "prompt": { + "name": "prompt", + "label": "Positive Prompt", + "value": "" + }, + "clip": { + "name": "clip", + "label": "" + }, + "mask": { + "name": "mask", + "label": "" + } + }, + "isOpen": true, + "isIntermediate": true, + "useCache": true + }, + "position": { + "x": -4014.4136788915944, + "y": -1243.5677253775948 + } + }, { "id": "b875cae6-d8a3-4fdc-b969-4d53cbd03f9a", "type": "invocation", @@ -181,64 +489,6 @@ "y": 3.422855503409039 } }, - { - "id": "9b2d8c58-ce8f-4162-a5a1-48de854040d6", - "type": "invocation", - "data": { - "id": "9b2d8c58-ce8f-4162-a5a1-48de854040d6", - "version": "1.1.1", - "label": "", - "notes": "", - "type": "compel", - "inputs": { - "prompt": { - "name": "prompt", - "label": "Positive Prompt", - "value": "" - }, - "clip": { - "name": "clip", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": -4014.4136788915944, - "y": -1243.5677253775948 - } - }, - { - "id": "947c3f88-0305-4695-8355-df4abac64b1c", - "type": "invocation", - "data": { - "id": "947c3f88-0305-4695-8355-df4abac64b1c", - "version": "1.1.1", - "label": "", - "notes": "", - "type": "compel", - "inputs": { - "prompt": { - "name": "prompt", - "label": "", - "value": "" - }, - "clip": { - "name": "clip", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": -4014.4136788915944, - "y": -968.5677253775948 - } - }, { "id": "b3513fed-ed42-408d-b382-128fdb0de523", "type": "invocation", @@ -379,104 +629,6 @@ "y": -29.08699277598673 } }, - { - "id": "338b883c-3728-4f18-b3a6-6e7190c2f850", - "type": "invocation", - "data": { - "id": "338b883c-3728-4f18-b3a6-6e7190c2f850", - "version": "1.0.2", - 
"label": "", - "notes": "", - "type": "i2l", - "inputs": { - "image": { - "name": "image", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": false - } - }, - "isOpen": false, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": -2908.4791167517287, - "y": -408.87504820159086 - } - }, - { - "id": "d334f2da-016a-4524-9911-bdab85546888", - "type": "invocation", - "data": { - "id": "d334f2da-016a-4524-9911-bdab85546888", - "version": "1.1.1", - "label": "", - "notes": "", - "type": "controlnet", - "inputs": { - "image": { - "name": "image", - "label": "" - }, - "control_model": { - "name": "control_model", - "label": "Control Model (select contro_v11f1e_sd15_tile)", - "value": { - "key": "773843c8-db1f-4502-8f65-59782efa7960", - "hash": "blake3:f0812e13758f91baf4e54b7dbb707b70642937d3b2098cd2b94cc36d3eba308e", - "name": "control_v11f1e_sd15_tile", - "base": "sd-1", - "type": "controlnet" - } - }, - "control_weight": { - "name": "control_weight", - "label": "", - "value": 1 - }, - "begin_step_percent": { - "name": "begin_step_percent", - "label": "", - "value": 0 - }, - "end_step_percent": { - "name": "end_step_percent", - "label": "Structural Control", - "value": 1 - }, - "control_mode": { - "name": "control_mode", - "label": "", - "value": "more_control" - }, - "resize_mode": { - "name": "resize_mode", - "label": "", - "value": "just_resize" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": -2481.9569385477016, - "y": -181.06590482739782 - } - }, { "id": "1011539e-85de-4e02-a003-0b22358491b8", "type": "invocation", @@ -563,52 +715,6 @@ "y": -1006.415909408244 } }, - { - "id": "b76fe66f-7884-43ad-b72c-fadc81d7a73c", - "type": "invocation", - "data": { - "id": "b76fe66f-7884-43ad-b72c-fadc81d7a73c", - "version": "1.2.2", - "label": "", - "notes": "", - "type": "l2i", - "inputs": { - "board": { - "name": "board", - "label": "" - }, - "metadata": { - "name": "metadata", - "label": "" - }, - "latents": { - "name": "latents", - "label": "" - }, - "vae": { - "name": "vae", - "label": "" - }, - "tiled": { - "name": "tiled", - "label": "", - "value": false - }, - "fp32": { - "name": "fp32", - "label": "", - "value": false - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": -1999.770193862987, - "y": -1075 - } - }, { "id": "ab6f5dda-4b60-4ddf-99f2-f61fb5937527", "type": "invocation", @@ -779,56 +885,6 @@ "y": -78.2819050861178 } }, - { - "id": "287f134f-da8d-41d1-884e-5940e8f7b816", - "type": "invocation", - "data": { - "id": "287f134f-da8d-41d1-884e-5940e8f7b816", - "version": "1.2.2", - "label": "", - "notes": "", - "type": "ip_adapter", - "inputs": { - "image": { - "name": "image", - "label": "" - }, - "ip_adapter_model": { - "name": "ip_adapter_model", - "label": "IP-Adapter Model (select ip_adapter_sd15)", - "value": { - "key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e", - "hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5", - "name": "ip_adapter_sd15", - "base": "sd-1", - "type": "ip_adapter" - } - }, - "weight": { - "name": "weight", - "label": "", - "value": 0.2 - }, - "begin_step_percent": { - "name": "begin_step_percent", - "label": "", - "value": 0 - }, - "end_step_percent": { - "name": "end_step_percent", - "label": "", - "value": 1 - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - 
"position": { - "x": -2855.8555540799207, - "y": -183.58854843775742 - } - }, { "id": "1f86c8bf-06f9-4e28-abee-02f46f445ac4", "type": "invocation", @@ -899,30 +955,6 @@ "y": -41.810810454906914 } }, - { - "id": "2ff466b8-5e2a-4d8f-923a-a3884c7ecbc5", - "type": "invocation", - "data": { - "id": "2ff466b8-5e2a-4d8f-923a-a3884c7ecbc5", - "version": "1.0.2", - "label": "", - "notes": "", - "type": "main_model_loader", - "inputs": { - "model": { - "name": "model", - "label": "" - } - }, - "isOpen": true, - "isIntermediate": true, - "useCache": true - }, - "position": { - "x": -4514.466823162653, - "y": -1235.7908800002283 - } - }, { "id": "f5d9bf3b-2646-4b17-9894-20fd2b4218ea", "type": "invocation", From 28e79c4c5ef179d15ddbdea6970898b340da9a80 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:02:08 +1000 Subject: [PATCH 33/38] chore: ruff Looks like an upstream change to ruff resulted in this file being a violation. --- invokeai/backend/image_util/lineart_anime.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/image_util/lineart_anime.py b/invokeai/backend/image_util/lineart_anime.py index 5185d92c51..33d16bb361 100644 --- a/invokeai/backend/image_util/lineart_anime.py +++ b/invokeai/backend/image_util/lineart_anime.py @@ -98,7 +98,7 @@ class UnetSkipConnectionBlock(nn.Module): """ super(UnetSkipConnectionBlock, self).__init__() self.outermost = outermost - if type(norm_layer) == functools.partial: + if isinstance(norm_layer, functools.partial): use_bias = norm_layer.func == nn.InstanceNorm2d else: use_bias = norm_layer == nn.InstanceNorm2d From 24bf1ea65aced337b13a5c8f3fbc28d7ca9f9d4d Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 15 Jul 2024 11:52:40 +1000 Subject: [PATCH 34/38] fix(ui): boards cut off when search open --- .../Boards/BoardsList/BoardsList.tsx | 86 ++++++++++--------- .../components/ImageGalleryContent.tsx | 25 +++--- 2 files changed, 60 insertions(+), 51 deletions(-) diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx index bd4c42e8d1..4325281e0f 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx @@ -1,4 +1,4 @@ -import { Flex, Text } from '@invoke-ai/ui-library'; +import { Box, Flex, Text } from '@invoke-ai/ui-library'; import { EMPTY_ARRAY } from 'app/store/constants'; import { useAppSelector } from 'app/store/storeHooks'; import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants'; @@ -40,9 +40,41 @@ const BoardsList = () => { return ( <> - - - {allowPrivateBoards && ( + + + + {allowPrivateBoards && ( + + + + {t('boards.private')} + + + + + + {filteredPrivateBoards.map((board) => ( + + ))} + + + )} { justifyContent="space-between" alignItems="center" ps={2} - py={1} + pb={1} + pt={2} zIndex={1} top={0} bg="base.900" > - {t('boards.private')} + {allowPrivateBoards ? t('boards.shared') : t('boards.boards')} - + - - {filteredPrivateBoards.map((board) => ( + {!allowPrivateBoards && } + {filteredSharedBoards.map((board) => ( { ))} - )} - - - - {allowPrivateBoards ? 
t('boards.shared') : t('boards.boards')} - - - - - {!allowPrivateBoards && } - {filteredSharedBoards.map((board) => ( - - ))} - - - - + + + ); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx index 7c992c65d6..5a096f5cef 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx @@ -16,6 +16,7 @@ import { GalleryHeader } from 'features/gallery/components/GalleryHeader'; import { galleryViewChanged } from 'features/gallery/store/gallerySlice'; import ResizeHandle from 'features/ui/components/tabs/ResizeHandle'; import { usePanel, type UsePanelOptions } from 'features/ui/hooks/usePanel'; +import type { CSSProperties } from 'react'; import { memo, useCallback, useMemo, useRef } from 'react'; import { useTranslation } from 'react-i18next'; import { PiMagnifyingGlassBold } from 'react-icons/pi'; @@ -29,13 +30,15 @@ import GalleryImageGrid from './ImageGrid/GalleryImageGrid'; import { GalleryPagination } from './ImageGrid/GalleryPagination'; import { GallerySearch } from './ImageGrid/GallerySearch'; -const baseStyles: ChakraProps['sx'] = { +const COLLAPSE_STYLES: CSSProperties = { flexShrink: 0, minHeight: 0 }; + +const BASE_STYLES: ChakraProps['sx'] = { fontWeight: 'semibold', fontSize: 'sm', color: 'base.300', }; -const selectedStyles: ChakraProps['sx'] = { +const SELECTED_STYLES: ChakraProps['sx'] = { borderColor: 'base.800', borderBottomColor: 'base.900', color: 'invokeBlue.300', @@ -110,11 +113,13 @@ const ImageGalleryContent = () => { onExpand={boardsListPanel.onExpand} collapsible > - - - - - + + + + + + + { - + {t('parameters.images')} - + {t('gallery.assets')} @@ -157,7 +162,7 @@ const ImageGalleryContent = () => { - + From 5a0c99816c5216e060d23e1a76fd50f9d85f541f Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 15 Jul 2024 11:55:22 +1000 Subject: [PATCH 35/38] chore: bump version to v4.2.6 --- invokeai/version/invokeai_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/version/invokeai_version.py b/invokeai/version/invokeai_version.py index 021e20d448..1eecfc4998 100644 --- a/invokeai/version/invokeai_version.py +++ b/invokeai/version/invokeai_version.py @@ -1 +1 @@ -__version__ = "4.2.6rc1" +__version__ = "4.2.6" From 38343917f8c8bff9fe5b13a7d4069e27177dbc39 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 16 Jul 2024 07:05:29 +1000 Subject: [PATCH 36/38] fix(backend): revert non-blocking device transfer In #6490 we enabled non-blocking torch device transfers throughout the model manager's memory management code. When using this torch feature, torch attempts to wait until the tensor transfer has completed before allowing any access to the tensor. Theoretically, that should make this a safe feature to use. This provides a small performance improvement but causes race conditions in some situations. Specific platforms/systems are affected, and complicated data dependencies can make this unsafe. - Intermittent black images on MPS devices - reported on discord and #6545, fixed with special handling in #6549. - Intermittent OOMs and black images on a P4000 GPU on Windows - reported in #6613, fixed in this commit. 
On my system, I haven't experienced any issues with generation, but targeted testing of non-blocking ops did expose a race condition when moving tensors from CUDA to CPU. One workaround is to use torch streams with manual sync points. Our application logic is complicated enough that this would be a lot of work and feels ripe for edge cases and missed spots. Much safer is to fully revert non-blocking transfers, which is what this change does. --- invokeai/backend/ip_adapter/ip_adapter.py | 8 +- invokeai/backend/lora.py | 94 ++++++------------- .../load/model_cache/model_cache_default.py | 6 +- invokeai/backend/model_patcher.py | 15 +-- invokeai/backend/onnx/onnx_runtime.py | 7 +- invokeai/backend/raw_model.py | 7 +- invokeai/backend/textual_inversion.py | 9 +- invokeai/backend/util/devices.py | 12 --- 8 files changed, 43 insertions(+), 115 deletions(-) diff --git a/invokeai/backend/ip_adapter/ip_adapter.py b/invokeai/backend/ip_adapter/ip_adapter.py index 75286f4733..87ce029a87 100644 --- a/invokeai/backend/ip_adapter/ip_adapter.py +++ b/invokeai/backend/ip_adapter/ip_adapter.py @@ -124,16 +124,14 @@ class IPAdapter(RawModel): self.device, dtype=self.dtype ) - def to( - self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, non_blocking: bool = False - ): + def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None): if device is not None: self.device = device if dtype is not None: self.dtype = dtype - self._image_proj_model.to(device=self.device, dtype=self.dtype, non_blocking=non_blocking) - self.attn_weights.to(device=self.device, dtype=self.dtype, non_blocking=non_blocking) + self._image_proj_model.to(device=self.device, dtype=self.dtype) + self.attn_weights.to(device=self.device, dtype=self.dtype) def calc_size(self) -> int: # HACK(ryand): Fix this issue with circular imports.
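For readers who want to see the failure mode concretely, here is a minimal sketch of the CUDA-to-CPU race described in the commit message, together with the manual sync point that makes it safe. This is illustrative only (it assumes a CUDA device is available; none of these names come from the patch):

    import torch

    # A GPU -> CPU copy issued with non_blocking=True returns before the data
    # has actually landed in host memory, so the CPU tensor may hold stale or
    # partially-copied values if it is read immediately.
    src = torch.ones(64, 1024, 1024, device="cuda") * 2.0  # queued GPU work
    dst = src.to("cpu", non_blocking=True)  # async copy; returns immediately
    # Reading dst at this point races with the in-flight transfer.

    torch.cuda.current_stream().synchronize()  # manual sync point
    assert torch.equal(dst, torch.full((64, 1024, 1024), 2.0))  # safe after sync

Guarding every host-side read with a synchronize like this is exactly the bookkeeping the message above calls ripe for edge cases, which is why the blocking revert in the hunks below is the simpler fix.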
diff --git a/invokeai/backend/lora.py b/invokeai/backend/lora.py
index 9c669a4c78..8ef81915f1 100644
--- a/invokeai/backend/lora.py
+++ b/invokeai/backend/lora.py
@@ -11,7 +11,6 @@ from typing_extensions import Self
 
 from invokeai.backend.model_manager import BaseModelType
 from invokeai.backend.raw_model import RawModel
-from invokeai.backend.util.devices import TorchDevice
 
 
 class LoRALayerBase:
@@ -57,14 +56,9 @@ class LoRALayerBase:
                 model_size += val.nelement() * val.element_size()
         return model_size
 
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ) -> None:
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         if self.bias is not None:
-            self.bias = self.bias.to(device=device, dtype=dtype, non_blocking=non_blocking)
+            self.bias = self.bias.to(device=device, dtype=dtype)
 
 
 # TODO: find and debug lora/locon with bias
@@ -106,19 +100,14 @@ class LoRALayer(LoRALayerBase):
                 model_size += val.nelement() * val.element_size()
         return model_size
 
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ) -> None:
-        super().to(device=device, dtype=dtype, non_blocking=non_blocking)
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
+        super().to(device=device, dtype=dtype)
 
-        self.up = self.up.to(device=device, dtype=dtype, non_blocking=non_blocking)
-        self.down = self.down.to(device=device, dtype=dtype, non_blocking=non_blocking)
+        self.up = self.up.to(device=device, dtype=dtype)
+        self.down = self.down.to(device=device, dtype=dtype)
 
         if self.mid is not None:
-            self.mid = self.mid.to(device=device, dtype=dtype, non_blocking=non_blocking)
+            self.mid = self.mid.to(device=device, dtype=dtype)
 
 
 class LoHALayer(LoRALayerBase):
@@ -167,23 +156,18 @@ class LoHALayer(LoRALayerBase):
                 model_size += val.nelement() * val.element_size()
         return model_size
 
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ) -> None:
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         super().to(device=device, dtype=dtype)
 
-        self.w1_a = self.w1_a.to(device=device, dtype=dtype, non_blocking=non_blocking)
-        self.w1_b = self.w1_b.to(device=device, dtype=dtype, non_blocking=non_blocking)
+        self.w1_a = self.w1_a.to(device=device, dtype=dtype)
+        self.w1_b = self.w1_b.to(device=device, dtype=dtype)
         if self.t1 is not None:
-            self.t1 = self.t1.to(device=device, dtype=dtype, non_blocking=non_blocking)
+            self.t1 = self.t1.to(device=device, dtype=dtype)
 
-        self.w2_a = self.w2_a.to(device=device, dtype=dtype, non_blocking=non_blocking)
-        self.w2_b = self.w2_b.to(device=device, dtype=dtype, non_blocking=non_blocking)
+        self.w2_a = self.w2_a.to(device=device, dtype=dtype)
+        self.w2_b = self.w2_b.to(device=device, dtype=dtype)
         if self.t2 is not None:
-            self.t2 = self.t2.to(device=device, dtype=dtype, non_blocking=non_blocking)
+            self.t2 = self.t2.to(device=device, dtype=dtype)
 
 
 class LoKRLayer(LoRALayerBase):
@@ -264,12 +248,7 @@ class LoKRLayer(LoRALayerBase):
                 model_size += val.nelement() * val.element_size()
         return model_size
 
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ) -> None:
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         super().to(device=device, dtype=dtype)
 
         if self.w1 is not None:
@@ -277,19 +256,19 @@ class LoKRLayer(LoRALayerBase):
         else:
             assert self.w1_a is not None
             assert self.w1_b is not None
-            self.w1_a = self.w1_a.to(device=device, dtype=dtype, non_blocking=non_blocking)
-            self.w1_b = self.w1_b.to(device=device, dtype=dtype, non_blocking=non_blocking)
+            self.w1_a = self.w1_a.to(device=device, dtype=dtype)
+            self.w1_b = self.w1_b.to(device=device, dtype=dtype)
 
         if self.w2 is not None:
-            self.w2 = self.w2.to(device=device, dtype=dtype, non_blocking=non_blocking)
+            self.w2 = self.w2.to(device=device, dtype=dtype)
         else:
             assert self.w2_a is not None
             assert self.w2_b is not None
-            self.w2_a = self.w2_a.to(device=device, dtype=dtype, non_blocking=non_blocking)
-            self.w2_b = self.w2_b.to(device=device, dtype=dtype, non_blocking=non_blocking)
+            self.w2_a = self.w2_a.to(device=device, dtype=dtype)
+            self.w2_b = self.w2_b.to(device=device, dtype=dtype)
 
         if self.t2 is not None:
-            self.t2 = self.t2.to(device=device, dtype=dtype, non_blocking=non_blocking)
+            self.t2 = self.t2.to(device=device, dtype=dtype)
 
 
 class FullLayer(LoRALayerBase):
@@ -319,15 +298,10 @@ class FullLayer(LoRALayerBase):
         model_size += self.weight.nelement() * self.weight.element_size()
         return model_size
 
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ) -> None:
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         super().to(device=device, dtype=dtype)
 
-        self.weight = self.weight.to(device=device, dtype=dtype, non_blocking=non_blocking)
+        self.weight = self.weight.to(device=device, dtype=dtype)
 
 
 class IA3Layer(LoRALayerBase):
@@ -359,16 +333,11 @@ class IA3Layer(LoRALayerBase):
         model_size += self.on_input.nelement() * self.on_input.element_size()
         return model_size
 
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ):
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
         super().to(device=device, dtype=dtype)
 
-        self.weight = self.weight.to(device=device, dtype=dtype, non_blocking=non_blocking)
-        self.on_input = self.on_input.to(device=device, dtype=dtype, non_blocking=non_blocking)
+        self.weight = self.weight.to(device=device, dtype=dtype)
+        self.on_input = self.on_input.to(device=device, dtype=dtype)
 
 
 AnyLoRALayer = Union[LoRALayer, LoHALayer, LoKRLayer, FullLayer, IA3Layer]
@@ -390,15 +359,10 @@ class LoRAModelRaw(RawModel):  # (torch.nn.Module):
     def name(self) -> str:
         return self._name
 
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ) -> None:
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         # TODO: try revert if exception?
         for _key, layer in self.layers.items():
-            layer.to(device=device, dtype=dtype, non_blocking=non_blocking)
+            layer.to(device=device, dtype=dtype)
 
     def calc_size(self) -> int:
         model_size = 0
@@ -521,7 +485,7 @@ class LoRAModelRaw(RawModel):  # (torch.nn.Module):
                 # lower memory consumption by removing already parsed layer values
                 state_dict[layer_key].clear()
 
-                layer.to(device=device, dtype=dtype, non_blocking=TorchDevice.get_non_blocking(device))
+                layer.to(device=device, dtype=dtype)
                 model.layers[layer_key] = layer
 
         return model
diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
index 9027b7b5b7..e69201e739 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
@@ -289,11 +289,9 @@ class ModelCache(ModelCacheBase[AnyModel]):
             else:
                 new_dict: Dict[str, torch.Tensor] = {}
                 for k, v in cache_entry.state_dict.items():
-                    new_dict[k] = v.to(
-                        target_device, copy=True, non_blocking=TorchDevice.get_non_blocking(target_device)
-                    )
+                    new_dict[k] = v.to(target_device, copy=True)
                 cache_entry.model.load_state_dict(new_dict, assign=True)
-            cache_entry.model.to(target_device, non_blocking=TorchDevice.get_non_blocking(target_device))
+            cache_entry.model.to(target_device)
             cache_entry.device = target_device
         except Exception as e:  # blow away cache entry
             self._delete_cache_entry(cache_entry)
diff --git a/invokeai/backend/model_patcher.py b/invokeai/backend/model_patcher.py
index 8c7a62c371..8b8aa6d5a5 100644
--- a/invokeai/backend/model_patcher.py
+++ b/invokeai/backend/model_patcher.py
@@ -139,15 +139,12 @@ class ModelPatcher:
                     # We intentionally move to the target device first, then cast. Experimentally, this was found to
                    # be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the
                     # same thing in a single call to '.to(...)'.
-                    layer.to(device=device, non_blocking=TorchDevice.get_non_blocking(device))
-                    layer.to(dtype=torch.float32, non_blocking=TorchDevice.get_non_blocking(device))
+                    layer.to(device=device)
+                    layer.to(dtype=torch.float32)
                     # TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA
                     # devices here. Experimentally, it was found to be very slow on CPU. More investigation needed.
                     layer_weight = layer.get_weight(module.weight) * (lora_weight * layer_scale)
-                    layer.to(
-                        device=TorchDevice.CPU_DEVICE,
-                        non_blocking=TorchDevice.get_non_blocking(TorchDevice.CPU_DEVICE),
-                    )
+                    layer.to(device=TorchDevice.CPU_DEVICE)
 
                     assert isinstance(layer_weight, torch.Tensor)  # mypy thinks layer_weight is a float|Any ??!
                     if module.weight.shape != layer_weight.shape:
@@ -156,7 +153,7 @@ class ModelPatcher:
                         layer_weight = layer_weight.reshape(module.weight.shape)
 
                     assert isinstance(layer_weight, torch.Tensor)  # mypy thinks layer_weight is a float|Any ??!
-                    module.weight += layer_weight.to(dtype=dtype, non_blocking=TorchDevice.get_non_blocking(device))
+                    module.weight += layer_weight.to(dtype=dtype)
 
             yield  # wait for context manager exit
 
@@ -164,9 +161,7 @@ class ModelPatcher:
             assert hasattr(model, "get_submodule")  # mypy not picking up fact that torch.nn.Module has get_submodule()
             with torch.no_grad():
                 for module_key, weight in original_weights.items():
-                    model.get_submodule(module_key).weight.copy_(
-                        weight, non_blocking=TorchDevice.get_non_blocking(weight.device)
-                    )
+                    model.get_submodule(module_key).weight.copy_(weight)
 
     @classmethod
     @contextmanager
diff --git a/invokeai/backend/onnx/onnx_runtime.py b/invokeai/backend/onnx/onnx_runtime.py
index d562a46dff..a8132d4b23 100644
--- a/invokeai/backend/onnx/onnx_runtime.py
+++ b/invokeai/backend/onnx/onnx_runtime.py
@@ -190,12 +190,7 @@ class IAIOnnxRuntimeModel(RawModel):
         return self.session.run(None, inputs)
 
     # compatability with RawModel ABC
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ) -> None:
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         pass
 
     # compatability with diffusers load code
diff --git a/invokeai/backend/raw_model.py b/invokeai/backend/raw_model.py
index 7bca6945d9..931804c985 100644
--- a/invokeai/backend/raw_model.py
+++ b/invokeai/backend/raw_model.py
@@ -20,10 +20,5 @@ class RawModel(ABC):
     """Abstract base class for 'Raw' model wrappers."""
 
     @abstractmethod
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ) -> None:
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         pass
diff --git a/invokeai/backend/textual_inversion.py b/invokeai/backend/textual_inversion.py
index 483f2da88c..0345478b97 100644
--- a/invokeai/backend/textual_inversion.py
+++ b/invokeai/backend/textual_inversion.py
@@ -65,17 +65,12 @@ class TextualInversionModelRaw(RawModel):
 
         return result
 
-    def to(
-        self,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
-        non_blocking: bool = False,
-    ) -> None:
+    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         if not torch.cuda.is_available():
             return
         for emb in [self.embedding, self.embedding_2]:
             if emb is not None:
-                emb.to(device=device, dtype=dtype, non_blocking=non_blocking)
+                emb.to(device=device, dtype=dtype)
 
     def calc_size(self) -> int:
         """Get the size of this model in bytes."""
diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py
index 1cba70c662..83ce055024 100644
--- a/invokeai/backend/util/devices.py
+++ b/invokeai/backend/util/devices.py
@@ -112,15 +112,3 @@ class TorchDevice:
     @classmethod
     def _to_dtype(cls, precision_name: TorchPrecisionNames) -> torch.dtype:
         return NAME_TO_PRECISION[precision_name]
-
-    @staticmethod
-    def get_non_blocking(to_device: torch.device) -> bool:
-        """Return the non_blocking flag to be used when moving a tensor to a given device.
-        MPS may have unexpected errors with non-blocking operations - we should not use non-blocking when moving _to_ MPS.
-        When moving _from_ MPS, we can use non-blocking operations.
-
-        See:
-        - https://github.com/pytorch/pytorch/issues/107455
-        - https://discuss.pytorch.org/t/should-we-set-non-blocking-to-true/38234/28
-        """
-        return False if to_device.type == "mps" else True

From 7905a46ca4001ebd748b91aea0ba04a790250ed5 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Tue, 16 Jul 2024 07:13:56 +1000
Subject: [PATCH 37/38] chore: bump version to 4.2.6post1

---
 invokeai/version/invokeai_version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/version/invokeai_version.py b/invokeai/version/invokeai_version.py
index 1eecfc4998..09545bce26 100644
--- a/invokeai/version/invokeai_version.py
+++ b/invokeai/version/invokeai_version.py
@@ -1 +1 @@
-__version__ = "4.2.6"
+__version__ = "4.2.6post1"

From f866b49255420cf43e625fc42838dabb5673a908 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Fri, 12 Jul 2024 16:28:31 -0400
Subject: [PATCH 38/38] Add some ESRGAN and SwinIR upscale models to the
 starter models list.

---
 .../backend/model_manager/starter_models.py   | 37 +++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/invokeai/backend/model_manager/starter_models.py b/invokeai/backend/model_manager/starter_models.py
index a397a2a5dc..2a860eeac1 100644
--- a/invokeai/backend/model_manager/starter_models.py
+++ b/invokeai/backend/model_manager/starter_models.py
@@ -399,6 +399,43 @@ STARTER_MODELS: list[StarterModel] = [
         type=ModelType.T2IAdapter,
     ),
     # endregion
+    # region SpandrelImageToImage
+    StarterModel(
+        name="RealESRGAN_x4plus_anime_6B",
+        base=BaseModelType.Any,
+        source="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
+        description="A Real-ESRGAN 4x upscaling model (optimized for anime images).",
+        type=ModelType.SpandrelImageToImage,
+    ),
+    StarterModel(
+        name="RealESRGAN_x4plus",
+        base=BaseModelType.Any,
+        source="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
+        description="A Real-ESRGAN 4x upscaling model (general-purpose).",
+        type=ModelType.SpandrelImageToImage,
+    ),
+    StarterModel(
+        name="ESRGAN_SRx4_DF2KOST_official",
+        base=BaseModelType.Any,
+        source="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
+        description="The official ESRGAN 4x upscaling model.",
+        type=ModelType.SpandrelImageToImage,
+    ),
+    StarterModel(
+        name="RealESRGAN_x2plus",
+        base=BaseModelType.Any,
+        source="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
+        description="A Real-ESRGAN 2x upscaling model (general-purpose).",
+        type=ModelType.SpandrelImageToImage,
+    ),
+    StarterModel(
+        name="SwinIR - realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN",
+        base=BaseModelType.Any,
+        source="https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN-with-dict-keys-params-and-params_ema.pth",
+        description="A SwinIR 4x upscaling model.",
+        type=ModelType.SpandrelImageToImage,
+    ),
+    # endregion
 ]
 
 assert len(STARTER_MODELS) == len({m.source for m in STARTER_MODELS}), "Duplicate starter models"
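
For reference, the SpandrelImageToImage starter models listed above can be exercised
directly with the spandrel library. The snippet below is a minimal sketch of loading
one of these checkpoints and running it on an image tensor; it follows spandrel's
documented ModelLoader API, but the file path and tensor shapes are illustrative
assumptions, not code from these patches.

    import torch
    from spandrel import ImageModelDescriptor, ModelLoader

    # Load a checkpoint downloaded from one of the starter-model source URLs above.
    # The path is a placeholder; point it at wherever the .pth file was saved.
    model = ModelLoader().load_from_file("RealESRGAN_x4plus.pth")
    assert isinstance(model, ImageModelDescriptor)  # image-to-image models only
    model.eval()

    # Spandrel image models consume and produce NCHW float tensors in [0, 1].
    image = torch.rand(1, 3, 64, 64)
    with torch.no_grad():
        upscaled = model(image)  # a 4x model yields a (1, 3, 256, 256) tensor

The upscale factor is exposed as model.scale (4 for the x4 models above, 2 for
RealESRGAN_x2plus), which is useful for pre-allocating outputs or tiling large
images before upscaling.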