Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields.
Use cursor arrows to make a checkbox selection, and space to toggle.
"""
@@ -376,15 +394,47 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
max_width=80,
scroll_exit=True,
)
- self.max_cache_size = self.add_widget_intelligent(
- IntTitleSlider,
- name="Size of the RAM cache used for fast model switching (GB)",
- value=old_opts.max_cache_size,
- out_of=20,
- lowest=3,
- begin_entry_at=6,
+ self.nextrely += 1
+ self.add_widget_intelligent(
+ npyscreen.TitleFixedText,
+ name="RAM cache size (GB). Make this at least large enough to hold a single full model.",
+ begin_entry_at=0,
+ editable=False,
+ color="CONTROL",
scroll_exit=True,
)
+ self.nextrely -= 1
+ self.max_cache_size = self.add_widget_intelligent(
+ npyscreen.Slider,
+ value=clip(old_opts.max_cache_size, range=(3.0, MAX_RAM), step=0.5),
+ out_of=round(MAX_RAM),
+ lowest=0.0,
+ step=0.5,
+ relx=8,
+ scroll_exit=True,
+ )
+ if HAS_CUDA:
+ self.nextrely += 1
+ self.add_widget_intelligent(
+ npyscreen.TitleFixedText,
+ name="VRAM cache size (GB). Reserving a small amount of VRAM will modestly speed up the start of image generation.",
+ begin_entry_at=0,
+ editable=False,
+ color="CONTROL",
+ scroll_exit=True,
+ )
+ self.nextrely -= 1
+ self.max_vram_cache_size = self.add_widget_intelligent(
+ npyscreen.Slider,
+ value=clip(old_opts.max_vram_cache_size, range=(0, MAX_VRAM), step=0.25),
+ out_of=round(MAX_VRAM * 2) / 2,
+ lowest=0.0,
+ relx=8,
+ step=0.25,
+ scroll_exit=True,
+ )
+ else:
+ self.max_vram_cache_size = DummyWidgetValue.zero
self.nextrely += 1
self.outdir = self.add_widget_intelligent(
FileBox,
@@ -401,7 +451,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
self.autoimport_dirs = {}
self.autoimport_dirs["autoimport_dir"] = self.add_widget_intelligent(
FileBox,
- name=f"Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models",
+ name="Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models",
value=str(config.root_path / config.autoimport_dir),
select_dir=True,
must_exist=False,
@@ -476,6 +526,7 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
"outdir",
"free_gpu_mem",
"max_cache_size",
+ "max_vram_cache_size",
"xformers_enabled",
"always_use_cpu",
]:
@@ -553,6 +604,16 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
)
+# -------------------------------------
+def clip(value: float, range: tuple[float, float], step: float) -> float:
+ minimum, maximum = range
+ if value < minimum:
+ value = minimum
+ if value > maximum:
+ value = maximum
+ return round(value / step) * step
+
+
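A minimal usage sketch of the clip() helper added above (the numeric values are illustrative, not taken from the installer):

    # Illustrative only: clip() clamps to the given range and snaps to the nearest step.
    assert clip(2.0, range=(3.0, 20.0), step=0.5) == 3.0    # below the minimum: clamped up to 3.0
    assert clip(7.3, range=(3.0, 20.0), step=0.5) == 7.5    # in range: rounded to the nearest 0.5
    assert clip(25.0, range=(3.0, 20.0), step=0.5) == 20.0  # above the maximum: clamped down to 20.0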
# -------------------------------------
def initialize_rootdir(root: Path, yes_to_all: bool = False):
logger.info("Initializing InvokeAI runtime directory")
@@ -592,13 +653,13 @@ def maybe_create_models_yaml(root: Path):
# -------------------------------------
def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace, Namespace):
- # parse_args() will read from init file if present
invokeai_opts = default_startup_options(initfile)
invokeai_opts.root = program_opts.root
- # The third argument is needed in the Windows 11 environment to
- # launch a console window running this program.
- set_min_terminal_size(MIN_COLS, MIN_LINES)
+ if not set_min_terminal_size(MIN_COLS, MIN_LINES):
+ raise WindowTooSmallException(
+ "Could not increase terminal size. Try running again with a larger window or smaller font size."
+ )
# the install-models application spawns a subprocess to install
# models, and will crash unless this is set before running.
@@ -654,10 +715,13 @@ def migrate_init_file(legacy_format: Path):
old = legacy_parser.parse_args([f"@{str(legacy_format)}"])
new = InvokeAIAppConfig.get_config()
- fields = list(get_type_hints(InvokeAIAppConfig).keys())
+ fields = [x for x, y in InvokeAIAppConfig.__fields__.items() if y.field_info.extra.get("category") != "DEPRECATED"]
for attr in fields:
if hasattr(old, attr):
- setattr(new, attr, getattr(old, attr))
+ try:
+ setattr(new, attr, getattr(old, attr))
+ except ValidationError as e:
+ print(f"* Ignoring incompatible value for field {attr}:\n {str(e)}")
# a few places where the field names have changed and we have to
# manually add in the new names/values
@@ -777,6 +841,7 @@ def main():
models_to_download = default_user_selections(opt)
new_init_file = config.root_path / "invokeai.yaml"
+
if opt.yes_to_all:
write_default_options(opt, new_init_file)
init_options = Namespace(precision="float32" if opt.full_precision else "float16")
@@ -802,6 +867,8 @@ def main():
postscript(errors=errors)
if not opt.yes_to_all:
input("Press any key to continue...")
+ except WindowTooSmallException as e:
+ logger.error(str(e))
except KeyboardInterrupt:
print("\nGoodbye! Come back soon.")
diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py
index 9152e46951..e53f211647 100644
--- a/invokeai/backend/install/migrate_to_3.py
+++ b/invokeai/backend/install/migrate_to_3.py
@@ -116,7 +116,7 @@ class MigrateTo3(object):
appropriate location within the destination models directory.
"""
directories_scanned = set()
- for root, dirs, files in os.walk(src_dir):
+ for root, dirs, files in os.walk(src_dir, followlinks=True):
for d in dirs:
try:
model = Path(root, d)
@@ -525,7 +525,7 @@ def do_migrate(src_directory: Path, dest_directory: Path):
if version_3: # write into the dest directory
try:
shutil.copy(dest_directory / "configs" / "models.yaml", config_file)
- except:
+ except Exception:
MigrateTo3.initialize_yaml(config_file)
mgr = ModelManager(config_file) # important to initialize BEFORE moving the models directory
(dest_directory / "models").replace(dest_models)
@@ -553,7 +553,7 @@ def main():
parser = argparse.ArgumentParser(
prog="invokeai-migrate3",
description="""
-This will copy and convert the models directory and the configs/models.yaml from the InvokeAI 2.3 format
+This will copy and convert the models directory and the configs/models.yaml from the InvokeAI 2.3 format
'--from-directory' root to the InvokeAI 3.0 '--to-directory' root. These may be abbreviated '--from' and '--to'.a
The old models directory and config file will be renamed 'models.orig' and 'models.yaml.orig' respectively.
@@ -591,7 +591,6 @@ script, which will perform a full upgrade in place.""",
# TODO: revisit - don't rely on invokeai.yaml to exist yet!
dest_is_setup = (dest_root / "models/core").exists() and (dest_root / "databases").exists()
if not dest_is_setup:
- import invokeai.frontend.install.invokeai_configure
from invokeai.backend.install.invokeai_configure import initialize_rootdir
initialize_rootdir(dest_root, True)
diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py
index c0a7244367..e41783ab09 100644
--- a/invokeai/backend/install/model_install_backend.py
+++ b/invokeai/backend/install/model_install_backend.py
@@ -12,7 +12,7 @@ from typing import Optional, List, Dict, Callable, Union, Set
import requests
from diffusers import DiffusionPipeline
from diffusers import logging as dlogging
-import onnx
+import torch
from huggingface_hub import hf_hub_url, HfFolder, HfApi
from omegaconf import OmegaConf
from tqdm import tqdm
@@ -23,6 +23,7 @@ from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType, AddModelResult
from invokeai.backend.model_management.model_probe import ModelProbe, SchedulerPredictionType, ModelProbeInfo
from invokeai.backend.util import download_with_resume
+from invokeai.backend.util.devices import torch_dtype, choose_torch_device
from ..util.logging import InvokeAILogger
warnings.filterwarnings("ignore")
@@ -99,9 +100,9 @@ class ModelInstall(object):
def __init__(
self,
config: InvokeAIAppConfig,
- prediction_type_helper: Callable[[Path], SchedulerPredictionType] = None,
- model_manager: ModelManager = None,
- access_token: str = None,
+ prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None,
+ model_manager: Optional[ModelManager] = None,
+ access_token: Optional[str] = None,
):
self.config = config
self.mgr = model_manager or ModelManager(config.model_conf_path)
@@ -303,7 +304,7 @@ class ModelInstall(object):
with TemporaryDirectory(dir=self.config.models_path) as staging:
staging = Path(staging)
- if "model_index.json" in files and "unet/model.onnx" not in files:
+ if "model_index.json" in files:
location = self._download_hf_pipeline(repo_id, staging) # pipeline
elif "unet/model.onnx" in files:
location = self._download_hf_model(repo_id, files, staging)
@@ -416,15 +417,25 @@ class ModelInstall(object):
does a save_pretrained() to the indicated staging area.
"""
_, name = repo_id.split("/")
- revisions = ["fp16", "main"] if self.config.precision == "float16" else ["main"]
+ precision = torch_dtype(choose_torch_device())
+ variants = ["fp16", None] if precision == torch.float16 else [None, "fp16"]
+
model = None
- for revision in revisions:
+ for variant in variants:
try:
- model = DiffusionPipeline.from_pretrained(repo_id, revision=revision, safety_checker=None)
- except: # most errors are due to fp16 not being present. Fix this to catch other errors
- pass
+ model = DiffusionPipeline.from_pretrained(
+ repo_id,
+ variant=variant,
+ torch_dtype=precision,
+ safety_checker=None,
+ )
+ except Exception as e: # most errors are due to fp16 not being present. Fix this to catch other errors
+ if "fp16" not in str(e):
+ print(e)
+
if model:
break
+
if not model:
logger.error(f"Diffusers model {repo_id} could not be downloaded. Skipping.")
return None
diff --git a/invokeai/backend/model_management/__init__.py b/invokeai/backend/model_management/__init__.py
index cf057f3a89..6376585fe5 100644
--- a/invokeai/backend/model_management/__init__.py
+++ b/invokeai/backend/model_management/__init__.py
@@ -1,10 +1,10 @@
"""
Initialization file for invokeai.backend.model_management
"""
-from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType
-from .model_cache import ModelCache
-from .lora import ModelPatcher, ONNXModelPatcher
-from .models import (
+from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType # noqa: F401
+from .model_cache import ModelCache # noqa: F401
+from .lora import ModelPatcher, ONNXModelPatcher # noqa: F401
+from .models import ( # noqa: F401
BaseModelType,
ModelType,
SubModelType,
@@ -12,4 +12,4 @@ from .models import (
ModelNotFoundException,
DuplicateModelException,
)
-from .model_merge import ModelMerger, MergeInterpolationMethod
+from .model_merge import ModelMerger, MergeInterpolationMethod # noqa: F401
diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py
index 4287072a65..d0d8d4226c 100644
--- a/invokeai/backend/model_management/lora.py
+++ b/invokeai/backend/model_management/lora.py
@@ -5,437 +5,14 @@ from contextlib import contextmanager
from typing import Optional, Dict, Tuple, Any, Union, List
from pathlib import Path
-import torch
-from safetensors.torch import load_file
-from torch.utils.hooks import RemovableHandle
-
-from diffusers.models import UNet2DConditionModel
-from transformers import CLIPTextModel
-from onnx import numpy_helper
-from onnxruntime import OrtValue
import numpy as np
-
+import torch
from compel.embeddings_provider import BaseTextualInversionManager
from diffusers.models import UNet2DConditionModel
from safetensors.torch import load_file
from transformers import CLIPTextModel, CLIPTokenizer
-# TODO: rename and split this file
-
-
-class LoRALayerBase:
- # rank: Optional[int]
- # alpha: Optional[float]
- # bias: Optional[torch.Tensor]
- # layer_key: str
-
- # @property
- # def scale(self):
- # return self.alpha / self.rank if (self.alpha and self.rank) else 1.0
-
- def __init__(
- self,
- layer_key: str,
- values: dict,
- ):
- if "alpha" in values:
- self.alpha = values["alpha"].item()
- else:
- self.alpha = None
-
- if "bias_indices" in values and "bias_values" in values and "bias_size" in values:
- self.bias = torch.sparse_coo_tensor(
- values["bias_indices"],
- values["bias_values"],
- tuple(values["bias_size"]),
- )
-
- else:
- self.bias = None
-
- self.rank = None # set in layer implementation
- self.layer_key = layer_key
-
- def forward(
- self,
- module: torch.nn.Module,
- input_h: Any, # for real looks like Tuple[torch.nn.Tensor] but not sure
- multiplier: float,
- ):
- if type(module) == torch.nn.Conv2d:
- op = torch.nn.functional.conv2d
- extra_args = dict(
- stride=module.stride,
- padding=module.padding,
- dilation=module.dilation,
- groups=module.groups,
- )
-
- else:
- op = torch.nn.functional.linear
- extra_args = {}
-
- weight = self.get_weight()
-
- bias = self.bias if self.bias is not None else 0
- scale = self.alpha / self.rank if (self.alpha and self.rank) else 1.0
- return (
- op(
- *input_h,
- (weight + bias).view(module.weight.shape),
- None,
- **extra_args,
- )
- * multiplier
- * scale
- )
-
- def get_weight(self):
- raise NotImplementedError()
-
- def calc_size(self) -> int:
- model_size = 0
- for val in [self.bias]:
- if val is not None:
- model_size += val.nelement() * val.element_size()
- return model_size
-
- def to(
- self,
- device: Optional[torch.device] = None,
- dtype: Optional[torch.dtype] = None,
- ):
- if self.bias is not None:
- self.bias = self.bias.to(device=device, dtype=dtype)
-
-
-# TODO: find and debug lora/locon with bias
-class LoRALayer(LoRALayerBase):
- # up: torch.Tensor
- # mid: Optional[torch.Tensor]
- # down: torch.Tensor
-
- def __init__(
- self,
- layer_key: str,
- values: dict,
- ):
- super().__init__(layer_key, values)
-
- self.up = values["lora_up.weight"]
- self.down = values["lora_down.weight"]
- if "lora_mid.weight" in values:
- self.mid = values["lora_mid.weight"]
- else:
- self.mid = None
-
- self.rank = self.down.shape[0]
-
- def get_weight(self):
- if self.mid is not None:
- up = self.up.reshape(self.up.shape[0], self.up.shape[1])
- down = self.down.reshape(self.down.shape[0], self.down.shape[1])
- weight = torch.einsum("m n w h, i m, n j -> i j w h", self.mid, up, down)
- else:
- weight = self.up.reshape(self.up.shape[0], -1) @ self.down.reshape(self.down.shape[0], -1)
-
- return weight
-
- def calc_size(self) -> int:
- model_size = super().calc_size()
- for val in [self.up, self.mid, self.down]:
- if val is not None:
- model_size += val.nelement() * val.element_size()
- return model_size
-
- def to(
- self,
- device: Optional[torch.device] = None,
- dtype: Optional[torch.dtype] = None,
- ):
- super().to(device=device, dtype=dtype)
-
- self.up = self.up.to(device=device, dtype=dtype)
- self.down = self.down.to(device=device, dtype=dtype)
-
- if self.mid is not None:
- self.mid = self.mid.to(device=device, dtype=dtype)
-
-
-class LoHALayer(LoRALayerBase):
- # w1_a: torch.Tensor
- # w1_b: torch.Tensor
- # w2_a: torch.Tensor
- # w2_b: torch.Tensor
- # t1: Optional[torch.Tensor] = None
- # t2: Optional[torch.Tensor] = None
-
- def __init__(
- self,
- layer_key: str,
- values: dict,
- ):
- super().__init__(layer_key, values)
-
- self.w1_a = values["hada_w1_a"]
- self.w1_b = values["hada_w1_b"]
- self.w2_a = values["hada_w2_a"]
- self.w2_b = values["hada_w2_b"]
-
- if "hada_t1" in values:
- self.t1 = values["hada_t1"]
- else:
- self.t1 = None
-
- if "hada_t2" in values:
- self.t2 = values["hada_t2"]
- else:
- self.t2 = None
-
- self.rank = self.w1_b.shape[0]
-
- def get_weight(self):
- if self.t1 is None:
- weight = (self.w1_a @ self.w1_b) * (self.w2_a @ self.w2_b)
-
- else:
- rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", self.t1, self.w1_b, self.w1_a)
- rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", self.t2, self.w2_b, self.w2_a)
- weight = rebuild1 * rebuild2
-
- return weight
-
- def calc_size(self) -> int:
- model_size = super().calc_size()
- for val in [self.w1_a, self.w1_b, self.w2_a, self.w2_b, self.t1, self.t2]:
- if val is not None:
- model_size += val.nelement() * val.element_size()
- return model_size
-
- def to(
- self,
- device: Optional[torch.device] = None,
- dtype: Optional[torch.dtype] = None,
- ):
- super().to(device=device, dtype=dtype)
-
- self.w1_a = self.w1_a.to(device=device, dtype=dtype)
- self.w1_b = self.w1_b.to(device=device, dtype=dtype)
- if self.t1 is not None:
- self.t1 = self.t1.to(device=device, dtype=dtype)
-
- self.w2_a = self.w2_a.to(device=device, dtype=dtype)
- self.w2_b = self.w2_b.to(device=device, dtype=dtype)
- if self.t2 is not None:
- self.t2 = self.t2.to(device=device, dtype=dtype)
-
-
-class LoKRLayer(LoRALayerBase):
- # w1: Optional[torch.Tensor] = None
- # w1_a: Optional[torch.Tensor] = None
- # w1_b: Optional[torch.Tensor] = None
- # w2: Optional[torch.Tensor] = None
- # w2_a: Optional[torch.Tensor] = None
- # w2_b: Optional[torch.Tensor] = None
- # t2: Optional[torch.Tensor] = None
-
- def __init__(
- self,
- layer_key: str,
- values: dict,
- ):
- super().__init__(layer_key, values)
-
- if "lokr_w1" in values:
- self.w1 = values["lokr_w1"]
- self.w1_a = None
- self.w1_b = None
- else:
- self.w1 = None
- self.w1_a = values["lokr_w1_a"]
- self.w1_b = values["lokr_w1_b"]
-
- if "lokr_w2" in values:
- self.w2 = values["lokr_w2"]
- self.w2_a = None
- self.w2_b = None
- else:
- self.w2 = None
- self.w2_a = values["lokr_w2_a"]
- self.w2_b = values["lokr_w2_b"]
-
- if "lokr_t2" in values:
- self.t2 = values["lokr_t2"]
- else:
- self.t2 = None
-
- if "lokr_w1_b" in values:
- self.rank = values["lokr_w1_b"].shape[0]
- elif "lokr_w2_b" in values:
- self.rank = values["lokr_w2_b"].shape[0]
- else:
- self.rank = None # unscaled
-
- def get_weight(self):
- w1 = self.w1
- if w1 is None:
- w1 = self.w1_a @ self.w1_b
-
- w2 = self.w2
- if w2 is None:
- if self.t2 is None:
- w2 = self.w2_a @ self.w2_b
- else:
- w2 = torch.einsum("i j k l, i p, j r -> p r k l", self.t2, self.w2_a, self.w2_b)
-
- if len(w2.shape) == 4:
- w1 = w1.unsqueeze(2).unsqueeze(2)
- w2 = w2.contiguous()
- weight = torch.kron(w1, w2)
-
- return weight
-
- def calc_size(self) -> int:
- model_size = super().calc_size()
- for val in [self.w1, self.w1_a, self.w1_b, self.w2, self.w2_a, self.w2_b, self.t2]:
- if val is not None:
- model_size += val.nelement() * val.element_size()
- return model_size
-
- def to(
- self,
- device: Optional[torch.device] = None,
- dtype: Optional[torch.dtype] = None,
- ):
- super().to(device=device, dtype=dtype)
-
- if self.w1 is not None:
- self.w1 = self.w1.to(device=device, dtype=dtype)
- else:
- self.w1_a = self.w1_a.to(device=device, dtype=dtype)
- self.w1_b = self.w1_b.to(device=device, dtype=dtype)
-
- if self.w2 is not None:
- self.w2 = self.w2.to(device=device, dtype=dtype)
- else:
- self.w2_a = self.w2_a.to(device=device, dtype=dtype)
- self.w2_b = self.w2_b.to(device=device, dtype=dtype)
-
- if self.t2 is not None:
- self.t2 = self.t2.to(device=device, dtype=dtype)
-
-
-class LoRAModel: # (torch.nn.Module):
- _name: str
- layers: Dict[str, LoRALayer]
- _device: torch.device
- _dtype: torch.dtype
-
- def __init__(
- self,
- name: str,
- layers: Dict[str, LoRALayer],
- device: torch.device,
- dtype: torch.dtype,
- ):
- self._name = name
- self._device = device or torch.cpu
- self._dtype = dtype or torch.float32
- self.layers = layers
-
- @property
- def name(self):
- return self._name
-
- @property
- def device(self):
- return self._device
-
- @property
- def dtype(self):
- return self._dtype
-
- def to(
- self,
- device: Optional[torch.device] = None,
- dtype: Optional[torch.dtype] = None,
- ) -> LoRAModel:
- # TODO: try revert if exception?
- for key, layer in self.layers.items():
- layer.to(device=device, dtype=dtype)
- self._device = device
- self._dtype = dtype
-
- def calc_size(self) -> int:
- model_size = 0
- for _, layer in self.layers.items():
- model_size += layer.calc_size()
- return model_size
-
- @classmethod
- def from_checkpoint(
- cls,
- file_path: Union[str, Path],
- device: Optional[torch.device] = None,
- dtype: Optional[torch.dtype] = None,
- ):
- device = device or torch.device("cpu")
- dtype = dtype or torch.float32
-
- if isinstance(file_path, str):
- file_path = Path(file_path)
-
- model = cls(
- device=device,
- dtype=dtype,
- name=file_path.stem, # TODO:
- layers=dict(),
- )
-
- if file_path.suffix == ".safetensors":
- state_dict = load_file(file_path.absolute().as_posix(), device="cpu")
- else:
- state_dict = torch.load(file_path, map_location="cpu")
-
- state_dict = cls._group_state(state_dict)
-
- for layer_key, values in state_dict.items():
- # lora and locon
- if "lora_down.weight" in values:
- layer = LoRALayer(layer_key, values)
-
- # loha
- elif "hada_w1_b" in values:
- layer = LoHALayer(layer_key, values)
-
- # lokr
- elif "lokr_w1_b" in values or "lokr_w1" in values:
- layer = LoKRLayer(layer_key, values)
-
- else:
- # TODO: diff/ia3/... format
- print(f">> Encountered unknown lora layer module in {model.name}: {layer_key}")
- return
-
- # lower memory consumption by removing already parsed layer values
- state_dict[layer_key].clear()
-
- layer.to(device=device, dtype=dtype)
- model.layers[layer_key] = layer
-
- return model
-
- @staticmethod
- def _group_state(state_dict: dict):
- state_dict_groupped = dict()
-
- for key, value in state_dict.items():
- stem, leaf = key.split(".", 1)
- if stem not in state_dict_groupped:
- state_dict_groupped[stem] = dict()
- state_dict_groupped[stem][leaf] = value
-
- return state_dict_groupped
+from .models.lora import LoRAModel
"""
@@ -470,7 +47,7 @@ class ModelPatcher:
module = module.get_submodule(submodule_name)
module_key += "." + submodule_name
submodule_name = key_parts.pop(0)
- except:
+ except Exception:
submodule_name += "_" + key_parts.pop(0)
module = module.get_submodule(submodule_name)
@@ -516,6 +93,26 @@ class ModelPatcher:
with cls.apply_lora(text_encoder, loras, "lora_te_"):
yield
+ @classmethod
+ @contextmanager
+ def apply_sdxl_lora_text_encoder(
+ cls,
+ text_encoder: CLIPTextModel,
+ loras: List[Tuple[LoRAModel, float]],
+ ):
+ with cls.apply_lora(text_encoder, loras, "lora_te1_"):
+ yield
+
+ @classmethod
+ @contextmanager
+ def apply_sdxl_lora_text_encoder2(
+ cls,
+ text_encoder: CLIPTextModel,
+ loras: List[Tuple[LoRAModel, float]],
+ ):
+ with cls.apply_lora(text_encoder, loras, "lora_te2_"):
+ yield
+
@classmethod
@contextmanager
def apply_lora(
@@ -541,7 +138,7 @@ class ModelPatcher:
# with torch.autocast(device_type="cpu"):
layer.to(dtype=torch.float32)
layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0
- layer_weight = layer.get_weight() * lora_weight * layer_scale
+ layer_weight = layer.get_weight(original_weights[module_key]) * lora_weight * layer_scale
if module.weight.shape != layer_weight.shape:
# TODO: debug on lycoris
@@ -562,7 +159,7 @@ class ModelPatcher:
cls,
tokenizer: CLIPTokenizer,
text_encoder: CLIPTextModel,
- ti_list: List[Any],
+ ti_list: List[Tuple[str, Any]],
) -> Tuple[CLIPTokenizer, TextualInversionManager]:
init_tokens_count = None
new_tokens_added = None
@@ -572,27 +169,27 @@ class ModelPatcher:
ti_manager = TextualInversionManager(ti_tokenizer)
init_tokens_count = text_encoder.resize_token_embeddings(None).num_embeddings
- def _get_trigger(ti, index):
- trigger = ti.name
+ def _get_trigger(ti_name, index):
+ trigger = ti_name
if index > 0:
trigger += f"-!pad-{i}"
return f"<{trigger}>"
# modify tokenizer
new_tokens_added = 0
- for ti in ti_list:
+ for ti_name, ti in ti_list:
for i in range(ti.embedding.shape[0]):
- new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti, i))
+ new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))
# modify text_encoder
text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added)
model_embeddings = text_encoder.get_input_embeddings()
- for ti in ti_list:
+ for ti_name, ti in ti_list:
ti_tokens = []
for i in range(ti.embedding.shape[0]):
embedding = ti.embedding[i]
- trigger = _get_trigger(ti, i)
+ trigger = _get_trigger(ti_name, i)
token_id = ti_tokenizer.convert_tokens_to_ids(trigger)
if token_id == ti_tokenizer.unk_token_id:
@@ -637,7 +234,6 @@ class ModelPatcher:
class TextualInversionModel:
- name: str
embedding: torch.Tensor # [n, 768]|[n, 1280]
@classmethod
@@ -651,7 +247,6 @@ class TextualInversionModel:
file_path = Path(file_path)
result = cls() # TODO:
- result.name = file_path.stem # TODO:
if file_path.suffix == ".safetensors":
state_dict = load_file(file_path.absolute().as_posix(), device="cpu")
@@ -712,7 +307,8 @@ class TextualInversionManager(BaseTextualInversionManager):
class ONNXModelPatcher:
- from .models.base import IAIOnnxRuntimeModel, OnnxRuntimeModel
+ from .models.base import IAIOnnxRuntimeModel
+ from diffusers import OnnxRuntimeModel
@classmethod
@contextmanager
@@ -741,7 +337,7 @@ class ONNXModelPatcher:
def apply_lora(
cls,
model: IAIOnnxRuntimeModel,
- loras: List[Tuple[LoraModel, float]],
+ loras: List[Tuple[LoRAModel, float]],
prefix: str,
):
from .models.base import IAIOnnxRuntimeModel
@@ -761,7 +357,8 @@ class ONNXModelPatcher:
layer.to(dtype=torch.float32)
layer_key = layer_key.replace(prefix, "")
- layer_weight = layer.get_weight().detach().cpu().numpy() * lora_weight
+ # TODO: rewrite to pass original tensor weight(required by ia3)
+ layer_weight = layer.get_weight(None).detach().cpu().numpy() * lora_weight
if layer_key is blended_loras:
blended_loras[layer_key] += layer_weight
else:
@@ -828,7 +425,7 @@ class ONNXModelPatcher:
cls,
tokenizer: CLIPTokenizer,
text_encoder: IAIOnnxRuntimeModel,
- ti_list: List[Any],
+ ti_list: List[Tuple[str, Any]],
) -> Tuple[CLIPTokenizer, TextualInversionManager]:
from .models.base import IAIOnnxRuntimeModel
@@ -841,17 +438,17 @@ class ONNXModelPatcher:
ti_tokenizer = copy.deepcopy(tokenizer)
ti_manager = TextualInversionManager(ti_tokenizer)
- def _get_trigger(ti, index):
- trigger = ti.name
+ def _get_trigger(ti_name, index):
+ trigger = ti_name
if index > 0:
trigger += f"-!pad-{i}"
return f"<{trigger}>"
# modify tokenizer
new_tokens_added = 0
- for ti in ti_list:
+ for ti_name, ti in ti_list:
for i in range(ti.embedding.shape[0]):
- new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti, i))
+ new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))
# modify text_encoder
orig_embeddings = text_encoder.tensors["text_model.embeddings.token_embedding.weight"]
@@ -861,11 +458,11 @@ class ONNXModelPatcher:
axis=0,
)
- for ti in ti_list:
+ for ti_name, ti in ti_list:
ti_tokens = []
for i in range(ti.embedding.shape[0]):
embedding = ti.embedding[i].detach().numpy()
- trigger = _get_trigger(ti, i)
+ trigger = _get_trigger(ti_name, i)
token_id = ti_tokenizer.convert_tokens_to_ids(trigger)
if token_id == ti_tokenizer.unk_token_id:
diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index b4c3e48a48..6f3e5bd6a5 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -21,15 +21,13 @@ import os
import sys
import hashlib
from contextlib import suppress
+from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Union, types, Optional, Type, Any
import torch
-import logging
import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
-from .lora import LoRAModel, TextualInversionModel
from .models import BaseModelType, ModelType, SubModelType, ModelBase
# Maximum size of the cache, in gigs
@@ -43,6 +41,18 @@ DEFAULT_MAX_VRAM_CACHE_SIZE = 2.75
GIG = 1073741824
+@dataclass
+class CacheStats(object):
+ hits: int = 0 # cache hits
+ misses: int = 0 # cache misses
+ high_watermark: int = 0 # amount of cache used
+ in_cache: int = 0 # number of models in cache
+ cleared: int = 0 # number of models cleared to make space
+ cache_size: int = 0 # total size of cache
+ # {submodel_key => size}
+ loaded_model_sizes: Dict[str, int] = field(default_factory=dict)
+
+
class ModelLocker(object):
"Forward declaration"
pass
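A rough caller-side sketch of the new opt-in CacheStats hooks; `cache` is assumed to be a ModelCache instance and the actual model loads are elided. The cache only updates these counters while cache.stats is set.

    # Hypothetical usage; not part of the patch.
    stats = CacheStats()
    cache.stats = stats          # opt in: counters are updated only when stats is set
    # ... load models through the cache ...
    total = stats.hits + stats.misses
    hit_rate = stats.hits / total if total else 0.0
    print(f"hits={stats.hits} misses={stats.misses} hit_rate={hit_rate:.0%} cleared={stats.cleared}")
    print(f"high watermark {stats.high_watermark / GIG:.2f} GB of {stats.cache_size / GIG:.2f} GB")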
@@ -117,6 +127,9 @@ class ModelCache(object):
self.sha_chunksize = sha_chunksize
self.logger = logger
+ # used for stats collection
+ self.stats = None
+
self._cached_models = dict()
self._cache_stack = list()
@@ -183,13 +196,14 @@ class ModelCache(object):
model_type=model_type,
submodel_type=submodel,
)
-
# TODO: lock for no copies on simultaneous calls?
cache_entry = self._cached_models.get(key, None)
if cache_entry is None:
self.logger.info(
- f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}"
+ f"Loading model {model_path}, type {base_model.value}:{model_type.value}{':'+submodel.value if submodel else ''}"
)
+ if self.stats:
+ self.stats.misses += 1
# this will remove older cached models until
# there is sufficient room to load the requested model
@@ -203,6 +217,17 @@ class ModelCache(object):
cache_entry = _CacheRecord(self, model, mem_used)
self._cached_models[key] = cache_entry
+ else:
+ if self.stats:
+ self.stats.hits += 1
+
+ if self.stats:
+ self.stats.cache_size = self.max_cache_size * GIG
+ self.stats.high_watermark = max(self.stats.high_watermark, self._cache_size())
+ self.stats.in_cache = len(self._cached_models)
+ self.stats.loaded_model_sizes[key] = max(
+ self.stats.loaded_model_sizes.get(key, 0), model_info.get_size(submodel)
+ )
with suppress(Exception):
self._cache_stack.remove(key)
@@ -248,7 +273,7 @@ class ModelCache(object):
self.cache.logger.debug(f"Locking {self.key} in {self.cache.execution_device}")
self.cache._print_cuda_stats()
- except:
+ except Exception:
self.cache_entry.unlock()
raise
@@ -282,14 +307,14 @@ class ModelCache(object):
"""
Given the HF repo id or path to a model on disk, returns a unique
hash. Works for legacy checkpoint files, HF models on disk, and HF repo IDs
+
:param model_path: Path to model file/directory on disk.
"""
return self._local_model_hash(model_path)
def cache_size(self) -> float:
- "Return the current size of the cache, in GB"
- current_cache_size = sum([m.size for m in self._cached_models.values()])
- return current_cache_size / GIG
+ """Return the current size of the cache, in GB."""
+ return self._cache_size() / GIG
def _has_cuda(self) -> bool:
return self.execution_device.type == "cuda"
@@ -312,12 +337,15 @@ class ModelCache(object):
f"Current VRAM/RAM usage: {vram}/{ram}; cached_models/loaded_models/locked_models/ = {cached_models}/{loaded_models}/{locked_models}"
)
+ def _cache_size(self) -> int:
+ return sum([m.size for m in self._cached_models.values()])
+
def _make_cache_room(self, model_size):
# calculate how much memory this model will require
# multiplier = 2 if self.precision==torch.float32 else 1
bytes_needed = model_size
maximum_size = self.max_cache_size * GIG # stored in GB, convert to bytes
- current_size = sum([m.size for m in self._cached_models.values()])
+ current_size = self._cache_size()
if current_size + bytes_needed > maximum_size:
self.logger.debug(
@@ -366,6 +394,8 @@ class ModelCache(object):
f"Unloading model {model_key} to free {(model_size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)"
)
current_size -= cache_entry.size
+ if self.stats:
+ self.stats.cleared += 1
del self._cache_stack[pos]
del self._cached_models[model_key]
del cache_entry
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 832a96e18f..1b10554e69 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -228,19 +228,19 @@ the root is the InvokeAI ROOTDIR.
"""
from __future__ import annotations
-import os
import hashlib
+import os
import textwrap
-import yaml
+import types
from dataclasses import dataclass
from pathlib import Path
-from typing import Optional, List, Tuple, Union, Dict, Set, Callable, types
from shutil import rmtree, move
+from typing import Optional, List, Literal, Tuple, Union, Dict, Set, Callable
import torch
+import yaml
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
-
from pydantic import BaseModel, Field
import invokeai.backend.util.logging as logger
@@ -259,6 +259,7 @@ from .models import (
ModelNotFoundException,
InvalidModelException,
DuplicateModelException,
+ ModelBase,
)
# We are only starting to number the config file with release 3.
@@ -361,7 +362,7 @@ class ModelManager(object):
if model_key.startswith("_"):
continue
model_name, base_model, model_type = self.parse_key(model_key)
- model_class = MODEL_CLASSES[base_model][model_type]
+ model_class = self._get_implementation(base_model, model_type)
# alias for config file
model_config["model_format"] = model_config.pop("format")
self.models[model_key] = model_class.create_config(**model_config)
@@ -381,18 +382,24 @@ class ModelManager(object):
# causing otherwise unreferenced models to be removed from memory
self._read_models()
- def model_exists(
- self,
- model_name: str,
- base_model: BaseModelType,
- model_type: ModelType,
- ) -> bool:
+ def model_exists(self, model_name: str, base_model: BaseModelType, model_type: ModelType, *, rescan=False) -> bool:
"""
- Given a model name, returns True if it is a valid
- identifier.
+ Given a model name, returns True if it is a valid identifier.
+
+ :param model_name: symbolic name of the model in models.yaml
+ :param model_type: ModelType enum indicating the type of model to return
+ :param base_model: BaseModelType enum indicating the base model used by this model
+ :param rescan: if True, scan the models directory (scan_models_directory) for the model before concluding it does not exist
"""
model_key = self.create_key(model_name, base_model, model_type)
- return model_key in self.models
+ exists = model_key in self.models
+
+ # if model not found try to find it (maybe file just pasted)
+ if rescan and not exists:
+ self.scan_models_directory(base_model=base_model, model_type=model_type)
+ exists = self.model_exists(model_name, base_model, model_type, rescan=False)
+
+ return exists
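For illustration, the new rescan flag lets a caller trigger a one-shot directory scan before concluding a model is missing; mgr is assumed to be a ModelManager instance and the model name is made up.

    # Hypothetical call; "some-model" is an invented name.
    if mgr.model_exists("some-model", BaseModelType.StableDiffusion1, ModelType.Main, rescan=True):
        # the model was already registered, or was just found by scanning the models directory
        info = mgr.model_info("some-model", BaseModelType.StableDiffusion1, ModelType.Main)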
@classmethod
def create_key(
@@ -412,12 +419,12 @@ class ModelManager(object):
base_model_str, model_type_str, model_name = model_key.split("/", 2)
try:
model_type = ModelType(model_type_str)
- except:
+ except Exception:
raise Exception(f"Unknown model type: {model_type_str}")
try:
base_model = BaseModelType(base_model_str)
- except:
+ except Exception:
raise Exception(f"Unknown base model: {base_model_str}")
return (model_name, base_model, model_type)
@@ -443,39 +450,32 @@ class ModelManager(object):
:param model_name: symbolic name of the model in models.yaml
:param model_type: ModelType enum indicating the type of model to return
:param base_model: BaseModelType enum indicating the base model used by this model
- :param submode_typel: an ModelType enum indicating the portion of
+ :param submodel_type: a ModelType enum indicating the portion of
the model to retrieve (e.g. ModelType.Vae)
"""
- model_class = MODEL_CLASSES[base_model][model_type]
model_key = self.create_key(model_name, base_model, model_type)
- # if model not found try to find it (maybe file just pasted)
- if model_key not in self.models:
- self.scan_models_directory(base_model=base_model, model_type=model_type)
- if model_key not in self.models:
- raise ModelNotFoundException(f"Model not found - {model_key}")
+ if not self.model_exists(model_name, base_model, model_type, rescan=True):
+ raise ModelNotFoundException(f"Model not found - {model_key}")
- model_config = self.models[model_key]
- model_path = self.resolve_model_path(model_config.path)
+ model_config = self._get_model_config(base_model, model_name, model_type)
+
+ model_path, is_submodel_override = self._get_model_path(model_config, submodel_type)
+
+ if is_submodel_override:
+ model_type = submodel_type
+ submodel_type = None
+
+ model_class = self._get_implementation(base_model, model_type)
if not model_path.exists():
if model_class.save_to_config:
self.models[model_key].error = ModelError.NotFound
- raise Exception(f'Files for model "{model_key}" not found')
+ raise Exception(f'Files for model "{model_key}" not found at {model_path}')
else:
self.models.pop(model_key, None)
- raise ModelNotFoundException(f"Model not found - {model_key}")
-
- # vae/movq override
- # TODO:
- if submodel_type is not None and hasattr(model_config, submodel_type):
- override_path = getattr(model_config, submodel_type)
- if override_path:
- model_path = self.app_config.root_path / override_path
- model_type = submodel_type
- submodel_type = None
- model_class = MODEL_CLASSES[base_model][model_type]
+ raise ModelNotFoundException(f'Files for model "{model_key}" not found at {model_path}')
# TODO: path
# TODO: is it accurate to use path as id
@@ -513,12 +513,61 @@ class ModelManager(object):
_cache=self.cache,
)
+ def _get_model_path(
+ self, model_config: ModelConfigBase, submodel_type: Optional[SubModelType] = None
+ ) -> (Path, bool):
+ """Extract a model's filesystem path from its config.
+
+ :return: The fully qualified Path of the model (or submodel).
+ """
+ model_path = model_config.path
+ is_submodel_override = False
+
+ # Does the config explicitly override the submodel?
+ if submodel_type is not None and hasattr(model_config, submodel_type):
+ submodel_path = getattr(model_config, submodel_type)
+ if submodel_path is not None and len(submodel_path) > 0:
+ model_path = getattr(model_config, submodel_type)
+ is_submodel_override = True
+
+ model_path = self.resolve_model_path(model_path)
+ return model_path, is_submodel_override
+
+ def _get_model_config(self, base_model: BaseModelType, model_name: str, model_type: ModelType) -> ModelConfigBase:
+ """Get a model's config object."""
+ model_key = self.create_key(model_name, base_model, model_type)
+ try:
+ model_config = self.models[model_key]
+ except KeyError:
+ raise ModelNotFoundException(f"Model not found - {model_key}")
+ return model_config
+
+ def _get_implementation(self, base_model: BaseModelType, model_type: ModelType) -> type[ModelBase]:
+ """Get the concrete implementation class for a specific model type."""
+ model_class = MODEL_CLASSES[base_model][model_type]
+ return model_class
+
+ def _instantiate(
+ self,
+ model_name: str,
+ base_model: BaseModelType,
+ model_type: ModelType,
+ submodel_type: Optional[SubModelType] = None,
+ ) -> ModelBase:
+ """Make a new instance of this model, without loading it."""
+ model_config = self._get_model_config(base_model, model_name, model_type)
+ model_path, is_submodel_override = self._get_model_path(model_config, submodel_type)
+ # FIXME: do non-overridden submodels get the right class?
+ constructor = self._get_implementation(base_model, model_type)
+ instance = constructor(model_path, base_model, model_type)
+ return instance
+
def model_info(
self,
model_name: str,
base_model: BaseModelType,
model_type: ModelType,
- ) -> dict:
+ ) -> Union[dict, None]:
"""
Given a model name returns the OmegaConf (dict-like) object describing it.
"""
@@ -540,13 +589,16 @@ class ModelManager(object):
model_name: str,
base_model: BaseModelType,
model_type: ModelType,
- ) -> dict:
+ ) -> Union[dict, None]:
"""
Returns a dict describing one installed model, using
the combined format of the list_models() method.
"""
models = self.list_models(base_model, model_type, model_name)
- return models[0] if models else None
+ if len(models) >= 1:
+ return models[0]
+ else:
+ return None
def list_models(
self,
@@ -560,7 +612,7 @@ class ModelManager(object):
model_keys = (
[self.create_key(model_name, base_model, model_type)]
- if model_name
+ if model_name and base_model and model_type
else sorted(self.models, key=str.casefold)
)
models = []
@@ -596,7 +648,7 @@ class ModelManager(object):
Print a table of models and their descriptions. This needs to be redone
"""
# TODO: redo
- for model_type, model_dict in self.list_models().items():
+ for model_dict in self.list_models():
for model_name, model_info in model_dict.items():
line = f'{model_info["name"]:25s} {model_info["type"]:10s} {model_info["description"]}'
print(line)
@@ -658,7 +710,7 @@ class ModelManager(object):
if path := model_attributes.get("path"):
model_attributes["path"] = str(self.relative_model_path(Path(path)))
- model_class = MODEL_CLASSES[base_model][model_type]
+ model_class = self._get_implementation(base_model, model_type)
model_config = model_class.create_config(**model_attributes)
model_key = self.create_key(model_name, base_model, model_type)
@@ -670,7 +722,7 @@ class ModelManager(object):
# TODO: if path changed and old_model.path inside models folder should we delete this too?
# remove conversion cache as config changed
- old_model_path = self.app_config.root_path / old_model.path
+ old_model_path = self.resolve_model_path(old_model.path)
old_model_cache = self._get_model_cache_path(old_model_path)
if old_model_cache.exists():
if old_model_cache.is_dir():
@@ -699,8 +751,8 @@ class ModelManager(object):
model_name: str,
base_model: BaseModelType,
model_type: ModelType,
- new_name: str = None,
- new_base: BaseModelType = None,
+ new_name: Optional[str] = None,
+ new_base: Optional[BaseModelType] = None,
):
"""
Rename or rebase a model.
@@ -753,7 +805,7 @@ class ModelManager(object):
self,
model_name: str,
base_model: BaseModelType,
- model_type: Union[ModelType.Main, ModelType.Vae],
+ model_type: Literal[ModelType.Main, ModelType.Vae],
dest_directory: Optional[Path] = None,
) -> AddModelResult:
"""
@@ -767,6 +819,10 @@ class ModelManager(object):
This will raise a ValueError unless the model is a checkpoint.
"""
info = self.model_info(model_name, base_model, model_type)
+
+ if info is None:
+ raise FileNotFoundError(f"model not found: {model_name}")
+
if info["model_format"] != "checkpoint":
raise ValueError(f"not a checkpoint format model: {model_name}")
@@ -780,7 +836,7 @@ class ModelManager(object):
model_type,
**submodel,
)
- checkpoint_path = self.app_config.root_path / info["path"]
+ checkpoint_path = self.resolve_model_path(info["path"])
old_diffusers_path = self.resolve_model_path(model.location)
new_diffusers_path = (
dest_directory or self.app_config.models_path / base_model.value / model_type.value
@@ -799,7 +855,7 @@ class ModelManager(object):
info.pop("config")
result = self.add_model(model_name, base_model, model_type, model_attributes=info, clobber=True)
- except:
+ except Exception:
# something went wrong, so don't leave dangling diffusers model in directory or it will cause a duplicate model error!
rmtree(new_diffusers_path)
raise
@@ -836,7 +892,7 @@ class ModelManager(object):
return search_folder, found_models
- def commit(self, conf_file: Path = None) -> None:
+ def commit(self, conf_file: Optional[Path] = None) -> None:
"""
Write current configuration out to the indicated file.
"""
@@ -845,7 +901,7 @@ class ModelManager(object):
for model_key, model_config in self.models.items():
model_name, base_model, model_type = self.parse_key(model_key)
- model_class = MODEL_CLASSES[base_model][model_type]
+ model_class = self._get_implementation(base_model, model_type)
if model_class.save_to_config:
# TODO: or exclude_unset better fits here?
data_to_save[model_key] = model_config.dict(exclude_defaults=True, exclude={"error"})
@@ -903,7 +959,7 @@ class ModelManager(object):
model_path = self.resolve_model_path(model_config.path).absolute()
if not model_path.exists():
- model_class = MODEL_CLASSES[cur_base_model][cur_model_type]
+ model_class = self._get_implementation(cur_base_model, cur_model_type)
if model_class.save_to_config:
model_config.error = ModelError.NotFound
self.models.pop(model_key, None)
@@ -919,7 +975,7 @@ class ModelManager(object):
for cur_model_type in ModelType:
if model_type is not None and cur_model_type != model_type:
continue
- model_class = MODEL_CLASSES[cur_base_model][cur_model_type]
+ model_class = self._get_implementation(cur_base_model, cur_model_type)
models_dir = self.resolve_model_path(Path(cur_base_model.value, cur_model_type.value))
if not models_dir.exists():
@@ -935,7 +991,9 @@ class ModelManager(object):
raise DuplicateModelException(f"Model with key {model_key} added twice")
model_path = self.relative_model_path(model_path)
- model_config: ModelConfigBase = model_class.probe_config(str(model_path))
+ model_config: ModelConfigBase = model_class.probe_config(
+ str(model_path), model_base=cur_base_model
+ )
self.models[model_key] = model_config
new_models_found = True
except DuplicateModelException as e:
@@ -983,8 +1041,8 @@ class ModelManager(object):
# LS: hacky
# Patch in the SD VAE from core so that it is available for use by the UI
try:
- self.heuristic_import({self.resolve_model_path("core/convert/sd-vae-ft-mse")})
- except:
+ self.heuristic_import({str(self.resolve_model_path("core/convert/sd-vae-ft-mse"))})
+ except Exception:
pass
installer = ModelInstall(
@@ -992,7 +1050,7 @@ class ModelManager(object):
model_manager=self,
prediction_type_helper=ask_user_for_prediction_type,
)
- known_paths = {config.root_path / x["path"] for x in self.list_models()}
+ known_paths = {self.resolve_model_path(x["path"]) for x in self.list_models()}
directories = {
config.root_path / x
for x in [
@@ -1011,7 +1069,7 @@ class ModelManager(object):
def heuristic_import(
self,
items_to_import: Set[str],
- prediction_type_helper: Callable[[Path], SchedulerPredictionType] = None,
+ prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None,
) -> Dict[str, AddModelResult]:
"""Import a list of paths, repo_ids or URLs. Returns the set of
successfully imported items.
diff --git a/invokeai/backend/model_management/model_merge.py b/invokeai/backend/model_management/model_merge.py
index 8cf3ce4ad0..a34d9b0e3e 100644
--- a/invokeai/backend/model_management/model_merge.py
+++ b/invokeai/backend/model_management/model_merge.py
@@ -33,7 +33,7 @@ class ModelMerger(object):
self,
model_paths: List[Path],
alpha: float = 0.5,
- interp: MergeInterpolationMethod = None,
+ interp: Optional[MergeInterpolationMethod] = None,
force: bool = False,
**kwargs,
) -> DiffusionPipeline:
@@ -73,7 +73,7 @@ class ModelMerger(object):
base_model: Union[BaseModelType, str],
merged_model_name: str,
alpha: float = 0.5,
- interp: MergeInterpolationMethod = None,
+ interp: Optional[MergeInterpolationMethod] = None,
force: bool = False,
merge_dest_directory: Optional[Path] = None,
**kwargs,
@@ -109,7 +109,7 @@ class ModelMerger(object):
# pick up the first model's vae
if mod == model_names[0]:
vae = info.get("vae")
- model_paths.extend([config.root_path / info["path"]])
+ model_paths.extend([(config.root_path / info["path"]).as_posix()])
merge_method = None if interp == "weighted_sum" else MergeInterpolationMethod(interp)
logger.debug(f"interp = {interp}, merge_method={merge_method}")
@@ -120,11 +120,11 @@ class ModelMerger(object):
else config.models_path / base_model.value / ModelType.Main.value
)
dump_path.mkdir(parents=True, exist_ok=True)
- dump_path = dump_path / merged_model_name
+ dump_path = (dump_path / merged_model_name).as_posix()
- merged_pipe.save_pretrained(dump_path, safe_serialization=1)
+ merged_pipe.save_pretrained(dump_path, safe_serialization=True)
attributes = dict(
- path=str(dump_path),
+ path=dump_path,
description=f"Merge of models {', '.join(model_names)}",
model_format="diffusers",
variant=ModelVariantType.Normal.value,
diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py
index c3964d760c..9f56958366 100644
--- a/invokeai/backend/model_management/model_probe.py
+++ b/invokeai/backend/model_management/model_probe.py
@@ -17,6 +17,7 @@ from .models import (
SilenceWarnings,
InvalidModelException,
)
+from .util import lora_token_vector_length
from .models.base import read_checkpoint_meta
@@ -216,9 +217,9 @@ class ModelProbe(object):
raise "The model {model_name} is potentially infected by malware. Aborting import."
-###################################################3
+# ##################################################3
# Checkpoint probing
-###################################################3
+# ##################################################3
class ProbeBase(object):
def get_base_type(self) -> BaseModelType:
pass
@@ -315,21 +316,16 @@ class LoRACheckpointProbe(CheckpointProbeBase):
def get_base_type(self) -> BaseModelType:
checkpoint = self.checkpoint
- key1 = "lora_te_text_model_encoder_layers_0_mlp_fc1.lora_down.weight"
- key2 = "lora_te_text_model_encoder_layers_0_self_attn_k_proj.hada_w1_a"
- lora_token_vector_length = (
- checkpoint[key1].shape[1]
- if key1 in checkpoint
- else checkpoint[key2].shape[0]
- if key2 in checkpoint
- else 768
- )
- if lora_token_vector_length == 768:
+ token_vector_length = lora_token_vector_length(checkpoint)
+
+ if token_vector_length == 768:
return BaseModelType.StableDiffusion1
- elif lora_token_vector_length == 1024:
+ elif token_vector_length == 1024:
return BaseModelType.StableDiffusion2
+ elif token_vector_length == 2048:
+ return BaseModelType.StableDiffusionXL
else:
- return None
+ raise InvalidModelException(f"Unknown LoRA type: {self.checkpoint_path}")
class TextualInversionCheckpointProbe(CheckpointProbeBase):
@@ -435,7 +431,7 @@ class PipelineFolderProbe(FolderProbeBase):
return ModelVariantType.Depth
elif in_channels == 4:
return ModelVariantType.Normal
- except:
+ except Exception:
pass
return ModelVariantType.Normal
@@ -485,9 +481,19 @@ class ControlNetFolderProbe(FolderProbeBase):
with open(config_file, "r") as file:
config = json.load(file)
# no obvious way to distinguish between sd2-base and sd2-768
- return (
- BaseModelType.StableDiffusion1 if config["cross_attention_dim"] == 768 else BaseModelType.StableDiffusion2
+ dimension = config["cross_attention_dim"]
+ base_model = (
+ BaseModelType.StableDiffusion1
+ if dimension == 768
+ else BaseModelType.StableDiffusion2
+ if dimension == 1024
+ else BaseModelType.StableDiffusionXL
+ if dimension == 2048
+ else None
)
+ if not base_model:
+ raise InvalidModelException(f"Unable to determine model base for {self.folder_path}")
+ return base_model
class LoRAFolderProbe(FolderProbeBase):
diff --git a/invokeai/backend/model_management/model_search.py b/invokeai/backend/model_management/model_search.py
index 9c87d6c408..0a98091f4a 100644
--- a/invokeai/backend/model_management/model_search.py
+++ b/invokeai/backend/model_management/model_search.py
@@ -56,7 +56,7 @@ class ModelSearch(ABC):
self.on_search_completed()
def walk_directory(self, path: Path):
- for root, dirs, files in os.walk(path):
+ for root, dirs, files in os.walk(path, followlinks=True):
if str(Path(root).name).startswith("."):
self._pruned_paths.add(root)
if any([Path(root).is_relative_to(x) for x in self._pruned_paths]):
diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py
index 931da1b159..2de206257b 100644
--- a/invokeai/backend/model_management/models/__init__.py
+++ b/invokeai/backend/model_management/models/__init__.py
@@ -2,7 +2,7 @@ import inspect
from enum import Enum
from pydantic import BaseModel
from typing import Literal, get_origin
-from .base import (
+from .base import ( # noqa: F401
BaseModelType,
ModelType,
SubModelType,
@@ -118,7 +118,7 @@ def get_model_config_enums():
fields = model_config.__annotations__
try:
field = fields["model_format"]
- except:
+ except Exception:
raise Exception("format field not found")
# model_format: None
diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py
index e6a20e79ec..ed1c2c6098 100644
--- a/invokeai/backend/model_management/models/base.py
+++ b/invokeai/backend/model_management/models/base.py
@@ -3,27 +3,28 @@ import os
import sys
import typing
import inspect
-from enum import Enum
+import warnings
from abc import ABCMeta, abstractmethod
+from contextlib import suppress
+from enum import Enum
from pathlib import Path
from picklescan.scanner import scan_file_path
+
import torch
import numpy as np
-import safetensors.torch
-from pathlib import Path
-from diffusers import DiffusionPipeline, ConfigMixin, OnnxRuntimeModel
-
-from contextlib import suppress
-from pydantic import BaseModel, Field
-from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
-
import onnx
+import safetensors.torch
+from diffusers import DiffusionPipeline, ConfigMixin
from onnx import numpy_helper
from onnxruntime import (
InferenceSession,
SessionOptions,
get_available_providers,
)
+from pydantic import BaseModel, Field
+from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
+from diffusers import logging as diffusers_logging
+from transformers import logging as transformers_logging
class DuplicateModelException(Exception):
@@ -171,7 +172,7 @@ class ModelBase(metaclass=ABCMeta):
fields = value.__annotations__
try:
field = fields["model_format"]
- except:
+ except Exception:
raise Exception(f"Invalid config definition - format field not found({cls.__qualname__})")
if isinstance(field, type) and issubclass(field, str) and issubclass(field, Enum):
@@ -244,7 +245,7 @@ class DiffusersModel(ModelBase):
try:
config_data = DiffusionPipeline.load_config(self.model_path)
# config_data = json.loads(os.path.join(self.model_path, "model_index.json"))
- except:
+ except Exception:
raise Exception("Invalid diffusers model! (model_index.json not found or invalid)")
config_data.pop("_ignore_files", None)
@@ -292,8 +293,9 @@ class DiffusersModel(ModelBase):
)
break
except Exception as e:
- # print("====ERR LOAD====")
- # print(f"{variant}: {e}")
+ if not str(e).startswith("Error no file"):
+ print("====ERR LOAD====")
+ print(f"{variant}: {e}")
pass
else:
raise Exception(f"Failed to load {self.base_model}:{self.model_type}:{child_type} model")
@@ -342,7 +344,7 @@ def calc_model_size_by_fs(model_path: str, subfolder: Optional[str] = None, vari
with open(os.path.join(model_path, file), "r") as f:
index_data = json.loads(f.read())
return int(index_data["metadata"]["total_size"])
- except:
+ except Exception:
pass
# calculate files size if there is no index file
@@ -439,7 +441,7 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False):
if str(path).endswith(".safetensors"):
try:
checkpoint = _fast_safetensors_reader(path)
- except:
+ except Exception:
# TODO: create issue for support "meta"?
checkpoint = safetensors.torch.load_file(path, device="cpu")
else:
@@ -451,11 +453,6 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False):
return checkpoint
-import warnings
-from diffusers import logging as diffusers_logging
-from transformers import logging as transformers_logging
-
-
class SilenceWarnings(object):
def __init__(self):
self.transformers_verbosity = transformers_logging.get_verbosity()
@@ -638,7 +635,7 @@ class IAIOnnxRuntimeModel:
raise Exception("You should call create_session before running model")
inputs = {k: np.array(v) for k, v in kwargs.items()}
- output_names = self.session.get_outputs()
+ # output_names = self.session.get_outputs()
# for k in inputs:
# self.io_binding.bind_cpu_input(k, inputs[k])
# for name in output_names:
diff --git a/invokeai/backend/model_management/models/controlnet.py b/invokeai/backend/model_management/models/controlnet.py
index 061be7ae49..ebc01399b5 100644
--- a/invokeai/backend/model_management/models/controlnet.py
+++ b/invokeai/backend/model_management/models/controlnet.py
@@ -43,7 +43,7 @@ class ControlNetModel(ModelBase):
try:
config = EmptyConfigLoader.load_config(self.model_path, config_name="config.json")
# config = json.loads(os.path.join(self.model_path, "config.json"))
- except:
+ except Exception:
raise Exception("Invalid controlnet model! (config.json not found or invalid)")
model_class_name = config.get("_class_name", None)
@@ -53,7 +53,7 @@ class ControlNetModel(ModelBase):
try:
self.model_class = self._hf_definition_to_type(["diffusers", model_class_name])
self.model_size = calc_model_size_by_fs(self.model_path)
- except:
+ except Exception:
raise Exception("Invalid ControlNet model!")
def get_size(self, child_type: Optional[SubModelType] = None):
@@ -78,7 +78,7 @@ class ControlNetModel(ModelBase):
variant=variant,
)
break
- except:
+ except Exception:
pass
if not model:
raise ModelNotFoundException()
diff --git a/invokeai/backend/model_management/models/lora.py b/invokeai/backend/model_management/models/lora.py
index 642f8bbeec..b6f321d60b 100644
--- a/invokeai/backend/model_management/models/lora.py
+++ b/invokeai/backend/model_management/models/lora.py
@@ -1,21 +1,23 @@
+import bisect
import os
-import torch
from enum import Enum
-from typing import Optional, Union, Literal
+from pathlib import Path
+from typing import Dict, Optional, Union
+
+import torch
+from safetensors.torch import load_file
+
from .base import (
+ BaseModelType,
+ InvalidModelException,
ModelBase,
ModelConfigBase,
- BaseModelType,
+ ModelNotFoundException,
ModelType,
SubModelType,
classproperty,
- InvalidModelException,
- ModelNotFoundException,
)
-# TODO: naming
-from ..lora import LoRAModel as LoRAModelRaw
-
class LoRAModelFormat(str, Enum):
LyCORIS = "lycoris"
@@ -50,6 +52,7 @@ class LoRAModel(ModelBase):
model = LoRAModelRaw.from_checkpoint(
file_path=self.model_path,
dtype=torch_dtype,
+ base_model=self.base_model,
)
self.model_size = model.calc_size()
@@ -87,3 +90,622 @@ class LoRAModel(ModelBase):
raise NotImplementedError("Diffusers lora not supported")
else:
return model_path
+
+
+class LoRALayerBase:
+ # rank: Optional[int]
+ # alpha: Optional[float]
+ # bias: Optional[torch.Tensor]
+ # layer_key: str
+
+ # @property
+ # def scale(self):
+ # return self.alpha / self.rank if (self.alpha and self.rank) else 1.0
+
+ def __init__(
+ self,
+ layer_key: str,
+ values: dict,
+ ):
+ if "alpha" in values:
+ self.alpha = values["alpha"].item()
+ else:
+ self.alpha = None
+
+ if "bias_indices" in values and "bias_values" in values and "bias_size" in values:
+ self.bias = torch.sparse_coo_tensor(
+ values["bias_indices"],
+ values["bias_values"],
+ tuple(values["bias_size"]),
+ )
+
+ else:
+ self.bias = None
+
+ self.rank = None # set in layer implementation
+ self.layer_key = layer_key
+
+ def get_weight(self, orig_weight: torch.Tensor):
+ raise NotImplementedError()
+
+ def calc_size(self) -> int:
+ model_size = 0
+ for val in [self.bias]:
+ if val is not None:
+ model_size += val.nelement() * val.element_size()
+ return model_size
+
+ def to(
+ self,
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ ):
+ if self.bias is not None:
+ self.bias = self.bias.to(device=device, dtype=dtype)
+
+
+# TODO: find and debug lora/locon with bias
+class LoRALayer(LoRALayerBase):
+ # up: torch.Tensor
+ # mid: Optional[torch.Tensor]
+ # down: torch.Tensor
+
+ def __init__(
+ self,
+ layer_key: str,
+ values: dict,
+ ):
+ super().__init__(layer_key, values)
+
+ self.up = values["lora_up.weight"]
+ self.down = values["lora_down.weight"]
+ if "lora_mid.weight" in values:
+ self.mid = values["lora_mid.weight"]
+ else:
+ self.mid = None
+
+ self.rank = self.down.shape[0]
+
+ def get_weight(self, orig_weight: torch.Tensor):
+ if self.mid is not None:
+ up = self.up.reshape(self.up.shape[0], self.up.shape[1])
+ down = self.down.reshape(self.down.shape[0], self.down.shape[1])
+ weight = torch.einsum("m n w h, i m, n j -> i j w h", self.mid, up, down)
+ else:
+ weight = self.up.reshape(self.up.shape[0], -1) @ self.down.reshape(self.down.shape[0], -1)
+
+ return weight
+
+ def calc_size(self) -> int:
+ model_size = super().calc_size()
+ for val in [self.up, self.mid, self.down]:
+ if val is not None:
+ model_size += val.nelement() * val.element_size()
+ return model_size
+
+ def to(
+ self,
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ ):
+ super().to(device=device, dtype=dtype)
+
+ self.up = self.up.to(device=device, dtype=dtype)
+ self.down = self.down.to(device=device, dtype=dtype)
+
+ if self.mid is not None:
+ self.mid = self.mid.to(device=device, dtype=dtype)
+
+
+class LoHALayer(LoRALayerBase):
+ # w1_a: torch.Tensor
+ # w1_b: torch.Tensor
+ # w2_a: torch.Tensor
+ # w2_b: torch.Tensor
+ # t1: Optional[torch.Tensor] = None
+ # t2: Optional[torch.Tensor] = None
+
+ def __init__(
+ self,
+ layer_key: str,
+ values: dict,
+ ):
+ super().__init__(layer_key, values)
+
+ self.w1_a = values["hada_w1_a"]
+ self.w1_b = values["hada_w1_b"]
+ self.w2_a = values["hada_w2_a"]
+ self.w2_b = values["hada_w2_b"]
+
+ if "hada_t1" in values:
+ self.t1 = values["hada_t1"]
+ else:
+ self.t1 = None
+
+ if "hada_t2" in values:
+ self.t2 = values["hada_t2"]
+ else:
+ self.t2 = None
+
+ self.rank = self.w1_b.shape[0]
+
+ def get_weight(self, orig_weight: torch.Tensor):
+ if self.t1 is None:
+ weight = (self.w1_a @ self.w1_b) * (self.w2_a @ self.w2_b)
+
+ else:
+ rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", self.t1, self.w1_b, self.w1_a)
+ rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", self.t2, self.w2_b, self.w2_a)
+ weight = rebuild1 * rebuild2
+
+ return weight
+
+ def calc_size(self) -> int:
+ model_size = super().calc_size()
+ for val in [self.w1_a, self.w1_b, self.w2_a, self.w2_b, self.t1, self.t2]:
+ if val is not None:
+ model_size += val.nelement() * val.element_size()
+ return model_size
+
+ def to(
+ self,
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ ):
+ super().to(device=device, dtype=dtype)
+
+ self.w1_a = self.w1_a.to(device=device, dtype=dtype)
+ self.w1_b = self.w1_b.to(device=device, dtype=dtype)
+ if self.t1 is not None:
+ self.t1 = self.t1.to(device=device, dtype=dtype)
+
+ self.w2_a = self.w2_a.to(device=device, dtype=dtype)
+ self.w2_b = self.w2_b.to(device=device, dtype=dtype)
+ if self.t2 is not None:
+ self.t2 = self.t2.to(device=device, dtype=dtype)
+
+
+class LoKRLayer(LoRALayerBase):
+ # w1: Optional[torch.Tensor] = None
+ # w1_a: Optional[torch.Tensor] = None
+ # w1_b: Optional[torch.Tensor] = None
+ # w2: Optional[torch.Tensor] = None
+ # w2_a: Optional[torch.Tensor] = None
+ # w2_b: Optional[torch.Tensor] = None
+ # t2: Optional[torch.Tensor] = None
+
+ def __init__(
+ self,
+ layer_key: str,
+ values: dict,
+ ):
+ super().__init__(layer_key, values)
+
+ if "lokr_w1" in values:
+ self.w1 = values["lokr_w1"]
+ self.w1_a = None
+ self.w1_b = None
+ else:
+ self.w1 = None
+ self.w1_a = values["lokr_w1_a"]
+ self.w1_b = values["lokr_w1_b"]
+
+ if "lokr_w2" in values:
+ self.w2 = values["lokr_w2"]
+ self.w2_a = None
+ self.w2_b = None
+ else:
+ self.w2 = None
+ self.w2_a = values["lokr_w2_a"]
+ self.w2_b = values["lokr_w2_b"]
+
+ if "lokr_t2" in values:
+ self.t2 = values["lokr_t2"]
+ else:
+ self.t2 = None
+
+ if "lokr_w1_b" in values:
+ self.rank = values["lokr_w1_b"].shape[0]
+ elif "lokr_w2_b" in values:
+ self.rank = values["lokr_w2_b"].shape[0]
+ else:
+ self.rank = None # unscaled
+
+ def get_weight(self, orig_weight: torch.Tensor):
+ w1 = self.w1
+ if w1 is None:
+ w1 = self.w1_a @ self.w1_b
+
+ w2 = self.w2
+ if w2 is None:
+ if self.t2 is None:
+ w2 = self.w2_a @ self.w2_b
+ else:
+ w2 = torch.einsum("i j k l, i p, j r -> p r k l", self.t2, self.w2_a, self.w2_b)
+
+ if len(w2.shape) == 4:
+ w1 = w1.unsqueeze(2).unsqueeze(2)
+ w2 = w2.contiguous()
+ weight = torch.kron(w1, w2)
+
+ return weight
+
+ def calc_size(self) -> int:
+ model_size = super().calc_size()
+ for val in [self.w1, self.w1_a, self.w1_b, self.w2, self.w2_a, self.w2_b, self.t2]:
+ if val is not None:
+ model_size += val.nelement() * val.element_size()
+ return model_size
+
+ def to(
+ self,
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ ):
+ super().to(device=device, dtype=dtype)
+
+ if self.w1 is not None:
+ self.w1 = self.w1.to(device=device, dtype=dtype)
+ else:
+ self.w1_a = self.w1_a.to(device=device, dtype=dtype)
+ self.w1_b = self.w1_b.to(device=device, dtype=dtype)
+
+ if self.w2 is not None:
+ self.w2 = self.w2.to(device=device, dtype=dtype)
+ else:
+ self.w2_a = self.w2_a.to(device=device, dtype=dtype)
+ self.w2_b = self.w2_b.to(device=device, dtype=dtype)
+
+ if self.t2 is not None:
+ self.t2 = self.t2.to(device=device, dtype=dtype)
+
+
+class FullLayer(LoRALayerBase):
+ # weight: torch.Tensor
+
+ def __init__(
+ self,
+ layer_key: str,
+ values: dict,
+ ):
+ super().__init__(layer_key, values)
+
+ self.weight = values["diff"]
+
+ if len(values.keys()) > 1:
+ _keys = list(values.keys())
+ _keys.remove("diff")
+ raise NotImplementedError(f"Unexpected keys in lora diff layer: {_keys}")
+
+ self.rank = None # unscaled
+
+ def get_weight(self, orig_weight: torch.Tensor):
+ return self.weight
+
+ def calc_size(self) -> int:
+ model_size = super().calc_size()
+ model_size += self.weight.nelement() * self.weight.element_size()
+ return model_size
+
+ def to(
+ self,
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ ):
+ super().to(device=device, dtype=dtype)
+
+ self.weight = self.weight.to(device=device, dtype=dtype)
+
+
+class IA3Layer(LoRALayerBase):
+ # weight: torch.Tensor
+ # on_input: torch.Tensor
+
+ def __init__(
+ self,
+ layer_key: str,
+ values: dict,
+ ):
+ super().__init__(layer_key, values)
+
+ self.weight = values["weight"]
+ self.on_input = values["on_input"]
+
+ self.rank = None # unscaled
+
+ def get_weight(self, orig_weight: torch.Tensor):
+ weight = self.weight
+ if not self.on_input:
+ weight = weight.reshape(-1, 1)
+ return orig_weight * weight
+
+ def calc_size(self) -> int:
+ model_size = super().calc_size()
+ model_size += self.weight.nelement() * self.weight.element_size()
+ model_size += self.on_input.nelement() * self.on_input.element_size()
+ return model_size
+
+ def to(
+ self,
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ ):
+ super().to(device=device, dtype=dtype)
+
+ self.weight = self.weight.to(device=device, dtype=dtype)
+ self.on_input = self.on_input.to(device=device, dtype=dtype)
+
+
+# TODO: rename all methods used in model logic with an Info postfix, and drop the Raw postfix here
+class LoRAModelRaw: # (torch.nn.Module):
+ _name: str
+ layers: Dict[str, LoRALayer]
+ _device: torch.device
+ _dtype: torch.dtype
+
+ def __init__(
+ self,
+ name: str,
+ layers: Dict[str, LoRALayer],
+ device: torch.device,
+ dtype: torch.dtype,
+ ):
+ self._name = name
+ self._device = device or torch.device("cpu")
+ self._dtype = dtype or torch.float32
+ self.layers = layers
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def device(self):
+ return self._device
+
+ @property
+ def dtype(self):
+ return self._dtype
+
+ def to(
+ self,
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ ):
+ # TODO: try revert if exception?
+ for key, layer in self.layers.items():
+ layer.to(device=device, dtype=dtype)
+ self._device = device
+ self._dtype = dtype
+
+ def calc_size(self) -> int:
+ model_size = 0
+ for _, layer in self.layers.items():
+ model_size += layer.calc_size()
+ return model_size
+
+ @classmethod
+ def _convert_sdxl_keys_to_diffusers_format(cls, state_dict):
+ """Convert the keys of an SDXL LoRA state_dict to diffusers format.
+
+ The input state_dict can be in either Stability AI format or diffusers format. If the state_dict is already in
+ diffusers format, then this function will have no effect.
+
+ This function is adapted from:
+ https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L385-L409
+
+ Args:
+ state_dict (Dict[str, Tensor]): The SDXL LoRA state_dict.
+
+ Raises:
+ ValueError: If state_dict contains an unrecognized key, or not all keys could be converted.
+
+ Returns:
+ Dict[str, Tensor]: The diffusers-format state_dict.
+ """
+ converted_count = 0 # The number of Stability AI keys converted to diffusers format.
+ not_converted_count = 0 # The number of keys that were not converted.
+
+ # Get a sorted list of Stability AI UNet keys so that we can efficiently search for keys with matching prefixes.
+ # For example, we want to efficiently find `input_blocks_4_1` in the list when searching for
+ # `input_blocks_4_1_proj_in`.
+ stability_unet_keys = list(SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP)
+ stability_unet_keys.sort()
+
+ new_state_dict = dict()
+ for full_key, value in state_dict.items():
+ if full_key.startswith("lora_unet_"):
+ search_key = full_key.replace("lora_unet_", "")
+ # Use bisect to find the key in stability_unet_keys that *may* match the search_key's prefix.
+ position = bisect.bisect_right(stability_unet_keys, search_key)
+ map_key = stability_unet_keys[position - 1]
+ # Now, check if the map_key *actually* matches the search_key.
+ if search_key.startswith(map_key):
+ new_key = full_key.replace(map_key, SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP[map_key])
+ new_state_dict[new_key] = value
+ converted_count += 1
+ else:
+ new_state_dict[full_key] = value
+ not_converted_count += 1
+ elif full_key.startswith("lora_te1_") or full_key.startswith("lora_te2_"):
+ # The CLIP text encoders have the same keys in both Stability AI and diffusers formats.
+ new_state_dict[full_key] = value
+ continue
+ else:
+ raise ValueError(f"Unrecognized SDXL LoRA key prefix: '{full_key}'.")
+
+ if converted_count > 0 and not_converted_count > 0:
+ raise ValueError(
+ f"The SDXL LoRA could only be partially converted to diffusers format. converted={converted_count},"
+ f" not_converted={not_converted_count}"
+ )
+
+ return new_state_dict
+
+ @classmethod
+ def from_checkpoint(
+ cls,
+ file_path: Union[str, Path],
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ base_model: Optional[BaseModelType] = None,
+ ):
+ device = device or torch.device("cpu")
+ dtype = dtype or torch.float32
+
+ if isinstance(file_path, str):
+ file_path = Path(file_path)
+
+ model = cls(
+ device=device,
+ dtype=dtype,
+ name=file_path.stem, # TODO:
+ layers=dict(),
+ )
+
+ if file_path.suffix == ".safetensors":
+ state_dict = load_file(file_path.absolute().as_posix(), device="cpu")
+ else:
+ state_dict = torch.load(file_path, map_location="cpu")
+
+ state_dict = cls._group_state(state_dict)
+
+ if base_model == BaseModelType.StableDiffusionXL:
+ state_dict = cls._convert_sdxl_keys_to_diffusers_format(state_dict)
+
+ for layer_key, values in state_dict.items():
+ # lora and locon
+ if "lora_down.weight" in values:
+ layer = LoRALayer(layer_key, values)
+
+ # loha
+ elif "hada_w1_b" in values:
+ layer = LoHALayer(layer_key, values)
+
+ # lokr
+ elif "lokr_w1_b" in values or "lokr_w1" in values:
+ layer = LoKRLayer(layer_key, values)
+
+ # diff
+ elif "diff" in values:
+ layer = FullLayer(layer_key, values)
+
+ # ia3
+ elif "weight" in values and "on_input" in values:
+ layer = IA3Layer(layer_key, values)
+
+ else:
+ print(f">> Encountered unknown lora layer module in {model.name}: {layer_key} - {list(values.keys())}")
+ raise Exception("Unknown lora format!")
+
+ # lower memory consumption by removing already parsed layer values
+ state_dict[layer_key].clear()
+
+ layer.to(device=device, dtype=dtype)
+ model.layers[layer_key] = layer
+
+ return model
+
+ @staticmethod
+ def _group_state(state_dict: dict):
+ state_dict_grouped = dict()
+
+ for key, value in state_dict.items():
+ stem, leaf = key.split(".", 1)
+ if stem not in state_dict_grouped:
+ state_dict_grouped[stem] = dict()
+ state_dict_grouped[stem][leaf] = value
+
+ return state_dict_grouped
+
+
+# code from
+# https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L15C1-L97C32
+def make_sdxl_unet_conversion_map():
+ """Create a dict mapping state_dict keys from Stability AI SDXL format to diffusers SDXL format."""
+ unet_conversion_map_layer = []
+
+ for i in range(3): # num_blocks is 3 in sdxl
+ # loop over downblocks/upblocks
+ for j in range(2):
+ # loop over resnets/attentions for downblocks
+ hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
+ sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
+ unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
+
+ if i < 3:
+ # no attention layers in down_blocks.3
+ hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
+ sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
+ unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
+
+ for j in range(3):
+ # loop over resnets/attentions for upblocks
+ hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
+ sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
+ unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
+
+ # if i > 0: commentout for sdxl
+ # no attention layers in up_blocks.0
+ hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
+ sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
+ unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
+
+ if i < 3:
+ # no downsample in down_blocks.3
+ hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
+ sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
+ unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
+
+ # no upsample in up_blocks.3
+ hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
+ sd_upsample_prefix = f"output_blocks.{3*i + 2}.{2}." # change for sdxl
+ unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
+
+ hf_mid_atn_prefix = "mid_block.attentions.0."
+ sd_mid_atn_prefix = "middle_block.1."
+ unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
+
+ for j in range(2):
+ hf_mid_res_prefix = f"mid_block.resnets.{j}."
+ sd_mid_res_prefix = f"middle_block.{2*j}."
+ unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
+
+ unet_conversion_map_resnet = [
+ # (stable-diffusion, HF Diffusers)
+ ("in_layers.0.", "norm1."),
+ ("in_layers.2.", "conv1."),
+ ("out_layers.0.", "norm2."),
+ ("out_layers.3.", "conv2."),
+ ("emb_layers.1.", "time_emb_proj."),
+ ("skip_connection.", "conv_shortcut."),
+ ]
+
+ unet_conversion_map = []
+ for sd, hf in unet_conversion_map_layer:
+ if "resnets" in hf:
+ for sd_res, hf_res in unet_conversion_map_resnet:
+ unet_conversion_map.append((sd + sd_res, hf + hf_res))
+ else:
+ unet_conversion_map.append((sd, hf))
+
+ for j in range(2):
+ hf_time_embed_prefix = f"time_embedding.linear_{j+1}."
+ sd_time_embed_prefix = f"time_embed.{j*2}."
+ unet_conversion_map.append((sd_time_embed_prefix, hf_time_embed_prefix))
+
+ for j in range(2):
+ hf_label_embed_prefix = f"add_embedding.linear_{j+1}."
+ sd_label_embed_prefix = f"label_emb.0.{j*2}."
+ unet_conversion_map.append((sd_label_embed_prefix, hf_label_embed_prefix))
+
+ unet_conversion_map.append(("input_blocks.0.0.", "conv_in."))
+ unet_conversion_map.append(("out.0.", "conv_norm_out."))
+ unet_conversion_map.append(("out.2.", "conv_out."))
+
+ return unet_conversion_map
+
+
+SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP = {
+ sd.rstrip(".").replace(".", "_"): hf.rstrip(".").replace(".", "_") for sd, hf in make_sdxl_unet_conversion_map()
+}
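The layer classes added above only rebuild a weight delta (`get_weight`) and report their memory footprint; the code that actually patches a module with that delta lives elsewhere in the model manager. A minimal sketch of how a plain LoRALayer could be applied, assuming a hypothetical `apply_lora_layer` helper and the alpha/rank scaling hinted at by the commented-out `scale` property (IA3 layers differ: their `get_weight` already returns a rescaled weight rather than a delta):

    # Hedged sketch; `apply_lora_layer` is illustrative, not InvokeAI API.
    import torch

    def apply_lora_layer(module: torch.nn.Module, layer: LoRALayer, lora_weight: float = 1.0) -> None:
        # scale = alpha / rank when both are known, else 1.0 (mirrors the commented-out property)
        scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0
        delta = layer.get_weight(module.weight).reshape(module.weight.shape)
        module.weight.data += delta.to(dtype=module.weight.dtype, device=module.weight.device) * scale * lora_weight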
diff --git a/invokeai/backend/model_management/models/sdxl.py b/invokeai/backend/model_management/models/sdxl.py
index 7fc3efb77c..5bbe05be98 100644
--- a/invokeai/backend/model_management/models/sdxl.py
+++ b/invokeai/backend/model_management/models/sdxl.py
@@ -1,6 +1,5 @@
import os
import json
-import invokeai.backend.util.logging as logger
from enum import Enum
from pydantic import Field
from typing import Literal, Optional
@@ -12,6 +11,7 @@ from .base import (
DiffusersModel,
read_checkpoint_meta,
classproperty,
+ InvalidModelException,
)
from omegaconf import OmegaConf
@@ -65,7 +65,7 @@ class StableDiffusionXLModel(DiffusersModel):
in_channels = unet_config["in_channels"]
else:
- raise Exception("Not supported stable diffusion diffusers format(possibly onnx?)")
+ raise InvalidModelException(f"{path} is not a recognized Stable Diffusion diffusers model")
else:
raise NotImplementedError(f"Unknown stable diffusion 2.* format: {model_format}")
@@ -80,8 +80,10 @@ class StableDiffusionXLModel(DiffusersModel):
raise Exception("Unkown stable diffusion 2.* model format")
if ckpt_config_path is None:
- # TO DO: implement picking
- pass
+ # avoid circular import
+ from .stable_diffusion import _select_ckpt_config
+
+ ckpt_config_path = _select_ckpt_config(kwargs.get("model_base", BaseModelType.StableDiffusionXL), variant)
return cls.create_config(
path=path,
diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py
index d81b0150e5..cc34f14b9c 100644
--- a/invokeai/backend/model_management/models/stable_diffusion.py
+++ b/invokeai/backend/model_management/models/stable_diffusion.py
@@ -4,6 +4,7 @@ from enum import Enum
from pydantic import Field
from pathlib import Path
from typing import Literal, Optional, Union
+from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline
from .base import (
ModelConfigBase,
BaseModelType,
@@ -263,6 +264,8 @@ def _convert_ckpt_and_cache(
weights = app_config.models_path / model_config.path
config_file = app_config.root_path / model_config.config
output_path = Path(output_path)
+ variant = model_config.variant
+ pipeline_class = StableDiffusionInpaintPipeline if variant == "inpaint" else StableDiffusionPipeline
# return cached version if it exists
if output_path.exists():
@@ -289,6 +292,7 @@ def _convert_ckpt_and_cache(
original_config_file=config_file,
extract_ema=True,
scan_needed=True,
+ pipeline_class=pipeline_class,
from_safetensors=weights.suffix == ".safetensors",
precision=torch_dtype(choose_torch_device()),
**kwargs,
@@ -326,5 +330,5 @@ def _select_ckpt_config(version: BaseModelType, variant: ModelVariantType):
config_path = config_path.relative_to(app_config.root_path)
return str(config_path)
- except:
+ except Exception:
return None
diff --git a/invokeai/backend/model_management/models/stable_diffusion_onnx.py b/invokeai/backend/model_management/models/stable_diffusion_onnx.py
index 03693e2c3e..2780ba4728 100644
--- a/invokeai/backend/model_management/models/stable_diffusion_onnx.py
+++ b/invokeai/backend/model_management/models/stable_diffusion_onnx.py
@@ -1,25 +1,17 @@
-import os
-import json
from enum import Enum
-from pydantic import Field
-from pathlib import Path
-from typing import Literal, Optional, Union
+from typing import Literal
+
+from diffusers import OnnxRuntimeModel
from .base import (
- ModelBase,
ModelConfigBase,
BaseModelType,
ModelType,
- SubModelType,
ModelVariantType,
DiffusersModel,
SchedulerPredictionType,
- SilenceWarnings,
- read_checkpoint_meta,
classproperty,
- OnnxRuntimeModel,
IAIOnnxRuntimeModel,
)
-from invokeai.app.services.config import InvokeAIAppConfig
class StableDiffusionOnnxModelFormat(str, Enum):
diff --git a/invokeai/backend/model_management/models/vae.py b/invokeai/backend/model_management/models/vae.py
index b15844bcf8..cf7622a9aa 100644
--- a/invokeai/backend/model_management/models/vae.py
+++ b/invokeai/backend/model_management/models/vae.py
@@ -1,9 +1,14 @@
import os
-import torch
-import safetensors
from enum import Enum
from pathlib import Path
-from typing import Optional, Union, Literal
+from typing import Optional
+
+import safetensors
+import torch
+from diffusers.utils import is_safetensors_available
+from omegaconf import OmegaConf
+
+from invokeai.app.services.config import InvokeAIAppConfig
from .base import (
ModelBase,
ModelConfigBase,
@@ -18,9 +23,6 @@ from .base import (
InvalidModelException,
ModelNotFoundException,
)
-from invokeai.app.services.config import InvokeAIAppConfig
-from diffusers.utils import is_safetensors_available
-from omegaconf import OmegaConf
class VaeModelFormat(str, Enum):
@@ -42,14 +44,14 @@ class VaeModel(ModelBase):
try:
config = EmptyConfigLoader.load_config(self.model_path, config_name="config.json")
# config = json.loads(os.path.join(self.model_path, "config.json"))
- except:
+ except Exception:
raise Exception("Invalid vae model! (config.json not found or invalid)")
try:
vae_class_name = config.get("_class_name", "AutoencoderKL")
self.vae_class = self._hf_definition_to_type(["diffusers", vae_class_name])
self.model_size = calc_model_size_by_fs(self.model_path)
- except:
+ except Exception:
raise Exception("Invalid vae model! (Unkown vae type)")
def get_size(self, child_type: Optional[SubModelType] = None):
@@ -80,7 +82,7 @@ class VaeModel(ModelBase):
@classmethod
def detect_format(cls, path: str):
if not os.path.exists(path):
- raise ModelNotFoundException()
+ raise ModelNotFoundException(f"Does not exist as local file: {path}")
if os.path.isdir(path):
if os.path.exists(os.path.join(path, "config.json")):
diff --git a/invokeai/backend/model_management/util.py b/invokeai/backend/model_management/util.py
new file mode 100644
index 0000000000..6d70107c93
--- /dev/null
+++ b/invokeai/backend/model_management/util.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2023 The InvokeAI Development Team
+"""Utilities used by the Model Manager"""
+
+
+def lora_token_vector_length(checkpoint: dict) -> int:
+ """
+ Given a checkpoint in memory, return the lora token vector length
+
+ :param checkpoint: The checkpoint
+ """
+
+ def _get_shape_1(key, tensor, checkpoint):
+ lora_token_vector_length = None
+
+ if "." not in key:
+ return lora_token_vector_length # wrong key format
+ model_key, lora_key = key.split(".", 1)
+
+ # check lora/locon
+ if lora_key == "lora_down.weight":
+ lora_token_vector_length = tensor.shape[1]
+
+ # check loha (don't worry about hada_t1/hada_t2 as they are used only in 4d shapes)
+ elif lora_key in ["hada_w1_b", "hada_w2_b"]:
+ lora_token_vector_length = tensor.shape[1]
+
+ # check lokr (don't worry about lokr_t2 as it is used only in 4d shapes)
+ elif "lokr_" in lora_key:
+ if model_key + ".lokr_w1" in checkpoint:
+ _lokr_w1 = checkpoint[model_key + ".lokr_w1"]
+ elif model_key + "lokr_w1_b" in checkpoint:
+ _lokr_w1 = checkpoint[model_key + ".lokr_w1_b"]
+ else:
+ return lora_token_vector_length # unknown format
+
+ if model_key + ".lokr_w2" in checkpoint:
+ _lokr_w2 = checkpoint[model_key + ".lokr_w2"]
+ elif model_key + "lokr_w2_b" in checkpoint:
+ _lokr_w2 = checkpoint[model_key + ".lokr_w2_b"]
+ else:
+ return lora_token_vector_length # unknown format
+
+ lora_token_vector_length = _lokr_w1.shape[1] * _lokr_w2.shape[1]
+
+ elif lora_key == "diff":
+ lora_token_vector_length = tensor.shape[1]
+
+ # ia3 can be detected only by shape[0] in text encoder
+ elif lora_key == "weight" and "lora_unet_" not in model_key:
+ lora_token_vector_length = tensor.shape[0]
+
+ return lora_token_vector_length
+
+ lora_token_vector_length = None
+ lora_te1_length = None
+ lora_te2_length = None
+ for key, tensor in checkpoint.items():
+ if key.startswith("lora_unet_") and ("_attn2_to_k." in key or "_attn2_to_v." in key):
+ lora_token_vector_length = _get_shape_1(key, tensor, checkpoint)
+ elif key.startswith("lora_te") and "_self_attn_" in key:
+ tmp_length = _get_shape_1(key, tensor, checkpoint)
+ if key.startswith("lora_te_"):
+ lora_token_vector_length = tmp_length
+ elif key.startswith("lora_te1_"):
+ lora_te1_length = tmp_length
+ elif key.startswith("lora_te2_"):
+ lora_te2_length = tmp_length
+
+ if lora_te1_length is not None and lora_te2_length is not None:
+ lora_token_vector_length = lora_te1_length + lora_te2_length
+
+ if lora_token_vector_length is not None:
+ break
+
+ return lora_token_vector_length
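`lora_token_vector_length` reports the cross-attention context width the LoRA was trained against, which is what makes it useful for probing the base model of a bare LoRA file. A sketch of that use, assuming the usual context dimensions (768 for SD 1.x, 1024 for SD 2.x, and 2048 for SDXL, i.e. both text encoders concatenated); the helper name `guess_lora_base_model` is illustrative, not part of the patch:

    # Hedged sketch only.
    from typing import Optional

    from safetensors.torch import load_file

    from invokeai.backend.model_management.util import lora_token_vector_length

    def guess_lora_base_model(path: str) -> Optional[str]:
        checkpoint = load_file(path, device="cpu")
        length = lora_token_vector_length(checkpoint)
        return {768: "sd-1", 1024: "sd-2", 2048: "sdxl"}.get(length)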
diff --git a/invokeai/backend/stable_diffusion/__init__.py b/invokeai/backend/stable_diffusion/__init__.py
index 37024ccace..a958750802 100644
--- a/invokeai/backend/stable_diffusion/__init__.py
+++ b/invokeai/backend/stable_diffusion/__init__.py
@@ -1,11 +1,15 @@
"""
Initialization file for the invokeai.backend.stable_diffusion package
"""
-from .diffusers_pipeline import (
+from .diffusers_pipeline import ( # noqa: F401
ConditioningData,
PipelineIntermediateState,
StableDiffusionGeneratorPipeline,
)
-from .diffusion import InvokeAIDiffuserComponent
-from .diffusion.cross_attention_map_saving import AttentionMapSaver
-from .diffusion.shared_invokeai_diffusion import PostprocessingSettings
+from .diffusion import InvokeAIDiffuserComponent # noqa: F401
+from .diffusion.cross_attention_map_saving import AttentionMapSaver # noqa: F401
+from .diffusion.shared_invokeai_diffusion import ( # noqa: F401
+ PostprocessingSettings,
+ BasicConditioningInfo,
+ SDXLConditioningInfo,
+)
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 624d47ff64..0180830b76 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -2,17 +2,11 @@ from __future__ import annotations
import dataclasses
import inspect
-import math
-import secrets
-from collections.abc import Sequence
from dataclasses import dataclass, field
-from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union
-from pydantic import Field
+from typing import Any, Callable, List, Optional, Union
-import einops
import PIL.Image
-import numpy as np
-from accelerate.utils import set_seed
+import einops
import psutil
import torch
import torchvision.transforms as T
@@ -22,36 +16,31 @@ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
StableDiffusionPipeline,
)
-
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import (
- StableDiffusionImg2ImgPipeline,
-)
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput
-from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.outputs import BaseOutput
-from torchvision.transforms.functional import resize as tv_resize
+from pydantic import Field
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
-from typing_extensions import ParamSpec
from invokeai.app.services.config import InvokeAIAppConfig
-from ..util import CPU_DEVICE, normalize_device
from .diffusion import (
AttentionMapSaver,
InvokeAIDiffuserComponent,
PostprocessingSettings,
+ BasicConditioningInfo,
)
-from .offloading import FullyLoadedModelGroup, ModelGroup
+from ..util import normalize_device
@dataclass
class PipelineIntermediateState:
- run_id: str
step: int
+ order: int
+ total_steps: int
timestep: int
latents: torch.Tensor
predicted_original: Optional[torch.Tensor] = None
@@ -102,7 +91,6 @@ class AddsMaskGuidance:
mask_latents: torch.FloatTensor
scheduler: SchedulerMixin
noise: torch.Tensor
- _debug: Optional[Callable] = None
def __call__(self, step_output: Union[BaseOutput, SchedulerOutput], t: torch.Tensor, conditioning) -> BaseOutput:
output_class = step_output.__class__ # We'll create a new one with masked data.
@@ -139,8 +127,6 @@ class AddsMaskGuidance:
# mask_latents = self.scheduler.scale_model_input(mask_latents, t)
mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size)
masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype))
- if self._debug:
- self._debug(masked_input, f"t={t} lerped")
return masked_input
@@ -172,33 +158,6 @@ def is_inpainting_model(unet: UNet2DConditionModel):
return unet.conv_in.in_channels == 9
-CallbackType = TypeVar("CallbackType")
-ReturnType = TypeVar("ReturnType")
-ParamType = ParamSpec("ParamType")
-
-
-@dataclass(frozen=True)
-class GeneratorToCallbackinator(Generic[ParamType, ReturnType, CallbackType]):
- """Convert a generator to a function with a callback and a return value."""
-
- generator_method: Callable[ParamType, ReturnType]
- callback_arg_type: Type[CallbackType]
-
- def __call__(
- self,
- *args: ParamType.args,
- callback: Callable[[CallbackType], Any] = None,
- **kwargs: ParamType.kwargs,
- ) -> ReturnType:
- result = None
- for result in self.generator_method(*args, **kwargs):
- if callback is not None and isinstance(result, self.callback_arg_type):
- callback(result)
- if result is None:
- raise AssertionError("why was that an empty generator?")
- return result
-
-
@dataclass
class ControlNetData:
model: ControlNetModel = Field(default=None)
@@ -212,8 +171,8 @@ class ControlNetData:
@dataclass
class ConditioningData:
- unconditioned_embeddings: torch.Tensor
- text_embeddings: torch.Tensor
+ unconditioned_embeddings: BasicConditioningInfo
+ text_embeddings: BasicConditioningInfo
guidance_scale: Union[float, List[float]]
"""
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
@@ -289,9 +248,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
- _model_group: ModelGroup
-
- ID_LENGTH = 8
def __init__(
self,
@@ -303,9 +259,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
safety_checker: Optional[StableDiffusionSafetyChecker],
feature_extractor: Optional[CLIPFeatureExtractor],
requires_safety_checker: bool = False,
- precision: str = "float32",
control_model: ControlNetModel = None,
- execution_device: Optional[torch.device] = None,
):
super().__init__(
vae,
@@ -330,9 +284,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
# control_model=control_model,
)
self.invokeai_diffuser = InvokeAIDiffuserComponent(self.unet, self._unet_forward)
-
- self._model_group = FullyLoadedModelGroup(execution_device or self.unet.device)
- self._model_group.install(*self._submodels)
self.control_model = control_model
def _adjust_memory_efficient_attention(self, latents: torch.Tensor):
@@ -340,99 +291,41 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
if xformers is available, use it, otherwise use sliced attention.
"""
config = InvokeAIAppConfig.get_config()
- if torch.cuda.is_available() and is_xformers_available() and not config.disable_xformers:
- self.enable_xformers_memory_efficient_attention()
+ if self.unet.device.type == "cuda":
+ if is_xformers_available() and not config.disable_xformers:
+ self.enable_xformers_memory_efficient_attention()
+ return
+ elif hasattr(torch.nn.functional, "scaled_dot_product_attention"):
+ # diffusers enables SDPA automatically
+ return
+
+ if self.unet.device.type == "cpu" or self.unet.device.type == "mps":
+ mem_free = psutil.virtual_memory().free
+ elif self.unet.device.type == "cuda":
+ mem_free, _ = torch.cuda.mem_get_info(normalize_device(self.unet.device))
else:
- if self.device.type == "cpu" or self.device.type == "mps":
- mem_free = psutil.virtual_memory().free
- elif self.device.type == "cuda":
- mem_free, _ = torch.cuda.mem_get_info(normalize_device(self.device))
- else:
- raise ValueError(f"unrecognized device {self.device}")
- # input tensor of [1, 4, h/8, w/8]
- # output tensor of [16, (h/8 * w/8), (h/8 * w/8)]
- bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4
- max_size_required_for_baddbmm = (
- 16
- * latents.size(dim=2)
- * latents.size(dim=3)
- * latents.size(dim=2)
- * latents.size(dim=3)
- * bytes_per_element_needed_for_baddbmm_duplication
- )
- if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # 3.3 / 4.0 is from old Invoke code
- self.enable_attention_slicing(slice_size="max")
- elif torch.backends.mps.is_available():
- # diffusers recommends always enabling for mps
- self.enable_attention_slicing(slice_size="max")
- else:
- self.disable_attention_slicing()
+ raise ValueError(f"unrecognized device {self.unet.device}")
+ # input tensor of [1, 4, h/8, w/8]
+ # output tensor of [16, (h/8 * w/8), (h/8 * w/8)]
+ bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4
+ max_size_required_for_baddbmm = (
+ 16
+ * latents.size(dim=2)
+ * latents.size(dim=3)
+ * latents.size(dim=2)
+ * latents.size(dim=3)
+ * bytes_per_element_needed_for_baddbmm_duplication
+ )
+ if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # 3.3 / 4.0 is from old Invoke code
+ self.enable_attention_slicing(slice_size="max")
+ elif torch.backends.mps.is_available():
+ # diffusers recommends always enabling for mps
+ self.enable_attention_slicing(slice_size="max")
+ else:
+ self.disable_attention_slicing()
def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings=False):
- # overridden method; types match the superclass.
- if torch_device is None:
- return self
- self._model_group.set_device(torch.device(torch_device))
- self._model_group.ready()
-
- @property
- def device(self) -> torch.device:
- return self._model_group.execution_device
-
- @property
- def _submodels(self) -> Sequence[torch.nn.Module]:
- module_names, _, _ = self.extract_init_dict(dict(self.config))
- submodels = []
- for name in module_names.keys():
- if hasattr(self, name):
- value = getattr(self, name)
- else:
- value = getattr(self.config, name)
- if isinstance(value, torch.nn.Module):
- submodels.append(value)
- return submodels
-
- def image_from_embeddings(
- self,
- latents: torch.Tensor,
- num_inference_steps: int,
- conditioning_data: ConditioningData,
- *,
- noise: torch.Tensor,
- callback: Callable[[PipelineIntermediateState], None] = None,
- run_id=None,
- ) -> InvokeAIStableDiffusionPipelineOutput:
- r"""
- Function invoked when calling the pipeline for generation.
-
- :param conditioning_data:
- :param latents: Pre-generated un-noised latents, to be used as inputs for
- image generation. Can be used to tweak the same generation with different prompts.
- :param num_inference_steps: The number of denoising steps. More denoising steps usually lead to a higher quality
- image at the expense of slower inference.
- :param noise: Noise to add to the latents, sampled from a Gaussian distribution.
- :param callback:
- :param run_id:
- """
- result_latents, result_attention_map_saver = self.latents_from_embeddings(
- latents,
- num_inference_steps,
- conditioning_data,
- noise=noise,
- run_id=run_id,
- callback=callback,
- )
- # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
- torch.cuda.empty_cache()
-
- with torch.inference_mode():
- image = self.decode_latents(result_latents)
- output = InvokeAIStableDiffusionPipelineOutput(
- images=image,
- nsfw_content_detected=[],
- attention_map_saver=result_attention_map_saver,
- )
- return self.check_for_safety(output, dtype=conditioning_data.dtype)
+ raise Exception("Should not be called")
def latents_from_embeddings(
self,
@@ -440,35 +333,72 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
num_inference_steps: int,
conditioning_data: ConditioningData,
*,
- noise: torch.Tensor,
- timesteps=None,
+ noise: Optional[torch.Tensor],
+ timesteps: torch.Tensor,
+ init_timestep: torch.Tensor,
additional_guidance: List[Callable] = None,
- run_id=None,
callback: Callable[[PipelineIntermediateState], None] = None,
control_data: List[ControlNetData] = None,
+ mask: Optional[torch.Tensor] = None,
+ seed: Optional[int] = None,
) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
- if self.scheduler.config.get("cpu_only", False):
- scheduler_device = torch.device("cpu")
- else:
- scheduler_device = self._model_group.device_for(self.unet)
+ if init_timestep.shape[0] == 0:
+ return latents, None
- if timesteps is None:
- self.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
- timesteps = self.scheduler.timesteps
- infer_latents_from_embeddings = GeneratorToCallbackinator(
- self.generate_latents_from_embeddings, PipelineIntermediateState
- )
- result: PipelineIntermediateState = infer_latents_from_embeddings(
- latents,
- timesteps,
- conditioning_data,
- noise=noise,
- run_id=run_id,
- additional_guidance=additional_guidance,
- control_data=control_data,
- callback=callback,
- )
- return result.latents, result.attention_map_saver
+ if additional_guidance is None:
+ additional_guidance = []
+
+ orig_latents = latents.clone()
+
+ batch_size = latents.shape[0]
+ batched_t = init_timestep.expand(batch_size)
+
+ if noise is not None:
+ # latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers
+ latents = self.scheduler.add_noise(latents, noise, batched_t)
+
+ if mask is not None:
+ if is_inpainting_model(self.unet):
+ # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint
+ # (that's why there's a mask!) but it seems to really want that blanked out.
+ # masked_latents = latents * torch.where(mask < 0.5, 1, 0) TODO: inpaint/outpaint/infill
+
+ # TODO: we should probably pass this in so we don't have to try/finally around setting it.
+ self.invokeai_diffuser.model_forward_callback = AddsMaskLatents(self._unet_forward, mask, orig_latents)
+ else:
+ # if no noise is provided, noise the unmasked area based on the seed (or 0 as a fallback)
+ if noise is None:
+ noise = torch.randn(
+ orig_latents.shape,
+ dtype=torch.float32,
+ device="cpu",
+ generator=torch.Generator(device="cpu").manual_seed(seed or 0),
+ ).to(device=orig_latents.device, dtype=orig_latents.dtype)
+
+ latents = self.scheduler.add_noise(latents, noise, batched_t)
+ latents = torch.lerp(
+ orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype)
+ )
+
+ additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise))
+
+ try:
+ latents, attention_map_saver = self.generate_latents_from_embeddings(
+ latents,
+ timesteps,
+ conditioning_data,
+ additional_guidance=additional_guidance,
+ control_data=control_data,
+ callback=callback,
+ )
+ finally:
+ self.invokeai_diffuser.model_forward_callback = self._unet_forward
+
+ # restore unmasked part
+ if mask is not None:
+ latents = torch.lerp(orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype))
+
+ return latents, attention_map_saver
def generate_latents_from_embeddings(
self,
@@ -476,42 +406,40 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
timesteps,
conditioning_data: ConditioningData,
*,
- noise: torch.Tensor,
- run_id: str = None,
additional_guidance: List[Callable] = None,
control_data: List[ControlNetData] = None,
+ callback: Callable[[PipelineIntermediateState], None] = None,
):
self._adjust_memory_efficient_attention(latents)
- if run_id is None:
- run_id = secrets.token_urlsafe(self.ID_LENGTH)
if additional_guidance is None:
additional_guidance = []
+
+ batch_size = latents.shape[0]
+ attention_map_saver: Optional[AttentionMapSaver] = None
+
+ if timesteps.shape[0] == 0:
+ return latents, attention_map_saver
+
extra_conditioning_info = conditioning_data.extra
with self.invokeai_diffuser.custom_attention_context(
self.invokeai_diffuser.model,
extra_conditioning_info=extra_conditioning_info,
step_count=len(self.scheduler.timesteps),
):
- yield PipelineIntermediateState(
- run_id=run_id,
- step=-1,
- timestep=self.scheduler.config.num_train_timesteps,
- latents=latents,
- )
+ if callback is not None:
+ callback(
+ PipelineIntermediateState(
+ step=-1,
+ order=self.scheduler.order,
+ total_steps=len(timesteps),
+ timestep=self.scheduler.config.num_train_timesteps,
+ latents=latents,
+ )
+ )
- batch_size = latents.shape[0]
- batched_t = torch.full(
- (batch_size,),
- timesteps[0],
- dtype=timesteps.dtype,
- device=self._model_group.device_for(self.unet),
- )
- latents = self.scheduler.add_noise(latents, noise, batched_t)
-
- attention_map_saver: Optional[AttentionMapSaver] = None
# print("timesteps:", timesteps)
for i, t in enumerate(self.progress_bar(timesteps)):
- batched_t.fill_(t)
+ batched_t = t.expand(batch_size)
step_output = self.step(
batched_t,
latents,
@@ -540,14 +468,18 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
# attention_map_saver = AttentionMapSaver(token_ids=attention_map_token_ids, latents_shape=latents.shape[-2:])
# self.invokeai_diffuser.setup_attention_map_saving(attention_map_saver)
- yield PipelineIntermediateState(
- run_id=run_id,
- step=i,
- timestep=int(t),
- latents=latents,
- predicted_original=predicted_original,
- attention_map_saver=attention_map_saver,
- )
+ if callback is not None:
+ callback(
+ PipelineIntermediateState(
+ step=i,
+ order=self.scheduler.order,
+ total_steps=len(timesteps),
+ timestep=int(t),
+ latents=latents,
+ predicted_original=predicted_original,
+ attention_map_saver=attention_map_saver,
+ )
+ )
return latents, attention_map_saver
@@ -569,95 +501,39 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
# TODO: should this scaling happen here or inside self._unet_forward?
# i.e. before or after passing it to InvokeAIDiffuserComponent
- unet_latent_input = self.scheduler.scale_model_input(latents, timestep)
+ latent_model_input = self.scheduler.scale_model_input(latents, timestep)
# default is no controlnet, so set controlnet processing output to None
- down_block_res_samples, mid_block_res_sample = None, None
-
+ controlnet_down_block_samples, controlnet_mid_block_sample = None, None
if control_data is not None:
- # control_data should be type List[ControlNetData]
- # this loop covers both ControlNet (one ControlNetData in list)
- # and MultiControlNet (multiple ControlNetData in list)
- for i, control_datum in enumerate(control_data):
- control_mode = control_datum.control_mode
- # soft_injection and cfg_injection are the two ControlNet control_mode booleans
- # that are combined at higher level to make control_mode enum
- # soft_injection determines whether to do per-layer re-weighting adjustment (if True)
- # or default weighting (if False)
- soft_injection = control_mode == "more_prompt" or control_mode == "more_control"
- # cfg_injection = determines whether to apply ControlNet to only the conditional (if True)
- # or the default both conditional and unconditional (if False)
- cfg_injection = control_mode == "more_control" or control_mode == "unbalanced"
+ controlnet_down_block_samples, controlnet_mid_block_sample = self.invokeai_diffuser.do_controlnet_step(
+ control_data=control_data,
+ sample=latent_model_input,
+ timestep=timestep,
+ step_index=step_index,
+ total_step_count=total_step_count,
+ conditioning_data=conditioning_data,
+ )
- first_control_step = math.floor(control_datum.begin_step_percent * total_step_count)
- last_control_step = math.ceil(control_datum.end_step_percent * total_step_count)
- # only apply controlnet if current step is within the controlnet's begin/end step range
- if step_index >= first_control_step and step_index <= last_control_step:
- if cfg_injection:
- control_latent_input = unet_latent_input
- else:
- # expand the latents input to control model if doing classifier free guidance
- # (which I think for now is always true, there is conditional elsewhere that stops execution if
- # classifier_free_guidance is <= 1.0 ?)
- control_latent_input = torch.cat([unet_latent_input] * 2)
-
- if cfg_injection: # only applying ControlNet to conditional instead of in unconditioned
- encoder_hidden_states = conditioning_data.text_embeddings
- encoder_attention_mask = None
- else:
- (
- encoder_hidden_states,
- encoder_attention_mask,
- ) = self.invokeai_diffuser._concat_conditionings_for_batch(
- conditioning_data.unconditioned_embeddings,
- conditioning_data.text_embeddings,
- )
- if isinstance(control_datum.weight, list):
- # if controlnet has multiple weights, use the weight for the current step
- controlnet_weight = control_datum.weight[step_index]
- else:
- # if controlnet has a single weight, use it for all steps
- controlnet_weight = control_datum.weight
-
- # controlnet(s) inference
- down_samples, mid_sample = control_datum.model(
- sample=control_latent_input,
- timestep=timestep,
- encoder_hidden_states=encoder_hidden_states,
- controlnet_cond=control_datum.image_tensor,
- conditioning_scale=controlnet_weight, # controlnet specific, NOT the guidance scale
- encoder_attention_mask=encoder_attention_mask,
- guess_mode=soft_injection, # this is still called guess_mode in diffusers ControlNetModel
- return_dict=False,
- )
- if cfg_injection:
- # Inferred ControlNet only for the conditional batch.
- # To apply the output of ControlNet to both the unconditional and conditional batches,
- # prepend zeros for unconditional batch
- down_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_samples]
- mid_sample = torch.cat([torch.zeros_like(mid_sample), mid_sample])
-
- if down_block_res_samples is None and mid_block_res_sample is None:
- down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
- else:
- # add controlnet outputs together if have multiple controlnets
- down_block_res_samples = [
- samples_prev + samples_curr
- for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
- ]
- mid_block_res_sample += mid_sample
-
- # predict the noise residual
- noise_pred = self.invokeai_diffuser.do_diffusion_step(
- x=unet_latent_input,
- sigma=t,
- unconditioning=conditioning_data.unconditioned_embeddings,
- conditioning=conditioning_data.text_embeddings,
- unconditional_guidance_scale=conditioning_data.guidance_scale,
+ uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step(
+ sample=latent_model_input,
+ timestep=t, # TODO: verify how batched and non-batched timesteps are handled
step_index=step_index,
total_step_count=total_step_count,
- down_block_additional_residuals=down_block_res_samples, # from controlnet(s)
- mid_block_additional_residual=mid_block_res_sample, # from controlnet(s)
+ conditioning_data=conditioning_data,
+ # extra:
+ down_block_additional_residuals=controlnet_down_block_samples, # from controlnet(s)
+ mid_block_additional_residual=controlnet_mid_block_sample, # from controlnet(s)
+ )
+
+ guidance_scale = conditioning_data.guidance_scale
+ if isinstance(guidance_scale, list):
+ guidance_scale = guidance_scale[step_index]
+
+ noise_pred = self.invokeai_diffuser._combine(
+ uc_noise_pred,
+ c_noise_pred,
+ guidance_scale,
)
# compute the previous noisy sample x_t -> x_t-1
@@ -699,224 +575,3 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
cross_attention_kwargs=cross_attention_kwargs,
**kwargs,
).sample
-
- def img2img_from_embeddings(
- self,
- init_image: Union[torch.FloatTensor, PIL.Image.Image],
- strength: float,
- num_inference_steps: int,
- conditioning_data: ConditioningData,
- *,
- callback: Callable[[PipelineIntermediateState], None] = None,
- run_id=None,
- noise_func=None,
- seed=None,
- ) -> InvokeAIStableDiffusionPipelineOutput:
- if isinstance(init_image, PIL.Image.Image):
- init_image = image_resized_to_grid_as_tensor(init_image.convert("RGB"))
-
- if init_image.dim() == 3:
- init_image = einops.rearrange(init_image, "c h w -> 1 c h w")
-
- # 6. Prepare latent variables
- initial_latents = self.non_noised_latents_from_image(
- init_image,
- device=self._model_group.device_for(self.unet),
- dtype=self.unet.dtype,
- )
- if seed is not None:
- set_seed(seed)
- noise = noise_func(initial_latents)
-
- return self.img2img_from_latents_and_embeddings(
- initial_latents,
- num_inference_steps,
- conditioning_data,
- strength,
- noise,
- run_id,
- callback,
- )
-
- def img2img_from_latents_and_embeddings(
- self,
- initial_latents,
- num_inference_steps,
- conditioning_data: ConditioningData,
- strength,
- noise: torch.Tensor,
- run_id=None,
- callback=None,
- ) -> InvokeAIStableDiffusionPipelineOutput:
- timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)
- result_latents, result_attention_maps = self.latents_from_embeddings(
- latents=initial_latents
- if strength < 1.0
- else torch.zeros_like(initial_latents, device=initial_latents.device, dtype=initial_latents.dtype),
- num_inference_steps=num_inference_steps,
- conditioning_data=conditioning_data,
- timesteps=timesteps,
- noise=noise,
- run_id=run_id,
- callback=callback,
- )
-
- # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
- torch.cuda.empty_cache()
-
- with torch.inference_mode():
- image = self.decode_latents(result_latents)
- output = InvokeAIStableDiffusionPipelineOutput(
- images=image,
- nsfw_content_detected=[],
- attention_map_saver=result_attention_maps,
- )
- return self.check_for_safety(output, dtype=conditioning_data.dtype)
-
- def get_img2img_timesteps(self, num_inference_steps: int, strength: float, device=None) -> (torch.Tensor, int):
- img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components)
- assert img2img_pipeline.scheduler is self.scheduler
-
- if self.scheduler.config.get("cpu_only", False):
- scheduler_device = torch.device("cpu")
- else:
- scheduler_device = self._model_group.device_for(self.unet)
-
- img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
- timesteps, adjusted_steps = img2img_pipeline.get_timesteps(
- num_inference_steps, strength, device=scheduler_device
- )
- # Workaround for low strength resulting in zero timesteps.
- # TODO: submit upstream fix for zero-step img2img
- if timesteps.numel() == 0:
- timesteps = self.scheduler.timesteps[-1:]
- adjusted_steps = timesteps.numel()
- return timesteps, adjusted_steps
-
- def inpaint_from_embeddings(
- self,
- init_image: torch.FloatTensor,
- mask: torch.FloatTensor,
- strength: float,
- num_inference_steps: int,
- conditioning_data: ConditioningData,
- *,
- callback: Callable[[PipelineIntermediateState], None] = None,
- run_id=None,
- noise_func=None,
- seed=None,
- ) -> InvokeAIStableDiffusionPipelineOutput:
- device = self._model_group.device_for(self.unet)
- latents_dtype = self.unet.dtype
-
- if isinstance(init_image, PIL.Image.Image):
- init_image = image_resized_to_grid_as_tensor(init_image.convert("RGB"))
-
- init_image = init_image.to(device=device, dtype=latents_dtype)
- mask = mask.to(device=device, dtype=latents_dtype)
-
- if init_image.dim() == 3:
- init_image = init_image.unsqueeze(0)
-
- timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)
-
- # 6. Prepare latent variables
- # can't quite use upstream StableDiffusionImg2ImgPipeline.prepare_latents
- # because we have our own noise function
- init_image_latents = self.non_noised_latents_from_image(init_image, device=device, dtype=latents_dtype)
- if seed is not None:
- set_seed(seed)
- noise = noise_func(init_image_latents)
-
- if mask.dim() == 3:
- mask = mask.unsqueeze(0)
- latent_mask = tv_resize(mask, init_image_latents.shape[-2:], T.InterpolationMode.BILINEAR).to(
- device=device, dtype=latents_dtype
- )
-
- guidance: List[Callable] = []
-
- if is_inpainting_model(self.unet):
- # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint
- # (that's why there's a mask!) but it seems to really want that blanked out.
- masked_init_image = init_image * torch.where(mask < 0.5, 1, 0)
- masked_latents = self.non_noised_latents_from_image(masked_init_image, device=device, dtype=latents_dtype)
-
- # TODO: we should probably pass this in so we don't have to try/finally around setting it.
- self.invokeai_diffuser.model_forward_callback = AddsMaskLatents(
- self._unet_forward, latent_mask, masked_latents
- )
- else:
- guidance.append(AddsMaskGuidance(latent_mask, init_image_latents, self.scheduler, noise))
-
- try:
- result_latents, result_attention_maps = self.latents_from_embeddings(
- latents=init_image_latents
- if strength < 1.0
- else torch.zeros_like(
- init_image_latents, device=init_image_latents.device, dtype=init_image_latents.dtype
- ),
- num_inference_steps=num_inference_steps,
- conditioning_data=conditioning_data,
- noise=noise,
- timesteps=timesteps,
- additional_guidance=guidance,
- run_id=run_id,
- callback=callback,
- )
- finally:
- self.invokeai_diffuser.model_forward_callback = self._unet_forward
-
- # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
- torch.cuda.empty_cache()
-
- with torch.inference_mode():
- image = self.decode_latents(result_latents)
- output = InvokeAIStableDiffusionPipelineOutput(
- images=image,
- nsfw_content_detected=[],
- attention_map_saver=result_attention_maps,
- )
- return self.check_for_safety(output, dtype=conditioning_data.dtype)
-
- def non_noised_latents_from_image(self, init_image, *, device: torch.device, dtype):
- init_image = init_image.to(device=device, dtype=dtype)
- with torch.inference_mode():
- self._model_group.load(self.vae)
- init_latent_dist = self.vae.encode(init_image).latent_dist
- init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible!
-
- init_latents = 0.18215 * init_latents
- return init_latents
-
- def check_for_safety(self, output, dtype):
- with torch.inference_mode():
- screened_images, has_nsfw_concept = self.run_safety_checker(output.images, dtype=dtype)
- screened_attention_map_saver = None
- if has_nsfw_concept is None or not has_nsfw_concept:
- screened_attention_map_saver = output.attention_map_saver
- return InvokeAIStableDiffusionPipelineOutput(
- screened_images,
- has_nsfw_concept,
- # block the attention maps if NSFW content is detected
- attention_map_saver=screened_attention_map_saver,
- )
-
- def run_safety_checker(self, image, device=None, dtype=None):
- # overriding to use the model group for device info instead of requiring the caller to know.
- if self.safety_checker is not None:
- device = self._model_group.device_for(self.safety_checker)
- return super().run_safety_checker(image, device, dtype)
-
- def decode_latents(self, latents):
- # Explicit call to get the vae loaded, since `decode` isn't the forward method.
- self._model_group.load(self.vae)
- return super().decode_latents(latents)
-
- def debug_latents(self, latents, msg):
- from invokeai.backend.image_util import debug_image
-
- with torch.inference_mode():
- decoded = self.numpy_to_pil(self.decode_latents(latents))
- for i, img in enumerate(decoded):
- debug_image(img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True)
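
For reference, a minimal sketch (not part of this patch) of the image-to-latents path that the removed non_noised_latents_from_image() implemented: encode with the VAE and apply the SD latent scaling factor (0.18215 for SD 1.x). It assumes a recent diffusers AutoencoderKL that exposes config.scaling_factor.

import torch
from diffusers import AutoencoderKL


def image_to_latents(vae: AutoencoderKL, image: torch.Tensor) -> torch.Tensor:
    """Encode a normalized (B, 3, H, W) image in [-1, 1] into scaled latents."""
    with torch.inference_mode():
        latent_dist = vae.encode(image.to(device=vae.device, dtype=vae.dtype)).latent_dist
        latents = latent_dist.sample()
    # same scaling the removed code applied via the literal 0.18215
    return vae.config.scaling_factor * latents
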
diff --git a/invokeai/backend/stable_diffusion/diffusion/__init__.py b/invokeai/backend/stable_diffusion/diffusion/__init__.py
index 6dd2817f29..2bcc595889 100644
--- a/invokeai/backend/stable_diffusion/diffusion/__init__.py
+++ b/invokeai/backend/stable_diffusion/diffusion/__init__.py
@@ -1,6 +1,11 @@
"""
Initialization file for invokeai.models.diffusion
"""
-from .cross_attention_control import InvokeAICrossAttentionMixin
-from .cross_attention_map_saving import AttentionMapSaver
-from .shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
+from .cross_attention_control import InvokeAICrossAttentionMixin # noqa: F401
+from .cross_attention_map_saving import AttentionMapSaver # noqa: F401
+from .shared_invokeai_diffusion import ( # noqa: F401
+ InvokeAIDiffuserComponent,
+ PostprocessingSettings,
+ BasicConditioningInfo,
+ SDXLConditioningInfo,
+)
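
As a usage illustration (not part of this patch) of the conditioning containers re-exported above and defined later in this diff; the tensor shapes below are typical SDXL values and are assumptions, not taken from the patch.

import torch

from invokeai.backend.stable_diffusion.diffusion import SDXLConditioningInfo

cond = SDXLConditioningInfo(
    embeds=torch.zeros(1, 77, 2048),      # per-token text embeddings (assumed SDXL width)
    extra_conditioning=None,
    pooled_embeds=torch.zeros(1, 1280),   # pooled CLIP embedding (assumed)
    add_time_ids=torch.zeros(1, 6),       # original/crop/target size ids (assumed)
)
# .to() moves embeds, pooled_embeds and add_time_ids together
cond = cond.to("cpu", dtype=torch.float32)
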
diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
index 38763ebbee..35d4800859 100644
--- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
+++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
@@ -4,6 +4,7 @@
import enum
import math
+from dataclasses import dataclass, field
from typing import Callable, Optional
import diffusers
@@ -12,6 +13,11 @@ import torch
from compel.cross_attention_control import Arguments
from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers.models.attention_processor import AttentionProcessor
+from diffusers.models.attention_processor import (
+ Attention,
+ AttnProcessor,
+ SlicedAttnProcessor,
+)
from torch import nn
import invokeai.backend.util.logging as logger
@@ -522,14 +528,6 @@ class AttnProcessor:
return hidden_states
"""
-from dataclasses import dataclass, field
-
-import torch
-from diffusers.models.attention_processor import (
- Attention,
- AttnProcessor,
- SlicedAttnProcessor,
-)
@dataclass
diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py
index b0174a455e..abef979b1c 100644
--- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py
+++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py
@@ -5,8 +5,6 @@ import torch
from torchvision.transforms.functional import InterpolationMode
from torchvision.transforms.functional import resize as tv_resize
-from .cross_attention_control import CrossAttentionType, get_cross_attention_modules
-
class AttentionMapSaver:
def __init__(self, token_ids: range, latents_shape: torch.Size):
diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
index 272518e928..f05adafca2 100644
--- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
+++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
@@ -1,15 +1,14 @@
+from __future__ import annotations
+
from contextlib import contextmanager
from dataclasses import dataclass
-from math import ceil
-from typing import Any, Callable, Dict, Optional, Union, List
+import math
+from typing import Any, Callable, Optional, Union
-import numpy as np
import torch
from diffusers import UNet2DConditionModel
-from diffusers.models.attention_processor import AttentionProcessor
from typing_extensions import TypeAlias
-import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from .cross_attention_control import (
@@ -32,6 +31,29 @@ ModelForwardCallback: TypeAlias = Union[
]
+@dataclass
+class BasicConditioningInfo:
+ embeds: torch.Tensor
+ extra_conditioning: Optional[InvokeAIDiffuserComponent.ExtraConditioningInfo]
+ # weight: float
+ # mode: ConditioningAlgo
+
+ def to(self, device, dtype=None):
+ self.embeds = self.embeds.to(device=device, dtype=dtype)
+ return self
+
+
+@dataclass
+class SDXLConditioningInfo(BasicConditioningInfo):
+ pooled_embeds: torch.Tensor
+ add_time_ids: torch.Tensor
+
+ def to(self, device, dtype=None):
+ self.pooled_embeds = self.pooled_embeds.to(device=device, dtype=dtype)
+ self.add_time_ids = self.add_time_ids.to(device=device, dtype=dtype)
+ return super().to(device=device, dtype=dtype)
+
+
@dataclass(frozen=True)
class PostprocessingSettings:
threshold: float
@@ -78,10 +100,9 @@ class InvokeAIDiffuserComponent:
self.cross_attention_control_context = None
self.sequential_guidance = config.sequential_guidance
- @classmethod
@contextmanager
def custom_attention_context(
- cls,
+ self,
unet: UNet2DConditionModel, # note: also may futz with the text encoder depending on requested LoRAs
extra_conditioning_info: Optional[ExtraConditioningInfo],
step_count: int,
@@ -91,18 +112,19 @@ class InvokeAIDiffuserComponent:
old_attn_processors = unet.attn_processors
# Load lora conditions into the model
if extra_conditioning_info.wants_cross_attention_control:
- cross_attention_control_context = Context(
+ self.cross_attention_control_context = Context(
arguments=extra_conditioning_info.cross_attention_control_args,
step_count=step_count,
)
setup_cross_attention_control_attention_processors(
unet,
- cross_attention_control_context,
+ self.cross_attention_control_context,
)
try:
yield None
finally:
+ self.cross_attention_control_context = None
if old_attn_processors is not None:
unet.set_attn_processor(old_attn_processors)
# TODO resuscitate attention map saving
@@ -127,33 +149,126 @@ class InvokeAIDiffuserComponent:
for _, module in tokens_cross_attention_modules:
module.set_attention_slice_calculated_callback(None)
- def do_diffusion_step(
+ def do_controlnet_step(
self,
- x: torch.Tensor,
- sigma: torch.Tensor,
- unconditioning: Union[torch.Tensor, dict],
- conditioning: Union[torch.Tensor, dict],
- # unconditional_guidance_scale: float,
- unconditional_guidance_scale: Union[float, List[float]],
- step_index: Optional[int] = None,
- total_step_count: Optional[int] = None,
+ control_data,
+ sample: torch.Tensor,
+ timestep: torch.Tensor,
+ step_index: int,
+ total_step_count: int,
+ conditioning_data,
+ ):
+ down_block_res_samples, mid_block_res_sample = None, None
+
+ # control_data should be type List[ControlNetData]
+ # this loop covers both ControlNet (one ControlNetData in list)
+ # and MultiControlNet (multiple ControlNetData in list)
+ for i, control_datum in enumerate(control_data):
+ control_mode = control_datum.control_mode
+            # soft_injection and cfg_injection are the two ControlNet booleans that a higher
+            # level combines into the control_mode enum.
+            # soft_injection determines whether to do per-layer re-weighting adjustment (if True)
+            # or to use default weighting (if False)
+ soft_injection = control_mode == "more_prompt" or control_mode == "more_control"
+            # cfg_injection determines whether to apply the ControlNet only to the conditional batch (if True)
+            # or to both the conditional and unconditional batches, the default (if False)
+ cfg_injection = control_mode == "more_control" or control_mode == "unbalanced"
+
+ first_control_step = math.floor(control_datum.begin_step_percent * total_step_count)
+ last_control_step = math.ceil(control_datum.end_step_percent * total_step_count)
+ # only apply controlnet if current step is within the controlnet's begin/end step range
+ if step_index >= first_control_step and step_index <= last_control_step:
+ if cfg_injection:
+ sample_model_input = sample
+ else:
+ # expand the latents input to control model if doing classifier free guidance
+                    # (which I think is always true for now; there is a conditional elsewhere that stops
+                    # execution if classifier_free_guidance is <= 1.0 ?)
+ sample_model_input = torch.cat([sample] * 2)
+
+ added_cond_kwargs = None
+
+                if cfg_injection:  # only apply ControlNet to the conditional batch, not the unconditional
+ if type(conditioning_data.text_embeddings) is SDXLConditioningInfo:
+ added_cond_kwargs = {
+ "text_embeds": conditioning_data.text_embeddings.pooled_embeds,
+ "time_ids": conditioning_data.text_embeddings.add_time_ids,
+ }
+ encoder_hidden_states = conditioning_data.text_embeddings.embeds
+ encoder_attention_mask = None
+ else:
+ if type(conditioning_data.text_embeddings) is SDXLConditioningInfo:
+ added_cond_kwargs = {
+ "text_embeds": torch.cat(
+ [
+ # TODO: how to pad? just by zeros? or even truncate?
+ conditioning_data.unconditioned_embeddings.pooled_embeds,
+ conditioning_data.text_embeddings.pooled_embeds,
+ ],
+ dim=0,
+ ),
+ "time_ids": torch.cat(
+ [
+ conditioning_data.unconditioned_embeddings.add_time_ids,
+ conditioning_data.text_embeddings.add_time_ids,
+ ],
+ dim=0,
+ ),
+ }
+ (
+ encoder_hidden_states,
+ encoder_attention_mask,
+ ) = self._concat_conditionings_for_batch(
+ conditioning_data.unconditioned_embeddings.embeds,
+ conditioning_data.text_embeddings.embeds,
+ )
+ if isinstance(control_datum.weight, list):
+ # if controlnet has multiple weights, use the weight for the current step
+ controlnet_weight = control_datum.weight[step_index]
+ else:
+ # if controlnet has a single weight, use it for all steps
+ controlnet_weight = control_datum.weight
+
+ # controlnet(s) inference
+ down_samples, mid_sample = control_datum.model(
+ sample=sample_model_input,
+ timestep=timestep,
+ encoder_hidden_states=encoder_hidden_states,
+ controlnet_cond=control_datum.image_tensor,
+ conditioning_scale=controlnet_weight, # controlnet specific, NOT the guidance scale
+ encoder_attention_mask=encoder_attention_mask,
+ added_cond_kwargs=added_cond_kwargs,
+ guess_mode=soft_injection, # this is still called guess_mode in diffusers ControlNetModel
+ return_dict=False,
+ )
+ if cfg_injection:
+                    # The ControlNet was run only on the conditional batch. To apply its output to both
+                    # the unconditional and conditional batches, prepend zeros for the unconditional batch.
+ down_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_samples]
+ mid_sample = torch.cat([torch.zeros_like(mid_sample), mid_sample])
+
+ if down_block_res_samples is None and mid_block_res_sample is None:
+ down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
+ else:
+                    # add ControlNet outputs together if there are multiple ControlNets
+ down_block_res_samples = [
+ samples_prev + samples_curr
+ for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
+ ]
+ mid_block_res_sample += mid_sample
+
+ return down_block_res_samples, mid_block_res_sample
+
+ def do_unet_step(
+ self,
+ sample: torch.Tensor,
+ timestep: torch.Tensor,
+ conditioning_data, # TODO: type
+ step_index: int,
+ total_step_count: int,
**kwargs,
):
- """
- :param x: current latents
- :param sigma: aka t, passed to the internal model to control how much denoising will occur
- :param unconditioning: embeddings for unconditioned output. for hybrid conditioning this is a dict of tensors [B x 77 x 768], otherwise a single tensor [B x 77 x 768]
- :param conditioning: embeddings for conditioned output. for hybrid conditioning this is a dict of tensors [B x 77 x 768], otherwise a single tensor [B x 77 x 768]
- :param unconditional_guidance_scale: aka CFG scale, controls how much effect the conditioning tensor has
- :param step_index: counts upwards from 0 to (step_count-1) (as passed to setup_cross_attention_control, if using). May be called multiple times for a single step, therefore do not assume that its value will monotically increase. If None, will be estimated by comparing sigma against self.model.sigmas .
- :return: the new latents after applying the model to x using unscaled unconditioning and CFG-scaled conditioning.
- """
-
- if isinstance(unconditional_guidance_scale, list):
- guidance_scale = unconditional_guidance_scale[step_index]
- else:
- guidance_scale = unconditional_guidance_scale
-
cross_attention_control_types_to_do = []
context: Context = self.cross_attention_control_context
if self.cross_attention_control_context is not None:
@@ -163,25 +278,15 @@ class InvokeAIDiffuserComponent:
)
wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0
- wants_hybrid_conditioning = isinstance(conditioning, dict)
- if wants_hybrid_conditioning:
- unconditioned_next_x, conditioned_next_x = self._apply_hybrid_conditioning(
- x,
- sigma,
- unconditioning,
- conditioning,
- **kwargs,
- )
- elif wants_cross_attention_control:
+ if wants_cross_attention_control:
(
unconditioned_next_x,
conditioned_next_x,
) = self._apply_cross_attention_controlled_conditioning(
- x,
- sigma,
- unconditioning,
- conditioning,
+ sample,
+ timestep,
+ conditioning_data,
cross_attention_control_types_to_do,
**kwargs,
)
@@ -190,10 +295,9 @@ class InvokeAIDiffuserComponent:
unconditioned_next_x,
conditioned_next_x,
) = self._apply_standard_conditioning_sequentially(
- x,
- sigma,
- unconditioning,
- conditioning,
+ sample,
+ timestep,
+ conditioning_data,
**kwargs,
)
@@ -202,21 +306,13 @@ class InvokeAIDiffuserComponent:
unconditioned_next_x,
conditioned_next_x,
) = self._apply_standard_conditioning(
- x,
- sigma,
- unconditioning,
- conditioning,
+ sample,
+ timestep,
+ conditioning_data,
**kwargs,
)
- combined_next_x = self._combine(
- # unconditioned_next_x, conditioned_next_x, unconditional_guidance_scale
- unconditioned_next_x,
- conditioned_next_x,
- guidance_scale,
- )
-
- return combined_next_x
+ return unconditioned_next_x, conditioned_next_x
def do_latent_postprocessing(
self,
@@ -228,7 +324,6 @@ class InvokeAIDiffuserComponent:
) -> torch.Tensor:
if postprocessing_settings is not None:
percent_through = step_index / total_step_count
- latents = self.apply_threshold(postprocessing_settings, latents, percent_through)
latents = self.apply_symmetry(postprocessing_settings, latents, percent_through)
return latents
@@ -281,17 +376,40 @@ class InvokeAIDiffuserComponent:
# methods below are called from do_diffusion_step and should be considered private to this class.
- def _apply_standard_conditioning(self, x, sigma, unconditioning, conditioning, **kwargs):
+ def _apply_standard_conditioning(self, x, sigma, conditioning_data, **kwargs):
# fast batched path
x_twice = torch.cat([x] * 2)
sigma_twice = torch.cat([sigma] * 2)
- both_conditionings, encoder_attention_mask = self._concat_conditionings_for_batch(unconditioning, conditioning)
+ added_cond_kwargs = None
+ if type(conditioning_data.text_embeddings) is SDXLConditioningInfo:
+ added_cond_kwargs = {
+ "text_embeds": torch.cat(
+ [
+ # TODO: how to pad? just by zeros? or even truncate?
+ conditioning_data.unconditioned_embeddings.pooled_embeds,
+ conditioning_data.text_embeddings.pooled_embeds,
+ ],
+ dim=0,
+ ),
+ "time_ids": torch.cat(
+ [
+ conditioning_data.unconditioned_embeddings.add_time_ids,
+ conditioning_data.text_embeddings.add_time_ids,
+ ],
+ dim=0,
+ ),
+ }
+
+ both_conditionings, encoder_attention_mask = self._concat_conditionings_for_batch(
+ conditioning_data.unconditioned_embeddings.embeds, conditioning_data.text_embeddings.embeds
+ )
both_results = self.model_forward_callback(
x_twice,
sigma_twice,
both_conditionings,
encoder_attention_mask=encoder_attention_mask,
+ added_cond_kwargs=added_cond_kwargs,
**kwargs,
)
unconditioned_next_x, conditioned_next_x = both_results.chunk(2)
@@ -301,8 +419,7 @@ class InvokeAIDiffuserComponent:
self,
x: torch.Tensor,
sigma,
- unconditioning: torch.Tensor,
- conditioning: torch.Tensor,
+ conditioning_data,
**kwargs,
):
# low-memory sequential path
@@ -320,52 +437,46 @@ class InvokeAIDiffuserComponent:
if mid_block_additional_residual is not None:
uncond_mid_block, cond_mid_block = mid_block_additional_residual.chunk(2)
+ added_cond_kwargs = None
+ is_sdxl = type(conditioning_data.text_embeddings) is SDXLConditioningInfo
+ if is_sdxl:
+ added_cond_kwargs = {
+ "text_embeds": conditioning_data.unconditioned_embeddings.pooled_embeds,
+ "time_ids": conditioning_data.unconditioned_embeddings.add_time_ids,
+ }
+
unconditioned_next_x = self.model_forward_callback(
x,
sigma,
- unconditioning,
+ conditioning_data.unconditioned_embeddings.embeds,
down_block_additional_residuals=uncond_down_block,
mid_block_additional_residual=uncond_mid_block,
+ added_cond_kwargs=added_cond_kwargs,
**kwargs,
)
+
+ if is_sdxl:
+ added_cond_kwargs = {
+ "text_embeds": conditioning_data.text_embeddings.pooled_embeds,
+ "time_ids": conditioning_data.text_embeddings.add_time_ids,
+ }
+
conditioned_next_x = self.model_forward_callback(
x,
sigma,
- conditioning,
+ conditioning_data.text_embeddings.embeds,
down_block_additional_residuals=cond_down_block,
mid_block_additional_residual=cond_mid_block,
+ added_cond_kwargs=added_cond_kwargs,
**kwargs,
)
return unconditioned_next_x, conditioned_next_x
- # TODO: looks unused
- def _apply_hybrid_conditioning(self, x, sigma, unconditioning, conditioning, **kwargs):
- assert isinstance(conditioning, dict)
- assert isinstance(unconditioning, dict)
- x_twice = torch.cat([x] * 2)
- sigma_twice = torch.cat([sigma] * 2)
- both_conditionings = dict()
- for k in conditioning:
- if isinstance(conditioning[k], list):
- both_conditionings[k] = [
- torch.cat([unconditioning[k][i], conditioning[k][i]]) for i in range(len(conditioning[k]))
- ]
- else:
- both_conditionings[k] = torch.cat([unconditioning[k], conditioning[k]])
- unconditioned_next_x, conditioned_next_x = self.model_forward_callback(
- x_twice,
- sigma_twice,
- both_conditionings,
- **kwargs,
- ).chunk(2)
- return unconditioned_next_x, conditioned_next_x
-
def _apply_cross_attention_controlled_conditioning(
self,
x: torch.Tensor,
sigma,
- unconditioning,
- conditioning,
+ conditioning_data,
cross_attention_control_types_to_do,
**kwargs,
):
@@ -391,26 +502,43 @@ class InvokeAIDiffuserComponent:
mask=context.cross_attention_mask,
cross_attention_types_to_do=[],
)
+
+ added_cond_kwargs = None
+ is_sdxl = type(conditioning_data.text_embeddings) is SDXLConditioningInfo
+ if is_sdxl:
+ added_cond_kwargs = {
+ "text_embeds": conditioning_data.unconditioned_embeddings.pooled_embeds,
+ "time_ids": conditioning_data.unconditioned_embeddings.add_time_ids,
+ }
+
# no cross attention for unconditioning (negative prompt)
unconditioned_next_x = self.model_forward_callback(
x,
sigma,
- unconditioning,
+ conditioning_data.unconditioned_embeddings.embeds,
{"swap_cross_attn_context": cross_attn_processor_context},
down_block_additional_residuals=uncond_down_block,
mid_block_additional_residual=uncond_mid_block,
+ added_cond_kwargs=added_cond_kwargs,
**kwargs,
)
+ if is_sdxl:
+ added_cond_kwargs = {
+ "text_embeds": conditioning_data.text_embeddings.pooled_embeds,
+ "time_ids": conditioning_data.text_embeddings.add_time_ids,
+ }
+
# do requested cross attention types for conditioning (positive prompt)
cross_attn_processor_context.cross_attention_types_to_do = cross_attention_control_types_to_do
conditioned_next_x = self.model_forward_callback(
x,
sigma,
- conditioning,
+ conditioning_data.text_embeddings.embeds,
{"swap_cross_attn_context": cross_attn_processor_context},
down_block_additional_residuals=cond_down_block,
mid_block_additional_residual=cond_mid_block,
+ added_cond_kwargs=added_cond_kwargs,
**kwargs,
)
return unconditioned_next_x, conditioned_next_x
@@ -421,63 +549,6 @@ class InvokeAIDiffuserComponent:
combined_next_x = unconditioned_next_x + scaled_delta
return combined_next_x
- def apply_threshold(
- self,
- postprocessing_settings: PostprocessingSettings,
- latents: torch.Tensor,
- percent_through: float,
- ) -> torch.Tensor:
- if postprocessing_settings.threshold is None or postprocessing_settings.threshold == 0.0:
- return latents
-
- threshold = postprocessing_settings.threshold
- warmup = postprocessing_settings.warmup
-
- if percent_through < warmup:
- current_threshold = threshold + threshold * 5 * (1 - (percent_through / warmup))
- else:
- current_threshold = threshold
-
- if current_threshold <= 0:
- return latents
-
- maxval = latents.max().item()
- minval = latents.min().item()
-
- scale = 0.7 # default value from #395
-
- if self.debug_thresholding:
- std, mean = [i.item() for i in torch.std_mean(latents)]
- outside = torch.count_nonzero((latents < -current_threshold) | (latents > current_threshold))
- logger.info(f"Threshold: %={percent_through} threshold={current_threshold:.3f} (of {threshold:.3f})")
- logger.debug(f"min, mean, max = {minval:.3f}, {mean:.3f}, {maxval:.3f}\tstd={std}")
- logger.debug(f"{outside / latents.numel() * 100:.2f}% values outside threshold")
-
- if maxval < current_threshold and minval > -current_threshold:
- return latents
-
- num_altered = 0
-
- # MPS torch.rand_like is fine because torch.rand_like is wrapped in generate.py!
-
- if maxval > current_threshold:
- latents = torch.clone(latents)
- maxval = np.clip(maxval * scale, 1, current_threshold)
- num_altered += torch.count_nonzero(latents > maxval)
- latents[latents > maxval] = torch.rand_like(latents[latents > maxval]) * maxval
-
- if minval < -current_threshold:
- latents = torch.clone(latents)
- minval = np.clip(minval * scale, -current_threshold, -1)
- num_altered += torch.count_nonzero(latents < minval)
- latents[latents < minval] = torch.rand_like(latents[latents < minval]) * minval
-
- if self.debug_thresholding:
- logger.debug(f"min, , max = {minval:.3f}, , {maxval:.3f}\t(scaled by {scale})")
- logger.debug(f"{num_altered / latents.numel() * 100:.2f}% values altered")
-
- return latents
-
def apply_symmetry(
self,
postprocessing_settings: PostprocessingSettings,
@@ -505,7 +576,7 @@ class InvokeAIDiffuserComponent:
latents.to(device="cpu")
if (
- h_symmetry_time_pct != None
+ h_symmetry_time_pct is not None
and self.last_percent_through < h_symmetry_time_pct
and percent_through >= h_symmetry_time_pct
):
@@ -521,7 +592,7 @@ class InvokeAIDiffuserComponent:
)
if (
- v_symmetry_time_pct != None
+ v_symmetry_time_pct is not None
and self.last_percent_through < v_symmetry_time_pct
and percent_through >= v_symmetry_time_pct
):
@@ -539,18 +610,6 @@ class InvokeAIDiffuserComponent:
self.last_percent_through = percent_through
return latents.to(device=dev)
- def estimate_percent_through(self, step_index, sigma):
- if step_index is not None and self.cross_attention_control_context is not None:
- # percent_through will never reach 1.0 (but this is intended)
- return float(step_index) / float(self.cross_attention_control_context.step_count)
- # find the best possible index of the current sigma in the sigma sequence
- smaller_sigmas = torch.nonzero(self.model.sigmas <= sigma)
- sigma_index = smaller_sigmas[-1].item() if smaller_sigmas.shape[0] > 0 else 0
- # flip because sigmas[0] is for the fully denoised image
- # percent_through must be <1
- return 1.0 - float(sigma_index + 1) / float(self.model.sigmas.shape[0])
- # print('estimated percent_through', percent_through, 'from sigma', sigma.item())
-
# todo: make this work
@classmethod
def apply_conjunction(cls, x, t, forward_func, uc, c_or_weighted_c_list, global_guidance_scale):
@@ -564,7 +623,7 @@ class InvokeAIDiffuserComponent:
# below is fugly omg
conditionings = [uc] + [c for c, weight in weighted_cond_list]
weights = [1] + [weight for c, weight in weighted_cond_list]
- chunk_count = ceil(len(conditionings) / 2)
+ chunk_count = math.ceil(len(conditionings) / 2)
deltas = None
for chunk_index in range(chunk_count):
offset = chunk_index * 2
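
An illustrative sketch (not part of this patch) of the control_mode mapping that the do_controlnet_step() comments above describe; the helper name is hypothetical, the mode strings come from the hunk.

def resolve_control_mode(control_mode: str) -> tuple[bool, bool]:
    """Return (soft_injection, cfg_injection) for a ControlNet control_mode string."""
    # soft_injection: per-layer re-weighting ("guess mode" in diffusers)
    soft_injection = control_mode in ("more_prompt", "more_control")
    # cfg_injection: run the ControlNet only on the conditional batch
    cfg_injection = control_mode in ("more_control", "unbalanced")
    return soft_injection, cfg_injection


assert resolve_control_mode("more_control") == (True, True)
assert resolve_control_mode("more_prompt") == (True, False)
assert resolve_control_mode("unbalanced") == (False, True)
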
diff --git a/invokeai/backend/stable_diffusion/image_degradation/__init__.py b/invokeai/backend/stable_diffusion/image_degradation/__init__.py
index c6b3b62ea8..589e266c02 100644
--- a/invokeai/backend/stable_diffusion/image_degradation/__init__.py
+++ b/invokeai/backend/stable_diffusion/image_degradation/__init__.py
@@ -1,6 +1,6 @@
-from ldm.modules.image_degradation.bsrgan import (
+from ldm.modules.image_degradation.bsrgan import ( # noqa: F401
degradation_bsrgan_variant as degradation_fn_bsr,
)
-from ldm.modules.image_degradation.bsrgan_light import (
+from ldm.modules.image_degradation.bsrgan_light import ( # noqa: F401
degradation_bsrgan_variant as degradation_fn_bsr_light,
)
diff --git a/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py b/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py
index 493c8be781..e4d614207b 100644
--- a/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py
+++ b/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py
@@ -573,14 +573,15 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
"""
image = util.uint2single(image)
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
- sf_ori = sf
+ jpeg_prob, scale2_prob = 0.9, 0.25
+ # isp_prob = 0.25 # uncomment with `if i== 6` block below
+ # sf_ori = sf # uncomment with `if i== 6` block below
h1, w1 = image.shape[:2]
image = image.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...] # mod crop
h, w = image.shape[:2]
- hq = image.copy()
+ # hq = image.copy() # uncomment with `if i== 6` block below
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
@@ -777,7 +778,7 @@ if __name__ == "__main__":
img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"]
print(img_lq.shape)
print("bicubic", img_lq_bicubic.shape)
- print(img_hq.shape)
+ # print(img_hq.shape)
lq_nearest = cv2.resize(
util.single2uint(img_lq),
(int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
@@ -788,5 +789,6 @@ if __name__ == "__main__":
(int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
interpolation=0,
)
- img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+ # img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+ img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest], axis=1)
util.imsave(img_concat, str(i) + ".png")
diff --git a/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py b/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py
index d0e0abadbc..cd74adc519 100644
--- a/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py
+++ b/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py
@@ -577,14 +577,15 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
"""
image = util.uint2single(image)
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
- sf_ori = sf
+ jpeg_prob, scale2_prob = 0.9, 0.25
+ # isp_prob = 0.25 # uncomment with `if i== 6` block below
+ # sf_ori = sf # uncomment with `if i== 6` block below
h1, w1 = image.shape[:2]
image = image.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...] # mod crop
h, w = image.shape[:2]
- hq = image.copy()
+ # hq = image.copy() # uncomment with `if i== 6` block below
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
diff --git a/invokeai/backend/stable_diffusion/image_degradation/utils_image.py b/invokeai/backend/stable_diffusion/image_degradation/utils_image.py
index d45ca602e6..2a0773c3ed 100644
--- a/invokeai/backend/stable_diffusion/image_degradation/utils_image.py
+++ b/invokeai/backend/stable_diffusion/image_degradation/utils_image.py
@@ -8,8 +8,6 @@ import numpy as np
import torch
from torchvision.utils import make_grid
-# import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
-
import invokeai.backend.util.logging as logger
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
@@ -50,6 +48,8 @@ def get_timestamp():
def imshow(x, title=None, cbar=False, figsize=None):
+ import matplotlib.pyplot as plt
+
plt.figure(figsize=figsize)
plt.imshow(np.squeeze(x), interpolation="nearest", cmap="gray")
if title:
@@ -60,6 +60,8 @@ def imshow(x, title=None, cbar=False, figsize=None):
def surf(Z, cmap="rainbow", figsize=None):
+ import matplotlib.pyplot as plt
+
plt.figure(figsize=figsize)
ax3 = plt.axes(projection="3d")
@@ -89,7 +91,7 @@ def get_image_paths(dataroot):
def _get_paths_from_images(path):
assert os.path.isdir(path), "{:s} is not a valid directory".format(path)
images = []
- for dirpath, _, fnames in sorted(os.walk(path)):
+ for dirpath, _, fnames in sorted(os.walk(path, followlinks=True)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
diff --git a/invokeai/backend/stable_diffusion/offloading.py b/invokeai/backend/stable_diffusion/offloading.py
deleted file mode 100644
index aa2426d514..0000000000
--- a/invokeai/backend/stable_diffusion/offloading.py
+++ /dev/null
@@ -1,253 +0,0 @@
-from __future__ import annotations
-
-import warnings
-import weakref
-from abc import ABCMeta, abstractmethod
-from collections.abc import MutableMapping
-from typing import Callable, Union
-
-import torch
-from accelerate.utils import send_to_device
-from torch.utils.hooks import RemovableHandle
-
-OFFLOAD_DEVICE = torch.device("cpu")
-
-
-class _NoModel:
- """Symbol that indicates no model is loaded.
-
- (We can't weakref.ref(None), so this was my best idea at the time to come up with something
- type-checkable.)
- """
-
- def __bool__(self):
- return False
-
- def to(self, device: torch.device):
- pass
-
- def __repr__(self):
- return ""
-
-
-NO_MODEL = _NoModel()
-
-
-class ModelGroup(metaclass=ABCMeta):
- """
- A group of models.
-
- The use case I had in mind when writing this is the sub-models used by a DiffusionPipeline,
- e.g. its text encoder, U-net, VAE, etc.
-
- Those models are :py:class:`diffusers.ModelMixin`, but "model" is interchangeable with
- :py:class:`torch.nn.Module` here.
- """
-
- def __init__(self, execution_device: torch.device):
- self.execution_device = execution_device
-
- @abstractmethod
- def install(self, *models: torch.nn.Module):
- """Add models to this group."""
- pass
-
- @abstractmethod
- def uninstall(self, models: torch.nn.Module):
- """Remove models from this group."""
- pass
-
- @abstractmethod
- def uninstall_all(self):
- """Remove all models from this group."""
-
- @abstractmethod
- def load(self, model: torch.nn.Module):
- """Load this model to the execution device."""
- pass
-
- @abstractmethod
- def offload_current(self):
- """Offload the current model(s) from the execution device."""
- pass
-
- @abstractmethod
- def ready(self):
- """Ready this group for use."""
- pass
-
- @abstractmethod
- def set_device(self, device: torch.device):
- """Change which device models from this group will execute on."""
- pass
-
- @abstractmethod
- def device_for(self, model) -> torch.device:
- """Get the device the given model will execute on.
-
- The model should already be a member of this group.
- """
- pass
-
- @abstractmethod
- def __contains__(self, model):
- """Check if the model is a member of this group."""
- pass
-
- def __repr__(self) -> str:
- return f"<{self.__class__.__name__} object at {id(self):x}: " f"device={self.execution_device} >"
-
-
-class LazilyLoadedModelGroup(ModelGroup):
- """
- Only one model from this group is loaded on the GPU at a time.
-
- Running the forward method of a model will displace the previously-loaded model,
- offloading it to CPU.
-
- If you call other methods on the model, e.g. ``model.encode(x)`` instead of ``model(x)``,
- you will need to explicitly load it with :py:method:`.load(model)`.
-
- This implementation relies on pytorch forward-pre-hooks, and it will copy forward arguments
- to the appropriate execution device, as long as they are positional arguments and not keyword
- arguments. (I didn't make the rules; that's the way the pytorch 1.13 API works for hooks.)
- """
-
- _hooks: MutableMapping[torch.nn.Module, RemovableHandle]
- _current_model_ref: Callable[[], Union[torch.nn.Module, _NoModel]]
-
- def __init__(self, execution_device: torch.device):
- super().__init__(execution_device)
- self._hooks = weakref.WeakKeyDictionary()
- self._current_model_ref = weakref.ref(NO_MODEL)
-
- def install(self, *models: torch.nn.Module):
- for model in models:
- self._hooks[model] = model.register_forward_pre_hook(self._pre_hook)
-
- def uninstall(self, *models: torch.nn.Module):
- for model in models:
- hook = self._hooks.pop(model)
- hook.remove()
- if self.is_current_model(model):
- # no longer hooked by this object, so don't claim to manage it
- self.clear_current_model()
-
- def uninstall_all(self):
- self.uninstall(*self._hooks.keys())
-
- def _pre_hook(self, module: torch.nn.Module, forward_input):
- self.load(module)
- if len(forward_input) == 0:
- warnings.warn(
- f"Hook for {module.__class__.__name__} got no input. " f"Inputs must be positional, not keywords.",
- stacklevel=3,
- )
- return send_to_device(forward_input, self.execution_device)
-
- def load(self, module):
- if not self.is_current_model(module):
- self.offload_current()
- self._load(module)
-
- def offload_current(self):
- module = self._current_model_ref()
- if module is not NO_MODEL:
- module.to(OFFLOAD_DEVICE)
- self.clear_current_model()
-
- def _load(self, module: torch.nn.Module) -> torch.nn.Module:
- assert self.is_empty(), f"A model is already loaded: {self._current_model_ref()}"
- module = module.to(self.execution_device)
- self.set_current_model(module)
- return module
-
- def is_current_model(self, model: torch.nn.Module) -> bool:
- """Is the given model the one currently loaded on the execution device?"""
- return self._current_model_ref() is model
-
- def is_empty(self):
- """Are none of this group's models loaded on the execution device?"""
- return self._current_model_ref() is NO_MODEL
-
- def set_current_model(self, value):
- self._current_model_ref = weakref.ref(value)
-
- def clear_current_model(self):
- self._current_model_ref = weakref.ref(NO_MODEL)
-
- def set_device(self, device: torch.device):
- if device == self.execution_device:
- return
- self.execution_device = device
- current = self._current_model_ref()
- if current is not NO_MODEL:
- current.to(device)
-
- def device_for(self, model):
- if model not in self:
- raise KeyError(f"This does not manage this model {type(model).__name__}", model)
- return self.execution_device # this implementation only dispatches to one device
-
- def ready(self):
- pass # always ready to load on-demand
-
- def __contains__(self, model):
- return model in self._hooks
-
- def __repr__(self) -> str:
- return (
- f"<{self.__class__.__name__} object at {id(self):x}: "
- f"current_model={type(self._current_model_ref()).__name__} >"
- )
-
-
-class FullyLoadedModelGroup(ModelGroup):
- """
- A group of models without any implicit loading or unloading.
-
- :py:meth:`.ready` loads _all_ the models to the execution device at once.
- """
-
- _models: weakref.WeakSet
-
- def __init__(self, execution_device: torch.device):
- super().__init__(execution_device)
- self._models = weakref.WeakSet()
-
- def install(self, *models: torch.nn.Module):
- for model in models:
- self._models.add(model)
- model.to(self.execution_device)
-
- def uninstall(self, *models: torch.nn.Module):
- for model in models:
- self._models.remove(model)
-
- def uninstall_all(self):
- self.uninstall(*self._models)
-
- def load(self, model):
- model.to(self.execution_device)
-
- def offload_current(self):
- for model in self._models:
- model.to(OFFLOAD_DEVICE)
-
- def ready(self):
- for model in self._models:
- self.load(model)
-
- def set_device(self, device: torch.device):
- self.execution_device = device
- for model in self._models:
- if model.device != OFFLOAD_DEVICE:
- model.to(device)
-
- def device_for(self, model):
- if model not in self:
- raise KeyError("This does not manage this model f{type(model).__name__}", model)
- return self.execution_device # this implementation only dispatches to one device
-
- def __contains__(self, model):
- return model in self._models
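
For context, a minimal sketch (not part of this patch) of the torch forward-pre-hook mechanism that the deleted LazilyLoadedModelGroup built on: move the module to the execution device just before forward() runs and relocate its positional inputs with it. The helper name is hypothetical.

import torch


def install_lazy_load_hook(module: torch.nn.Module, execution_device: torch.device):
    """Returns a RemovableHandle; call .remove() to uninstall the hook."""

    def pre_hook(mod: torch.nn.Module, forward_input: tuple):
        mod.to(execution_device)
        # a forward-pre-hook only sees positional arguments; returning a tuple replaces them
        return tuple(
            t.to(execution_device) if isinstance(t, torch.Tensor) else t
            for t in forward_input
        )

    return module.register_forward_pre_hook(pre_hook)
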
diff --git a/invokeai/backend/stable_diffusion/schedulers/__init__.py b/invokeai/backend/stable_diffusion/schedulers/__init__.py
index 29a96eb3a5..a4e9dbf9da 100644
--- a/invokeai/backend/stable_diffusion/schedulers/__init__.py
+++ b/invokeai/backend/stable_diffusion/schedulers/__init__.py
@@ -1 +1 @@
-from .schedulers import SCHEDULER_MAP
+from .schedulers import SCHEDULER_MAP # noqa: F401
diff --git a/invokeai/backend/training/__init__.py b/invokeai/backend/training/__init__.py
index a85842dc72..ed3ceb90ec 100644
--- a/invokeai/backend/training/__init__.py
+++ b/invokeai/backend/training/__init__.py
@@ -1,4 +1,4 @@
"""
Initialization file for invokeai.backend.training
"""
-from .textual_inversion_training import do_textual_inversion_training, parse_args
+from .textual_inversion_training import do_textual_inversion_training, parse_args # noqa: F401
diff --git a/invokeai/backend/util/__init__.py b/invokeai/backend/util/__init__.py
index 2e69af5382..30bb0efc15 100644
--- a/invokeai/backend/util/__init__.py
+++ b/invokeai/backend/util/__init__.py
@@ -1,7 +1,7 @@
"""
Initialization file for invokeai.backend.util
"""
-from .devices import (
+from .devices import ( # noqa: F401
CPU_DEVICE,
CUDA_DEVICE,
MPS_DEVICE,
@@ -10,5 +10,5 @@ from .devices import (
normalize_device,
torch_dtype,
)
-from .log import write_log
-from .util import ask_user, download_with_resume, instantiate_from_config, url_attachment_name, Chdir
+from .log import write_log # noqa: F401
+from .util import ask_user, download_with_resume, instantiate_from_config, url_attachment_name, Chdir # noqa: F401
diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py
index eeabcc35db..1827f295e4 100644
--- a/invokeai/backend/util/devices.py
+++ b/invokeai/backend/util/devices.py
@@ -1,6 +1,8 @@
from __future__ import annotations
from contextlib import nullcontext
+from packaging import version
+import platform
import torch
from torch import autocast
@@ -30,7 +32,7 @@ def choose_precision(device: torch.device) -> str:
device_name = torch.cuda.get_device_name(device)
if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name):
return "float16"
- elif device.type == "mps":
+ elif device.type == "mps" and version.parse(platform.mac_ver()[0]) < version.parse("14.0.0"):
return "float16"
return "float32"
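
A small sketch (not part of this patch) of the macOS-version gate added above; it is only meaningful on macOS, where platform.mac_ver() returns a non-empty release string, and packaging.version compares the release components numerically.

import platform

from packaging import version


def mps_supports_float16() -> bool:
    """Assumed rule from the hunk above: fp16 on MPS only below macOS 14."""
    release = platform.mac_ver()[0]  # e.g. "13.6.1"; empty string off macOS
    return bool(release) and version.parse(release) < version.parse("14.0.0")
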
diff --git a/invokeai/backend/util/hotfixes.py b/invokeai/backend/util/hotfixes.py
index 4710682ac1..3d7f278f86 100644
--- a/invokeai/backend/util/hotfixes.py
+++ b/invokeai/backend/util/hotfixes.py
@@ -4,8 +4,15 @@ import torch
from torch import nn
from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.loaders import FromOriginalControlnetMixin
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
-from diffusers.models.embeddings import TimestepEmbedding, Timesteps
+from diffusers.models.embeddings import (
+ TextImageProjection,
+ TextImageTimeEmbedding,
+ TextTimeEmbedding,
+ TimestepEmbedding,
+ Timesteps,
+)
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import (
CrossAttnDownBlock2D,
@@ -18,10 +25,16 @@ from diffusers.models.unet_2d_condition import UNet2DConditionModel
import diffusers
from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
+from invokeai.backend.util.logging import InvokeAILogger
+
+# TODO: create PR to diffusers
# Modified ControlNetModel with encoder_attention_mask argument added
-class ControlNetModel(ModelMixin, ConfigMixin):
+logger = InvokeAILogger.getLogger(__name__)
+
+
+class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
"""
A ControlNet model.
@@ -52,12 +65,25 @@ class ControlNetModel(ModelMixin, ConfigMixin):
The epsilon to use for the normalization.
cross_attention_dim (`int`, defaults to 1280):
The dimension of the cross attention features.
+ transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
+ encoder_hid_dim (`int`, *optional*, defaults to None):
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
+ dimension to `cross_attention_dim`.
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
+            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
The dimension of the attention heads.
use_linear_projection (`bool`, defaults to `False`):
class_embed_type (`str`, *optional*, defaults to `None`):
The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
`"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
+ addition_embed_type (`str`, *optional*, defaults to `None`):
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
+ "text". "text" will use the `TextTimeEmbedding` layer.
num_class_embeds (`int`, *optional*, defaults to 0):
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
class conditioning with `class_embed_type` equal to `None`.
@@ -90,7 +116,7 @@ class ControlNetModel(ModelMixin, ConfigMixin):
"DownBlock2D",
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
- block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
+ block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
@@ -98,10 +124,15 @@ class ControlNetModel(ModelMixin, ConfigMixin):
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
+ transformer_layers_per_block: Union[int, Tuple[int]] = 1,
+ encoder_hid_dim: Optional[int] = None,
+ encoder_hid_dim_type: Optional[str] = None,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
+ addition_embed_type: Optional[str] = None,
+ addition_time_embed_dim: Optional[int] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
@@ -109,6 +140,7 @@ class ControlNetModel(ModelMixin, ConfigMixin):
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
global_pool_conditions: bool = False,
+ addition_embed_type_num_heads=64,
):
super().__init__()
@@ -136,6 +168,9 @@ class ControlNetModel(ModelMixin, ConfigMixin):
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
+ if isinstance(transformer_layers_per_block, int):
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
+
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
@@ -145,16 +180,43 @@ class ControlNetModel(ModelMixin, ConfigMixin):
# time
time_embed_dim = block_out_channels[0] * 4
-
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
-
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
+ if encoder_hid_dim_type is None and encoder_hid_dim is not None:
+ encoder_hid_dim_type = "text_proj"
+ self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
+ logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
+
+ if encoder_hid_dim is None and encoder_hid_dim_type is not None:
+ raise ValueError(
+ f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
+ )
+
+ if encoder_hid_dim_type == "text_proj":
+ self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
+ elif encoder_hid_dim_type == "text_image_proj":
+            # image_embed_dim DOESN'T have to be `cross_attention_dim`. To avoid cluttering the __init__ too much,
+            # it is set to `cross_attention_dim` here, as this is exactly the dimension required by the only current
+            # use case, `addition_embed_type == "text_image_proj"` (Kandinsky 2.1).
+ self.encoder_hid_proj = TextImageProjection(
+ text_embed_dim=encoder_hid_dim,
+ image_embed_dim=cross_attention_dim,
+ cross_attention_dim=cross_attention_dim,
+ )
+
+ elif encoder_hid_dim_type is not None:
+ raise ValueError(
+ f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
+ )
+ else:
+ self.encoder_hid_proj = None
+
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
@@ -178,6 +240,29 @@ class ControlNetModel(ModelMixin, ConfigMixin):
else:
self.class_embedding = None
+ if addition_embed_type == "text":
+ if encoder_hid_dim is not None:
+ text_time_embedding_from_dim = encoder_hid_dim
+ else:
+ text_time_embedding_from_dim = cross_attention_dim
+
+ self.add_embedding = TextTimeEmbedding(
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
+ )
+ elif addition_embed_type == "text_image":
+            # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To avoid cluttering the
+            # __init__ too much, they are set to `cross_attention_dim` here, as this is exactly the dimension
+            # required by the only current use case, `addition_embed_type == "text_image"` (Kandinsky 2.1).
+ self.add_embedding = TextImageTimeEmbedding(
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
+ )
+ elif addition_embed_type == "text_time":
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
+
+ elif addition_embed_type is not None:
+ raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
+
# control net conditioning embedding
self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0],
@@ -212,6 +297,7 @@ class ControlNetModel(ModelMixin, ConfigMixin):
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
+ transformer_layers_per_block=transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
@@ -248,6 +334,7 @@ class ControlNetModel(ModelMixin, ConfigMixin):
self.controlnet_mid_block = controlnet_block
self.mid_block = UNetMidBlock2DCrossAttn(
+ transformer_layers_per_block=transformer_layers_per_block[-1],
in_channels=mid_block_channel,
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
@@ -277,7 +364,22 @@ class ControlNetModel(ModelMixin, ConfigMixin):
The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied
where applicable.
"""
+ transformer_layers_per_block = (
+ unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
+ )
+ encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
+ encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None
+ addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None
+ addition_time_embed_dim = (
+ unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None
+ )
+
controlnet = cls(
+ encoder_hid_dim=encoder_hid_dim,
+ encoder_hid_dim_type=encoder_hid_dim_type,
+ addition_embed_type=addition_embed_type,
+ addition_time_embed_dim=addition_time_embed_dim,
+ transformer_layers_per_block=transformer_layers_per_block,
in_channels=unet.config.in_channels,
flip_sin_to_cos=unet.config.flip_sin_to_cos,
freq_shift=unet.config.freq_shift,
@@ -463,6 +565,7 @@ class ControlNetModel(ModelMixin, ConfigMixin):
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
guess_mode: bool = False,
@@ -486,7 +589,9 @@ class ControlNetModel(ModelMixin, ConfigMixin):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
- cross_attention_kwargs(`dict[str]`, *optional*, defaults to `None`):
+ added_cond_kwargs (`dict`):
+ Additional conditions for the Stable Diffusion XL UNet.
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
@@ -549,6 +654,7 @@ class ControlNetModel(ModelMixin, ConfigMixin):
t_emb = t_emb.to(dtype=sample.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
+ aug_emb = None
if self.class_embedding is not None:
if class_labels is None:
@@ -560,11 +666,34 @@ class ControlNetModel(ModelMixin, ConfigMixin):
class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
emb = emb + class_emb
+ if "addition_embed_type" in self.config:
+ if self.config.addition_embed_type == "text":
+ aug_emb = self.add_embedding(encoder_hidden_states)
+
+ elif self.config.addition_embed_type == "text_time":
+ if "text_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
+ )
+ text_embeds = added_cond_kwargs.get("text_embeds")
+ if "time_ids" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
+ )
+ time_ids = added_cond_kwargs.get("time_ids")
+ time_embeds = self.add_time_proj(time_ids.flatten())
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
+
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
+ add_embeds = add_embeds.to(emb.dtype)
+ aug_emb = self.add_embedding(add_embeds)
+
+ emb = emb + aug_emb if aug_emb is not None else emb
+
# 2. pre-process
sample = self.conv_in(sample)
controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
-
sample = sample + controlnet_cond
# 3. down
diff --git a/invokeai/backend/util/log.py b/invokeai/backend/util/log.py
index 4643f61a6b..3919d456b9 100644
--- a/invokeai/backend/util/log.py
+++ b/invokeai/backend/util/log.py
@@ -27,8 +27,8 @@ def write_log_message(results, output_cntr):
log_lines = [f"{path}: {prompt}\n" for path, prompt in results]
if len(log_lines) > 1:
subcntr = 1
- for l in log_lines:
- print(f"[{output_cntr}.{subcntr}] {l}", end="")
+ for ll in log_lines:
+ print(f"[{output_cntr}.{subcntr}] {ll}", end="")
subcntr += 1
else:
print(f"[{output_cntr}] {log_lines[0]}", end="")
diff --git a/invokeai/backend/util/logging.py b/invokeai/backend/util/logging.py
index 3a8d721aa5..82706d8181 100644
--- a/invokeai/backend/util/logging.py
+++ b/invokeai/backend/util/logging.py
@@ -182,13 +182,13 @@ import urllib.parse
from abc import abstractmethod
from pathlib import Path
-from invokeai.app.services.config import InvokeAIAppConfig, get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
try:
import syslog
SYSLOG_AVAILABLE = True
-except:
+except ImportError:
SYSLOG_AVAILABLE = False
@@ -417,7 +417,7 @@ class InvokeAILogger(object):
syslog_args["socktype"] = _SOCK_MAP[arg_value[0]]
else:
syslog_args["address"] = arg_name
- except:
+ except Exception:
raise ValueError(f"{args} is not a value argument list for syslog logging")
return logging.handlers.SysLogHandler(**syslog_args)
diff --git a/invokeai/backend/util/mps_fixes.py b/invokeai/backend/util/mps_fixes.py
index 409eff1c8b..8a4e6baab5 100644
--- a/invokeai/backend/util/mps_fixes.py
+++ b/invokeai/backend/util/mps_fixes.py
@@ -191,7 +191,7 @@ class ChunkedSlicedAttnProcessor:
assert value.shape[0] == 1
assert hidden_states.shape[0] == 1
- dtype = query.dtype
+ # dtype = query.dtype
if attn.upcast_attention:
query = query.float()
key = key.float()
diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py
index f3c182c063..7ef9c72fb0 100644
--- a/invokeai/backend/util/util.py
+++ b/invokeai/backend/util/util.py
@@ -84,7 +84,7 @@ def count_params(model, verbose=False):
def instantiate_from_config(config, **kwargs):
- if not "target" in config:
+ if "target" not in config:
if config == "__is_first_stage__":
return None
elif config == "__is_unconditional__":
@@ -234,16 +234,17 @@ def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10
.repeat_interleave(d[1], 1)
)
- dot = lambda grad, shift: (
- torch.stack(
- (
- grid[: shape[0], : shape[1], 0] + shift[0],
- grid[: shape[0], : shape[1], 1] + shift[1],
- ),
- dim=-1,
- )
- * grad[: shape[0], : shape[1]]
- ).sum(dim=-1)
+ def dot(grad, shift):
+ return (
+ torch.stack(
+ (
+ grid[: shape[0], : shape[1], 0] + shift[0],
+ grid[: shape[0], : shape[1], 1] + shift[1],
+ ),
+ dim=-1,
+ )
+ * grad[: shape[0], : shape[1]]
+ ).sum(dim=-1)
n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0]).to(device)
n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0]).to(device)
@@ -287,7 +288,7 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
if dest.is_dir():
try:
file_name = re.search('filename="(.+)"', resp.headers.get("Content-Disposition")).group(1)
- except:
+ except AttributeError:
file_name = os.path.basename(url)
dest = dest / file_name
else:
@@ -342,7 +343,7 @@ def url_attachment_name(url: str) -> dict:
resp = requests.get(url, stream=True)
match = re.search('filename="(.+)"', resp.headers.get("Content-Disposition"))
return match.group(1)
- except:
+ except Exception:
return None
diff --git a/invokeai/backend/web/__init__.py b/invokeai/backend/web/__init__.py
deleted file mode 100644
index c57600f72b..0000000000
--- a/invokeai/backend/web/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-Initialization file for the web backend.
-"""
-from .invoke_ai_web_server import InvokeAIWebServer
diff --git a/invokeai/backend/web/invoke_ai_web_server.py b/invokeai/backend/web/invoke_ai_web_server.py
deleted file mode 100644
index 88eb77551f..0000000000
--- a/invokeai/backend/web/invoke_ai_web_server.py
+++ /dev/null
@@ -1,1654 +0,0 @@
-import base64
-import glob
-import io
-import json
-import math
-import mimetypes
-import os
-import shutil
-import traceback
-from pathlib import Path
-from threading import Event
-from uuid import uuid4
-
-import eventlet
-from compel.prompt_parser import Blend
-from flask import Flask, make_response, redirect, request, send_from_directory
-from flask_socketio import SocketIO
-from PIL import Image
-from PIL.Image import Image as ImageType
-from werkzeug.utils import secure_filename
-
-import invokeai.backend.util.logging as logger
-import invokeai.frontend.web.dist as frontend
-
-from .. import Generate
-from ..args import APP_ID, APP_VERSION, Args, calculate_init_img_hash
-from ..generator import infill_methods
-from ..globals import Globals, global_converted_ckpts_dir, global_models_dir
-from ..image_util import PngWriter, retrieve_metadata
-from ...frontend.merge.merge_diffusers import merge_diffusion_models
-from ..prompting import (
- get_prompt_structure,
- get_tokens_for_prompt_object,
-)
-from ..stable_diffusion import PipelineIntermediateState
-from .modules.get_canvas_generation_mode import get_canvas_generation_mode
-from .modules.parameters import parameters_to_command
-
-# Loading Arguments
-opt = Args()
-args = opt.parse_args()
-
-# Set the root directory for static files and relative paths
-args.root_dir = os.path.expanduser(args.root_dir or "..")
-if not os.path.isabs(args.outdir):
- args.outdir = os.path.join(args.root_dir, args.outdir)
-
-# normalize the config directory relative to root
-if not os.path.isabs(opt.conf):
- opt.conf = os.path.normpath(os.path.join(Globals.root, opt.conf))
-
-
-class InvokeAIWebServer:
- def __init__(self, generate: Generate, gfpgan, codeformer, esrgan) -> None:
- self.host = args.host
- self.port = args.port
-
- self.generate = generate
- self.gfpgan = gfpgan
- self.codeformer = codeformer
- self.esrgan = esrgan
-
- self.canceled = Event()
- self.ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg"}
-
- def allowed_file(self, filename: str) -> bool:
- return "." in filename and filename.rsplit(".", 1)[1].lower() in self.ALLOWED_EXTENSIONS
-
- def run(self):
- self.setup_app()
- self.setup_flask()
-
- def setup_flask(self):
- # Fix missing mimetypes on Windows
- mimetypes.add_type("application/javascript", ".js")
- mimetypes.add_type("text/css", ".css")
- # Socket IO
- engineio_logger = True if args.web_verbose else False
- max_http_buffer_size = 10000000
-
- socketio_args = {
- "logger": logger,
- "engineio_logger": engineio_logger,
- "max_http_buffer_size": max_http_buffer_size,
- "ping_interval": (50, 50),
- "ping_timeout": 60,
- }
-
- if opt.cors:
- _cors = opt.cors
- # convert list back into comma-separated string,
- # be defensive here, not sure in what form this arrives
- if isinstance(_cors, list):
- _cors = ",".join(_cors)
- if "," in _cors:
- _cors = _cors.split(",")
- socketio_args["cors_allowed_origins"] = _cors
-
- self.app = Flask(__name__, static_url_path="", static_folder=frontend.__path__[0])
-
- self.socketio = SocketIO(self.app, **socketio_args)
-
- # Keep Server Alive Route
- @self.app.route("/flaskwebgui-keep-server-alive")
- def keep_alive():
- return {"message": "Server Running"}
-
- # Outputs Route
- self.app.config["OUTPUTS_FOLDER"] = os.path.abspath(args.outdir)
-
- @self.app.route("/outputs/")
- def outputs(file_path):
- return send_from_directory(self.app.config["OUTPUTS_FOLDER"], file_path)
-
- # Base Route
- @self.app.route("/")
- def serve():
- if args.web_develop:
- return redirect("http://127.0.0.1:5173")
- else:
- return send_from_directory(self.app.static_folder, "index.html")
-
- @self.app.route("/upload", methods=["POST"])
- def upload():
- try:
- data = json.loads(request.form["data"])
- filename = ""
- # check if the post request has the file part
- if "file" in request.files:
- file = request.files["file"]
- # If the user does not select a file, the browser submits an
- # empty file without a filename.
- if file.filename == "":
- return make_response("No file selected", 400)
- filename = file.filename
- elif "dataURL" in data:
- file = dataURL_to_bytes(data["dataURL"])
- if "filename" not in data or data["filename"] == "":
- return make_response("No filename provided", 400)
- filename = data["filename"]
- else:
- return make_response("No file or dataURL", 400)
-
- kind = data["kind"]
-
- if kind == "init":
- path = self.init_image_path
- elif kind == "temp":
- path = self.temp_image_path
- elif kind == "result":
- path = self.result_path
- elif kind == "mask":
- path = self.mask_image_path
- else:
- return make_response(f"Invalid upload kind: {kind}", 400)
-
- if not self.allowed_file(filename):
- return make_response(
- f'Invalid file type, must be one of: {", ".join(self.ALLOWED_EXTENSIONS)}',
- 400,
- )
-
- secured_filename = secure_filename(filename)
-
- uuid = uuid4().hex
- truncated_uuid = uuid[:8]
-
- split = os.path.splitext(secured_filename)
- name = f"{split[0]}.{truncated_uuid}{split[1]}"
-
- file_path = os.path.join(path, name)
-
- if "dataURL" in data:
- with open(file_path, "wb") as f:
- f.write(file)
- else:
- file.save(file_path)
-
- mtime = os.path.getmtime(file_path)
-
- pil_image = Image.open(file_path)
-
- if "cropVisible" in data and data["cropVisible"] == True:
- visible_image_bbox = pil_image.getbbox()
- pil_image = pil_image.crop(visible_image_bbox)
- pil_image.save(file_path)
-
- (width, height) = pil_image.size
-
- thumbnail_path = save_thumbnail(pil_image, os.path.basename(file_path), self.thumbnail_image_path)
-
- response = {
- "url": self.get_url_from_image_path(file_path),
- "thumbnail": self.get_url_from_image_path(thumbnail_path),
- "mtime": mtime,
- "width": width,
- "height": height,
- }
-
- return make_response(response, 200)
-
- except Exception as e:
- self.handle_exceptions(e)
- return make_response("Error uploading file", 500)
-
- self.load_socketio_listeners(self.socketio)
-
- if args.gui:
- logger.info("Launching Invoke AI GUI")
- try:
- from flaskwebgui import FlaskUI
-
- FlaskUI(
- app=self.app,
- socketio=self.socketio,
- server="flask_socketio",
- width=1600,
- height=1000,
- port=self.port,
- ).run()
- except KeyboardInterrupt:
- import sys
-
- sys.exit(0)
- else:
- useSSL = args.certfile or args.keyfile
- logger.info("Started Invoke AI Web Server")
- if self.host == "0.0.0.0":
- logger.info(
- f"Point your browser at http{'s' if useSSL else ''}://localhost:{self.port} or use the host's DNS name or IP address."
- )
- else:
- logger.info("Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.")
- logger.info(f"Point your browser at http{'s' if useSSL else ''}://{self.host}:{self.port}")
- if not useSSL:
- self.socketio.run(app=self.app, host=self.host, port=self.port)
- else:
- self.socketio.run(
- app=self.app,
- host=self.host,
- port=self.port,
- certfile=args.certfile,
- keyfile=args.keyfile,
- )
-
- def setup_app(self):
- self.result_url = "outputs/"
- self.init_image_url = "outputs/init-images/"
- self.mask_image_url = "outputs/mask-images/"
- self.intermediate_url = "outputs/intermediates/"
- self.temp_image_url = "outputs/temp-images/"
- self.thumbnail_image_url = "outputs/thumbnails/"
- # location for "finished" images
- self.result_path = args.outdir
- # temporary path for intermediates
- self.intermediate_path = os.path.join(self.result_path, "intermediates/")
- # path for user-uploaded init images and masks
- self.init_image_path = os.path.join(self.result_path, "init-images/")
- self.mask_image_path = os.path.join(self.result_path, "mask-images/")
- # path for temp images e.g. gallery generations which are not committed
- self.temp_image_path = os.path.join(self.result_path, "temp-images/")
- # path for thumbnail images
- self.thumbnail_image_path = os.path.join(self.result_path, "thumbnails/")
- # txt log
- self.log_path = os.path.join(self.result_path, "invoke_logger.txt")
- # make all output paths
- [
- os.makedirs(path, exist_ok=True)
- for path in [
- self.result_path,
- self.intermediate_path,
- self.init_image_path,
- self.mask_image_path,
- self.temp_image_path,
- self.thumbnail_image_path,
- ]
- ]
-
- def load_socketio_listeners(self, socketio):
- @socketio.on("requestSystemConfig")
- def handle_request_capabilities():
- logger.info("System config requested")
- config = self.get_system_config()
- config["model_list"] = self.generate.model_manager.list_models()
- config["infill_methods"] = infill_methods()
- socketio.emit("systemConfig", config)
-
- @socketio.on("searchForModels")
- def handle_search_models(search_folder: str):
- try:
- if not search_folder:
- socketio.emit(
- "foundModels",
- {"search_folder": None, "found_models": None},
- )
- else:
- (
- search_folder,
- found_models,
- ) = self.generate.model_manager.search_models(search_folder)
- socketio.emit(
- "foundModels",
- {"search_folder": search_folder, "found_models": found_models},
- )
- except Exception as e:
- self.handle_exceptions(e)
- print("\n")
-
- @socketio.on("addNewModel")
- def handle_add_model(new_model_config: dict):
- try:
- model_name = new_model_config["name"]
- del new_model_config["name"]
- model_attributes = new_model_config
- if len(model_attributes["vae"]) == 0:
- del model_attributes["vae"]
- update = False
- current_model_list = self.generate.model_manager.list_models()
- if model_name in current_model_list:
- update = True
-
- logger.info(f"Adding New Model: {model_name}")
-
- self.generate.model_manager.add_model(
- model_name=model_name,
- model_attributes=model_attributes,
- clobber=True,
- )
- self.generate.model_manager.commit(opt.conf)
-
- new_model_list = self.generate.model_manager.list_models()
- socketio.emit(
- "newModelAdded",
- {
- "new_model_name": model_name,
- "model_list": new_model_list,
- "update": update,
- },
- )
- logger.info(f"New Model Added: {model_name}")
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("deleteModel")
- def handle_delete_model(model_name: str):
- try:
- logger.info(f"Deleting Model: {model_name}")
- self.generate.model_manager.del_model(model_name)
- self.generate.model_manager.commit(opt.conf)
- updated_model_list = self.generate.model_manager.list_models()
- socketio.emit(
- "modelDeleted",
- {
- "deleted_model_name": model_name,
- "model_list": updated_model_list,
- },
- )
- logger.info(f"Model Deleted: {model_name}")
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("requestModelChange")
- def handle_set_model(model_name: str):
- try:
- logger.info(f"Model change requested: {model_name}")
- model = self.generate.set_model(model_name)
- model_list = self.generate.model_manager.list_models()
- if model is None:
- socketio.emit(
- "modelChangeFailed",
- {"model_name": model_name, "model_list": model_list},
- )
- else:
- socketio.emit(
- "modelChanged",
- {"model_name": model_name, "model_list": model_list},
- )
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("convertToDiffusers")
- def convert_to_diffusers(model_to_convert: dict):
- try:
- if model_info := self.generate.model_manager.model_info(model_name=model_to_convert["model_name"]):
- if "weights" in model_info:
- ckpt_path = Path(model_info["weights"])
- original_config_file = Path(model_info["config"])
- model_name = model_to_convert["model_name"]
- model_description = model_info["description"]
- else:
- self.socketio.emit("error", {"message": "Model is not a valid checkpoint file"})
- else:
- self.socketio.emit("error", {"message": "Could not retrieve model info."})
-
- if not ckpt_path.is_absolute():
- ckpt_path = Path(Globals.root, ckpt_path)
-
- if original_config_file and not original_config_file.is_absolute():
- original_config_file = Path(Globals.root, original_config_file)
-
- diffusers_path = Path(ckpt_path.parent.absolute(), f"{model_name}_diffusers")
-
- if model_to_convert["save_location"] == "root":
- diffusers_path = Path(global_converted_ckpts_dir(), f"{model_name}_diffusers")
-
- if model_to_convert["save_location"] == "custom" and model_to_convert["custom_location"] is not None:
- diffusers_path = Path(model_to_convert["custom_location"], f"{model_name}_diffusers")
-
- if diffusers_path.exists():
- shutil.rmtree(diffusers_path)
-
- self.generate.model_manager.convert_and_import(
- ckpt_path,
- diffusers_path,
- model_name=model_name,
- model_description=model_description,
- vae=None,
- original_config_file=original_config_file,
- commit_to_conf=opt.conf,
- )
-
- new_model_list = self.generate.model_manager.list_models()
- socketio.emit(
- "modelConverted",
- {
- "new_model_name": model_name,
- "model_list": new_model_list,
- "update": True,
- },
- )
- logger.info(f"Model Converted: {model_name}")
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("mergeDiffusersModels")
- def merge_diffusers_models(model_merge_info: dict):
- try:
- models_to_merge = model_merge_info["models_to_merge"]
- model_ids_or_paths = [self.generate.model_manager.model_name_or_path(x) for x in models_to_merge]
- merged_pipe = merge_diffusion_models(
- model_ids_or_paths,
- model_merge_info["alpha"],
- model_merge_info["interp"],
- model_merge_info["force"],
- )
-
- dump_path = global_models_dir() / "merged_models"
- if model_merge_info["model_merge_save_path"] is not None:
- dump_path = Path(model_merge_info["model_merge_save_path"])
-
- os.makedirs(dump_path, exist_ok=True)
- dump_path = dump_path / model_merge_info["merged_model_name"]
- merged_pipe.save_pretrained(dump_path, safe_serialization=1)
-
- merged_model_config = dict(
- model_name=model_merge_info["merged_model_name"],
- description=f'Merge of models {", ".join(models_to_merge)}',
- commit_to_conf=opt.conf,
- )
-
- if vae := self.generate.model_manager.config[models_to_merge[0]].get("vae", None):
- logger.info(f"Using configured VAE assigned to {models_to_merge[0]}")
- merged_model_config.update(vae=vae)
-
- self.generate.model_manager.import_diffuser_model(dump_path, **merged_model_config)
- new_model_list = self.generate.model_manager.list_models()
-
- socketio.emit(
- "modelsMerged",
- {
- "merged_models": models_to_merge,
- "merged_model_name": model_merge_info["merged_model_name"],
- "model_list": new_model_list,
- "update": True,
- },
- )
- logger.info(f"Models Merged: {models_to_merge}")
- logger.info(f"New Model Added: {model_merge_info['merged_model_name']}")
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("requestEmptyTempFolder")
- def empty_temp_folder():
- try:
- temp_files = glob.glob(os.path.join(self.temp_image_path, "*"))
- for f in temp_files:
- try:
- os.remove(f)
- thumbnail_path = os.path.join(
- self.thumbnail_image_path,
- os.path.splitext(os.path.basename(f))[0] + ".webp",
- )
- os.remove(thumbnail_path)
- except Exception as e:
- socketio.emit("error", {"message": f"Unable to delete {f}: {str(e)}"})
- pass
-
- socketio.emit("tempFolderEmptied")
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("requestSaveStagingAreaImageToGallery")
- def save_temp_image_to_gallery(url):
- try:
- image_path = self.get_image_path_from_url(url)
- new_path = os.path.join(self.result_path, os.path.basename(image_path))
- shutil.copy2(image_path, new_path)
-
- if os.path.splitext(new_path)[1] == ".png":
- metadata = retrieve_metadata(new_path)
- else:
- metadata = {}
-
- pil_image = Image.open(new_path)
-
- (width, height) = pil_image.size
-
- thumbnail_path = save_thumbnail(pil_image, os.path.basename(new_path), self.thumbnail_image_path)
-
- image_array = [
- {
- "url": self.get_url_from_image_path(new_path),
- "thumbnail": self.get_url_from_image_path(thumbnail_path),
- "mtime": os.path.getmtime(new_path),
- "metadata": metadata,
- "width": width,
- "height": height,
- "category": "result",
- }
- ]
-
- socketio.emit(
- "galleryImages",
- {"images": image_array, "category": "result"},
- )
-
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("requestLatestImages")
- def handle_request_latest_images(category, latest_mtime):
- try:
- base_path = self.result_path if category == "result" else self.init_image_path
-
- paths = []
-
- for ext in ("*.png", "*.jpg", "*.jpeg"):
- paths.extend(glob.glob(os.path.join(base_path, ext)))
-
- image_paths = sorted(paths, key=lambda x: os.path.getmtime(x), reverse=True)
-
- image_paths = list(
- filter(
- lambda x: os.path.getmtime(x) > latest_mtime,
- image_paths,
- )
- )
-
- image_array = []
-
- for path in image_paths:
- try:
- if os.path.splitext(path)[1] == ".png":
- metadata = retrieve_metadata(path)
- else:
- metadata = {}
-
- pil_image = Image.open(path)
- (width, height) = pil_image.size
-
- thumbnail_path = save_thumbnail(pil_image, os.path.basename(path), self.thumbnail_image_path)
-
- image_array.append(
- {
- "url": self.get_url_from_image_path(path),
- "thumbnail": self.get_url_from_image_path(thumbnail_path),
- "mtime": os.path.getmtime(path),
- "metadata": metadata.get("sd-metadata"),
- "dreamPrompt": metadata.get("Dream"),
- "width": width,
- "height": height,
- "category": category,
- }
- )
- except Exception as e:
- socketio.emit("error", {"message": f"Unable to load {path}: {str(e)}"})
- pass
-
- socketio.emit(
- "galleryImages",
- {"images": image_array, "category": category},
- )
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("requestImages")
- def handle_request_images(category, earliest_mtime=None):
- try:
- page_size = 50
-
- base_path = self.result_path if category == "result" else self.init_image_path
-
- paths = []
- for ext in ("*.png", "*.jpg", "*.jpeg"):
- paths.extend(glob.glob(os.path.join(base_path, ext)))
-
- image_paths = sorted(paths, key=lambda x: os.path.getmtime(x), reverse=True)
-
- if earliest_mtime:
- image_paths = list(
- filter(
- lambda x: os.path.getmtime(x) < earliest_mtime,
- image_paths,
- )
- )
-
- areMoreImagesAvailable = len(image_paths) >= page_size
- image_paths = image_paths[slice(0, page_size)]
-
- image_array = []
- for path in image_paths:
- try:
- if os.path.splitext(path)[1] == ".png":
- metadata = retrieve_metadata(path)
- else:
- metadata = {}
-
- pil_image = Image.open(path)
- (width, height) = pil_image.size
-
- thumbnail_path = save_thumbnail(pil_image, os.path.basename(path), self.thumbnail_image_path)
-
- image_array.append(
- {
- "url": self.get_url_from_image_path(path),
- "thumbnail": self.get_url_from_image_path(thumbnail_path),
- "mtime": os.path.getmtime(path),
- "metadata": metadata.get("sd-metadata"),
- "dreamPrompt": metadata.get("Dream"),
- "width": width,
- "height": height,
- "category": category,
- }
- )
- except Exception as e:
- logger.info(f"Unable to load {path}")
- socketio.emit("error", {"message": f"Unable to load {path}: {str(e)}"})
- pass
-
- socketio.emit(
- "galleryImages",
- {
- "images": image_array,
- "areMoreImagesAvailable": areMoreImagesAvailable,
- "category": category,
- },
- )
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("generateImage")
- def handle_generate_image_event(generation_parameters, esrgan_parameters, facetool_parameters):
- try:
- # truncate long init_mask/init_img base64 if needed
- printable_parameters = {
- **generation_parameters,
- }
-
- if "init_img" in generation_parameters:
- printable_parameters["init_img"] = printable_parameters["init_img"][:64] + "..."
-
- if "init_mask" in generation_parameters:
- printable_parameters["init_mask"] = printable_parameters["init_mask"][:64] + "..."
-
- logger.info(f"Image Generation Parameters:\n\n{printable_parameters}\n")
- logger.info(f"ESRGAN Parameters: {esrgan_parameters}")
- logger.info(f"Facetool Parameters: {facetool_parameters}")
-
- self.generate_images(
- generation_parameters,
- esrgan_parameters,
- facetool_parameters,
- )
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("runPostprocessing")
- def handle_run_postprocessing(original_image, postprocessing_parameters):
- try:
- logger.info(f'Postprocessing requested for "{original_image["url"]}": {postprocessing_parameters}')
-
- progress = Progress()
-
- socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- original_image_path = self.get_image_path_from_url(original_image["url"])
-
- image = Image.open(original_image_path)
-
- try:
- seed = original_image["metadata"]["image"]["seed"]
- except KeyError:
- seed = "unknown_seed"
- pass
-
- if postprocessing_parameters["type"] == "esrgan":
- progress.set_current_status("common.statusUpscalingESRGAN")
- elif postprocessing_parameters["type"] == "gfpgan":
- progress.set_current_status("common.statusRestoringFacesGFPGAN")
- elif postprocessing_parameters["type"] == "codeformer":
- progress.set_current_status("common.statusRestoringFacesCodeFormer")
-
- socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- if postprocessing_parameters["type"] == "esrgan":
- image = self.esrgan.process(
- image=image,
- upsampler_scale=postprocessing_parameters["upscale"][0],
- denoise_str=postprocessing_parameters["upscale"][1],
- strength=postprocessing_parameters["upscale"][2],
- seed=seed,
- )
- elif postprocessing_parameters["type"] == "gfpgan":
- image = self.gfpgan.process(
- image=image,
- strength=postprocessing_parameters["facetool_strength"],
- seed=seed,
- )
- elif postprocessing_parameters["type"] == "codeformer":
- image = self.codeformer.process(
- image=image,
- strength=postprocessing_parameters["facetool_strength"],
- fidelity=postprocessing_parameters["codeformer_fidelity"],
- seed=seed,
- device="cpu" if str(self.generate.device) == "mps" else self.generate.device,
- )
- else:
- raise TypeError(f'{postprocessing_parameters["type"]} is not a valid postprocessing type')
-
- progress.set_current_status("common.statusSavingImage")
- socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- postprocessing_parameters["seed"] = seed
- metadata = self.parameters_to_post_processed_image_metadata(
- parameters=postprocessing_parameters,
- original_image_path=original_image_path,
- )
-
- command = parameters_to_command(postprocessing_parameters)
-
- (width, height) = image.size
-
- path = self.save_result_image(
- image,
- command,
- metadata,
- self.result_path,
- postprocessing=postprocessing_parameters["type"],
- )
-
- thumbnail_path = save_thumbnail(image, os.path.basename(path), self.thumbnail_image_path)
-
- self.write_log_message(
- f'[Postprocessed] "{original_image_path}" > "{path}": {postprocessing_parameters}'
- )
-
- progress.mark_complete()
- socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- socketio.emit(
- "postprocessingResult",
- {
- "url": self.get_url_from_image_path(path),
- "thumbnail": self.get_url_from_image_path(thumbnail_path),
- "mtime": os.path.getmtime(path),
- "metadata": metadata,
- "dreamPrompt": command,
- "width": width,
- "height": height,
- },
- )
- except Exception as e:
- self.handle_exceptions(e)
-
- @socketio.on("cancel")
- def handle_cancel():
- logger.info("Cancel processing requested")
- self.canceled.set()
-
- # TODO: I think this needs a safety mechanism.
- @socketio.on("deleteImage")
- def handle_delete_image(url, thumbnail, uuid, category):
- try:
- logger.info(f'Delete requested "{url}"')
- from send2trash import send2trash
-
- path = self.get_image_path_from_url(url)
- thumbnail_path = self.get_image_path_from_url(thumbnail)
-
- send2trash(path)
- send2trash(thumbnail_path)
-
- socketio.emit(
- "imageDeleted",
- {"url": url, "uuid": uuid, "category": category},
- )
- except Exception as e:
- self.handle_exceptions(e)
-
- # App Functions
- def get_system_config(self):
- model_list: dict = self.generate.model_manager.list_models()
- active_model_name = None
-
- for model_name, model_dict in model_list.items():
- if model_dict["status"] == "active":
- active_model_name = model_name
-
- return {
- "model": "stable diffusion",
- "model_weights": active_model_name,
- "model_hash": self.generate.model_hash,
- "app_id": APP_ID,
- "app_version": APP_VERSION,
- }
-
- def generate_images(self, generation_parameters, esrgan_parameters, facetool_parameters):
- try:
- self.canceled.clear()
-
- step_index = 1
- prior_variations = (
- generation_parameters["with_variations"] if "with_variations" in generation_parameters else []
- )
-
- actual_generation_mode = generation_parameters["generation_mode"]
- original_bounding_box = None
-
- progress = Progress(generation_parameters=generation_parameters)
-
- self.socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- """
- TODO:
- If a result image is used as an init image, and then deleted, we will want to be
- able to use it as an init image in the future. Need to handle this case.
- """
-
- """
- Prepare for generation based on generation_mode
- """
- if generation_parameters["generation_mode"] == "unifiedCanvas":
- """
- generation_parameters["init_img"] is a base64 image
- generation_parameters["init_mask"] is a base64 image
-
- So we need to convert each into a PIL Image.
- """
-
- init_img_url = generation_parameters["init_img"]
-
- original_bounding_box = generation_parameters["bounding_box"].copy()
-
- initial_image = dataURL_to_image(generation_parameters["init_img"]).convert("RGBA")
-
- """
- The outpaint image and mask are pre-cropped by the UI, so the bounding box we pass
- to the generator should be:
- {
- "x": 0,
- "y": 0,
- "width": original_bounding_box["width"],
- "height": original_bounding_box["height"]
- }
- """
-
- generation_parameters["bounding_box"]["x"] = 0
- generation_parameters["bounding_box"]["y"] = 0
-
- # Convert mask dataURL to an image and convert to greyscale
- mask_image = dataURL_to_image(generation_parameters["init_mask"]).convert("L")
-
- actual_generation_mode = get_canvas_generation_mode(initial_image, mask_image)
-
- """
- Apply the mask to the init image, creating a "mask" image with
- transparency where inpainting should occur. This is the kind of
- mask that prompt2image() needs.
- """
- alpha_mask = initial_image.copy()
- alpha_mask.putalpha(mask_image)
-
- generation_parameters["init_img"] = initial_image
- generation_parameters["init_mask"] = alpha_mask
-
- # Remove the unneeded parameters for whichever mode we are doing
- if actual_generation_mode == "inpainting":
- generation_parameters.pop("seam_size", None)
- generation_parameters.pop("seam_blur", None)
- generation_parameters.pop("seam_strength", None)
- generation_parameters.pop("seam_steps", None)
- generation_parameters.pop("tile_size", None)
- generation_parameters.pop("force_outpaint", None)
- elif actual_generation_mode == "img2img":
- generation_parameters["height"] = original_bounding_box["height"]
- generation_parameters["width"] = original_bounding_box["width"]
- generation_parameters.pop("init_mask", None)
- generation_parameters.pop("seam_size", None)
- generation_parameters.pop("seam_blur", None)
- generation_parameters.pop("seam_strength", None)
- generation_parameters.pop("seam_steps", None)
- generation_parameters.pop("tile_size", None)
- generation_parameters.pop("force_outpaint", None)
- generation_parameters.pop("infill_method", None)
- elif actual_generation_mode == "txt2img":
- generation_parameters["height"] = original_bounding_box["height"]
- generation_parameters["width"] = original_bounding_box["width"]
- generation_parameters.pop("strength", None)
- generation_parameters.pop("fit", None)
- generation_parameters.pop("init_img", None)
- generation_parameters.pop("init_mask", None)
- generation_parameters.pop("seam_size", None)
- generation_parameters.pop("seam_blur", None)
- generation_parameters.pop("seam_strength", None)
- generation_parameters.pop("seam_steps", None)
- generation_parameters.pop("tile_size", None)
- generation_parameters.pop("force_outpaint", None)
- generation_parameters.pop("infill_method", None)
-
- elif generation_parameters["generation_mode"] == "img2img":
- init_img_url = generation_parameters["init_img"]
- init_img_path = self.get_image_path_from_url(init_img_url)
- generation_parameters["init_img"] = Image.open(init_img_path).convert("RGB")
-
- def image_progress(intermediate_state: PipelineIntermediateState):
- if self.canceled.is_set():
- raise CanceledException
-
- nonlocal step_index
- nonlocal generation_parameters
- nonlocal progress
-
- step = intermediate_state.step
- if intermediate_state.predicted_original is not None:
- # Some schedulers report not only the noisy latents at the current timestep,
- # but also their estimate so far of what the de-noised latents will be.
- sample = intermediate_state.predicted_original
- else:
- sample = intermediate_state.latents
-
- generation_messages = {
- "txt2img": "common.statusGeneratingTextToImage",
- "img2img": "common.statusGeneratingImageToImage",
- "inpainting": "common.statusGeneratingInpainting",
- "outpainting": "common.statusGeneratingOutpainting",
- }
-
- progress.set_current_step(step + 1)
- progress.set_current_status(f"{generation_messages[actual_generation_mode]}")
- progress.set_current_status_has_steps(True)
-
- if (
- generation_parameters["progress_images"]
- and step % generation_parameters["save_intermediates"] == 0
- and step < generation_parameters["steps"] - 1
- ):
- image = self.generate.sample_to_image(sample)
- metadata = self.parameters_to_generated_image_metadata(generation_parameters)
- command = parameters_to_command(generation_parameters)
-
- (width, height) = image.size
-
- path = self.save_result_image(
- image,
- command,
- metadata,
- self.intermediate_path,
- step_index=step_index,
- postprocessing=False,
- )
-
- step_index += 1
- self.socketio.emit(
- "intermediateResult",
- {
- "url": self.get_url_from_image_path(path),
- "mtime": os.path.getmtime(path),
- "metadata": metadata,
- "width": width,
- "height": height,
- "generationMode": generation_parameters["generation_mode"],
- "boundingBox": original_bounding_box,
- },
- )
-
- if generation_parameters["progress_latents"]:
- image = self.generate.sample_to_lowres_estimated_image(sample)
- (width, height) = image.size
- width *= 8
- height *= 8
- img_base64 = image_to_dataURL(image, image_format="JPEG")
- self.socketio.emit(
- "intermediateResult",
- {
- "url": img_base64,
- "isBase64": True,
- "mtime": 0,
- "metadata": {},
- "width": width,
- "height": height,
- "generationMode": generation_parameters["generation_mode"],
- "boundingBox": original_bounding_box,
- },
- )
-
- self.socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- def image_done(image, seed, first_seed, attention_maps_image=None):
- if self.canceled.is_set():
- raise CanceledException
-
- nonlocal generation_parameters
- nonlocal esrgan_parameters
- nonlocal facetool_parameters
- nonlocal progress
-
- nonlocal prior_variations
-
- """
- Tidy up after generation based on generation_mode
- """
- # paste the inpainting image back onto the original
- if generation_parameters["generation_mode"] == "inpainting":
- image = paste_image_into_bounding_box(
- Image.open(init_img_path),
- image,
- **generation_parameters["bounding_box"],
- )
-
- progress.set_current_status("common.statusGenerationComplete")
-
- self.socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- all_parameters = generation_parameters
- postprocessing = False
-
- if "variation_amount" in all_parameters and all_parameters["variation_amount"] > 0:
- first_seed = first_seed or seed
- this_variation = [[seed, all_parameters["variation_amount"]]]
- all_parameters["with_variations"] = prior_variations + this_variation
- all_parameters["seed"] = first_seed
- elif "with_variations" in all_parameters:
- all_parameters["seed"] = first_seed
- else:
- all_parameters["seed"] = seed
-
- if self.canceled.is_set():
- raise CanceledException
-
- if esrgan_parameters:
- progress.set_current_status("common.statusUpscaling")
- progress.set_current_status_has_steps(False)
- self.socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- image = self.esrgan.process(
- image=image,
- upsampler_scale=esrgan_parameters["level"],
- denoise_str=esrgan_parameters["denoise_str"],
- strength=esrgan_parameters["strength"],
- seed=seed,
- )
-
- postprocessing = True
- all_parameters["upscale"] = [
- esrgan_parameters["level"],
- esrgan_parameters["denoise_str"],
- esrgan_parameters["strength"],
- ]
-
- if self.canceled.is_set():
- raise CanceledException
-
- if facetool_parameters:
- if facetool_parameters["type"] == "gfpgan":
- progress.set_current_status("common.statusRestoringFacesGFPGAN")
- elif facetool_parameters["type"] == "codeformer":
- progress.set_current_status("common.statusRestoringFacesCodeFormer")
-
- progress.set_current_status_has_steps(False)
- self.socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- if facetool_parameters["type"] == "gfpgan":
- image = self.gfpgan.process(
- image=image,
- strength=facetool_parameters["strength"],
- seed=seed,
- )
- elif facetool_parameters["type"] == "codeformer":
- image = self.codeformer.process(
- image=image,
- strength=facetool_parameters["strength"],
- fidelity=facetool_parameters["codeformer_fidelity"],
- seed=seed,
- device="cpu" if str(self.generate.device) == "mps" else self.generate.device,
- )
- all_parameters["codeformer_fidelity"] = facetool_parameters["codeformer_fidelity"]
-
- postprocessing = True
- all_parameters["facetool_strength"] = facetool_parameters["strength"]
- all_parameters["facetool_type"] = facetool_parameters["type"]
-
- progress.set_current_status("common.statusSavingImage")
- self.socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- # restore the stashed URLS and discard the paths, we are about to send the result to client
- all_parameters["init_img"] = (
- init_img_url if generation_parameters["generation_mode"] == "img2img" else ""
- )
-
- if "init_mask" in all_parameters:
- # TODO: store the mask in metadata
- all_parameters["init_mask"] = ""
-
- if generation_parameters["generation_mode"] == "unifiedCanvas":
- all_parameters["bounding_box"] = original_bounding_box
-
- metadata = self.parameters_to_generated_image_metadata(all_parameters)
-
- command = parameters_to_command(all_parameters)
-
- (width, height) = image.size
-
- generated_image_outdir = (
- self.result_path
- if generation_parameters["generation_mode"] in ["txt2img", "img2img"]
- else self.temp_image_path
- )
-
- path = self.save_result_image(
- image,
- command,
- metadata,
- generated_image_outdir,
- postprocessing=postprocessing,
- )
-
- thumbnail_path = save_thumbnail(image, os.path.basename(path), self.thumbnail_image_path)
-
- logger.info(f'Image generated: "{path}"\n')
- self.write_log_message(f'[Generated] "{path}": {command}')
-
- if progress.total_iterations > progress.current_iteration:
- progress.set_current_step(1)
- progress.set_current_status("common.statusIterationComplete")
- progress.set_current_status_has_steps(False)
- else:
- progress.mark_complete()
-
- self.socketio.emit("progressUpdate", progress.to_formatted_dict())
- eventlet.sleep(0)
-
- parsed_prompt, _ = get_prompt_structure(generation_parameters["prompt"])
- with self.generate.model_context as model:
- tokens = (
- None
- if type(parsed_prompt) is Blend
- else get_tokens_for_prompt_object(model.tokenizer, parsed_prompt)
- )
- attention_maps_image_base64_url = (
- None if attention_maps_image is None else image_to_dataURL(attention_maps_image)
- )
-
- self.socketio.emit(
- "generationResult",
- {
- "url": self.get_url_from_image_path(path),
- "thumbnail": self.get_url_from_image_path(thumbnail_path),
- "mtime": os.path.getmtime(path),
- "metadata": metadata,
- "dreamPrompt": command,
- "width": width,
- "height": height,
- "boundingBox": original_bounding_box,
- "generationMode": generation_parameters["generation_mode"],
- "attentionMaps": attention_maps_image_base64_url,
- "tokens": tokens,
- },
- )
- eventlet.sleep(0)
-
- progress.set_current_iteration(progress.current_iteration + 1)
-
- self.generate.prompt2image(
- **generation_parameters,
- step_callback=image_progress,
- image_callback=image_done,
- )
-
- except KeyboardInterrupt:
- # Clear the CUDA cache on an exception
- self.empty_cuda_cache()
- self.socketio.emit("processingCanceled")
- raise
- except CanceledException:
- # Clear the CUDA cache on an exception
- self.empty_cuda_cache()
- self.socketio.emit("processingCanceled")
- pass
- except Exception as e:
- # Clear the CUDA cache on an exception
- self.empty_cuda_cache()
- logger.error(e)
- self.handle_exceptions(e)
-
- def empty_cuda_cache(self):
- if self.generate.device.type == "cuda":
- import torch.cuda
-
- torch.cuda.empty_cache()
-
- def parameters_to_generated_image_metadata(self, parameters):
- try:
- # top-level metadata minus `image` or `images`
- metadata = self.get_system_config()
- # remove any image keys not mentioned in RFC #266
- rfc266_img_fields = [
- "type",
- "postprocessing",
- "sampler",
- "prompt",
- "seed",
- "variations",
- "steps",
- "cfg_scale",
- "threshold",
- "perlin",
- "step_number",
- "width",
- "height",
- "extra",
- "seamless",
- "hires_fix",
- ]
-
- rfc_dict = {}
-
- for item in parameters.items():
- key, value = item
- if key in rfc266_img_fields:
- rfc_dict[key] = value
-
- postprocessing = []
-
- rfc_dict["type"] = parameters["generation_mode"]
-
- # 'postprocessing' is either null or an array of applied postprocessing settings
- if "facetool_strength" in parameters:
- facetool_parameters = {
- "type": str(parameters["facetool_type"]),
- "strength": float(parameters["facetool_strength"]),
- }
-
- if parameters["facetool_type"] == "codeformer":
- facetool_parameters["fidelity"] = float(parameters["codeformer_fidelity"])
-
- postprocessing.append(facetool_parameters)
-
- if "upscale" in parameters:
- postprocessing.append(
- {
- "type": "esrgan",
- "scale": int(parameters["upscale"][0]),
- "denoise_str": int(parameters["upscale"][1]),
- "strength": float(parameters["upscale"][2]),
- }
- )
-
- rfc_dict["postprocessing"] = postprocessing if len(postprocessing) > 0 else None
-
- # semantic drift
- rfc_dict["sampler"] = parameters["sampler_name"]
-
- # 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs
- variations = []
-
- if "with_variations" in parameters:
- variations = [{"seed": x[0], "weight": x[1]} for x in parameters["with_variations"]]
-
- rfc_dict["variations"] = variations
-
- if rfc_dict["type"] == "img2img":
- rfc_dict["strength"] = parameters["strength"]
- rfc_dict["fit"] = parameters["fit"] # TODO: Noncompliant
- rfc_dict["orig_hash"] = calculate_init_img_hash(self.get_image_path_from_url(parameters["init_img"]))
- rfc_dict["init_image_path"] = parameters["init_img"] # TODO: Noncompliant
-
- metadata["image"] = rfc_dict
-
- return metadata
-
- except Exception as e:
- self.handle_exceptions(e)
-
- def parameters_to_post_processed_image_metadata(self, parameters, original_image_path):
- try:
- current_metadata = retrieve_metadata(original_image_path)["sd-metadata"]
- postprocessing_metadata = {}
-
- """
- If we don't have original image metadata to reconstruct from, we need to
- record the original image and its hash.
- """
- if "image" not in current_metadata:
- current_metadata["image"] = {}
-
- orig_hash = calculate_init_img_hash(self.get_image_path_from_url(original_image_path))
-
- postprocessing_metadata["orig_path"] = original_image_path
- postprocessing_metadata["orig_hash"] = orig_hash
-
- if parameters["type"] == "esrgan":
- postprocessing_metadata["type"] = "esrgan"
- postprocessing_metadata["scale"] = parameters["upscale"][0]
- postprocessing_metadata["denoise_str"] = parameters["upscale"][1]
- postprocessing_metadata["strength"] = parameters["upscale"][2]
- elif parameters["type"] == "gfpgan":
- postprocessing_metadata["type"] = "gfpgan"
- postprocessing_metadata["strength"] = parameters["facetool_strength"]
- elif parameters["type"] == "codeformer":
- postprocessing_metadata["type"] = "codeformer"
- postprocessing_metadata["strength"] = parameters["facetool_strength"]
- postprocessing_metadata["fidelity"] = parameters["codeformer_fidelity"]
-
- else:
- raise TypeError(f"Invalid type: {parameters['type']}")
-
- if "postprocessing" in current_metadata["image"] and isinstance(
- current_metadata["image"]["postprocessing"], list
- ):
- current_metadata["image"]["postprocessing"].append(postprocessing_metadata)
- else:
- current_metadata["image"]["postprocessing"] = [postprocessing_metadata]
-
- return current_metadata
-
- except Exception as e:
- self.handle_exceptions(e)
-
- def save_result_image(
- self,
- image,
- command,
- metadata,
- output_dir,
- step_index=None,
- postprocessing=False,
- ):
- try:
- pngwriter = PngWriter(output_dir)
-
- number_prefix = pngwriter.unique_prefix()
-
- uuid = uuid4().hex
- truncated_uuid = uuid[:8]
-
- seed = "unknown_seed"
-
- if "image" in metadata:
- if "seed" in metadata["image"]:
- seed = metadata["image"]["seed"]
-
- filename = f"{number_prefix}.{truncated_uuid}.{seed}"
-
- if step_index:
- filename += f".{step_index}"
- if postprocessing:
- filename += ".postprocessed"
-
- filename += ".png"
-
- path = pngwriter.save_image_and_prompt_to_png(
- image=image,
- dream_prompt=command,
- metadata=metadata,
- name=filename,
- )
-
- return os.path.abspath(path)
-
- except Exception as e:
- self.handle_exceptions(e)
-
- def make_unique_init_image_filename(self, name):
- try:
- uuid = uuid4().hex
- split = os.path.splitext(name)
- name = f"{split[0]}.{uuid}{split[1]}"
- return name
- except Exception as e:
- self.handle_exceptions(e)
-
- def calculate_real_steps(self, steps, strength, has_init_image):
- import math
-
- return math.floor(strength * steps) if has_init_image else steps
-
- def write_log_message(self, message):
- """Logs the filename and parameters used to generate or process that image to log file"""
- try:
- message = f"{message}\n"
- with open(self.log_path, "a", encoding="utf-8") as file:
- file.writelines(message)
-
- except Exception as e:
- self.handle_exceptions(e)
-
- def get_image_path_from_url(self, url):
- """Given a url to an image used by the client, returns the absolute file path to that image"""
- try:
- if "init-images" in url:
- return os.path.abspath(os.path.join(self.init_image_path, os.path.basename(url)))
- elif "mask-images" in url:
- return os.path.abspath(os.path.join(self.mask_image_path, os.path.basename(url)))
- elif "intermediates" in url:
- return os.path.abspath(os.path.join(self.intermediate_path, os.path.basename(url)))
- elif "temp-images" in url:
- return os.path.abspath(os.path.join(self.temp_image_path, os.path.basename(url)))
- elif "thumbnails" in url:
- return os.path.abspath(os.path.join(self.thumbnail_image_path, os.path.basename(url)))
- else:
- return os.path.abspath(os.path.join(self.result_path, os.path.basename(url)))
- except Exception as e:
- self.handle_exceptions(e)
-
- def get_url_from_image_path(self, path):
- """Given an absolute file path to an image, returns the URL that the client can use to load the image"""
- try:
- if "init-images" in path:
- return os.path.join(self.init_image_url, os.path.basename(path))
- elif "mask-images" in path:
- return os.path.join(self.mask_image_url, os.path.basename(path))
- elif "intermediates" in path:
- return os.path.join(self.intermediate_url, os.path.basename(path))
- elif "temp-images" in path:
- return os.path.join(self.temp_image_url, os.path.basename(path))
- elif "thumbnails" in path:
- return os.path.join(self.thumbnail_image_url, os.path.basename(path))
- else:
- return os.path.join(self.result_url, os.path.basename(path))
- except Exception as e:
- self.handle_exceptions(e)
-
- def save_file_unique_uuid_name(self, bytes, name, path):
- try:
- uuid = uuid4().hex
- truncated_uuid = uuid[:8]
-
- split = os.path.splitext(name)
- name = f"{split[0]}.{truncated_uuid}{split[1]}"
-
- file_path = os.path.join(path, name)
-
- os.makedirs(os.path.dirname(file_path), exist_ok=True)
-
- newFile = open(file_path, "wb")
- newFile.write(bytes)
-
- return file_path
- except Exception as e:
- self.handle_exceptions(e)
-
- def handle_exceptions(self, exception, emit_key: str = "error"):
- self.socketio.emit(emit_key, {"message": (str(exception))})
- print("\n")
- traceback.print_exc()
- print("\n")
-
-
-class Progress:
- def __init__(self, generation_parameters=None):
- self.current_step = 1
- self.total_steps = (
- self._calculate_real_steps(
- steps=generation_parameters["steps"],
- strength=generation_parameters["strength"] if "strength" in generation_parameters else None,
- has_init_image="init_img" in generation_parameters,
- )
- if generation_parameters
- else 1
- )
- self.current_iteration = 1
- self.total_iterations = generation_parameters["iterations"] if generation_parameters else 1
- self.current_status = "common.statusPreparing"
- self.is_processing = True
- self.current_status_has_steps = False
- self.has_error = False
-
- def set_current_step(self, current_step):
- self.current_step = current_step
-
- def set_total_steps(self, total_steps):
- self.total_steps = total_steps
-
- def set_current_iteration(self, current_iteration):
- self.current_iteration = current_iteration
-
- def set_total_iterations(self, total_iterations):
- self.total_iterations = total_iterations
-
- def set_current_status(self, current_status):
- self.current_status = current_status
-
- def set_is_processing(self, is_processing):
- self.is_processing = is_processing
-
- def set_current_status_has_steps(self, current_status_has_steps):
- self.current_status_has_steps = current_status_has_steps
-
- def set_has_error(self, has_error):
- self.has_error = has_error
-
- def mark_complete(self):
- self.current_status = "common.statusProcessingComplete"
- self.current_step = 0
- self.total_steps = 0
- self.current_iteration = 0
- self.total_iterations = 0
- self.is_processing = False
-
- def to_formatted_dict(
- self,
- ):
- return {
- "currentStep": self.current_step,
- "totalSteps": self.total_steps,
- "currentIteration": self.current_iteration,
- "totalIterations": self.total_iterations,
- "currentStatus": self.current_status,
- "isProcessing": self.is_processing,
- "currentStatusHasSteps": self.current_status_has_steps,
- "hasError": self.has_error,
- }
-
- def _calculate_real_steps(self, steps, strength, has_init_image):
- return math.floor(strength * steps) if has_init_image else steps
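For img2img-style runs, the Progress class above derives the effective step count as floor(strength * steps). A one-line sanity check of that arithmetic (a stand-alone sketch, not part of this change, using only the standard library):

    import math

    # With an init image, strength scales the requested step count down:
    # strength=0.75 and steps=30 yield floor(22.5) = 22 effective steps.
    assert math.floor(0.75 * 30) == 22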
-
-
-class CanceledException(Exception):
- pass
-
-
-def copy_image_from_bounding_box(image: ImageType, x: int, y: int, width: int, height: int) -> ImageType:
- """
- Returns a copy of an image, cropped to a bounding box.
- """
- with image as im:
- bounds = (x, y, x + width, y + height)
- im_cropped = im.crop(bounds)
- return im_cropped
-
-
-def dataURL_to_image(dataURL: str) -> ImageType:
- """
- Converts a base64 image dataURL into an image.
- The dataURL is split on the first comma.
- """
- image = Image.open(
- io.BytesIO(
- base64.decodebytes(
- bytes(
- dataURL.split(",", 1)[1],
- "utf-8",
- )
- )
- )
- )
- return image
-
-
-def image_to_dataURL(image: ImageType, image_format: str = "PNG") -> str:
- """
- Converts an image into a base64 image dataURL.
- """
- buffered = io.BytesIO()
- image.save(buffered, format=image_format)
- mime_type = Image.MIME.get(image_format.upper(), "image/" + image_format.lower())
- image_base64 = f"data:{mime_type};base64," + base64.b64encode(buffered.getvalue()).decode("UTF-8")
- return image_base64
-
-
-def dataURL_to_bytes(dataURL: str) -> bytes:
- """
- Converts a base64 image dataURL into bytes.
- The dataURL is split on the first comma.
- """
- return base64.decodebytes(
- bytes(
- dataURL.split(",", 1)[1],
- "utf-8",
- )
- )
-
-
-def paste_image_into_bounding_box(
- recipient_image: ImageType,
- donor_image: ImageType,
- x: int,
- y: int,
- width: int,
- height: int,
-) -> ImageType:
- """
- Pastes an image onto another with a bounding box.
- """
- with recipient_image as im:
- bounds = (x, y, x + width, y + height)
- im.paste(donor_image, bounds)
- return recipient_image
-
-
-def save_thumbnail(
- image: ImageType,
- filename: str,
- path: str,
- size: int = 256,
-) -> str:
- """
- Saves a thumbnail of an image, returning its path.
- """
- base_filename = os.path.splitext(filename)[0]
- thumbnail_path = os.path.join(path, base_filename + ".webp")
-
- if os.path.exists(thumbnail_path):
- return thumbnail_path
-
- thumbnail_width = size
- thumbnail_height = round(size * (image.height / image.width))
-
- image_copy = image.copy()
- image_copy.thumbnail(size=(thumbnail_width, thumbnail_height))
-
- image_copy.save(thumbnail_path, "WEBP")
-
- return thumbnail_path
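The dataURL_to_image / image_to_dataURL / dataURL_to_bytes helpers removed with this module are useful on their own. Below is a minimal stand-alone sketch of the same base64 data-URL round trip (not part of this change); it assumes only Pillow, and the function names are illustrative rather than the removed ones:

    import base64
    import io

    from PIL import Image

    def image_to_data_url(image: Image.Image, image_format: str = "PNG") -> str:
        # Serialize the image and wrap the base64 payload in a data URL.
        buffered = io.BytesIO()
        image.save(buffered, format=image_format)
        payload = base64.b64encode(buffered.getvalue()).decode("utf-8")
        return f"data:image/{image_format.lower()};base64,{payload}"

    def data_url_to_image(data_url: str) -> Image.Image:
        # The payload follows the first comma, exactly as in the removed helper.
        return Image.open(io.BytesIO(base64.b64decode(data_url.split(",", 1)[1])))

    restored = data_url_to_image(image_to_data_url(Image.new("RGB", (8, 8), "red")))
    assert restored.size == (8, 8)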
diff --git a/invokeai/backend/web/modules/create_cmd_parser.py b/invokeai/backend/web/modules/create_cmd_parser.py
deleted file mode 100644
index 856522989b..0000000000
--- a/invokeai/backend/web/modules/create_cmd_parser.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import argparse
-import os
-
-from ...args import PRECISION_CHOICES
-
-
-def create_cmd_parser():
- parser = argparse.ArgumentParser(description="InvokeAI web UI")
- parser.add_argument(
- "--host",
- type=str,
- help="The host to serve on",
- default="localhost",
- )
- parser.add_argument("--port", type=int, help="The port to serve on", default=9090)
- parser.add_argument(
- "--cors",
- nargs="*",
- type=str,
- help="Additional allowed origins, comma-separated",
- )
- parser.add_argument(
- "--embedding_path",
- type=str,
- help="Path to a pre-trained embedding manager checkpoint - can only be set on command line",
- )
- # TODO: Can't get flask to serve images from any dir (saving to the dir does work when specified)
- # parser.add_argument(
- # "--output_dir",
- # default="outputs/",
- # type=str,
- # help="Directory for output images",
- # )
- parser.add_argument(
- "-v",
- "--verbose",
- action="store_true",
- help="Enables verbose logging",
- )
- parser.add_argument(
- "--precision",
- dest="precision",
- type=str,
- choices=PRECISION_CHOICES,
- metavar="PRECISION",
- help=f'Set model precision. Defaults to auto selected based on device. Options: {", ".join(PRECISION_CHOICES)}',
- default="auto",
- )
- parser.add_argument(
- "--free_gpu_mem",
- dest="free_gpu_mem",
- action="store_true",
- help="Force free gpu memory before final decoding",
- )
-
- return parser
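A short usage sketch for the removed web-UI parser, with illustrative argument values; it assumes create_cmd_parser() as defined above is importable:

    # Build the web UI argument parser and parse a sample command line.
    parser = create_cmd_parser()
    opts = parser.parse_args(["--host", "0.0.0.0", "--port", "9191", "--verbose"])
    print(opts.host, opts.port, opts.verbose)  # 0.0.0.0 9191 True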
diff --git a/invokeai/backend/web/modules/get_canvas_generation_mode.py b/invokeai/backend/web/modules/get_canvas_generation_mode.py
deleted file mode 100644
index 6d680016e7..0000000000
--- a/invokeai/backend/web/modules/get_canvas_generation_mode.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from typing import Literal, Union
-
-from PIL import Image, ImageChops
-from PIL.Image import Image as ImageType
-
-
-# https://stackoverflow.com/questions/43864101/python-pil-check-if-image-is-transparent
-def check_for_any_transparency(img: Union[ImageType, str]) -> bool:
- if type(img) is str:
- img = Image.open(img)
-
- if img.info.get("transparency", None) is not None:
- return True
- if img.mode == "P":
- transparent = img.info.get("transparency", -1)
- for _, index in img.getcolors():
- if index == transparent:
- return True
- elif img.mode == "RGBA":
- extrema = img.getextrema()
- if extrema[3][0] < 255:
- return True
- return False
-
-
-def get_canvas_generation_mode(
- init_img: Union[ImageType, str], init_mask: Union[ImageType, str]
-) -> Literal["txt2img", "outpainting", "inpainting", "img2img",]:
- if type(init_img) is str:
- init_img = Image.open(init_img)
-
- if type(init_mask) is str:
- init_mask = Image.open(init_mask)
-
- init_img = init_img.convert("RGBA")
-
- # Get alpha from init_img
- init_img_alpha = init_img.split()[-1]
- init_img_alpha_mask = init_img_alpha.convert("L")
- init_img_has_transparency = check_for_any_transparency(init_img)
-
- if init_img_has_transparency:
- init_img_is_fully_transparent = True if init_img_alpha_mask.getbbox() is None else False
-
- """
- Mask images are white in areas where no change should be made, black where changes
- should be made.
- """
-
- # Fit the mask to init_img's size and convert it to greyscale
- init_mask = init_mask.resize(init_img.size).convert("L")
-
- """
- PIL.Image.getbbox() returns the bounding box of non-zero areas of the image, so we first
- invert the mask image so that masked areas are white and other areas black == zero.
- getbbox() now tells us if there are any masked areas.
- """
- init_mask_bbox = ImageChops.invert(init_mask).getbbox()
- init_mask_exists = False if init_mask_bbox is None else True
-
- if init_img_has_transparency:
- if init_img_is_fully_transparent:
- return "txt2img"
- else:
- return "outpainting"
- else:
- if init_mask_exists:
- return "inpainting"
- else:
- return "img2img"
-
-
-def main():
- # Testing
- init_img_opaque = "test_images/init-img_opaque.png"
- init_img_partial_transparency = "test_images/init-img_partial_transparency.png"
- init_img_full_transparency = "test_images/init-img_full_transparency.png"
- init_mask_no_mask = "test_images/init-mask_no_mask.png"
- init_mask_has_mask = "test_images/init-mask_has_mask.png"
-
- print(
- "OPAQUE IMAGE, NO MASK, expect img2img, got ",
- get_canvas_generation_mode(init_img_opaque, init_mask_no_mask),
- )
-
- print(
- "IMAGE WITH TRANSPARENCY, NO MASK, expect outpainting, got ",
- get_canvas_generation_mode(init_img_partial_transparency, init_mask_no_mask),
- )
-
- print(
- "FULLY TRANSPARENT IMAGE NO MASK, expect txt2img, got ",
- get_canvas_generation_mode(init_img_full_transparency, init_mask_no_mask),
- )
-
- print(
- "OPAQUE IMAGE, WITH MASK, expect inpainting, got ",
- get_canvas_generation_mode(init_img_opaque, init_mask_has_mask),
- )
-
- print(
- "IMAGE WITH TRANSPARENCY, WITH MASK, expect outpainting, got ",
- get_canvas_generation_mode(init_img_partial_transparency, init_mask_has_mask),
- )
-
- print(
- "FULLY TRANSPARENT IMAGE WITH MASK, expect txt2img, got ",
- get_canvas_generation_mode(init_img_full_transparency, init_mask_has_mask),
- )
-
-
-if __name__ == "__main__":
- main()
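A condensed restatement of the removed helper's decision rule, exercised with in-memory images instead of the test PNGs above (mask resizing omitted, not part of this change); it assumes only Pillow:

    from PIL import Image, ImageChops

    def classify(init_img: Image.Image, init_mask: Image.Image) -> str:
        # The alpha channel drives the transparency checks; the inverted mask drives the edit check.
        alpha = init_img.convert("RGBA").split()[-1]
        fully_transparent = alpha.getbbox() is None
        has_transparency = alpha.getextrema()[0] < 255
        mask_present = ImageChops.invert(init_mask.convert("L")).getbbox() is not None
        if has_transparency:
            return "txt2img" if fully_transparent else "outpainting"
        return "inpainting" if mask_present else "img2img"

    opaque = Image.new("RGBA", (4, 4), (255, 0, 0, 255))
    blank_mask = Image.new("L", (4, 4), 255)  # all-white mask: no masked areas
    print(classify(opaque, blank_mask))       # img2img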
diff --git a/invokeai/backend/web/modules/parameters.py b/invokeai/backend/web/modules/parameters.py
deleted file mode 100644
index 8ab74adb92..0000000000
--- a/invokeai/backend/web/modules/parameters.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import argparse
-
-from .parse_seed_weights import parse_seed_weights
-
-SAMPLER_CHOICES = [
- "ddim",
- "ddpm",
- "deis",
- "lms",
- "lms_k",
- "pndm",
- "heun",
- "heun_k",
- "euler",
- "euler_k",
- "euler_a",
- "kdpm_2",
- "kdpm_2_a",
- "dpmpp_2s",
- "dpmpp_2s_k",
- "dpmpp_2m",
- "dpmpp_2m_k",
- "dpmpp_2m_sde",
- "dpmpp_2m_sde_k",
- "dpmpp_sde",
- "dpmpp_sde_k",
- "unipc",
-]
-
-
-def parameters_to_command(params):
- """
- Converts a dict of parameters into an `invoke.py` REPL command.
- """
-
- switches = list()
-
- if "prompt" in params:
- switches.append(f'"{params["prompt"]}"')
- if "steps" in params:
- switches.append(f'-s {params["steps"]}')
- if "seed" in params:
- switches.append(f'-S {params["seed"]}')
- if "width" in params:
- switches.append(f'-W {params["width"]}')
- if "height" in params:
- switches.append(f'-H {params["height"]}')
- if "cfg_scale" in params:
- switches.append(f'-C {params["cfg_scale"]}')
- if "sampler_name" in params:
- switches.append(f'-A {params["sampler_name"]}')
- if "seamless" in params and params["seamless"] == True:
- switches.append(f"--seamless")
- if "hires_fix" in params and params["hires_fix"] == True:
- switches.append(f"--hires")
- if "init_img" in params and len(params["init_img"]) > 0:
- switches.append(f'-I {params["init_img"]}')
- if "init_mask" in params and len(params["init_mask"]) > 0:
- switches.append(f'-M {params["init_mask"]}')
- if "init_color" in params and len(params["init_color"]) > 0:
- switches.append(f'--init_color {params["init_color"]}')
- if "strength" in params and "init_img" in params:
- switches.append(f'-f {params["strength"]}')
- if "fit" in params and params["fit"] == True:
- switches.append(f"--fit")
- if "facetool" in params:
- switches.append(f'-ft {params["facetool"]}')
- if "facetool_strength" in params and params["facetool_strength"]:
- switches.append(f'-G {params["facetool_strength"]}')
- elif "gfpgan_strength" in params and params["gfpgan_strength"]:
- switches.append(f'-G {params["gfpgan_strength"]}')
- if "codeformer_fidelity" in params:
- switches.append(f'-cf {params["codeformer_fidelity"]}')
- if "upscale" in params and params["upscale"]:
- switches.append(f'-U {params["upscale"][0]} {params["upscale"][1]}')
- if "variation_amount" in params and params["variation_amount"] > 0:
- switches.append(f'-v {params["variation_amount"]}')
- if "with_variations" in params:
- seed_weight_pairs = ",".join(f"{seed}:{weight}" for seed, weight in params["with_variations"])
- switches.append(f"-V {seed_weight_pairs}")
-
- return " ".join(switches)
diff --git a/invokeai/backend/web/modules/parse_seed_weights.py b/invokeai/backend/web/modules/parse_seed_weights.py
deleted file mode 100644
index 7e15d4e166..0000000000
--- a/invokeai/backend/web/modules/parse_seed_weights.py
+++ /dev/null
@@ -1,47 +0,0 @@
-def parse_seed_weights(seed_weights):
- """
- Accepts seed weights as string in "12345:0.1,23456:0.2,3456:0.3" format
- Validates them
- If valid: returns as [[12345, 0.1], [23456, 0.2], [3456, 0.3]]
- If invalid: returns False
- """
-
- # Must be a string
- if not isinstance(seed_weights, str):
- return False
- # String must not be empty
- if len(seed_weights) == 0:
- return False
-
- pairs = []
-
- for pair in seed_weights.split(","):
- split_values = pair.split(":")
-
- # Seed and weight are required
- if len(split_values) != 2:
- return False
-
- if len(split_values[0]) == 0 or len(split_values[1]) == 0:
- return False
-
- # Try casting the seed to int and weight to float
- try:
- seed = int(split_values[0])
- weight = float(split_values[1])
- except ValueError:
- return False
-
- # Seed must be 0 or above
- if not seed >= 0:
- return False
-
- # Weight must be between 0 and 1
- if not (weight >= 0 and weight <= 1):
- return False
-
- # This pair is valid
- pairs.append([seed, weight])
-
- # All pairs are valid
- return pairs
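The removed validator accepts the "seed:weight" syntax its docstring describes. A few sanity checks, assuming parse_seed_weights() as defined above:

    assert parse_seed_weights("12345:0.1,23456:0.2") == [[12345, 0.1], [23456, 0.2]]
    assert parse_seed_weights("12345") is False        # weight missing
    assert parse_seed_weights("12345:1.5") is False    # weight outside [0, 1]
    assert parse_seed_weights("") is False             # empty input rejected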
diff --git a/invokeai/backend/web/modules/test_images/init-img_full_transparency.png b/invokeai/backend/web/modules/test_images/init-img_full_transparency.png
deleted file mode 100644
index 6cdeada609..0000000000
Binary files a/invokeai/backend/web/modules/test_images/init-img_full_transparency.png and /dev/null differ
diff --git a/invokeai/backend/web/modules/test_images/init-img_opaque.png b/invokeai/backend/web/modules/test_images/init-img_opaque.png
deleted file mode 100644
index a45aec75ed..0000000000
Binary files a/invokeai/backend/web/modules/test_images/init-img_opaque.png and /dev/null differ
diff --git a/invokeai/backend/web/modules/test_images/init-img_partial_transparency.png b/invokeai/backend/web/modules/test_images/init-img_partial_transparency.png
deleted file mode 100644
index 348e59fc8a..0000000000
Binary files a/invokeai/backend/web/modules/test_images/init-img_partial_transparency.png and /dev/null differ
diff --git a/invokeai/backend/web/modules/test_images/init-mask_has_mask.png b/invokeai/backend/web/modules/test_images/init-mask_has_mask.png
deleted file mode 100644
index 88fe072950..0000000000
Binary files a/invokeai/backend/web/modules/test_images/init-mask_has_mask.png and /dev/null differ
diff --git a/invokeai/backend/web/modules/test_images/init-mask_no_mask.png b/invokeai/backend/web/modules/test_images/init-mask_no_mask.png
deleted file mode 100644
index 2aecd3ea7d..0000000000
Binary files a/invokeai/backend/web/modules/test_images/init-mask_no_mask.png and /dev/null differ
diff --git a/invokeai/frontend/CLI/__init__.py b/invokeai/frontend/CLI/__init__.py
index 7e48534cb9..f8864bbe66 100644
--- a/invokeai/frontend/CLI/__init__.py
+++ b/invokeai/frontend/CLI/__init__.py
@@ -1,4 +1,4 @@
"""
Initialization file for invokeai.frontend.CLI
"""
-from .CLI import main as invokeai_command_line_interface
+from .CLI import main as invokeai_command_line_interface # noqa: F401
diff --git a/invokeai/frontend/install/__init__.py b/invokeai/frontend/install/__init__.py
index fb8cdff1b3..2a248eb49f 100644
--- a/invokeai/frontend/install/__init__.py
+++ b/invokeai/frontend/install/__init__.py
@@ -1,6 +1,3 @@
"""
Initialization file for invokeai.frontend.config
"""
-from .invokeai_configure import main as invokeai_configure
-from .invokeai_update import main as invokeai_update
-from .model_install import main as invokeai_model_install
diff --git a/invokeai/frontend/install/import_images.py b/invokeai/frontend/install/import_images.py
new file mode 100644
index 0000000000..1f68f23607
--- /dev/null
+++ b/invokeai/frontend/install/import_images.py
@@ -0,0 +1,795 @@
+# Copyright (c) 2023 - The InvokeAI Team
+# Primary Author: David Lovell (github @f412design, discord @techjedi)
+# co-author, minor tweaks - Lincoln Stein
+
+# pylint: disable=line-too-long
+# pylint: disable=broad-exception-caught
+"""Script to import images into the new database system for 3.0.0"""
+
+import os
+import datetime
+import shutil
+import locale
+import sqlite3
+import json
+import glob
+import re
+import uuid
+import yaml
+import PIL
+import PIL.Image
+import PIL.ImageOps
+import PIL.PngImagePlugin
+
+from pathlib import Path
+from prompt_toolkit import prompt
+from prompt_toolkit.shortcuts import message_dialog
+from prompt_toolkit.completion import PathCompleter
+from prompt_toolkit.key_binding import KeyBindings
+
+from invokeai.app.services.config import InvokeAIAppConfig
+
+app_config = InvokeAIAppConfig.get_config()
+
+bindings = KeyBindings()
+
+
+@bindings.add("c-c")
+def _(event):
+ raise KeyboardInterrupt
+
+
+# release notes
+# "Use All" will not load size dimensions, because dimensions are not selectable in the UI
+
+
+class Config:
+ """Configuration loader."""
+
+ def __init__(self):
+ pass
+
+ TIMESTAMP_STRING = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
+
+ INVOKE_DIRNAME = "invokeai"
+ YAML_FILENAME = "invokeai.yaml"
+ DATABASE_FILENAME = "invokeai.db"
+
+ database_path = None
+ database_backup_dir = None
+ outputs_path = None
+ thumbnail_path = None
+
+ def find_and_load(self):
+ """find the yaml config file and load"""
+ root = app_config.root_path
+ if not self.confirm_and_load(os.path.abspath(root)):
+ print("\r\nSpecify custom database and outputs paths:")
+ self.confirm_and_load_from_user()
+
+ self.database_backup_dir = os.path.join(os.path.dirname(self.database_path), "backup")
+ self.thumbnail_path = os.path.join(self.outputs_path, "thumbnails")
+
+ def confirm_and_load(self, invoke_root):
+ """Validates a yaml path exists, confirms the user wants to use it and loads config."""
+ yaml_path = os.path.join(invoke_root, self.YAML_FILENAME)
+ if os.path.exists(yaml_path):
+ db_dir, outdir = self.load_paths_from_yaml(yaml_path)
+ if os.path.isabs(db_dir):
+ database_path = os.path.join(db_dir, self.DATABASE_FILENAME)
+ else:
+ database_path = os.path.join(invoke_root, db_dir, self.DATABASE_FILENAME)
+
+ if os.path.isabs(outdir):
+ outputs_path = os.path.join(outdir, "images")
+ else:
+ outputs_path = os.path.join(invoke_root, outdir, "images")
+
+ db_exists = os.path.exists(database_path)
+ outdir_exists = os.path.exists(outputs_path)
+
+ text = f"Found {self.YAML_FILENAME} file at {yaml_path}:"
+ text += f"\n Database : {database_path}"
+ text += f"\n Outputs : {outputs_path}"
+ text += "\n\nUse these paths for import (yes) or choose different ones (no) [Yn]: "
+
+ if db_exists and outdir_exists:
+ if (prompt(text).strip() or "Y").upper().startswith("Y"):
+ self.database_path = database_path
+ self.outputs_path = outputs_path
+ return True
+ else:
+ return False
+ else:
+ print(" Invalid: One or more paths in this config did not exist and cannot be used.")
+
+ else:
+ message_dialog(
+ title="Path not found",
+ text=f"Auto-discovery of configuration failed! Could not find ({yaml_path}), Custom paths can be specified.",
+ ).run()
+ return False
+
+ def confirm_and_load_from_user(self):
+ default = ""
+ while True:
+ database_path = os.path.expanduser(
+ prompt(
+ "Database: Specify absolute path to the database to import into: ",
+ completer=PathCompleter(
+ expanduser=True, file_filter=lambda x: Path(x).is_dir() or x.endswith((".db"))
+ ),
+ default=default,
+ )
+ )
+ if database_path.endswith(".db") and os.path.isabs(database_path) and os.path.exists(database_path):
+ break
+ default = database_path + "/" if Path(database_path).is_dir() else database_path
+
+ default = ""
+ while True:
+ outputs_path = os.path.expanduser(
+ prompt(
+ "Outputs: Specify absolute path to outputs/images directory to import into: ",
+ completer=PathCompleter(expanduser=True, only_directories=True),
+ default=default,
+ )
+ )
+
+ if outputs_path.endswith("images") and os.path.isabs(outputs_path) and os.path.exists(outputs_path):
+ break
+ default = outputs_path + "/" if Path(outputs_path).is_dir() else outputs_path
+
+ self.database_path = database_path
+ self.outputs_path = outputs_path
+
+ return
+
+ def load_paths_from_yaml(self, yaml_path):
+ """Load an Invoke AI yaml file and get the database and outputs paths."""
+ try:
+ with open(yaml_path, "rt", encoding=locale.getpreferredencoding()) as file:
+ yamlinfo = yaml.safe_load(file)
+ db_dir = yamlinfo.get("InvokeAI", {}).get("Paths", {}).get("db_dir", None)
+ outdir = yamlinfo.get("InvokeAI", {}).get("Paths", {}).get("outdir", None)
+ return db_dir, outdir
+ except Exception:
+ print(f"Failed to load paths from yaml file! {yaml_path}!")
+ return None, None
+
+
+class ImportStats:
+ """DTO for tracking work progress."""
+
+ def __init__(self):
+ pass
+
+ time_start = datetime.datetime.utcnow()
+ count_source_files = 0
+ count_skipped_file_exists = 0
+ count_skipped_db_exists = 0
+ count_imported = 0
+ count_imported_by_version = {}
+ count_file_errors = 0
+
+ @staticmethod
+ def get_elapsed_time_string():
+ """Get a friendly time string for the time elapsed since processing start."""
+ time_now = datetime.datetime.utcnow()
+ total_seconds = (time_now - ImportStats.time_start).total_seconds()
+ hours = int((total_seconds) / 3600)
+ minutes = int(((total_seconds) % 3600) / 60)
+ seconds = total_seconds % 60
+ out_str = f"{hours} hour(s) -" if hours > 0 else ""
+ out_str += f"{minutes} minute(s) -" if minutes > 0 else ""
+ out_str += f"{seconds:.2f} second(s)"
+ return out_str
+
+
+class InvokeAIMetadata:
+ """DTO for core Invoke AI generation properties parsed from metadata."""
+
+ def __init__(self):
+ pass
+
+ def __str__(self):
+ formatted_str = f"{self.generation_mode}~{self.steps}~{self.cfg_scale}~{self.model_name}~{self.scheduler}~{self.seed}~{self.width}~{self.height}~{self.rand_device}~{self.strength}~{self.init_image}"
+ formatted_str += f"\r\npositive_prompt: {self.positive_prompt}"
+ formatted_str += f"\r\nnegative_prompt: {self.negative_prompt}"
+ return formatted_str
+
+ generation_mode = None
+ steps = None
+ cfg_scale = None
+ model_name = None
+ scheduler = None
+ seed = None
+ width = None
+ height = None
+ rand_device = None
+ strength = None
+ init_image = None
+ positive_prompt = None
+ negative_prompt = None
+ imported_app_version = None
+
+ def to_json(self):
+ """Convert the active instance to json format."""
+ prop_dict = {}
+ prop_dict["generation_mode"] = self.generation_mode
+ # don't render prompt nodes if neither is set, to avoid the UI thinking it can set them
+ # if at least one exists, render them both, but use an empty string instead of None if one of them is empty
+ # this allows the empty field to actually be cleared by the UI instead of keeping the previous value
+ if self.positive_prompt or self.negative_prompt:
+ prop_dict["positive_prompt"] = "" if self.positive_prompt is None else self.positive_prompt
+ prop_dict["negative_prompt"] = "" if self.negative_prompt is None else self.negative_prompt
+ prop_dict["width"] = self.width
+ prop_dict["height"] = self.height
+ # only render seed if it has a value to avoid ui thinking it can set this and then error
+ if self.seed:
+ prop_dict["seed"] = self.seed
+ prop_dict["rand_device"] = self.rand_device
+ prop_dict["cfg_scale"] = self.cfg_scale
+ prop_dict["steps"] = self.steps
+ prop_dict["scheduler"] = self.scheduler
+ prop_dict["clip_skip"] = 0
+ prop_dict["model"] = {}
+ prop_dict["model"]["model_name"] = self.model_name
+ prop_dict["model"]["base_model"] = None
+ prop_dict["controlnets"] = []
+ prop_dict["loras"] = []
+ prop_dict["vae"] = None
+ prop_dict["strength"] = self.strength
+ prop_dict["init_image"] = self.init_image
+ prop_dict["positive_style_prompt"] = None
+ prop_dict["negative_style_prompt"] = None
+ prop_dict["refiner_model"] = None
+ prop_dict["refiner_cfg_scale"] = None
+ prop_dict["refiner_steps"] = None
+ prop_dict["refiner_scheduler"] = None
+ prop_dict["refiner_aesthetic_store"] = None
+ prop_dict["refiner_start"] = None
+ prop_dict["imported_app_version"] = self.imported_app_version
+
+ return json.dumps(prop_dict)
+
+
+class InvokeAIMetadataParser:
+ """Parses strings with json data to find Invoke AI core metadata properties."""
+
+ def __init__(self):
+ pass
+
+ def parse_meta_tag_dream(self, dream_string):
+ """Take as input an png metadata json node for the 'dream' field variant from prior to 1.15"""
+ props = InvokeAIMetadata()
+
+ props.imported_app_version = "pre1.15"
+ seed_match = re.search("-S\\s*(\\d+)", dream_string)
+ if seed_match is not None:
+ try:
+ props.seed = int(seed_match[1])
+ except ValueError:
+ props.seed = None
+ raw_prompt = re.sub("(-S\\s*\\d+)", "", dream_string)
+ else:
+ raw_prompt = dream_string
+
+ pos_prompt, neg_prompt = self.split_prompt(raw_prompt)
+
+ props.positive_prompt = pos_prompt
+ props.negative_prompt = neg_prompt
+
+ return props
+
+ def parse_meta_tag_sd_metadata(self, tag_value):
+ """Take as input an png metadata json node for the 'sd-metadata' field variant from 1.15 through 2.3.5 post 2"""
+ props = InvokeAIMetadata()
+
+ props.imported_app_version = tag_value.get("app_version")
+ props.model_name = tag_value.get("model_weights")
+ img_node = tag_value.get("image")
+ if img_node is not None:
+ props.generation_mode = img_node.get("type")
+ props.width = img_node.get("width")
+ props.height = img_node.get("height")
+ props.seed = img_node.get("seed")
+ props.rand_device = "cuda" # hardcoded since all generations pre 3.0 used cuda random noise instead of cpu
+ props.cfg_scale = img_node.get("cfg_scale")
+ props.steps = img_node.get("steps")
+ props.scheduler = self.map_scheduler(img_node.get("sampler"))
+ props.strength = img_node.get("strength")
+ if props.strength is None:
+ props.strength = img_node.get("strength_steps") # try second name for this property
+ props.init_image = img_node.get("init_image_path")
+ if props.init_image is None: # try second name for this property
+ props.init_image = img_node.get("init_img")
+ # remove the path info from init_image so if we move the init image, it will be correctly relative in the new location
+ if props.init_image is not None:
+ props.init_image = os.path.basename(props.init_image)
+ raw_prompt = img_node.get("prompt")
+ if isinstance(raw_prompt, list):
+ raw_prompt = raw_prompt[0].get("prompt")
+
+ props.positive_prompt, props.negative_prompt = self.split_prompt(raw_prompt)
+
+ return props
+
+ def parse_meta_tag_invokeai(self, tag_value):
+ """Take as input an png metadata json node for the 'invokeai' field variant from 3.0.0 beta 1 through 5"""
+ props = InvokeAIMetadata()
+
+ props.imported_app_version = "3.0.0 or later"
+ props.generation_mode = tag_value.get("type")
+ if props.generation_mode is not None:
+ props.generation_mode = props.generation_mode.replace("t2l", "txt2img").replace("l2l", "img2img")
+
+ props.width = tag_value.get("width")
+ props.height = tag_value.get("height")
+ props.seed = tag_value.get("seed")
+ props.cfg_scale = tag_value.get("cfg_scale")
+ props.steps = tag_value.get("steps")
+ props.scheduler = tag_value.get("scheduler")
+ props.strength = tag_value.get("strength")
+ props.positive_prompt = tag_value.get("positive_conditioning")
+ props.negative_prompt = tag_value.get("negative_conditioning")
+
+ return props
+
+ def map_scheduler(self, old_scheduler):
+ """Convert the legacy sampler names to matching 3.0 schedulers"""
+ if old_scheduler is None:
+ return None
+
+ match (old_scheduler):
+ case "ddim":
+ return "ddim"
+ case "plms":
+ return "pnmd"
+ case "k_lms":
+ return "lms"
+ case "k_dpm_2":
+ return "kdpm_2"
+ case "k_dpm_2_a":
+ return "kdpm_2_a"
+ case "dpmpp_2":
+ return "dpmpp_2s"
+ case "k_dpmpp_2":
+ return "dpmpp_2m"
+ case "k_dpmpp_2_a":
+ return None # invalid; in 2.3.x, selecting this sampler would just fall back to the last run's scheduler, or plms in a new session
+ case "k_euler":
+ return "euler"
+ case "k_euler_a":
+ return "euler_a"
+ case "k_heun":
+ return "heun"
+ return None
+
+ def split_prompt(self, raw_prompt: str):
+ """Split the unified prompt strings by extracting all negative prompt blocks out into the negative prompt."""
+ if raw_prompt is None:
+ return "", ""
+ raw_prompt_search = raw_prompt.replace("\r", "").replace("\n", "")
+ matches = re.findall(r"\[(.+?)\]", raw_prompt_search)
+ if len(matches) > 0:
+ negative_prompt = ""
+ if len(matches) == 1:
+ negative_prompt = matches[0].strip().strip(",")
+ else:
+ for match in matches:
+ negative_prompt += f"({match.strip().strip(',')})"
+ positive_prompt = re.sub(r"(\[.+?\])", "", raw_prompt_search).strip()
+ else:
+ positive_prompt = raw_prompt_search.strip()
+ negative_prompt = ""
+
+ return positive_prompt, negative_prompt
+
+
+class DatabaseMapper:
+ """Class to abstract database functionality."""
+
+ def __init__(self, database_path, database_backup_dir):
+ self.database_path = database_path
+ self.database_backup_dir = database_backup_dir
+ self.connection = None
+ self.cursor = None
+
+ def connect(self):
+ """Open connection to the database."""
+ self.connection = sqlite3.connect(self.database_path)
+ self.cursor = self.connection.cursor()
+
+ def get_board_names(self):
+ """Get a list of the current board names from the database."""
+ sql_get_board_name = "SELECT board_name FROM boards"
+ self.cursor.execute(sql_get_board_name)
+ rows = self.cursor.fetchall()
+ return [row[0] for row in rows]
+
+ def does_image_exist(self, image_name):
+ """Check database if an image name already exists and return a boolean."""
+ sql_get_image_by_name = "SELECT image_name FROM images WHERE image_name = ?"
+ self.cursor.execute(sql_get_image_by_name, (image_name,))
+ rows = self.cursor.fetchall()
+ return len(rows) > 0
+
+ def add_new_image_to_database(self, filename, width, height, metadata, modified_date_string):
+ """Add an image to the database."""
+ # Parameter binding keeps file names, metadata and dates containing quotes from breaking the statement.
+ sql_add_image = """INSERT INTO images (image_name, image_origin, image_category, width, height, session_id, node_id, metadata, is_intermediate, created_at, updated_at)
+VALUES (?, 'internal', 'general', ?, ?, null, null, ?, 0, ?, ?)"""
+ self.cursor.execute(
+ sql_add_image,
+ (filename, width, height, metadata, str(modified_date_string), str(modified_date_string)),
+ )
+ self.connection.commit()
+
+ def get_board_id_with_create(self, board_name):
+ """Get the board id for supplied name, and create the board if one does not exist."""
+ sql_find_board = "SELECT board_id FROM boards WHERE board_name = ? COLLATE NOCASE"
+ self.cursor.execute(sql_find_board, (board_name,))
+ rows = self.cursor.fetchall()
+ if len(rows) > 0:
+ return rows[0][0]
+ else:
+ board_date_string = datetime.datetime.utcnow().date().isoformat()
+ new_board_id = str(uuid.uuid4())
+ sql_insert_board = "INSERT INTO boards (board_id, board_name, created_at, updated_at) VALUES (?, ?, ?, ?)"
+ self.cursor.execute(sql_insert_board, (new_board_id, board_name, board_date_string, board_date_string))
+ self.connection.commit()
+ return new_board_id
+
+ def add_image_to_board(self, filename, board_id):
+ """Add an image mapping to a board."""
+ add_datetime_str = datetime.datetime.utcnow().isoformat()
+ sql_add_image_to_board = """INSERT INTO board_images (board_id, image_name, created_at, updated_at)
+ VALUES (?, ?, ?, ?)"""
+ self.cursor.execute(sql_add_image_to_board, (board_id, filename, add_datetime_str, add_datetime_str))
+ self.connection.commit()
+
+ def disconnect(self):
+ """Disconnect from the db, cleaning up connections and cursors."""
+ if self.cursor is not None:
+ self.cursor.close()
+ if self.connection is not None:
+ self.connection.close()
+
+ def backup(self, timestamp_string):
+ """Take a backup of the database."""
+ if not os.path.exists(self.database_backup_dir):
+ print(f"Database backup directory {self.database_backup_dir} does not exist -> creating...", end="")
+ os.makedirs(self.database_backup_dir)
+ print("Done!")
+ database_backup_path = os.path.join(self.database_backup_dir, f"backup-{timestamp_string}-invokeai.db")
+ print(f"Making DB Backup at {database_backup_path}...", end="")
+ shutil.copy2(self.database_path, database_backup_path)
+ print("Done!")
+
+
+class MediaImportProcessor:
+ """Containing class for script functionality."""
+
+ def __init__(self):
+ pass
+
+ board_name_id_map = {}
+
+ def get_import_file_list(self):
+ """Ask the user for the import folder and scan for the list of files to return."""
+ while True:
+ default = ""
+ while True:
+ import_dir = os.path.expanduser(
+ prompt(
+ "Inputs: Specify absolute path containing InvokeAI .png images to import: ",
+ completer=PathCompleter(expanduser=True, only_directories=True),
+ default=default,
+ )
+ )
+ if len(import_dir) > 0 and Path(import_dir).is_dir():
+ break
+ default = import_dir
+
+ # The prompt defaults to "N"; any answer that does not start with "N" enables recursion.
+ is_recurse = not (
+ (prompt("Include files from subfolders recursively [yN]? ").strip() or "N").upper().startswith("N")
+ )
+ if is_recurse:
+ matching_file_list = glob.glob(import_dir + "/**/*.png", recursive=True)
+ else:
+ matching_file_list = glob.glob(import_dir + "/*.png", recursive=False)
+
+ if len(matching_file_list) > 0:
+ return import_dir, is_recurse, matching_file_list
+ else:
+ print(f"The specific path {import_dir} exists, but does not contain .png files!")
+
+ def get_file_details(self, filepath):
+ """Retrieve the embedded metedata fields and dimensions from an image file."""
+ with PIL.Image.open(filepath) as img:
+ img.load()
+ png_width, png_height = img.size
+ img_info = img.info
+ return img_info, png_width, png_height
+
+ def select_board_option(self, board_names, timestamp_string):
+ """Allow the user to choose how a board is selected for imported files."""
+ while True:
+ print("\r\nOptions for board selection for imported images:")
+ print(f"1) Select an existing board name. (found {len(board_names)})")
+ print("2) Specify a board name to create/add to.")
+ print("3) Create/add to board named 'IMPORT'.")
+ print(
+ f"4) Create/add to board named 'IMPORT' with the current datetime string appended (.e.g IMPORT_{timestamp_string})."
+ )
+ print(
+ "5) Create/add to board named 'IMPORT' with a the original file app_version appended (.e.g IMPORT_2.2.5)."
+ )
+ input_option = input("Specify desired board option: ")
+ match (input_option):
+ case "1":
+ if len(board_names) < 1:
+ print("\r\nThere are no existing board names to choose from. Select another option!")
+ continue
+ board_name = self.select_item_from_list(
+ board_names, "board name", True, "Cancel, go back and choose a different board option."
+ )
+ if board_name is not None:
+ return board_name
+ case "2":
+ while True:
+ board_name = input("Specify new/existing board name: ")
+ if board_name:
+ return board_name
+ case "3":
+ return "IMPORT"
+ case "4":
+ return f"IMPORT_{timestamp_string}"
+ case "5":
+ return "IMPORT_APPVERSION"
+
+ def select_item_from_list(self, items, entity_name, allow_cancel, cancel_string):
+ """A general function to render a list of items to select in the console, prompt the user for a selection and ensure a valid entry is selected."""
+ print(f"Select a {entity_name.lower()} from the following list:")
+ index = 1
+ for item in items:
+ print(f"{index}) {item}")
+ index += 1
+ if allow_cancel:
+ print(f"{index}) {cancel_string}")
+ while True:
+ try:
+ option_number = int(input("Specify number of selection: "))
+ except ValueError:
+ continue
+ if allow_cancel and option_number == index:
+ return None
+ if option_number >= 1 and option_number <= len(items):
+ return items[option_number - 1]
+
+ def import_image(self, filepath: str, board_name_option: str, db_mapper: DatabaseMapper, config: Config):
+ """Import a single file by its path"""
+ parser = InvokeAIMetadataParser()
+ file_name = os.path.basename(filepath)
+ file_destination_path = os.path.join(config.outputs_path, file_name)
+
+ print("===============================================================================")
+ print(f"Importing {filepath}")
+
+ # check destination to see if the file was previously imported
+ if os.path.exists(file_destination_path):
+ print("File already exists in the destination, skipping!")
+ ImportStats.count_skipped_file_exists += 1
+ return
+
+ # check if file name is already referenced in the database
+ if db_mapper.does_image_exist(file_name):
+ print("A reference to a file with this name already exists in the database, skipping!")
+ ImportStats.count_skipped_db_exists += 1
+ return
+
+ # load image info and dimensions
+ img_info, png_width, png_height = self.get_file_details(filepath)
+
+ # parse metadata
+ destination_needs_meta_update = True
+ log_version_note = "(Unknown)"
+ if "invokeai_metadata" in img_info:
+ # for the latest, we will just re-emit the same json, no need to parse/modify
+ converted_field = None
+ latest_json_string = img_info.get("invokeai_metadata")
+ log_version_note = "3.0.0+"
+ destination_needs_meta_update = False
+ else:
+ if "sd-metadata" in img_info:
+ converted_field = parser.parse_meta_tag_sd_metadata(json.loads(img_info.get("sd-metadata")))
+ elif "invokeai" in img_info:
+ converted_field = parser.parse_meta_tag_invokeai(json.loads(img_info.get("invokeai")))
+ elif "dream" in img_info:
+ converted_field = parser.parse_meta_tag_dream(img_info.get("dream"))
+ elif "Dream" in img_info:
+ converted_field = parser.parse_meta_tag_dream(img_info.get("Dream"))
+ else:
+ converted_field = InvokeAIMetadata()
+ destination_needs_meta_update = False
+ print("File does not have metadata from known Invoke AI versions, add only, no update!")
+
+ # use the loaded image dimensions if the metadata didn't include them
+ if converted_field.width is None:
+ converted_field.width = png_width
+ if converted_field.height is None:
+ converted_field.height = png_height
+
+ log_version_note = converted_field.imported_app_version if converted_field else "NoVersion"
+ log_version_note = log_version_note or "NoVersion"
+
+ latest_json_string = converted_field.to_json()
+
+ print(f"From Invoke AI Version {log_version_note} with dimensions {png_width} x {png_height}.")
+
+ # if the metadata needs updating, update it and copy the file in one shot
+ if destination_needs_meta_update:
+ print("Updating metadata while copying...", end="")
+ self.update_file_metadata_while_copying(
+ filepath, file_destination_path, "invokeai_metadata", latest_json_string
+ )
+ print("Done!")
+ else:
+ print("No metadata update necessary, copying only...", end="")
+ shutil.copy2(filepath, file_destination_path)
+ print("Done!")
+
+ # create thumbnail
+ print("Creating thumbnail...", end="")
+ thumbnail_path = os.path.join(config.thumbnail_path, os.path.splitext(file_name)[0]) + ".webp"
+ thumbnail_size = 256, 256
+ with PIL.Image.open(filepath) as source_image:
+ source_image.thumbnail(thumbnail_size)
+ source_image.save(thumbnail_path, "webp")
+ print("Done!")
+
+ # finalize the dynamic board name if there is an APPVERSION token in it.
+ if converted_field is not None:
+ board_name = board_name_option.replace("APPVERSION", converted_field.imported_app_version or "NoVersion")
+ else:
+ board_name = board_name_option.replace("APPVERSION", "Latest")
+
+ # maintain a map of already created/looked-up board ids to avoid repeated DB queries
+ print("Finding/Creating board...", end="")
+ if board_name in self.board_name_id_map:
+ board_id = self.board_name_id_map[board_name]
+ else:
+ board_id = db_mapper.get_board_id_with_create(board_name)
+ self.board_name_id_map[board_name] = board_id
+ print("Done!")
+
+ # add image to db
+ print("Adding image to database......", end="")
+ modified_time = datetime.datetime.utcfromtimestamp(os.path.getmtime(filepath))
+ db_mapper.add_new_image_to_database(file_name, png_width, png_height, latest_json_string, modified_time)
+ print("Done!")
+
+ # add image to board
+ print("Adding image to board......", end="")
+ db_mapper.add_image_to_board(file_name, board_id)
+ print("Done!")
+
+ ImportStats.count_imported += 1
+ if log_version_note in ImportStats.count_imported_by_version:
+ ImportStats.count_imported_by_version[log_version_note] += 1
+ else:
+ ImportStats.count_imported_by_version[log_version_note] = 1
+
+ def update_file_metadata_while_copying(self, filepath, file_destination_path, tag_name, tag_value):
+ """Perform a metadata update with save to a new destination which accomplishes a copy while updating metadata."""
+ with PIL.Image.open(filepath) as target_image:
+ existing_img_info = target_image.info
+ metadata = PIL.PngImagePlugin.PngInfo()
+ # re-add any existing invoke ai tags unless they are the one we are trying to add
+ for key in existing_img_info:
+ if key != tag_name and key in ("dream", "Dream", "sd-metadata", "invokeai", "invokeai_metadata"):
+ metadata.add_text(key, existing_img_info[key])
+ metadata.add_text(tag_name, tag_value)
+ target_image.save(file_destination_path, pnginfo=metadata)
+
+ def process(self):
+ """Begin main processing."""
+
+ print("===============================================================================")
+ print("This script will import images generated by earlier versions of")
+ print("InvokeAI into the currently installed root directory:")
+ print(f" {app_config.root_path}")
+ print("If this is not what you want to do, type ctrl-C now to cancel.")
+
+ # load config
+ print("===============================================================================")
+ print("= Configuration & Settings")
+
+ config = Config()
+ config.find_and_load()
+ db_mapper = DatabaseMapper(config.database_path, config.database_backup_dir)
+ db_mapper.connect()
+
+ import_dir, is_recurse, import_file_list = self.get_import_file_list()
+ ImportStats.count_source_files = len(import_file_list)
+
+ board_names = db_mapper.get_board_names()
+ board_name_option = self.select_board_option(board_names, config.TIMESTAMP_STRING)
+
+ print("\r\n===============================================================================")
+ print("= Import Settings Confirmation")
+
+ print()
+ print(f"Database File Path : {config.database_path}")
+ print(f"Outputs/Images Directory : {config.outputs_path}")
+ print(f"Import Image Source Directory : {import_dir}")
+ print(f" Recurse Source SubDirectories : {'Yes' if is_recurse else 'No'}")
+ print(f"Count of .png file(s) found : {len(import_file_list)}")
+ print(f"Board name option specified : {board_name_option}")
+ print(f"Database backup will be taken at : {config.database_backup_dir}")
+
+ print("\r\nNotes about the import process:")
+ print("- Source image files will not be modified, only copied to the outputs directory.")
+ print("- If the same file name already exists in the destination, the file will be skipped.")
+ print("- If the same file name already has a record in the database, the file will be skipped.")
+ print("- Invoke AI metadata tags will be updated/written into the imported copy only.")
+ print(
+ "- On the imported copy, only Invoke AI known tags (latest and legacy) will be retained (dream, sd-metadata, invokeai, invokeai_metadata)"
+ )
+ print(
+ "- A property 'imported_app_version' will be added to metadata that can be viewed in the UI's metadata viewer."
+ )
+ print(
+ "- The new 3.x InvokeAI outputs folder structure is flat so recursively found source imges will all be placed into the single outputs/images folder."
+ )
+
+ while True:
+ should_continue = prompt("\nDo you wish to continue with the import [Yn] ? ").lower() or "y"
+ if should_continue == "n":
+ print("\r\nCancelling Import")
+ return
+ elif should_continue == "y":
+ print()
+ break
+
+ db_mapper.backup(config.TIMESTAMP_STRING)
+
+ print()
+ ImportStats.time_start = datetime.datetime.utcnow()
+
+ for filepath in import_file_list:
+ try:
+ self.import_image(filepath, board_name_option, db_mapper, config)
+ except sqlite3.Error as sql_ex:
+ print(f"A database related exception was found processing {filepath}, will continue to next file. ")
+ print("Exception detail:")
+ print(sql_ex)
+ ImportStats.count_file_errors += 1
+ except Exception as ex:
+ print(f"Exception processing {filepath}, will continue to next file. ")
+ print("Exception detail:")
+ print(ex)
+ ImportStats.count_file_errors += 1
+
+ print("\r\n===============================================================================")
+ print(f"= Import Complete - Elpased Time: {ImportStats.get_elapsed_time_string()}")
+ print()
+ print(f"Source File(s) : {ImportStats.count_source_files}")
+ print(f"Total Imported : {ImportStats.count_imported}")
+ print(f"Skipped b/c file already exists on disk : {ImportStats.count_skipped_file_exists}")
+ print(f"Skipped b/c file already exists in db : {ImportStats.count_skipped_db_exists}")
+ print(f"Errors during import : {ImportStats.count_file_errors}")
+ if ImportStats.count_imported > 0:
+ print("\r\nBreakdown of imported files by version:")
+ for version, count in ImportStats.count_imported_by_version.items():
+ print(f" {version:20} : {count}")
+
+
+def main():
+ try:
+ processor = MediaImportProcessor()
+ processor.process()
+ except KeyboardInterrupt:
+ print("\r\n\r\nUser cancelled execution.")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/invokeai/frontend/install/invokeai_configure.py b/invokeai/frontend/install/invokeai_configure.py
index e087e4f40f..d200d78555 100644
--- a/invokeai/frontend/install/invokeai_configure.py
+++ b/invokeai/frontend/install/invokeai_configure.py
@@ -1,4 +1,4 @@
"""
Wrapper for invokeai.backend.configure.invokeai_configure
"""
-from ...backend.install.invokeai_configure import main
+from ...backend.install.invokeai_configure import main as invokeai_configure # noqa: F401
diff --git a/invokeai/frontend/install/invokeai_update.py b/invokeai/frontend/install/invokeai_update.py
index 3fe6ff1574..3fe9bb6f97 100644
--- a/invokeai/frontend/install/invokeai_update.py
+++ b/invokeai/frontend/install/invokeai_update.py
@@ -80,7 +80,7 @@ def welcome(versions: dict):
def get_extras():
extras = ""
try:
- dist = pkg_resources.get_distribution("xformers")
+ _ = pkg_resources.get_distribution("xformers")
extras = "[xformers]"
except pkg_resources.DistributionNotFound:
pass
@@ -90,7 +90,7 @@ def get_extras():
def main():
versions = get_versions()
if invokeai_is_running():
- print(f":exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]")
+ print(":exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]")
input("Press any key to continue...")
return
@@ -122,9 +122,9 @@ def main():
print("")
print("")
if os.system(cmd) == 0:
- print(f":heavy_check_mark: Upgrade successful")
+ print(":heavy_check_mark: Upgrade successful")
else:
- print(f":exclamation: [bold red]Upgrade failed[/red bold]")
+ print(":exclamation: [bold red]Upgrade failed[/red bold]")
if __name__ == "__main__":
diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py
index 0633553a3d..6e4440abef 100644
--- a/invokeai/frontend/install/model_install.py
+++ b/invokeai/frontend/install/model_install.py
@@ -28,7 +28,6 @@ from npyscreen import widget
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.backend.install.model_install_backend import (
- ModelInstallList,
InstallSelections,
ModelInstall,
SchedulerPredictionType,
@@ -41,12 +40,12 @@ from invokeai.frontend.install.widgets import (
SingleSelectColumns,
TextBox,
BufferBox,
- FileBox,
set_min_terminal_size,
select_stable_diffusion_config_file,
CyclingForm,
MIN_COLS,
MIN_LINES,
+ WindowTooSmallException,
)
from invokeai.app.services.config import InvokeAIAppConfig
@@ -156,7 +155,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
BufferBox,
name="Log Messages",
editable=False,
- max_height=15,
+ max_height=6,
)
self.nextrely += 1
@@ -252,7 +251,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
) -> dict[str, npyscreen.widget]:
"""Generic code to create model selection widgets"""
widgets = dict()
- model_list = [x for x in self.all_models if self.all_models[x].model_type == model_type and not x in exclude]
+ model_list = [x for x in self.all_models if self.all_models[x].model_type == model_type and x not in exclude]
model_labels = [self.model_labels[x] for x in model_list]
show_recommended = len(self.installed_models) == 0
@@ -358,14 +357,14 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
try:
v.hidden = True
v.editable = False
- except:
+ except Exception:
pass
for k, v in widgets[selected_tab].items():
try:
v.hidden = False
if not isinstance(v, (npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
v.editable = True
- except:
+ except Exception:
pass
self.__class__.current_tab = selected_tab # for persistence
self.display()
@@ -542,7 +541,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
self.ti_models,
]
for section in ui_sections:
- if not "models_selected" in section:
+ if "models_selected" not in section:
continue
selected = set([section["models"][x] for x in section["models_selected"].value])
models_to_install = [x for x in selected if not self.all_models[x].installed]
@@ -638,7 +637,7 @@ def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPre
return None
else:
return response
- except:
+ except Exception:
return None
@@ -674,8 +673,7 @@ def process_and_execute(
def select_and_download_models(opt: Namespace):
precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
config.precision = precision
- helper = lambda x: ask_user_for_prediction_type(x)
- installer = ModelInstall(config, prediction_type_helper=helper)
+ installer = ModelInstall(config, prediction_type_helper=ask_user_for_prediction_type)
if opt.list_models:
installer.list_models(opt.list_models)
elif opt.add or opt.delete:
@@ -693,7 +691,11 @@ def select_and_download_models(opt: Namespace):
# needed to support the probe() method running under a subprocess
torch.multiprocessing.set_start_method("spawn")
- set_min_terminal_size(MIN_COLS, MIN_LINES)
+ if not set_min_terminal_size(MIN_COLS, MIN_LINES):
+ raise WindowTooSmallException(
+ "Could not increase terminal size. Try running again with a larger window or smaller font size."
+ )
+
installApp = AddModelApplication(opt)
try:
installApp.run()
@@ -772,7 +774,7 @@ def main():
if not config.model_conf_path.exists():
logger.info("Your InvokeAI root directory is not set up. Calling invokeai-configure.")
- from invokeai.frontend.install import invokeai_configure
+ from invokeai.frontend.install.invokeai_configure import invokeai_configure
invokeai_configure()
sys.exit(0)
@@ -787,6 +789,8 @@ def main():
curses.echo()
curses.endwin()
logger.info("Goodbye! Come back soon.")
+ except WindowTooSmallException as e:
+ logger.error(str(e))
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
logger.error("Insufficient vertical space for the interface. Please make your window taller and try again")
diff --git a/invokeai/frontend/install/widgets.py b/invokeai/frontend/install/widgets.py
index 10da15bf13..79b6280990 100644
--- a/invokeai/frontend/install/widgets.py
+++ b/invokeai/frontend/install/widgets.py
@@ -21,31 +21,40 @@ MIN_COLS = 130
MIN_LINES = 38
+class WindowTooSmallException(Exception):
+ pass
+
+
# -------------------------------------
-def set_terminal_size(columns: int, lines: int):
- ts = get_terminal_size()
- width = max(columns, ts.columns)
- height = max(lines, ts.lines)
-
+def set_terminal_size(columns: int, lines: int) -> bool:
OS = platform.uname().system
- if OS == "Windows":
- pass
- # not working reliably - ask user to adjust the window
- # _set_terminal_size_powershell(width,height)
- elif OS in ["Darwin", "Linux"]:
- _set_terminal_size_unix(width, height)
+ screen_ok = False
+ while not screen_ok:
+ ts = get_terminal_size()
+ width = max(columns, ts.columns)
+ height = max(lines, ts.lines)
- # check whether it worked....
- ts = get_terminal_size()
- pause = False
- if ts.columns < columns:
- print("\033[1mThis window is too narrow for the user interface.\033[0m")
- pause = True
- if ts.lines < lines:
- print("\033[1mThis window is too short for the user interface.\033[0m")
- pause = True
- if pause:
- input("Maximize the window then press any key to continue..")
+ if OS == "Windows":
+ pass
+ # not working reliably - ask user to adjust the window
+ # _set_terminal_size_powershell(width,height)
+ elif OS in ["Darwin", "Linux"]:
+ _set_terminal_size_unix(width, height)
+
+ # check whether it worked....
+ ts = get_terminal_size()
+ if ts.columns < columns or ts.lines < lines:
+ print(
+ f"\033[1mThis window is too small for the interface. InvokeAI requires {columns}x{lines} (w x h) characters, but window is {ts.columns}x{ts.lines}\033[0m"
+ )
+ resp = input(
+ "Maximize the window and/or decrease the font size then press any key to continue. Type [Q] to give up.."
+ )
+ if resp.upper().startswith("Q"):
+ break
+ else:
+ screen_ok = True
+ return screen_ok
def _set_terminal_size_powershell(width: int, height: int):
@@ -80,21 +89,21 @@ def _set_terminal_size_unix(width: int, height: int):
sys.stdout.flush()
-def set_min_terminal_size(min_cols: int, min_lines: int):
+def set_min_terminal_size(min_cols: int, min_lines: int) -> bool:
# make sure there's enough room for the ui
term_cols, term_lines = get_terminal_size()
if term_cols >= min_cols and term_lines >= min_lines:
- return
+ return True
cols = max(term_cols, min_cols)
lines = max(term_lines, min_lines)
- set_terminal_size(cols, lines)
+ return set_terminal_size(cols, lines)
class IntSlider(npyscreen.Slider):
def translate_value(self):
stri = "%2d / %2d" % (self.value, self.out_of)
- l = (len(str(self.out_of))) * 2 + 4
- stri = stri.rjust(l)
+ length = (len(str(self.out_of))) * 2 + 4
+ stri = stri.rjust(length)
return stri
@@ -158,13 +167,13 @@ class FloatSlider(npyscreen.Slider):
# this is supposed to adjust display precision, but doesn't
def translate_value(self):
stri = "%3.2f / %3.2f" % (self.value, self.out_of)
- l = (len(str(self.out_of))) * 2 + 4
- stri = stri.rjust(l)
+ length = (len(str(self.out_of))) * 2 + 4
+ stri = stri.rjust(length)
return stri
class FloatTitleSlider(npyscreen.TitleText):
- _entry_type = FloatSlider
+ _entry_type = npyscreen.Slider
class SelectColumnBase:
diff --git a/invokeai/frontend/legacy_launch_invokeai.py b/invokeai/frontend/legacy_launch_invokeai.py
index e4509db6e5..e1e7dc26ab 100644
--- a/invokeai/frontend/legacy_launch_invokeai.py
+++ b/invokeai/frontend/legacy_launch_invokeai.py
@@ -1,4 +1,3 @@
-import os
import sys
import argparse
diff --git a/invokeai/frontend/merge/__init__.py b/invokeai/frontend/merge/__init__.py
index 3a2e4474a5..a18da9c0d4 100644
--- a/invokeai/frontend/merge/__init__.py
+++ b/invokeai/frontend/merge/__init__.py
@@ -1,4 +1,4 @@
"""
Initialization file for invokeai.frontend.merge
"""
-from .merge_diffusers import main as invokeai_merge_diffusers
+from .merge_diffusers import main as invokeai_merge_diffusers # noqa: F401
diff --git a/invokeai/frontend/merge/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py
index db493ec17f..ada8eed644 100644
--- a/invokeai/frontend/merge/merge_diffusers.py
+++ b/invokeai/frontend/merge/merge_diffusers.py
@@ -9,19 +9,15 @@ import curses
import sys
from argparse import Namespace
from pathlib import Path
-from typing import List, Union
+from typing import List, Optional
import npyscreen
-from diffusers import DiffusionPipeline
-from diffusers import logging as dlogging
from npyscreen import widget
-from omegaconf import OmegaConf
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_management import (
ModelMerger,
- MergeInterpolationMethod,
ModelManager,
ModelType,
BaseModelType,
@@ -318,7 +314,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
else:
return True
- def get_model_names(self, base_model: BaseModelType = None) -> List[str]:
+ def get_model_names(self, base_model: Optional[BaseModelType] = None) -> List[str]:
model_names = [
info["model_name"]
for info in self.model_manager.list_models(model_type=ModelType.Main, base_model=base_model)
@@ -382,7 +378,8 @@ def run_cli(args: Namespace):
def main():
args = _parse_args()
- config.parse_args(["--root", str(args.root_dir)])
+ if args.root_dir:
+ config.parse_args(["--root", str(args.root_dir)])
try:
if args.front_end:
diff --git a/invokeai/frontend/training/__init__.py b/invokeai/frontend/training/__init__.py
index db5d69fc7a..d4eff2f7fd 100644
--- a/invokeai/frontend/training/__init__.py
+++ b/invokeai/frontend/training/__init__.py
@@ -1,4 +1,4 @@
"""
Initialization file for invokeai.frontend.training
"""
-from .textual_inversion import main as invokeai_textual_inversion
+from .textual_inversion import main as invokeai_textual_inversion # noqa: F401
diff --git a/invokeai/frontend/training/textual_inversion.py b/invokeai/frontend/training/textual_inversion.py
index 25debf4bdc..12f4db8e7b 100755
--- a/invokeai/frontend/training/textual_inversion.py
+++ b/invokeai/frontend/training/textual_inversion.py
@@ -59,7 +59,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
try:
default = self.model_names.index(saved_args["model"])
- except:
+ except Exception:
pass
self.add_widget_intelligent(
@@ -377,7 +377,7 @@ def previous_args() -> dict:
try:
conf = OmegaConf.load(conf_file)
conf["placeholder_token"] = conf["placeholder_token"].strip("<>")
- except:
+ except Exception:
conf = None
return conf
diff --git a/invokeai/frontend/web/.husky/pre-commit b/invokeai/frontend/web/.husky/pre-commit
index f7584d86eb..f89f3ad735 100755
--- a/invokeai/frontend/web/.husky/pre-commit
+++ b/invokeai/frontend/web/.husky/pre-commit
@@ -1,6 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
-python -m black . --check
-
cd invokeai/frontend/web/ && npm run lint-staged
diff --git a/invokeai/frontend/web/dist/assets/App-44cdaaf3.js b/invokeai/frontend/web/dist/assets/App-44cdaaf3.js
deleted file mode 100644
index 801b048b3a..0000000000
--- a/invokeai/frontend/web/dist/assets/App-44cdaaf3.js
+++ /dev/null
@@ -1,169 +0,0 @@
- `})}function xA(e){gd({condition:e.isOpen&&!!e.isDisabled,message:"Cannot open a disabled accordion item"})}function Eu(e){const{isOpen:t,isDisabled:n}=fb(),{reduceMotion:r}=pb(),o=Ct("chakra-accordion__icon",e.className),s=fm(),a={opacity:n?.4:1,transform:t?"rotate(-180deg)":void 0,transition:r?void 0:"transform 0.2s",transformOrigin:"center",...s.icon};return i.jsx(no,{viewBox:"0 0 24 24","aria-hidden":!0,className:o,__css:a,...e,children:i.jsx("path",{fill:"currentColor",d:"M16.59 8.59L12 13.17 7.41 8.59 6 10l6 6 6-6z"})})}Eu.displayName="AccordionIcon";var Ou=Ae(function(t,n){const{children:r,className:o}=t,{htmlProps:s,...a}=gA(t),d={...fm().container,overflowAnchor:"none"},p=f.useMemo(()=>a,[a]);return i.jsx(uA,{value:p,children:i.jsx(je.div,{ref:n,...s,className:Ct("chakra-accordion__item",o),__css:d,children:typeof r=="function"?r({isExpanded:!!a.isOpen,isDisabled:!!a.isDisabled}):r})})});Ou.displayName="AccordionItem";var Ki={ease:[.25,.1,.25,1],easeIn:[.4,0,1,1],easeOut:[0,0,.2,1],easeInOut:[.4,0,.2,1]},pu={scale:{enter:{scale:1},exit:{scale:.95}},fade:{enter:{opacity:1},exit:{opacity:0}},pushLeft:{enter:{x:"100%"},exit:{x:"-30%"}},pushRight:{enter:{x:"-100%"},exit:{x:"30%"}},pushUp:{enter:{y:"100%"},exit:{y:"-30%"}},pushDown:{enter:{y:"-100%"},exit:{y:"30%"}},slideLeft:{position:{left:0,top:0,bottom:0,width:"100%"},enter:{x:0,y:0},exit:{x:"-100%",y:0}},slideRight:{position:{right:0,top:0,bottom:0,width:"100%"},enter:{x:0,y:0},exit:{x:"100%",y:0}},slideUp:{position:{top:0,left:0,right:0,maxWidth:"100vw"},enter:{x:0,y:0},exit:{x:0,y:"-100%"}},slideDown:{position:{bottom:0,left:0,right:0,maxWidth:"100vw"},enter:{x:0,y:0},exit:{x:0,y:"100%"}}};function zv(e){var t;switch((t=e==null?void 0:e.direction)!=null?t:"right"){case"right":return pu.slideRight;case"left":return pu.slideLeft;case"bottom":return pu.slideDown;case"top":return pu.slideUp;default:return pu.slideRight}}var Yi={enter:{duration:.2,ease:Ki.easeOut},exit:{duration:.1,ease:Ki.easeIn}},ks={enter:(e,t)=>({...e,delay:typeof t=="number"?t:t==null?void 0:t.enter}),exit:(e,t)=>({...e,delay:typeof t=="number"?t:t==null?void 0:t.exit})},wA=e=>e!=null&&parseInt(e.toString(),10)>0,Ew={exit:{height:{duration:.2,ease:Ki.ease},opacity:{duration:.3,ease:Ki.ease}},enter:{height:{duration:.3,ease:Ki.ease},opacity:{duration:.4,ease:Ki.ease}}},SA={exit:({animateOpacity:e,startingHeight:t,transition:n,transitionEnd:r,delay:o})=>{var s;return{...e&&{opacity:wA(t)?1:0},height:t,transitionEnd:r==null?void 0:r.exit,transition:(s=n==null?void 0:n.exit)!=null?s:ks.exit(Ew.exit,o)}},enter:({animateOpacity:e,endingHeight:t,transition:n,transitionEnd:r,delay:o})=>{var s;return{...e&&{opacity:1},height:t,transitionEnd:r==null?void 0:r.enter,transition:(s=n==null?void 0:n.enter)!=null?s:ks.enter(Ew.enter,o)}}},pm=f.forwardRef((e,t)=>{const{in:n,unmountOnExit:r,animateOpacity:o=!0,startingHeight:s=0,endingHeight:a="auto",style:c,className:d,transition:p,transitionEnd:h,...m}=e,[v,b]=f.useState(!1);f.useEffect(()=>{const k=setTimeout(()=>{b(!0)});return()=>clearTimeout(k)},[]),gd({condition:Number(s)>0&&!!r,message:"startingHeight and unmountOnExit are mutually exclusive. 
You can't use them together"});const w=parseFloat(s.toString())>0,y={startingHeight:s,endingHeight:a,animateOpacity:o,transition:v?p:{enter:{duration:0}},transitionEnd:{enter:h==null?void 0:h.enter,exit:r?h==null?void 0:h.exit:{...h==null?void 0:h.exit,display:w?"block":"none"}}},S=r?n:!0,_=n||r?"enter":"exit";return i.jsx(mo,{initial:!1,custom:y,children:S&&i.jsx(Ir.div,{ref:t,...m,className:Ct("chakra-collapse",d),style:{overflow:"hidden",display:"block",...c},custom:y,variants:SA,initial:r?"exit":!1,animate:_,exit:"exit"})})});pm.displayName="Collapse";var CA={enter:({transition:e,transitionEnd:t,delay:n}={})=>{var r;return{opacity:1,transition:(r=e==null?void 0:e.enter)!=null?r:ks.enter(Yi.enter,n),transitionEnd:t==null?void 0:t.enter}},exit:({transition:e,transitionEnd:t,delay:n}={})=>{var r;return{opacity:0,transition:(r=e==null?void 0:e.exit)!=null?r:ks.exit(Yi.exit,n),transitionEnd:t==null?void 0:t.exit}}},B5={initial:"exit",animate:"enter",exit:"exit",variants:CA},kA=f.forwardRef(function(t,n){const{unmountOnExit:r,in:o,className:s,transition:a,transitionEnd:c,delay:d,...p}=t,h=o||r?"enter":"exit",m=r?o&&r:!0,v={transition:a,transitionEnd:c,delay:d};return i.jsx(mo,{custom:v,children:m&&i.jsx(Ir.div,{ref:n,className:Ct("chakra-fade",s),custom:v,...B5,animate:h,...p})})});kA.displayName="Fade";var _A={exit:({reverse:e,initialScale:t,transition:n,transitionEnd:r,delay:o})=>{var s;return{opacity:0,...e?{scale:t,transitionEnd:r==null?void 0:r.exit}:{transitionEnd:{scale:t,...r==null?void 0:r.exit}},transition:(s=n==null?void 0:n.exit)!=null?s:ks.exit(Yi.exit,o)}},enter:({transitionEnd:e,transition:t,delay:n})=>{var r;return{opacity:1,scale:1,transition:(r=t==null?void 0:t.enter)!=null?r:ks.enter(Yi.enter,n),transitionEnd:e==null?void 0:e.enter}}},F5={initial:"exit",animate:"enter",exit:"exit",variants:_A},PA=f.forwardRef(function(t,n){const{unmountOnExit:r,in:o,reverse:s=!0,initialScale:a=.95,className:c,transition:d,transitionEnd:p,delay:h,...m}=t,v=r?o&&r:!0,b=o||r?"enter":"exit",w={initialScale:a,reverse:s,transition:d,transitionEnd:p,delay:h};return i.jsx(mo,{custom:w,children:v&&i.jsx(Ir.div,{ref:n,className:Ct("chakra-offset-slide",c),...F5,animate:b,custom:w,...m})})});PA.displayName="ScaleFade";var jA={initial:({offsetX:e,offsetY:t,transition:n,transitionEnd:r,delay:o})=>{var s;return{opacity:0,x:e,y:t,transition:(s=n==null?void 0:n.exit)!=null?s:ks.exit(Yi.exit,o),transitionEnd:r==null?void 0:r.exit}},enter:({transition:e,transitionEnd:t,delay:n})=>{var r;return{opacity:1,x:0,y:0,transition:(r=e==null?void 0:e.enter)!=null?r:ks.enter(Yi.enter,n),transitionEnd:t==null?void 0:t.enter}},exit:({offsetY:e,offsetX:t,transition:n,transitionEnd:r,reverse:o,delay:s})=>{var a;const c={x:t,y:e};return{opacity:0,transition:(a=n==null?void 0:n.exit)!=null?a:ks.exit(Yi.exit,s),...o?{...c,transitionEnd:r==null?void 0:r.exit}:{transitionEnd:{...c,...r==null?void 0:r.exit}}}}},Lv={initial:"initial",animate:"enter",exit:"exit",variants:jA},IA=f.forwardRef(function(t,n){const{unmountOnExit:r,in:o,reverse:s=!0,className:a,offsetX:c=0,offsetY:d=8,transition:p,transitionEnd:h,delay:m,...v}=t,b=r?o&&r:!0,w=o||r?"enter":"exit",y={offsetX:c,offsetY:d,reverse:s,transition:p,transitionEnd:h,delay:m};return i.jsx(mo,{custom:y,children:b&&i.jsx(Ir.div,{ref:n,className:Ct("chakra-offset-slide",a),custom:y,...Lv,animate:w,...v})})});IA.displayName="SlideFade";var 
Ow={exit:{duration:.15,ease:Ki.easeInOut},enter:{type:"spring",damping:25,stiffness:180}},EA={exit:({direction:e,transition:t,transitionEnd:n,delay:r})=>{var o;const{exit:s}=zv({direction:e});return{...s,transition:(o=t==null?void 0:t.exit)!=null?o:ks.exit(Ow.exit,r),transitionEnd:n==null?void 0:n.exit}},enter:({direction:e,transitionEnd:t,transition:n,delay:r})=>{var o;const{enter:s}=zv({direction:e});return{...s,transition:(o=n==null?void 0:n.enter)!=null?o:ks.enter(Ow.enter,r),transitionEnd:t==null?void 0:t.enter}}},H5=f.forwardRef(function(t,n){const{direction:r="right",style:o,unmountOnExit:s,in:a,className:c,transition:d,transitionEnd:p,delay:h,motionProps:m,...v}=t,b=zv({direction:r}),w=Object.assign({position:"fixed"},b.position,o),y=s?a&&s:!0,S=a||s?"enter":"exit",_={transitionEnd:p,transition:d,direction:r,delay:h};return i.jsx(mo,{custom:_,children:y&&i.jsx(Ir.div,{...v,ref:n,initial:"exit",className:Ct("chakra-slide",c),animate:S,exit:"exit",custom:_,variants:EA,style:w,...m})})});H5.displayName="Slide";var Ru=Ae(function(t,n){const{className:r,motionProps:o,...s}=t,{reduceMotion:a}=pb(),{getPanelProps:c,isOpen:d}=fb(),p=c(s,n),h=Ct("chakra-accordion__panel",r),m=fm();a||delete p.hidden;const v=i.jsx(je.div,{...p,__css:m.panel,className:h});return a?v:i.jsx(pm,{in:d,...o,children:v})});Ru.displayName="AccordionPanel";var W5=Ae(function({children:t,reduceMotion:n,...r},o){const s=Fr("Accordion",r),a=qn(r),{htmlProps:c,descendants:d,...p}=hA(a),h=f.useMemo(()=>({...p,reduceMotion:!!n}),[p,n]);return i.jsx(dA,{value:d,children:i.jsx(mA,{value:h,children:i.jsx(cA,{value:s,children:i.jsx(je.div,{ref:o,...c,className:Ct("chakra-accordion",r.className),__css:s.root,children:t})})})})});W5.displayName="Accordion";function Cd(e){return f.Children.toArray(e).filter(t=>f.isValidElement(t))}var[OA,RA]=Dn({strict:!1,name:"ButtonGroupContext"}),MA={horizontal:{"> *:first-of-type:not(:last-of-type)":{borderEndRadius:0},"> *:not(:first-of-type):not(:last-of-type)":{borderRadius:0},"> *:not(:first-of-type):last-of-type":{borderStartRadius:0}},vertical:{"> *:first-of-type:not(:last-of-type)":{borderBottomRadius:0},"> *:not(:first-of-type):not(:last-of-type)":{borderRadius:0},"> *:not(:first-of-type):last-of-type":{borderTopRadius:0}}},DA={horizontal:e=>({"& > *:not(style) ~ *:not(style)":{marginStart:e}}),vertical:e=>({"& > *:not(style) ~ *:not(style)":{marginTop:e}})},rr=Ae(function(t,n){const{size:r,colorScheme:o,variant:s,className:a,spacing:c="0.5rem",isAttached:d,isDisabled:p,orientation:h="horizontal",...m}=t,v=Ct("chakra-button__group",a),b=f.useMemo(()=>({size:r,colorScheme:o,variant:s,isDisabled:p}),[r,o,s,p]);let w={display:"inline-flex",...d?MA[h]:DA[h](c)};const y=h==="vertical";return i.jsx(OA,{value:b,children:i.jsx(je.div,{ref:n,role:"group",__css:w,className:v,"data-attached":d?"":void 0,"data-orientation":h,flexDir:y?"column":void 0,...m})})});rr.displayName="ButtonGroup";function AA(e){const[t,n]=f.useState(!e);return{ref:f.useCallback(s=>{s&&n(s.tagName==="BUTTON")},[]),type:t?"button":void 0}}function Bv(e){const{children:t,className:n,...r}=e,o=f.isValidElement(t)?f.cloneElement(t,{"aria-hidden":!0,focusable:!1}):t,s=Ct("chakra-button__icon",n);return i.jsx(je.span,{display:"inline-flex",alignSelf:"center",flexShrink:0,...r,className:s,children:o})}Bv.displayName="ButtonIcon";function 
Gp(e){const{label:t,placement:n,spacing:r="0.5rem",children:o=i.jsx(pl,{color:"currentColor",width:"1em",height:"1em"}),className:s,__css:a,...c}=e,d=Ct("chakra-button__spinner",s),p=n==="start"?"marginEnd":"marginStart",h=f.useMemo(()=>({display:"flex",alignItems:"center",position:t?"relative":"absolute",[p]:t?r:0,fontSize:"1em",lineHeight:"normal",...a}),[a,t,p,r]);return i.jsx(je.div,{className:d,...c,__css:h,children:o})}Gp.displayName="ButtonSpinner";var bc=Ae((e,t)=>{const n=RA(),r=ia("Button",{...n,...e}),{isDisabled:o=n==null?void 0:n.isDisabled,isLoading:s,isActive:a,children:c,leftIcon:d,rightIcon:p,loadingText:h,iconSpacing:m="0.5rem",type:v,spinner:b,spinnerPlacement:w="start",className:y,as:S,..._}=qn(e),k=f.useMemo(()=>{const O={...r==null?void 0:r._focus,zIndex:1};return{display:"inline-flex",appearance:"none",alignItems:"center",justifyContent:"center",userSelect:"none",position:"relative",whiteSpace:"nowrap",verticalAlign:"middle",outline:"none",...r,...!!n&&{_focus:O}}},[r,n]),{ref:j,type:I}=AA(S),E={rightIcon:p,leftIcon:d,iconSpacing:m,children:c};return i.jsxs(je.button,{ref:sA(t,j),as:S,type:v??I,"data-active":Ft(a),"data-loading":Ft(s),__css:k,className:Ct("chakra-button",y),..._,disabled:o||s,children:[s&&w==="start"&&i.jsx(Gp,{className:"chakra-button__spinner--start",label:h,placement:"start",spacing:m,children:b}),s?h||i.jsx(je.span,{opacity:0,children:i.jsx(Rw,{...E})}):i.jsx(Rw,{...E}),s&&w==="end"&&i.jsx(Gp,{className:"chakra-button__spinner--end",label:h,placement:"end",spacing:m,children:b})]})});bc.displayName="Button";function Rw(e){const{leftIcon:t,rightIcon:n,children:r,iconSpacing:o}=e;return i.jsxs(i.Fragment,{children:[t&&i.jsx(Bv,{marginEnd:o,children:t}),r,n&&i.jsx(Bv,{marginStart:o,children:n})]})}var Ca=Ae((e,t)=>{const{icon:n,children:r,isRound:o,"aria-label":s,...a}=e,c=n||r,d=f.isValidElement(c)?f.cloneElement(c,{"aria-hidden":!0,focusable:!1}):null;return i.jsx(bc,{padding:"0",borderRadius:o?"full":void 0,ref:t,"aria-label":s,...a,children:d})});Ca.displayName="IconButton";var[Rde,TA]=Dn({name:"CheckboxGroupContext",strict:!1});function NA(e){const[t,n]=f.useState(e),[r,o]=f.useState(!1);return e!==t&&(o(!0),n(e)),r}function $A(e){return i.jsx(je.svg,{width:"1.2em",viewBox:"0 0 12 10",style:{fill:"none",strokeWidth:2,stroke:"currentColor",strokeDasharray:16},...e,children:i.jsx("polyline",{points:"1.5 6 4.5 9 10.5 1"})})}function zA(e){return i.jsx(je.svg,{width:"1.2em",viewBox:"0 0 24 24",style:{stroke:"currentColor",strokeWidth:4},...e,children:i.jsx("line",{x1:"21",x2:"3",y1:"12",y2:"12"})})}function LA(e){const{isIndeterminate:t,isChecked:n,...r}=e,o=t?zA:$A;return n||t?i.jsx(je.div,{style:{display:"flex",alignItems:"center",justifyContent:"center",height:"100%"},children:i.jsx(o,{...r})}):null}var[BA,V5]=Dn({name:"FormControlStylesContext",errorMessage:`useFormControlStyles returned is 'undefined'. 
Seems you forgot to wrap the components in "" `}),[FA,kd]=Dn({strict:!1,name:"FormControlContext"});function HA(e){const{id:t,isRequired:n,isInvalid:r,isDisabled:o,isReadOnly:s,...a}=e,c=f.useId(),d=t||`field-${c}`,p=`${d}-label`,h=`${d}-feedback`,m=`${d}-helptext`,[v,b]=f.useState(!1),[w,y]=f.useState(!1),[S,_]=f.useState(!1),k=f.useCallback((R={},M=null)=>({id:m,...R,ref:cn(M,A=>{A&&y(!0)})}),[m]),j=f.useCallback((R={},M=null)=>({...R,ref:M,"data-focus":Ft(S),"data-disabled":Ft(o),"data-invalid":Ft(r),"data-readonly":Ft(s),id:R.id!==void 0?R.id:p,htmlFor:R.htmlFor!==void 0?R.htmlFor:d}),[d,o,S,r,s,p]),I=f.useCallback((R={},M=null)=>({id:h,...R,ref:cn(M,A=>{A&&b(!0)}),"aria-live":"polite"}),[h]),E=f.useCallback((R={},M=null)=>({...R,...a,ref:M,role:"group"}),[a]),O=f.useCallback((R={},M=null)=>({...R,ref:M,role:"presentation","aria-hidden":!0,children:R.children||"*"}),[]);return{isRequired:!!n,isInvalid:!!r,isReadOnly:!!s,isDisabled:!!o,isFocused:!!S,onFocus:()=>_(!0),onBlur:()=>_(!1),hasFeedbackText:v,setHasFeedbackText:b,hasHelpText:w,setHasHelpText:y,id:d,labelId:p,feedbackId:h,helpTextId:m,htmlProps:a,getHelpTextProps:k,getErrorMessageProps:I,getRootProps:E,getLabelProps:j,getRequiredIndicatorProps:O}}var go=Ae(function(t,n){const r=Fr("Form",t),o=qn(t),{getRootProps:s,htmlProps:a,...c}=HA(o),d=Ct("chakra-form-control",t.className);return i.jsx(FA,{value:c,children:i.jsx(BA,{value:r,children:i.jsx(je.div,{...s({},n),className:d,__css:r.container})})})});go.displayName="FormControl";var WA=Ae(function(t,n){const r=kd(),o=V5(),s=Ct("chakra-form__helper-text",t.className);return i.jsx(je.div,{...r==null?void 0:r.getHelpTextProps(t,n),__css:o.helperText,className:s})});WA.displayName="FormHelperText";var Lo=Ae(function(t,n){var r;const o=ia("FormLabel",t),s=qn(t),{className:a,children:c,requiredIndicator:d=i.jsx(U5,{}),optionalIndicator:p=null,...h}=s,m=kd(),v=(r=m==null?void 0:m.getLabelProps(h,n))!=null?r:{ref:n,...h};return i.jsxs(je.label,{...v,className:Ct("chakra-form__label",s.className),__css:{display:"block",textAlign:"start",...o},children:[c,m!=null&&m.isRequired?d:p]})});Lo.displayName="FormLabel";var U5=Ae(function(t,n){const r=kd(),o=V5();if(!(r!=null&&r.isRequired))return null;const s=Ct("chakra-form__required-indicator",t.className);return i.jsx(je.span,{...r==null?void 0:r.getRequiredIndicatorProps(t,n),__css:o.requiredIndicator,className:s})});U5.displayName="RequiredIndicator";function hb(e){const{isDisabled:t,isInvalid:n,isReadOnly:r,isRequired:o,...s}=mb(e);return{...s,disabled:t,readOnly:r,required:o,"aria-invalid":ns(n),"aria-required":ns(o),"aria-readonly":ns(r)}}function mb(e){var t,n,r;const o=kd(),{id:s,disabled:a,readOnly:c,required:d,isRequired:p,isInvalid:h,isReadOnly:m,isDisabled:v,onFocus:b,onBlur:w,...y}=e,S=e["aria-describedby"]?[e["aria-describedby"]]:[];return o!=null&&o.hasFeedbackText&&(o!=null&&o.isInvalid)&&S.push(o.feedbackId),o!=null&&o.hasHelpText&&S.push(o.helpTextId),{...y,"aria-describedby":S.join(" ")||void 0,id:s??(o==null?void 0:o.id),isDisabled:(t=a??v)!=null?t:o==null?void 0:o.isDisabled,isReadOnly:(n=c??m)!=null?n:o==null?void 0:o.isReadOnly,isRequired:(r=d??p)!=null?r:o==null?void 0:o.isRequired,isInvalid:h??(o==null?void 0:o.isInvalid),onFocus:nt(o==null?void 0:o.onFocus,b),onBlur:nt(o==null?void 0:o.onBlur,w)}}var gb={border:"0",clip:"rect(0, 0, 0, 0)",height:"1px",width:"1px",margin:"-1px",padding:"0",overflow:"hidden",whiteSpace:"nowrap",position:"absolute"},G5=je("span",{baseStyle:gb});G5.displayName="VisuallyHidden";var 
VA=je("input",{baseStyle:gb});VA.displayName="VisuallyHiddenInput";const UA=()=>typeof document<"u";let Mw=!1,_d=null,ol=!1,Fv=!1;const Hv=new Set;function vb(e,t){Hv.forEach(n=>n(e,t))}const GA=typeof window<"u"&&window.navigator!=null?/^Mac/.test(window.navigator.platform):!1;function qA(e){return!(e.metaKey||!GA&&e.altKey||e.ctrlKey||e.key==="Control"||e.key==="Shift"||e.key==="Meta")}function Dw(e){ol=!0,qA(e)&&(_d="keyboard",vb("keyboard",e))}function Tl(e){if(_d="pointer",e.type==="mousedown"||e.type==="pointerdown"){ol=!0;const t=e.composedPath?e.composedPath()[0]:e.target;let n=!1;try{n=t.matches(":focus-visible")}catch{}if(n)return;vb("pointer",e)}}function KA(e){return e.mozInputSource===0&&e.isTrusted?!0:e.detail===0&&!e.pointerType}function XA(e){KA(e)&&(ol=!0,_d="virtual")}function YA(e){e.target===window||e.target===document||(!ol&&!Fv&&(_d="virtual",vb("virtual",e)),ol=!1,Fv=!1)}function QA(){ol=!1,Fv=!0}function Aw(){return _d!=="pointer"}function JA(){if(!UA()||Mw)return;const{focus:e}=HTMLElement.prototype;HTMLElement.prototype.focus=function(...n){ol=!0,e.apply(this,n)},document.addEventListener("keydown",Dw,!0),document.addEventListener("keyup",Dw,!0),document.addEventListener("click",XA,!0),window.addEventListener("focus",YA,!0),window.addEventListener("blur",QA,!1),typeof PointerEvent<"u"?(document.addEventListener("pointerdown",Tl,!0),document.addEventListener("pointermove",Tl,!0),document.addEventListener("pointerup",Tl,!0)):(document.addEventListener("mousedown",Tl,!0),document.addEventListener("mousemove",Tl,!0),document.addEventListener("mouseup",Tl,!0)),Mw=!0}function q5(e){JA(),e(Aw());const t=()=>e(Aw());return Hv.add(t),()=>{Hv.delete(t)}}function ZA(e,t=[]){const n=Object.assign({},e);for(const r of t)r in n&&delete n[r];return n}function K5(e={}){const t=mb(e),{isDisabled:n,isReadOnly:r,isRequired:o,isInvalid:s,id:a,onBlur:c,onFocus:d,"aria-describedby":p}=t,{defaultChecked:h,isChecked:m,isFocusable:v,onChange:b,isIndeterminate:w,name:y,value:S,tabIndex:_=void 0,"aria-label":k,"aria-labelledby":j,"aria-invalid":I,...E}=e,O=ZA(E,["isDisabled","isReadOnly","isRequired","isInvalid","id","onBlur","onFocus","aria-describedby"]),R=or(b),M=or(c),A=or(d),[T,$]=f.useState(!1),[Q,B]=f.useState(!1),[V,q]=f.useState(!1),[G,D]=f.useState(!1);f.useEffect(()=>q5($),[]);const L=f.useRef(null),[W,Y]=f.useState(!0),[ae,be]=f.useState(!!h),ie=m!==void 0,X=ie?m:ae,K=f.useCallback(de=>{if(r||n){de.preventDefault();return}ie||be(X?de.target.checked:w?!0:de.target.checked),R==null||R(de)},[r,n,X,ie,w,R]);tc(()=>{L.current&&(L.current.indeterminate=!!w)},[w]),Ba(()=>{n&&B(!1)},[n,B]),tc(()=>{const de=L.current;if(!(de!=null&&de.form))return;const Te=()=>{be(!!h)};return de.form.addEventListener("reset",Te),()=>{var Oe;return(Oe=de.form)==null?void 0:Oe.removeEventListener("reset",Te)}},[]);const U=n&&!v,se=f.useCallback(de=>{de.key===" "&&D(!0)},[D]),re=f.useCallback(de=>{de.key===" "&&D(!1)},[D]);tc(()=>{if(!L.current)return;L.current.checked!==X&&be(L.current.checked)},[L.current]);const oe=f.useCallback((de={},Te=null)=>{const 
Oe=$e=>{Q&&$e.preventDefault(),D(!0)};return{...de,ref:Te,"data-active":Ft(G),"data-hover":Ft(V),"data-checked":Ft(X),"data-focus":Ft(Q),"data-focus-visible":Ft(Q&&T),"data-indeterminate":Ft(w),"data-disabled":Ft(n),"data-invalid":Ft(s),"data-readonly":Ft(r),"aria-hidden":!0,onMouseDown:nt(de.onMouseDown,Oe),onMouseUp:nt(de.onMouseUp,()=>D(!1)),onMouseEnter:nt(de.onMouseEnter,()=>q(!0)),onMouseLeave:nt(de.onMouseLeave,()=>q(!1))}},[G,X,n,Q,T,V,w,s,r]),pe=f.useCallback((de={},Te=null)=>({...de,ref:Te,"data-active":Ft(G),"data-hover":Ft(V),"data-checked":Ft(X),"data-focus":Ft(Q),"data-focus-visible":Ft(Q&&T),"data-indeterminate":Ft(w),"data-disabled":Ft(n),"data-invalid":Ft(s),"data-readonly":Ft(r)}),[G,X,n,Q,T,V,w,s,r]),le=f.useCallback((de={},Te=null)=>({...O,...de,ref:cn(Te,Oe=>{Oe&&Y(Oe.tagName==="LABEL")}),onClick:nt(de.onClick,()=>{var Oe;W||((Oe=L.current)==null||Oe.click(),requestAnimationFrame(()=>{var $e;($e=L.current)==null||$e.focus({preventScroll:!0})}))}),"data-disabled":Ft(n),"data-checked":Ft(X),"data-invalid":Ft(s)}),[O,n,X,s,W]),ge=f.useCallback((de={},Te=null)=>({...de,ref:cn(L,Te),type:"checkbox",name:y,value:S,id:a,tabIndex:_,onChange:nt(de.onChange,K),onBlur:nt(de.onBlur,M,()=>B(!1)),onFocus:nt(de.onFocus,A,()=>B(!0)),onKeyDown:nt(de.onKeyDown,se),onKeyUp:nt(de.onKeyUp,re),required:o,checked:X,disabled:U,readOnly:r,"aria-label":k,"aria-labelledby":j,"aria-invalid":I?!!I:s,"aria-describedby":p,"aria-disabled":n,style:gb}),[y,S,a,K,M,A,se,re,o,X,U,r,k,j,I,s,p,n,_]),ke=f.useCallback((de={},Te=null)=>({...de,ref:Te,onMouseDown:nt(de.onMouseDown,eT),"data-disabled":Ft(n),"data-checked":Ft(X),"data-invalid":Ft(s)}),[X,n,s]);return{state:{isInvalid:s,isFocused:Q,isChecked:X,isActive:G,isHovered:V,isIndeterminate:w,isDisabled:n,isReadOnly:r,isRequired:o},getRootProps:le,getCheckboxProps:oe,getIndicatorProps:pe,getInputProps:ge,getLabelProps:ke,htmlProps:O}}function eT(e){e.preventDefault(),e.stopPropagation()}var tT={display:"inline-flex",alignItems:"center",justifyContent:"center",verticalAlign:"top",userSelect:"none",flexShrink:0},nT={cursor:"pointer",display:"inline-flex",alignItems:"center",verticalAlign:"top",position:"relative"},rT=za({from:{opacity:0,strokeDashoffset:16,transform:"scale(0.95)"},to:{opacity:1,strokeDashoffset:0,transform:"scale(1)"}}),oT=za({from:{opacity:0},to:{opacity:1}}),sT=za({from:{transform:"scaleX(0.65)"},to:{transform:"scaleX(1)"}}),X5=Ae(function(t,n){const r=TA(),o={...r,...t},s=Fr("Checkbox",o),a=qn(t),{spacing:c="0.5rem",className:d,children:p,iconColor:h,iconSize:m,icon:v=i.jsx(LA,{}),isChecked:b,isDisabled:w=r==null?void 0:r.isDisabled,onChange:y,inputProps:S,..._}=a;let k=b;r!=null&&r.value&&a.value&&(k=r.value.includes(a.value));let j=y;r!=null&&r.onChange&&a.value&&(j=sm(r.onChange,y));const{state:I,getInputProps:E,getCheckboxProps:O,getLabelProps:R,getRootProps:M}=K5({..._,isDisabled:w,isChecked:k,onChange:j}),A=NA(I.isChecked),T=f.useMemo(()=>({animation:A?I.isIndeterminate?`${oT} 20ms linear, ${sT} 200ms linear`:`${rT} 200ms linear`:void 0,fontSize:m,color:h,...s.icon}),[h,m,A,I.isIndeterminate,s.icon]),$=f.cloneElement(v,{__css:T,isIndeterminate:I.isIndeterminate,isChecked:I.isChecked});return 
i.jsxs(je.label,{__css:{...nT,...s.container},className:Ct("chakra-checkbox",d),...M(),children:[i.jsx("input",{className:"chakra-checkbox__input",...E(S,n)}),i.jsx(je.span,{__css:{...tT,...s.control},className:"chakra-checkbox__control",...O(),children:$}),p&&i.jsx(je.span,{className:"chakra-checkbox__label",...R(),__css:{marginStart:c,...s.label},children:p})]})});X5.displayName="Checkbox";function aT(e){const t=parseFloat(e);return typeof t!="number"||Number.isNaN(t)?0:t}function bb(e,t){let n=aT(e);const r=10**(t??10);return n=Math.round(n*r)/r,t?n.toFixed(t):n.toString()}function Wv(e){if(!Number.isFinite(e))return 0;let t=1,n=0;for(;Math.round(e*t)/t!==e;)t*=10,n+=1;return n}function qp(e,t,n){return(e-t)*100/(n-t)}function Y5(e,t,n){return(n-t)*e+t}function Vv(e,t,n){const r=Math.round((e-t)/n)*n+t,o=Wv(n);return bb(r,o)}function sc(e,t,n){return e==null?e:(n{var T;return r==null?"":(T=k0(r,s,n))!=null?T:""}),v=typeof o<"u",b=v?o:h,w=Q5(ni(b),s),y=n??w,S=f.useCallback(T=>{T!==b&&(v||m(T.toString()),p==null||p(T.toString(),ni(T)))},[p,v,b]),_=f.useCallback(T=>{let $=T;return d&&($=sc($,a,c)),bb($,y)},[y,d,c,a]),k=f.useCallback((T=s)=>{let $;b===""?$=ni(T):$=ni(b)+T,$=_($),S($)},[_,s,S,b]),j=f.useCallback((T=s)=>{let $;b===""?$=ni(-T):$=ni(b)-T,$=_($),S($)},[_,s,S,b]),I=f.useCallback(()=>{var T;let $;r==null?$="":$=(T=k0(r,s,n))!=null?T:a,S($)},[r,n,s,S,a]),E=f.useCallback(T=>{var $;const Q=($=k0(T,s,y))!=null?$:a;S(Q)},[y,s,S,a]),O=ni(b);return{isOutOfRange:O>c||O" `}),[cT,Z5]=Dn({name:"EditableContext",errorMessage:"useEditableContext: context is undefined. Seems you forgot to wrap the editable components in ``"}),e3={fontSize:"inherit",fontWeight:"inherit",textAlign:"inherit",bg:"transparent"},t3=Ae(function(t,n){const{getInputProps:r}=Z5(),o=J5(),s=r(t,n),a=Ct("chakra-editable__input",t.className);return i.jsx(je.input,{...s,__css:{outline:0,...e3,...o.input},className:a})});t3.displayName="EditableInput";var n3=Ae(function(t,n){const{getPreviewProps:r}=Z5(),o=J5(),s=r(t,n),a=Ct("chakra-editable__preview",t.className);return i.jsx(je.span,{...s,__css:{cursor:"text",display:"inline-block",...e3,...o.preview},className:a})});n3.displayName="EditablePreview";function Qi(e,t,n,r){const o=or(n);return f.useEffect(()=>{const s=typeof e=="function"?e():e??document;if(!(!n||!s))return s.addEventListener(t,o,r),()=>{s.removeEventListener(t,o,r)}},[t,e,r,o,n]),()=>{const s=typeof e=="function"?e():e??document;s==null||s.removeEventListener(t,o,r)}}function uT(e){return"current"in e}var r3=()=>typeof window<"u";function dT(){var e;const t=navigator.userAgentData;return(e=t==null?void 0:t.platform)!=null?e:navigator.platform}var fT=e=>r3()&&e.test(navigator.vendor),pT=e=>r3()&&e.test(dT()),hT=()=>pT(/mac|iphone|ipad|ipod/i),mT=()=>hT()&&fT(/apple/i);function o3(e){const{ref:t,elements:n,enabled:r}=e,o=()=>{var s,a;return(a=(s=t.current)==null?void 0:s.ownerDocument)!=null?a:document};Qi(o,"pointerdown",s=>{if(!mT()||!r)return;const a=s.target,d=(n??[t]).some(p=>{const h=uT(p)?p.current:p;return(h==null?void 0:h.contains(a))||h===a});o().activeElement!==a&&d&&(s.preventDefault(),a.focus())})}function Tw(e,t){return e?e===t||e.contains(t):!1}function 
gT(e={}){const{onChange:t,onCancel:n,onSubmit:r,onBlur:o,value:s,isDisabled:a,defaultValue:c,startWithEditView:d,isPreviewFocusable:p=!0,submitOnBlur:h=!0,selectAllOnFocus:m=!0,placeholder:v,onEdit:b,finalFocusRef:w,...y}=e,S=or(b),_=!!(d&&!a),[k,j]=f.useState(_),[I,E]=$c({defaultValue:c||"",value:s,onChange:t}),[O,R]=f.useState(I),M=f.useRef(null),A=f.useRef(null),T=f.useRef(null),$=f.useRef(null),Q=f.useRef(null);o3({ref:M,enabled:k,elements:[$,Q]});const B=!k&&!a;tc(()=>{var oe,pe;k&&((oe=M.current)==null||oe.focus(),m&&((pe=M.current)==null||pe.select()))},[]),Ba(()=>{var oe,pe,le,ge;if(!k){w?(oe=w.current)==null||oe.focus():(pe=T.current)==null||pe.focus();return}(le=M.current)==null||le.focus(),m&&((ge=M.current)==null||ge.select()),S==null||S()},[k,S,m]);const V=f.useCallback(()=>{B&&j(!0)},[B]),q=f.useCallback(()=>{R(I)},[I]),G=f.useCallback(()=>{j(!1),E(O),n==null||n(O),o==null||o(O)},[n,o,E,O]),D=f.useCallback(()=>{j(!1),R(I),r==null||r(I),o==null||o(O)},[I,r,o,O]);f.useEffect(()=>{if(k)return;const oe=M.current;(oe==null?void 0:oe.ownerDocument.activeElement)===oe&&(oe==null||oe.blur())},[k]);const L=f.useCallback(oe=>{E(oe.currentTarget.value)},[E]),W=f.useCallback(oe=>{const pe=oe.key,ge={Escape:G,Enter:ke=>{!ke.shiftKey&&!ke.metaKey&&D()}}[pe];ge&&(oe.preventDefault(),ge(oe))},[G,D]),Y=f.useCallback(oe=>{const pe=oe.key,ge={Escape:G}[pe];ge&&(oe.preventDefault(),ge(oe))},[G]),ae=I.length===0,be=f.useCallback(oe=>{var pe;if(!k)return;const le=oe.currentTarget.ownerDocument,ge=(pe=oe.relatedTarget)!=null?pe:le.activeElement,ke=Tw($.current,ge),xe=Tw(Q.current,ge);!ke&&!xe&&(h?D():G())},[h,D,G,k]),ie=f.useCallback((oe={},pe=null)=>{const le=B&&p?0:void 0;return{...oe,ref:cn(pe,A),children:ae?v:I,hidden:k,"aria-disabled":ns(a),tabIndex:le,onFocus:nt(oe.onFocus,V,q)}},[a,k,B,p,ae,V,q,v,I]),X=f.useCallback((oe={},pe=null)=>({...oe,hidden:!k,placeholder:v,ref:cn(pe,M),disabled:a,"aria-disabled":ns(a),value:I,onBlur:nt(oe.onBlur,be),onChange:nt(oe.onChange,L),onKeyDown:nt(oe.onKeyDown,W),onFocus:nt(oe.onFocus,q)}),[a,k,be,L,W,q,v,I]),K=f.useCallback((oe={},pe=null)=>({...oe,hidden:!k,placeholder:v,ref:cn(pe,M),disabled:a,"aria-disabled":ns(a),value:I,onBlur:nt(oe.onBlur,be),onChange:nt(oe.onChange,L),onKeyDown:nt(oe.onKeyDown,Y),onFocus:nt(oe.onFocus,q)}),[a,k,be,L,Y,q,v,I]),U=f.useCallback((oe={},pe=null)=>({"aria-label":"Edit",...oe,type:"button",onClick:nt(oe.onClick,V),ref:cn(pe,T),disabled:a}),[V,a]),se=f.useCallback((oe={},pe=null)=>({...oe,"aria-label":"Submit",ref:cn(Q,pe),type:"button",onClick:nt(oe.onClick,D),disabled:a}),[D,a]),re=f.useCallback((oe={},pe=null)=>({"aria-label":"Cancel",id:"cancel",...oe,ref:cn($,pe),type:"button",onClick:nt(oe.onClick,G),disabled:a}),[G,a]);return{isEditing:k,isDisabled:a,isValueEmpty:ae,value:I,onEdit:V,onCancel:G,onSubmit:D,getPreviewProps:ie,getInputProps:X,getTextareaProps:K,getEditButtonProps:U,getSubmitButtonProps:se,getCancelButtonProps:re,htmlProps:y}}var s3=Ae(function(t,n){const r=Fr("Editable",t),o=qn(t),{htmlProps:s,...a}=gT(o),{isEditing:c,onSubmit:d,onCancel:p,onEdit:h}=a,m=Ct("chakra-editable",t.className),v=X1(t.children,{isEditing:c,onSubmit:d,onCancel:p,onEdit:h});return i.jsx(cT,{value:a,children:i.jsx(lT,{value:r,children:i.jsx(je.div,{ref:n,...s,className:m,children:v})})})});s3.displayName="Editable";var a3={exports:{}},vT="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED",bT=vT,yT=bT;function i3(){}function l3(){}l3.resetWarningCache=i3;var xT=function(){function e(r,o,s,a,c,d){if(d!==yT){var p=new Error("Calling 
PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. Read more at http://fb.me/use-check-prop-types");throw p.name="Invariant Violation",p}}e.isRequired=e;function t(){return e}var n={array:e,bigint:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,elementType:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t,checkPropTypes:l3,resetWarningCache:i3};return n.PropTypes=n,n};a3.exports=xT();var wT=a3.exports;const Ln=vd(wT);var Uv="data-focus-lock",c3="data-focus-lock-disabled",ST="data-no-focus-lock",CT="data-autofocus-inside",kT="data-no-autofocus";function _T(e,t){return typeof e=="function"?e(t):e&&(e.current=t),e}function PT(e,t){var n=f.useState(function(){return{value:e,callback:t,facade:{get current(){return n.value},set current(r){var o=n.value;o!==r&&(n.value=r,n.callback(r,o))}}}})[0];return n.callback=t,n.facade}function u3(e,t){return PT(t||null,function(n){return e.forEach(function(r){return _T(r,n)})})}var _0={width:"1px",height:"0px",padding:0,overflow:"hidden",position:"fixed",top:"1px",left:"1px"},Qs=function(){return Qs=Object.assign||function(t){for(var n,r=1,o=arguments.length;r0&&s[s.length-1])&&(p[0]===6||p[0]===2)){n=0;continue}if(p[0]===3&&(!s||p[1]>s[0]&&p[1]0)&&!(o=r.next()).done;)s.push(o.value)}catch(c){a={error:c}}finally{try{o&&!o.done&&(n=r.return)&&n.call(r)}finally{if(a)throw a.error}}return s}function Gv(e,t,n){if(n||arguments.length===2)for(var r=0,o=t.length,s;r=0}).sort(BT)},FT=["button:enabled","select:enabled","textarea:enabled","input:enabled","a[href]","area[href]","summary","iframe","object","embed","audio[controls]","video[controls]","[tabindex]","[contenteditable]","[autofocus]"],Sb=FT.join(","),HT="".concat(Sb,", [data-focus-guard]"),I3=function(e,t){return la((e.shadowRoot||e).children).reduce(function(n,r){return n.concat(r.matches(t?HT:Sb)?[r]:[],I3(r))},[])},WT=function(e,t){var n;return e instanceof HTMLIFrameElement&&(!((n=e.contentDocument)===null||n===void 0)&&n.body)?hm([e.contentDocument.body],t):[e]},hm=function(e,t){return e.reduce(function(n,r){var o,s=I3(r,t),a=(o=[]).concat.apply(o,s.map(function(c){return WT(c,t)}));return n.concat(a,r.parentNode?la(r.parentNode.querySelectorAll(Sb)).filter(function(c){return c===r}):[])},[])},VT=function(e){var t=e.querySelectorAll("[".concat(CT,"]"));return la(t).map(function(n){return hm([n])}).reduce(function(n,r){return n.concat(r)},[])},Cb=function(e,t){return la(e).filter(function(n){return S3(t,n)}).filter(function(n){return $T(n)})},$w=function(e,t){return t===void 0&&(t=new Map),la(e).filter(function(n){return C3(t,n)})},Kv=function(e,t,n){return j3(Cb(hm(e,n),t),!0,n)},zw=function(e,t){return j3(Cb(hm(e),t),!1)},UT=function(e,t){return Cb(VT(e),t)},ac=function(e,t){return e.shadowRoot?ac(e.shadowRoot,t):Object.getPrototypeOf(e).contains!==void 0&&Object.getPrototypeOf(e).contains.call(e,t)?!0:la(e.children).some(function(n){var r;if(n instanceof HTMLIFrameElement){var o=(r=n.contentDocument)===null||r===void 0?void 0:r.body;return o?ac(o,t):!1}return ac(n,t)})},GT=function(e){for(var t=new Set,n=e.length,r=0;r0&&t.add(o),(s&Node.DOCUMENT_POSITION_CONTAINS)>0&&t.add(r)}return e.filter(function(a,c){return!t.has(c)})},E3=function(e){return e.parentNode?E3(e.parentNode):e},kb=function(e){var t=Kp(e);return t.filter(Boolean).reduce(function(n,r){var o=r.getAttribute(Uv);return 
n.push.apply(n,o?GT(la(E3(r).querySelectorAll("[".concat(Uv,'="').concat(o,'"]:not([').concat(c3,'="disabled"])')))):[r]),n},[])},qT=function(e){try{return e()}catch{return}},ed=function(e){if(e===void 0&&(e=document),!(!e||!e.activeElement)){var t=e.activeElement;return t.shadowRoot?ed(t.shadowRoot):t instanceof HTMLIFrameElement&&qT(function(){return t.contentWindow.document})?ed(t.contentWindow.document):t}},KT=function(e,t){return e===t},XT=function(e,t){return!!la(e.querySelectorAll("iframe")).some(function(n){return KT(n,t)})},O3=function(e,t){return t===void 0&&(t=ed(y3(e).ownerDocument)),!t||t.dataset&&t.dataset.focusGuard?!1:kb(e).some(function(n){return ac(n,t)||XT(n,t)})},YT=function(e){e===void 0&&(e=document);var t=ed(e);return t?la(e.querySelectorAll("[".concat(ST,"]"))).some(function(n){return ac(n,t)}):!1},QT=function(e,t){return t.filter(P3).filter(function(n){return n.name===e.name}).filter(function(n){return n.checked})[0]||e},_b=function(e,t){return P3(e)&&e.name?QT(e,t):e},JT=function(e){var t=new Set;return e.forEach(function(n){return t.add(_b(n,e))}),e.filter(function(n){return t.has(n)})},Lw=function(e){return e[0]&&e.length>1?_b(e[0],e):e[0]},Bw=function(e,t){return e.length>1?e.indexOf(_b(e[t],e)):t},R3="NEW_FOCUS",ZT=function(e,t,n,r){var o=e.length,s=e[0],a=e[o-1],c=wb(n);if(!(n&&e.indexOf(n)>=0)){var d=n!==void 0?t.indexOf(n):-1,p=r?t.indexOf(r):d,h=r?e.indexOf(r):-1,m=d-p,v=t.indexOf(s),b=t.indexOf(a),w=JT(t),y=n!==void 0?w.indexOf(n):-1,S=y-(r?w.indexOf(r):d),_=Bw(e,0),k=Bw(e,o-1);if(d===-1||h===-1)return R3;if(!m&&h>=0)return h;if(d<=v&&c&&Math.abs(m)>1)return k;if(d>=b&&c&&Math.abs(m)>1)return _;if(m&&Math.abs(S)>1)return h;if(d<=v)return k;if(d>b)return _;if(m)return Math.abs(m)>1?h:(o+h+m)%o}},eN=function(e){return function(t){var n,r=(n=k3(t))===null||n===void 0?void 0:n.autofocus;return t.autofocus||r!==void 0&&r!=="false"||e.indexOf(t)>=0}},tN=function(e,t,n){var r=e.map(function(s){var a=s.node;return a}),o=$w(r.filter(eN(n)));return o&&o.length?Lw(o):Lw($w(t))},Xv=function(e,t){return t===void 0&&(t=[]),t.push(e),e.parentNode&&Xv(e.parentNode.host||e.parentNode,t),t},P0=function(e,t){for(var n=Xv(e),r=Xv(t),o=0;o=0)return s}return!1},M3=function(e,t,n){var r=Kp(e),o=Kp(t),s=r[0],a=!1;return o.filter(Boolean).forEach(function(c){a=P0(a||c,c)||a,n.filter(Boolean).forEach(function(d){var p=P0(s,d);p&&(!a||ac(p,a)?a=p:a=P0(p,a))})}),a},nN=function(e,t){return e.reduce(function(n,r){return n.concat(UT(r,t))},[])},rN=function(e,t){var n=new Map;return t.forEach(function(r){return n.set(r.node,r)}),e.map(function(r){return n.get(r)}).filter(LT)},oN=function(e,t){var n=ed(Kp(e).length>0?document:y3(e).ownerDocument),r=kb(e).filter(Xp),o=M3(n||e,e,r),s=new Map,a=zw(r,s),c=Kv(r,s).filter(function(b){var w=b.node;return Xp(w)});if(!(!c[0]&&(c=a,!c[0]))){var d=zw([o],s).map(function(b){var w=b.node;return w}),p=rN(d,c),h=p.map(function(b){var w=b.node;return w}),m=ZT(h,d,n,t);if(m===R3){var v=tN(a,h,nN(r,s));if(v)return{node:v};console.warn("focus-lock: cannot find any node to move focus into");return}return m===void 0?m:p[m]}},sN=function(e){var t=kb(e).filter(Xp),n=M3(e,e,t),r=new Map,o=Kv([n],r,!0),s=Kv(t,r).filter(function(a){var c=a.node;return Xp(c)}).map(function(a){var c=a.node;return c});return o.map(function(a){var c=a.node,d=a.index;return{node:c,index:d,lockItem:s.indexOf(c)>=0,guard:wb(c)}})},aN=function(e,t){"focus"in e&&e.focus(t),"contentWindow"in e&&e.contentWindow&&e.contentWindow.focus()},j0=0,I0=!1,D3=function(e,t,n){n===void 0&&(n={});var 
r=oN(e,t);if(!I0&&r){if(j0>2){console.error("FocusLock: focus-fighting detected. Only one focus management system could be active. See https://github.com/theKashey/focus-lock/#focus-fighting"),I0=!0,setTimeout(function(){I0=!1},1);return}j0++,aN(r.node,n.focusOptions),j0--}};function Pb(e){setTimeout(e,1)}var iN=function(){return document&&document.activeElement===document.body},lN=function(){return iN()||YT()},ic=null,Yl=null,lc=null,td=!1,cN=function(){return!0},uN=function(t){return(ic.whiteList||cN)(t)},dN=function(t,n){lc={observerNode:t,portaledElement:n}},fN=function(t){return lc&&lc.portaledElement===t};function Fw(e,t,n,r){var o=null,s=e;do{var a=r[s];if(a.guard)a.node.dataset.focusAutoGuard&&(o=a);else if(a.lockItem){if(s!==e)return;o=null}else break}while((s+=n)!==t);o&&(o.node.tabIndex=0)}var pN=function(t){return t&&"current"in t?t.current:t},hN=function(t){return t?!!td:td==="meanwhile"},mN=function e(t,n,r){return n&&(n.host===t&&(!n.activeElement||r.contains(n.activeElement))||n.parentNode&&e(t,n.parentNode,r))},gN=function(t,n){return n.some(function(r){return mN(t,r,r)})},Yp=function(){var t=!1;if(ic){var n=ic,r=n.observed,o=n.persistentFocus,s=n.autoFocus,a=n.shards,c=n.crossFrame,d=n.focusOptions,p=r||lc&&lc.portaledElement,h=document&&document.activeElement;if(p){var m=[p].concat(a.map(pN).filter(Boolean));if((!h||uN(h))&&(o||hN(c)||!lN()||!Yl&&s)&&(p&&!(O3(m)||h&&gN(h,m)||fN(h))&&(document&&!Yl&&h&&!s?(h.blur&&h.blur(),document.body.focus()):(t=D3(m,Yl,{focusOptions:d}),lc={})),td=!1,Yl=document&&document.activeElement),document){var v=document&&document.activeElement,b=sN(m),w=b.map(function(y){var S=y.node;return S}).indexOf(v);w>-1&&(b.filter(function(y){var S=y.guard,_=y.node;return S&&_.dataset.focusAutoGuard}).forEach(function(y){var S=y.node;return S.removeAttribute("tabIndex")}),Fw(w,b.length,1,b),Fw(w,-1,-1,b))}}}return t},A3=function(t){Yp()&&t&&(t.stopPropagation(),t.preventDefault())},jb=function(){return Pb(Yp)},vN=function(t){var n=t.target,r=t.currentTarget;r.contains(n)||dN(r,n)},bN=function(){return null},T3=function(){td="just",Pb(function(){td="meanwhile"})},yN=function(){document.addEventListener("focusin",A3),document.addEventListener("focusout",jb),window.addEventListener("blur",T3)},xN=function(){document.removeEventListener("focusin",A3),document.removeEventListener("focusout",jb),window.removeEventListener("blur",T3)};function wN(e){return e.filter(function(t){var n=t.disabled;return!n})}function SN(e){var t=e.slice(-1)[0];t&&!ic&&yN();var n=ic,r=n&&t&&t.id===n.id;ic=t,n&&!r&&(n.onDeactivation(),e.filter(function(o){var s=o.id;return s===n.id}).length||n.returnFocus(!t)),t?(Yl=null,(!r||n.observed!==t.observed)&&t.onActivation(),Yp(),Pb(Yp)):(xN(),Yl=null)}g3.assignSyncMedium(vN);v3.assignMedium(jb);IT.assignMedium(function(e){return e({moveFocusInside:D3,focusInside:O3})});const CN=MT(wN,SN)(bN);var N3=f.forwardRef(function(t,n){return f.createElement(b3,sr({sideCar:CN,ref:n},t))}),$3=b3.propTypes||{};$3.sideCar;JD($3,["sideCar"]);N3.propTypes={};const Hw=N3;function z3(e){return e!=null&&typeof e=="object"&&"nodeType"in e&&e.nodeType===Node.ELEMENT_NODE}function Ib(e){var t;if(!z3(e))return!1;const n=(t=e.ownerDocument.defaultView)!=null?t:window;return e instanceof n.HTMLElement}function kN(e){var t,n;return(n=(t=L3(e))==null?void 0:t.defaultView)!=null?n:window}function L3(e){return z3(e)?e.ownerDocument:document}function _N(e){return L3(e).activeElement}function PN(e){const 
t=e.ownerDocument.defaultView||window,{overflow:n,overflowX:r,overflowY:o}=t.getComputedStyle(e);return/auto|scroll|overlay|hidden/.test(n+o+r)}function jN(e){return e.localName==="html"?e:e.assignedSlot||e.parentElement||e.ownerDocument.documentElement}function B3(e){return["html","body","#document"].includes(e.localName)?e.ownerDocument.body:Ib(e)&&PN(e)?e:B3(jN(e))}var F3=e=>e.hasAttribute("tabindex"),IN=e=>F3(e)&&e.tabIndex===-1;function EN(e){return!!e.getAttribute("disabled")||!!e.getAttribute("aria-disabled")}function H3(e){return e.parentElement&&H3(e.parentElement)?!0:e.hidden}function ON(e){const t=e.getAttribute("contenteditable");return t!=="false"&&t!=null}function W3(e){if(!Ib(e)||H3(e)||EN(e))return!1;const{localName:t}=e;if(["input","select","textarea","button"].indexOf(t)>=0)return!0;const r={a:()=>e.hasAttribute("href"),audio:()=>e.hasAttribute("controls"),video:()=>e.hasAttribute("controls")};return t in r?r[t]():ON(e)?!0:F3(e)}function RN(e){return e?Ib(e)&&W3(e)&&!IN(e):!1}var MN=["input:not(:disabled):not([disabled])","select:not(:disabled):not([disabled])","textarea:not(:disabled):not([disabled])","embed","iframe","object","a[href]","area[href]","button:not(:disabled):not([disabled])","[tabindex]","audio[controls]","video[controls]","*[tabindex]:not([aria-disabled])","*[contenteditable]"],DN=MN.join(),AN=e=>e.offsetWidth>0&&e.offsetHeight>0;function V3(e){const t=Array.from(e.querySelectorAll(DN));return t.unshift(e),t.filter(n=>W3(n)&&AN(n))}var Ww,TN=(Ww=Hw.default)!=null?Ww:Hw,U3=e=>{const{initialFocusRef:t,finalFocusRef:n,contentRef:r,restoreFocus:o,children:s,isDisabled:a,autoFocus:c,persistentFocus:d,lockFocusAcrossFrames:p}=e,h=f.useCallback(()=>{t!=null&&t.current?t.current.focus():r!=null&&r.current&&V3(r.current).length===0&&requestAnimationFrame(()=>{var w;(w=r.current)==null||w.focus()})},[t,r]),m=f.useCallback(()=>{var b;(b=n==null?void 0:n.current)==null||b.focus()},[n]),v=o&&!n;return i.jsx(TN,{crossFrame:p,persistentFocus:d,autoFocus:c,disabled:a,onActivation:h,onDeactivation:m,returnFocus:v,children:s})};U3.displayName="FocusLock";function NN(e,t,n,r){const o=E_(t);return f.useEffect(()=>{var s;const a=(s=V2(n))!=null?s:document;if(t)return a.addEventListener(e,o,r),()=>{a.removeEventListener(e,o,r)}},[e,n,r,o,t]),()=>{var s;((s=V2(n))!=null?s:document).removeEventListener(e,o,r)}}function $N(e){const{ref:t,handler:n,enabled:r=!0}=e,o=E_(n),a=f.useRef({isPointerDown:!1,ignoreEmulatedMouseEvents:!1}).current;f.useEffect(()=>{if(!r)return;const c=m=>{E0(m,t)&&(a.isPointerDown=!0)},d=m=>{if(a.ignoreEmulatedMouseEvents){a.ignoreEmulatedMouseEvents=!1;return}a.isPointerDown&&n&&E0(m,t)&&(a.isPointerDown=!1,o(m))},p=m=>{a.ignoreEmulatedMouseEvents=!0,n&&a.isPointerDown&&E0(m,t)&&(a.isPointerDown=!1,o(m))},h=O_(t.current);return h.addEventListener("mousedown",c,!0),h.addEventListener("mouseup",d,!0),h.addEventListener("touchstart",c,!0),h.addEventListener("touchend",p,!0),()=>{h.removeEventListener("mousedown",c,!0),h.removeEventListener("mouseup",d,!0),h.removeEventListener("touchstart",c,!0),h.removeEventListener("touchend",p,!0)}},[n,t,o,a,r])}function E0(e,t){var n;const r=e.target;return r&&!O_(r).contains(r)?!1:!((n=t.current)!=null&&n.contains(r))}var[zN,LN]=Dn({name:"InputGroupStylesContext",errorMessage:`useInputGroupStyles returned is 'undefined'. 
Seems you forgot to wrap the components in "" `}),G3=Ae(function(t,n){const r=Fr("Input",t),{children:o,className:s,...a}=qn(t),c=Ct("chakra-input__group",s),d={},p=Cd(o),h=r.field;p.forEach(v=>{var b,w;r&&(h&&v.type.id==="InputLeftElement"&&(d.paddingStart=(b=h.height)!=null?b:h.h),h&&v.type.id==="InputRightElement"&&(d.paddingEnd=(w=h.height)!=null?w:h.h),v.type.id==="InputRightAddon"&&(d.borderEndRadius=0),v.type.id==="InputLeftAddon"&&(d.borderStartRadius=0))});const m=p.map(v=>{var b,w;const y=ub({size:((b=v.props)==null?void 0:b.size)||t.size,variant:((w=v.props)==null?void 0:w.variant)||t.variant});return v.type.id!=="Input"?f.cloneElement(v,y):f.cloneElement(v,Object.assign(y,d,v.props))});return i.jsx(je.div,{className:c,ref:n,__css:{width:"100%",display:"flex",position:"relative",isolation:"isolate",...r.group},"data-group":!0,...a,children:i.jsx(zN,{value:r,children:m})})});G3.displayName="InputGroup";var BN=je("div",{baseStyle:{display:"flex",alignItems:"center",justifyContent:"center",position:"absolute",top:"0",zIndex:2}}),mm=Ae(function(t,n){var r,o;const{placement:s="left",...a}=t,c=LN(),d=c.field,h={[s==="left"?"insetStart":"insetEnd"]:"0",width:(r=d==null?void 0:d.height)!=null?r:d==null?void 0:d.h,height:(o=d==null?void 0:d.height)!=null?o:d==null?void 0:d.h,fontSize:d==null?void 0:d.fontSize,...c.element};return i.jsx(BN,{ref:n,__css:h,...a})});mm.id="InputElement";mm.displayName="InputElement";var q3=Ae(function(t,n){const{className:r,...o}=t,s=Ct("chakra-input__left-element",r);return i.jsx(mm,{ref:n,placement:"left",className:s,...o})});q3.id="InputLeftElement";q3.displayName="InputLeftElement";var Eb=Ae(function(t,n){const{className:r,...o}=t,s=Ct("chakra-input__right-element",r);return i.jsx(mm,{ref:n,placement:"right",className:s,...o})});Eb.id="InputRightElement";Eb.displayName="InputRightElement";var Pd=Ae(function(t,n){const{htmlSize:r,...o}=t,s=Fr("Input",o),a=qn(o),c=hb(a),d=Ct("chakra-input",t.className);return i.jsx(je.input,{size:r,...c,__css:s.field,ref:n,className:d})});Pd.displayName="Input";Pd.id="Input";var Ob=Ae(function(t,n){const r=ia("Link",t),{className:o,isExternal:s,...a}=qn(t);return i.jsx(je.a,{target:s?"_blank":void 0,rel:s?"noopener":void 0,ref:n,className:Ct("chakra-link",o),...a,__css:r})});Ob.displayName="Link";var[FN,K3]=Dn({name:"ListStylesContext",errorMessage:`useListStyles returned is 'undefined'. Seems you forgot to wrap the components in "
" `}),Rb=Ae(function(t,n){const r=Fr("List",t),{children:o,styleType:s="none",stylePosition:a,spacing:c,...d}=qn(t),p=Cd(o),m=c?{["& > *:not(style) ~ *:not(style)"]:{mt:c}}:{};return i.jsx(FN,{value:r,children:i.jsx(je.ul,{ref:n,listStyleType:s,listStylePosition:a,role:"list",__css:{...r.container,...m},...d,children:p})})});Rb.displayName="List";var HN=Ae((e,t)=>{const{as:n,...r}=e;return i.jsx(Rb,{ref:t,as:"ol",styleType:"decimal",marginStart:"1em",...r})});HN.displayName="OrderedList";var Mb=Ae(function(t,n){const{as:r,...o}=t;return i.jsx(Rb,{ref:n,as:"ul",styleType:"initial",marginStart:"1em",...o})});Mb.displayName="UnorderedList";var wa=Ae(function(t,n){const r=K3();return i.jsx(je.li,{ref:n,...t,__css:r.item})});wa.displayName="ListItem";var WN=Ae(function(t,n){const r=K3();return i.jsx(no,{ref:n,role:"presentation",...t,__css:r.icon})});WN.displayName="ListIcon";var sl=Ae(function(t,n){const{templateAreas:r,gap:o,rowGap:s,columnGap:a,column:c,row:d,autoFlow:p,autoRows:h,templateRows:m,autoColumns:v,templateColumns:b,...w}=t,y={display:"grid",gridTemplateAreas:r,gridGap:o,gridRowGap:s,gridColumnGap:a,gridAutoColumns:v,gridColumn:c,gridRow:d,gridAutoFlow:p,gridAutoRows:h,gridTemplateRows:m,gridTemplateColumns:b};return i.jsx(je.div,{ref:n,__css:y,...w})});sl.displayName="Grid";function X3(e,t){return Array.isArray(e)?e.map(n=>n===null?null:t(n)):_v(e)?Object.keys(e).reduce((n,r)=>(n[r]=t(e[r]),n),{}):e!=null?t(e):null}var hl=je("div",{baseStyle:{flex:1,justifySelf:"stretch",alignSelf:"stretch"}});hl.displayName="Spacer";var qe=Ae(function(t,n){const r=ia("Text",t),{className:o,align:s,decoration:a,casing:c,...d}=qn(t),p=ub({textAlign:t.align,textDecoration:t.decoration,textTransform:t.casing});return i.jsx(je.p,{ref:n,className:Ct("chakra-text",t.className),...p,...d,__css:r})});qe.displayName="Text";var Y3=e=>i.jsx(je.div,{className:"chakra-stack__item",...e,__css:{display:"inline-block",flex:"0 0 auto",minWidth:0,...e.__css}});Y3.displayName="StackItem";function VN(e){const{spacing:t,direction:n}=e,r={column:{my:t,mx:0,borderLeftWidth:0,borderBottomWidth:"1px"},"column-reverse":{my:t,mx:0,borderLeftWidth:0,borderBottomWidth:"1px"},row:{mx:t,my:0,borderLeftWidth:"1px",borderBottomWidth:0},"row-reverse":{mx:t,my:0,borderLeftWidth:"1px",borderBottomWidth:0}};return{"&":X3(n,o=>r[o])}}var Db=Ae((e,t)=>{const{isInline:n,direction:r,align:o,justify:s,spacing:a="0.5rem",wrap:c,children:d,divider:p,className:h,shouldWrapChildren:m,...v}=e,b=n?"row":r??"column",w=f.useMemo(()=>VN({spacing:a,direction:b}),[a,b]),y=!!p,S=!m&&!y,_=f.useMemo(()=>{const j=Cd(d);return S?j:j.map((I,E)=>{const O=typeof I.key<"u"?I.key:E,R=E+1===j.length,A=m?i.jsx(Y3,{children:I},O):I;if(!y)return A;const T=f.cloneElement(p,{__css:w}),$=R?null:T;return i.jsxs(f.Fragment,{children:[A,$]},O)})},[p,w,y,S,m,d]),k=Ct("chakra-stack",h);return i.jsx(je.div,{ref:t,display:"flex",alignItems:o,justifyContent:s,flexDirection:b,flexWrap:c,gap:y?void 0:a,className:k,...v,children:_})});Db.displayName="Stack";var Q3=Ae((e,t)=>i.jsx(Db,{align:"center",...e,direction:"column",ref:t}));Q3.displayName="VStack";var di=Ae((e,t)=>i.jsx(Db,{align:"center",...e,direction:"row",ref:t}));di.displayName="HStack";function Vw(e){return X3(e,t=>t==="auto"?"auto":`span ${t}/span ${t}`)}var Yv=Ae(function(t,n){const{area:r,colSpan:o,colStart:s,colEnd:a,rowEnd:c,rowSpan:d,rowStart:p,...h}=t,m=ub({gridArea:r,gridColumn:Vw(o),gridRow:Vw(d),gridColumnStart:s,gridColumnEnd:a,gridRowStart:p,gridRowEnd:c});return 
i.jsx(je.div,{ref:n,__css:m,...h})});Yv.displayName="GridItem";var ml=Ae(function(t,n){const r=ia("Badge",t),{className:o,...s}=qn(t);return i.jsx(je.span,{ref:n,className:Ct("chakra-badge",t.className),...s,__css:{display:"inline-block",whiteSpace:"nowrap",verticalAlign:"middle",...r}})});ml.displayName="Badge";var Pi=Ae(function(t,n){const{borderLeftWidth:r,borderBottomWidth:o,borderTopWidth:s,borderRightWidth:a,borderWidth:c,borderStyle:d,borderColor:p,...h}=ia("Divider",t),{className:m,orientation:v="horizontal",__css:b,...w}=qn(t),y={vertical:{borderLeftWidth:r||a||c||"1px",height:"100%"},horizontal:{borderBottomWidth:o||s||c||"1px",width:"100%"}};return i.jsx(je.hr,{ref:n,"aria-orientation":v,...w,__css:{...h,border:"0",borderColor:p,borderStyle:d,...y[v],...b},className:Ct("chakra-divider",m)})});Pi.displayName="Divider";function UN(e){const{key:t}=e;return t.length===1||t.length>1&&/[^a-zA-Z0-9]/.test(t)}function GN(e={}){const{timeout:t=300,preventDefault:n=()=>!0}=e,[r,o]=f.useState([]),s=f.useRef(),a=()=>{s.current&&(clearTimeout(s.current),s.current=null)},c=()=>{a(),s.current=setTimeout(()=>{o([]),s.current=null},t)};f.useEffect(()=>a,[]);function d(p){return h=>{if(h.key==="Backspace"){const m=[...r];m.pop(),o(m);return}if(UN(h)){const m=r.concat(h.key);n(h)&&(h.preventDefault(),h.stopPropagation()),o(m),p(m.join("")),c()}}}return d}function qN(e,t,n,r){if(t==null)return r;if(!r)return e.find(a=>n(a).toLowerCase().startsWith(t.toLowerCase()));const o=e.filter(s=>n(s).toLowerCase().startsWith(t.toLowerCase()));if(o.length>0){let s;return o.includes(r)?(s=o.indexOf(r)+1,s===o.length&&(s=0),o[s]):(s=e.indexOf(o[0]),e[s])}return r}function KN(){const e=f.useRef(new Map),t=e.current,n=f.useCallback((o,s,a,c)=>{e.current.set(a,{type:s,el:o,options:c}),o.addEventListener(s,a,c)},[]),r=f.useCallback((o,s,a,c)=>{o.removeEventListener(s,a,c),e.current.delete(a)},[]);return f.useEffect(()=>()=>{t.forEach((o,s)=>{r(o.el,o.type,s,o.options)})},[r,t]),{add:n,remove:r}}function O0(e){const t=e.target,{tagName:n,isContentEditable:r}=t;return n!=="INPUT"&&n!=="TEXTAREA"&&r!==!0}function J3(e={}){const{ref:t,isDisabled:n,isFocusable:r,clickOnEnter:o=!0,clickOnSpace:s=!0,onMouseDown:a,onMouseUp:c,onClick:d,onKeyDown:p,onKeyUp:h,tabIndex:m,onMouseOver:v,onMouseLeave:b,...w}=e,[y,S]=f.useState(!0),[_,k]=f.useState(!1),j=KN(),I=D=>{D&&D.tagName!=="BUTTON"&&S(!1)},E=y?m:m||0,O=n&&!r,R=f.useCallback(D=>{if(n){D.stopPropagation(),D.preventDefault();return}D.currentTarget.focus(),d==null||d(D)},[n,d]),M=f.useCallback(D=>{_&&O0(D)&&(D.preventDefault(),D.stopPropagation(),k(!1),j.remove(document,"keyup",M,!1))},[_,j]),A=f.useCallback(D=>{if(p==null||p(D),n||D.defaultPrevented||D.metaKey||!O0(D.nativeEvent)||y)return;const L=o&&D.key==="Enter";s&&D.key===" "&&(D.preventDefault(),k(!0)),L&&(D.preventDefault(),D.currentTarget.click()),j.add(document,"keyup",M,!1)},[n,y,p,o,s,j,M]),T=f.useCallback(D=>{if(h==null||h(D),n||D.defaultPrevented||D.metaKey||!O0(D.nativeEvent)||y)return;s&&D.key===" 
"&&(D.preventDefault(),k(!1),D.currentTarget.click())},[s,y,n,h]),$=f.useCallback(D=>{D.button===0&&(k(!1),j.remove(document,"mouseup",$,!1))},[j]),Q=f.useCallback(D=>{if(D.button!==0)return;if(n){D.stopPropagation(),D.preventDefault();return}y||k(!0),D.currentTarget.focus({preventScroll:!0}),j.add(document,"mouseup",$,!1),a==null||a(D)},[n,y,a,j,$]),B=f.useCallback(D=>{D.button===0&&(y||k(!1),c==null||c(D))},[c,y]),V=f.useCallback(D=>{if(n){D.preventDefault();return}v==null||v(D)},[n,v]),q=f.useCallback(D=>{_&&(D.preventDefault(),k(!1)),b==null||b(D)},[_,b]),G=cn(t,I);return y?{...w,ref:G,type:"button","aria-disabled":O?void 0:n,disabled:O,onClick:R,onMouseDown:a,onMouseUp:c,onKeyUp:h,onKeyDown:p,onMouseOver:v,onMouseLeave:b}:{...w,ref:G,role:"button","data-active":Ft(_),"aria-disabled":n?"true":void 0,tabIndex:O?void 0:E,onClick:R,onMouseDown:Q,onMouseUp:B,onKeyUp:T,onKeyDown:A,onMouseOver:V,onMouseLeave:q}}function XN(e){const t=e.current;if(!t)return!1;const n=_N(t);return!n||t.contains(n)?!1:!!RN(n)}function Z3(e,t){const{shouldFocus:n,visible:r,focusRef:o}=t,s=n&&!r;Ba(()=>{if(!s||XN(e))return;const a=(o==null?void 0:o.current)||e.current;let c;if(a)return c=requestAnimationFrame(()=>{a.focus({preventScroll:!0})}),()=>{cancelAnimationFrame(c)}},[s,e,o])}var YN={preventScroll:!0,shouldFocus:!1};function QN(e,t=YN){const{focusRef:n,preventScroll:r,shouldFocus:o,visible:s}=t,a=JN(e)?e.current:e,c=o&&s,d=f.useRef(c),p=f.useRef(s);tc(()=>{!p.current&&s&&(d.current=c),p.current=s},[s,c]);const h=f.useCallback(()=>{if(!(!s||!a||!d.current)&&(d.current=!1,!a.contains(document.activeElement)))if(n!=null&&n.current)requestAnimationFrame(()=>{var m;(m=n.current)==null||m.focus({preventScroll:r})});else{const m=V3(a);m.length>0&&requestAnimationFrame(()=>{m[0].focus({preventScroll:r})})}},[s,r,a,n]);Ba(()=>{h()},[h]),Qi(a,"transitionend",h)}function JN(e){return"current"in e}var Nl=(e,t)=>({var:e,varRef:t?`var(${e}, ${t})`:`var(${e})`}),jr={arrowShadowColor:Nl("--popper-arrow-shadow-color"),arrowSize:Nl("--popper-arrow-size","8px"),arrowSizeHalf:Nl("--popper-arrow-size-half"),arrowBg:Nl("--popper-arrow-bg"),transformOrigin:Nl("--popper-transform-origin"),arrowOffset:Nl("--popper-arrow-offset")};function ZN(e){if(e.includes("top"))return"1px 1px 0px 0 var(--popper-arrow-shadow-color)";if(e.includes("bottom"))return"-1px -1px 0px 0 var(--popper-arrow-shadow-color)";if(e.includes("right"))return"-1px 1px 0px 0 var(--popper-arrow-shadow-color)";if(e.includes("left"))return"1px -1px 0px 0 var(--popper-arrow-shadow-color)"}var e$={top:"bottom center","top-start":"bottom left","top-end":"bottom right",bottom:"top center","bottom-start":"top left","bottom-end":"top right",left:"right center","left-start":"right top","left-end":"right bottom",right:"left center","right-start":"left top","right-end":"left bottom"},t$=e=>e$[e],Uw={scroll:!0,resize:!0};function n$(e){let t;return typeof e=="object"?t={enabled:!0,options:{...Uw,...e}}:t={enabled:e,options:Uw},t}var r$={name:"matchWidth",enabled:!0,phase:"beforeWrite",requires:["computeStyles"],fn:({state:e})=>{e.styles.popper.width=`${e.rects.reference.width}px`},effect:({state:e})=>()=>{const t=e.elements.reference;e.elements.popper.style.width=`${t.offsetWidth}px`}},o$={name:"transformOrigin",enabled:!0,phase:"write",fn:({state:e})=>{Gw(e)},effect:({state:e})=>()=>{Gw(e)}},Gw=e=>{e.elements.popper.style.setProperty(jr.transformOrigin.var,t$(e.placement))},s$={name:"positionArrow",enabled:!0,phase:"afterWrite",fn:({state:e})=>{a$(e)}},a$=e=>{var 
t;if(!e.placement)return;const n=i$(e.placement);if((t=e.elements)!=null&&t.arrow&&n){Object.assign(e.elements.arrow.style,{[n.property]:n.value,width:jr.arrowSize.varRef,height:jr.arrowSize.varRef,zIndex:-1});const r={[jr.arrowSizeHalf.var]:`calc(${jr.arrowSize.varRef} / 2 - 1px)`,[jr.arrowOffset.var]:`calc(${jr.arrowSizeHalf.varRef} * -1)`};for(const o in r)e.elements.arrow.style.setProperty(o,r[o])}},i$=e=>{if(e.startsWith("top"))return{property:"bottom",value:jr.arrowOffset.varRef};if(e.startsWith("bottom"))return{property:"top",value:jr.arrowOffset.varRef};if(e.startsWith("left"))return{property:"right",value:jr.arrowOffset.varRef};if(e.startsWith("right"))return{property:"left",value:jr.arrowOffset.varRef}},l$={name:"innerArrow",enabled:!0,phase:"main",requires:["arrow"],fn:({state:e})=>{qw(e)},effect:({state:e})=>()=>{qw(e)}},qw=e=>{if(!e.elements.arrow)return;const t=e.elements.arrow.querySelector("[data-popper-arrow-inner]");if(!t)return;const n=ZN(e.placement);n&&t.style.setProperty("--popper-arrow-default-shadow",n),Object.assign(t.style,{transform:"rotate(45deg)",background:jr.arrowBg.varRef,top:0,left:0,width:"100%",height:"100%",position:"absolute",zIndex:"inherit",boxShadow:"var(--popper-arrow-shadow, var(--popper-arrow-default-shadow))"})},c$={"start-start":{ltr:"left-start",rtl:"right-start"},"start-end":{ltr:"left-end",rtl:"right-end"},"end-start":{ltr:"right-start",rtl:"left-start"},"end-end":{ltr:"right-end",rtl:"left-end"},start:{ltr:"left",rtl:"right"},end:{ltr:"right",rtl:"left"}},u$={"auto-start":"auto-end","auto-end":"auto-start","top-start":"top-end","top-end":"top-start","bottom-start":"bottom-end","bottom-end":"bottom-start"};function d$(e,t="ltr"){var n,r;const o=((n=c$[e])==null?void 0:n[t])||e;return t==="ltr"?o:(r=u$[e])!=null?r:o}var _o="top",as="bottom",is="right",Po="left",Ab="auto",jd=[_o,as,is,Po],yc="start",nd="end",f$="clippingParents",e6="viewport",hu="popper",p$="reference",Kw=jd.reduce(function(e,t){return e.concat([t+"-"+yc,t+"-"+nd])},[]),t6=[].concat(jd,[Ab]).reduce(function(e,t){return e.concat([t,t+"-"+yc,t+"-"+nd])},[]),h$="beforeRead",m$="read",g$="afterRead",v$="beforeMain",b$="main",y$="afterMain",x$="beforeWrite",w$="write",S$="afterWrite",C$=[h$,m$,g$,v$,b$,y$,x$,w$,S$];function ra(e){return e?(e.nodeName||"").toLowerCase():null}function Bo(e){if(e==null)return window;if(e.toString()!=="[object Window]"){var t=e.ownerDocument;return t&&t.defaultView||window}return e}function al(e){var t=Bo(e).Element;return e instanceof t||e instanceof Element}function rs(e){var t=Bo(e).HTMLElement;return e instanceof t||e instanceof HTMLElement}function Tb(e){if(typeof ShadowRoot>"u")return!1;var t=Bo(e).ShadowRoot;return e instanceof t||e instanceof ShadowRoot}function k$(e){var t=e.state;Object.keys(t.elements).forEach(function(n){var r=t.styles[n]||{},o=t.attributes[n]||{},s=t.elements[n];!rs(s)||!ra(s)||(Object.assign(s.style,r),Object.keys(o).forEach(function(a){var c=o[a];c===!1?s.removeAttribute(a):s.setAttribute(a,c===!0?"":c)}))})}function _$(e){var t=e.state,n={popper:{position:t.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(t.elements.popper.style,n.popper),t.styles=n,t.elements.arrow&&Object.assign(t.elements.arrow.style,n.arrow),function(){Object.keys(t.elements).forEach(function(r){var o=t.elements[r],s=t.attributes[r]||{},a=Object.keys(t.styles.hasOwnProperty(r)?t.styles[r]:n[r]),c=a.reduce(function(d,p){return 
d[p]="",d},{});!rs(o)||!ra(o)||(Object.assign(o.style,c),Object.keys(s).forEach(function(d){o.removeAttribute(d)}))})}}const P$={name:"applyStyles",enabled:!0,phase:"write",fn:k$,effect:_$,requires:["computeStyles"]};function ta(e){return e.split("-")[0]}var Ji=Math.max,Qp=Math.min,xc=Math.round;function Qv(){var e=navigator.userAgentData;return e!=null&&e.brands&&Array.isArray(e.brands)?e.brands.map(function(t){return t.brand+"/"+t.version}).join(" "):navigator.userAgent}function n6(){return!/^((?!chrome|android).)*safari/i.test(Qv())}function wc(e,t,n){t===void 0&&(t=!1),n===void 0&&(n=!1);var r=e.getBoundingClientRect(),o=1,s=1;t&&rs(e)&&(o=e.offsetWidth>0&&xc(r.width)/e.offsetWidth||1,s=e.offsetHeight>0&&xc(r.height)/e.offsetHeight||1);var a=al(e)?Bo(e):window,c=a.visualViewport,d=!n6()&&n,p=(r.left+(d&&c?c.offsetLeft:0))/o,h=(r.top+(d&&c?c.offsetTop:0))/s,m=r.width/o,v=r.height/s;return{width:m,height:v,top:h,right:p+m,bottom:h+v,left:p,x:p,y:h}}function Nb(e){var t=wc(e),n=e.offsetWidth,r=e.offsetHeight;return Math.abs(t.width-n)<=1&&(n=t.width),Math.abs(t.height-r)<=1&&(r=t.height),{x:e.offsetLeft,y:e.offsetTop,width:n,height:r}}function r6(e,t){var n=t.getRootNode&&t.getRootNode();if(e.contains(t))return!0;if(n&&Tb(n)){var r=t;do{if(r&&e.isSameNode(r))return!0;r=r.parentNode||r.host}while(r)}return!1}function Oa(e){return Bo(e).getComputedStyle(e)}function j$(e){return["table","td","th"].indexOf(ra(e))>=0}function ji(e){return((al(e)?e.ownerDocument:e.document)||window.document).documentElement}function gm(e){return ra(e)==="html"?e:e.assignedSlot||e.parentNode||(Tb(e)?e.host:null)||ji(e)}function Xw(e){return!rs(e)||Oa(e).position==="fixed"?null:e.offsetParent}function I$(e){var t=/firefox/i.test(Qv()),n=/Trident/i.test(Qv());if(n&&rs(e)){var r=Oa(e);if(r.position==="fixed")return null}var o=gm(e);for(Tb(o)&&(o=o.host);rs(o)&&["html","body"].indexOf(ra(o))<0;){var s=Oa(o);if(s.transform!=="none"||s.perspective!=="none"||s.contain==="paint"||["transform","perspective"].indexOf(s.willChange)!==-1||t&&s.willChange==="filter"||t&&s.filter&&s.filter!=="none")return o;o=o.parentNode}return null}function Id(e){for(var t=Bo(e),n=Xw(e);n&&j$(n)&&Oa(n).position==="static";)n=Xw(n);return n&&(ra(n)==="html"||ra(n)==="body"&&Oa(n).position==="static")?t:n||I$(e)||t}function $b(e){return["top","bottom"].indexOf(e)>=0?"x":"y"}function Hu(e,t,n){return Ji(e,Qp(t,n))}function E$(e,t,n){var r=Hu(e,t,n);return r>n?n:r}function o6(){return{top:0,right:0,bottom:0,left:0}}function s6(e){return Object.assign({},o6(),e)}function a6(e,t){return t.reduce(function(n,r){return n[r]=e,n},{})}var O$=function(t,n){return t=typeof t=="function"?t(Object.assign({},n.rects,{placement:n.placement})):t,s6(typeof t!="number"?t:a6(t,jd))};function R$(e){var t,n=e.state,r=e.name,o=e.options,s=n.elements.arrow,a=n.modifiersData.popperOffsets,c=ta(n.placement),d=$b(c),p=[Po,is].indexOf(c)>=0,h=p?"height":"width";if(!(!s||!a)){var m=O$(o.padding,n),v=Nb(s),b=d==="y"?_o:Po,w=d==="y"?as:is,y=n.rects.reference[h]+n.rects.reference[d]-a[d]-n.rects.popper[h],S=a[d]-n.rects.reference[d],_=Id(s),k=_?d==="y"?_.clientHeight||0:_.clientWidth||0:0,j=y/2-S/2,I=m[b],E=k-v[h]-m[w],O=k/2-v[h]/2+j,R=Hu(I,O,E),M=d;n.modifiersData[r]=(t={},t[M]=R,t.centerOffset=R-O,t)}}function M$(e){var t=e.state,n=e.options,r=n.element,o=r===void 0?"[data-popper-arrow]":r;o!=null&&(typeof o=="string"&&(o=t.elements.popper.querySelector(o),!o)||r6(t.elements.popper,o)&&(t.elements.arrow=o))}const 
D$={name:"arrow",enabled:!0,phase:"main",fn:R$,effect:M$,requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function Sc(e){return e.split("-")[1]}var A$={top:"auto",right:"auto",bottom:"auto",left:"auto"};function T$(e,t){var n=e.x,r=e.y,o=t.devicePixelRatio||1;return{x:xc(n*o)/o||0,y:xc(r*o)/o||0}}function Yw(e){var t,n=e.popper,r=e.popperRect,o=e.placement,s=e.variation,a=e.offsets,c=e.position,d=e.gpuAcceleration,p=e.adaptive,h=e.roundOffsets,m=e.isFixed,v=a.x,b=v===void 0?0:v,w=a.y,y=w===void 0?0:w,S=typeof h=="function"?h({x:b,y}):{x:b,y};b=S.x,y=S.y;var _=a.hasOwnProperty("x"),k=a.hasOwnProperty("y"),j=Po,I=_o,E=window;if(p){var O=Id(n),R="clientHeight",M="clientWidth";if(O===Bo(n)&&(O=ji(n),Oa(O).position!=="static"&&c==="absolute"&&(R="scrollHeight",M="scrollWidth")),O=O,o===_o||(o===Po||o===is)&&s===nd){I=as;var A=m&&O===E&&E.visualViewport?E.visualViewport.height:O[R];y-=A-r.height,y*=d?1:-1}if(o===Po||(o===_o||o===as)&&s===nd){j=is;var T=m&&O===E&&E.visualViewport?E.visualViewport.width:O[M];b-=T-r.width,b*=d?1:-1}}var $=Object.assign({position:c},p&&A$),Q=h===!0?T$({x:b,y},Bo(n)):{x:b,y};if(b=Q.x,y=Q.y,d){var B;return Object.assign({},$,(B={},B[I]=k?"0":"",B[j]=_?"0":"",B.transform=(E.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",B))}return Object.assign({},$,(t={},t[I]=k?y+"px":"",t[j]=_?b+"px":"",t.transform="",t))}function N$(e){var t=e.state,n=e.options,r=n.gpuAcceleration,o=r===void 0?!0:r,s=n.adaptive,a=s===void 0?!0:s,c=n.roundOffsets,d=c===void 0?!0:c,p={placement:ta(t.placement),variation:Sc(t.placement),popper:t.elements.popper,popperRect:t.rects.popper,gpuAcceleration:o,isFixed:t.options.strategy==="fixed"};t.modifiersData.popperOffsets!=null&&(t.styles.popper=Object.assign({},t.styles.popper,Yw(Object.assign({},p,{offsets:t.modifiersData.popperOffsets,position:t.options.strategy,adaptive:a,roundOffsets:d})))),t.modifiersData.arrow!=null&&(t.styles.arrow=Object.assign({},t.styles.arrow,Yw(Object.assign({},p,{offsets:t.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:d})))),t.attributes.popper=Object.assign({},t.attributes.popper,{"data-popper-placement":t.placement})}const $$={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:N$,data:{}};var zf={passive:!0};function z$(e){var t=e.state,n=e.instance,r=e.options,o=r.scroll,s=o===void 0?!0:o,a=r.resize,c=a===void 0?!0:a,d=Bo(t.elements.popper),p=[].concat(t.scrollParents.reference,t.scrollParents.popper);return s&&p.forEach(function(h){h.addEventListener("scroll",n.update,zf)}),c&&d.addEventListener("resize",n.update,zf),function(){s&&p.forEach(function(h){h.removeEventListener("scroll",n.update,zf)}),c&&d.removeEventListener("resize",n.update,zf)}}const L$={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:z$,data:{}};var B$={left:"right",right:"left",bottom:"top",top:"bottom"};function _p(e){return e.replace(/left|right|bottom|top/g,function(t){return B$[t]})}var F$={start:"end",end:"start"};function Qw(e){return e.replace(/start|end/g,function(t){return F$[t]})}function zb(e){var t=Bo(e),n=t.pageXOffset,r=t.pageYOffset;return{scrollLeft:n,scrollTop:r}}function Lb(e){return wc(ji(e)).left+zb(e).scrollLeft}function H$(e,t){var n=Bo(e),r=ji(e),o=n.visualViewport,s=r.clientWidth,a=r.clientHeight,c=0,d=0;if(o){s=o.width,a=o.height;var p=n6();(p||!p&&t==="fixed")&&(c=o.offsetLeft,d=o.offsetTop)}return{width:s,height:a,x:c+Lb(e),y:d}}function W$(e){var t,n=ji(e),r=zb(e),o=(t=e.ownerDocument)==null?void 
0:t.body,s=Ji(n.scrollWidth,n.clientWidth,o?o.scrollWidth:0,o?o.clientWidth:0),a=Ji(n.scrollHeight,n.clientHeight,o?o.scrollHeight:0,o?o.clientHeight:0),c=-r.scrollLeft+Lb(e),d=-r.scrollTop;return Oa(o||n).direction==="rtl"&&(c+=Ji(n.clientWidth,o?o.clientWidth:0)-s),{width:s,height:a,x:c,y:d}}function Bb(e){var t=Oa(e),n=t.overflow,r=t.overflowX,o=t.overflowY;return/auto|scroll|overlay|hidden/.test(n+o+r)}function i6(e){return["html","body","#document"].indexOf(ra(e))>=0?e.ownerDocument.body:rs(e)&&Bb(e)?e:i6(gm(e))}function Wu(e,t){var n;t===void 0&&(t=[]);var r=i6(e),o=r===((n=e.ownerDocument)==null?void 0:n.body),s=Bo(r),a=o?[s].concat(s.visualViewport||[],Bb(r)?r:[]):r,c=t.concat(a);return o?c:c.concat(Wu(gm(a)))}function Jv(e){return Object.assign({},e,{left:e.x,top:e.y,right:e.x+e.width,bottom:e.y+e.height})}function V$(e,t){var n=wc(e,!1,t==="fixed");return n.top=n.top+e.clientTop,n.left=n.left+e.clientLeft,n.bottom=n.top+e.clientHeight,n.right=n.left+e.clientWidth,n.width=e.clientWidth,n.height=e.clientHeight,n.x=n.left,n.y=n.top,n}function Jw(e,t,n){return t===e6?Jv(H$(e,n)):al(t)?V$(t,n):Jv(W$(ji(e)))}function U$(e){var t=Wu(gm(e)),n=["absolute","fixed"].indexOf(Oa(e).position)>=0,r=n&&rs(e)?Id(e):e;return al(r)?t.filter(function(o){return al(o)&&r6(o,r)&&ra(o)!=="body"}):[]}function G$(e,t,n,r){var o=t==="clippingParents"?U$(e):[].concat(t),s=[].concat(o,[n]),a=s[0],c=s.reduce(function(d,p){var h=Jw(e,p,r);return d.top=Ji(h.top,d.top),d.right=Qp(h.right,d.right),d.bottom=Qp(h.bottom,d.bottom),d.left=Ji(h.left,d.left),d},Jw(e,a,r));return c.width=c.right-c.left,c.height=c.bottom-c.top,c.x=c.left,c.y=c.top,c}function l6(e){var t=e.reference,n=e.element,r=e.placement,o=r?ta(r):null,s=r?Sc(r):null,a=t.x+t.width/2-n.width/2,c=t.y+t.height/2-n.height/2,d;switch(o){case _o:d={x:a,y:t.y-n.height};break;case as:d={x:a,y:t.y+t.height};break;case is:d={x:t.x+t.width,y:c};break;case Po:d={x:t.x-n.width,y:c};break;default:d={x:t.x,y:t.y}}var p=o?$b(o):null;if(p!=null){var h=p==="y"?"height":"width";switch(s){case yc:d[p]=d[p]-(t[h]/2-n[h]/2);break;case nd:d[p]=d[p]+(t[h]/2-n[h]/2);break}}return d}function rd(e,t){t===void 0&&(t={});var n=t,r=n.placement,o=r===void 0?e.placement:r,s=n.strategy,a=s===void 0?e.strategy:s,c=n.boundary,d=c===void 0?f$:c,p=n.rootBoundary,h=p===void 0?e6:p,m=n.elementContext,v=m===void 0?hu:m,b=n.altBoundary,w=b===void 0?!1:b,y=n.padding,S=y===void 0?0:y,_=s6(typeof S!="number"?S:a6(S,jd)),k=v===hu?p$:hu,j=e.rects.popper,I=e.elements[w?k:v],E=G$(al(I)?I:I.contextElement||ji(e.elements.popper),d,h,a),O=wc(e.elements.reference),R=l6({reference:O,element:j,strategy:"absolute",placement:o}),M=Jv(Object.assign({},j,R)),A=v===hu?M:O,T={top:E.top-A.top+_.top,bottom:A.bottom-E.bottom+_.bottom,left:E.left-A.left+_.left,right:A.right-E.right+_.right},$=e.modifiersData.offset;if(v===hu&&$){var Q=$[o];Object.keys(T).forEach(function(B){var V=[is,as].indexOf(B)>=0?1:-1,q=[_o,as].indexOf(B)>=0?"y":"x";T[B]+=Q[q]*V})}return T}function q$(e,t){t===void 0&&(t={});var n=t,r=n.placement,o=n.boundary,s=n.rootBoundary,a=n.padding,c=n.flipVariations,d=n.allowedAutoPlacements,p=d===void 0?t6:d,h=Sc(r),m=h?c?Kw:Kw.filter(function(w){return Sc(w)===h}):jd,v=m.filter(function(w){return p.indexOf(w)>=0});v.length===0&&(v=m);var b=v.reduce(function(w,y){return w[y]=rd(e,{placement:y,boundary:o,rootBoundary:s,padding:a})[ta(y)],w},{});return Object.keys(b).sort(function(w,y){return b[w]-b[y]})}function K$(e){if(ta(e)===Ab)return[];var t=_p(e);return[Qw(e),t,Qw(t)]}function X$(e){var 
t=e.state,n=e.options,r=e.name;if(!t.modifiersData[r]._skip){for(var o=n.mainAxis,s=o===void 0?!0:o,a=n.altAxis,c=a===void 0?!0:a,d=n.fallbackPlacements,p=n.padding,h=n.boundary,m=n.rootBoundary,v=n.altBoundary,b=n.flipVariations,w=b===void 0?!0:b,y=n.allowedAutoPlacements,S=t.options.placement,_=ta(S),k=_===S,j=d||(k||!w?[_p(S)]:K$(S)),I=[S].concat(j).reduce(function(X,K){return X.concat(ta(K)===Ab?q$(t,{placement:K,boundary:h,rootBoundary:m,padding:p,flipVariations:w,allowedAutoPlacements:y}):K)},[]),E=t.rects.reference,O=t.rects.popper,R=new Map,M=!0,A=I[0],T=0;T=0,q=V?"width":"height",G=rd(t,{placement:$,boundary:h,rootBoundary:m,altBoundary:v,padding:p}),D=V?B?is:Po:B?as:_o;E[q]>O[q]&&(D=_p(D));var L=_p(D),W=[];if(s&&W.push(G[Q]<=0),c&&W.push(G[D]<=0,G[L]<=0),W.every(function(X){return X})){A=$,M=!1;break}R.set($,W)}if(M)for(var Y=w?3:1,ae=function(K){var U=I.find(function(se){var re=R.get(se);if(re)return re.slice(0,K).every(function(oe){return oe})});if(U)return A=U,"break"},be=Y;be>0;be--){var ie=ae(be);if(ie==="break")break}t.placement!==A&&(t.modifiersData[r]._skip=!0,t.placement=A,t.reset=!0)}}const Y$={name:"flip",enabled:!0,phase:"main",fn:X$,requiresIfExists:["offset"],data:{_skip:!1}};function Zw(e,t,n){return n===void 0&&(n={x:0,y:0}),{top:e.top-t.height-n.y,right:e.right-t.width+n.x,bottom:e.bottom-t.height+n.y,left:e.left-t.width-n.x}}function eS(e){return[_o,is,as,Po].some(function(t){return e[t]>=0})}function Q$(e){var t=e.state,n=e.name,r=t.rects.reference,o=t.rects.popper,s=t.modifiersData.preventOverflow,a=rd(t,{elementContext:"reference"}),c=rd(t,{altBoundary:!0}),d=Zw(a,r),p=Zw(c,o,s),h=eS(d),m=eS(p);t.modifiersData[n]={referenceClippingOffsets:d,popperEscapeOffsets:p,isReferenceHidden:h,hasPopperEscaped:m},t.attributes.popper=Object.assign({},t.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":m})}const J$={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:Q$};function Z$(e,t,n){var r=ta(e),o=[Po,_o].indexOf(r)>=0?-1:1,s=typeof n=="function"?n(Object.assign({},t,{placement:e})):n,a=s[0],c=s[1];return a=a||0,c=(c||0)*o,[Po,is].indexOf(r)>=0?{x:c,y:a}:{x:a,y:c}}function ez(e){var t=e.state,n=e.options,r=e.name,o=n.offset,s=o===void 0?[0,0]:o,a=t6.reduce(function(h,m){return h[m]=Z$(m,t.rects,s),h},{}),c=a[t.placement],d=c.x,p=c.y;t.modifiersData.popperOffsets!=null&&(t.modifiersData.popperOffsets.x+=d,t.modifiersData.popperOffsets.y+=p),t.modifiersData[r]=a}const tz={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:ez};function nz(e){var t=e.state,n=e.name;t.modifiersData[n]=l6({reference:t.rects.reference,element:t.rects.popper,strategy:"absolute",placement:t.placement})}const rz={name:"popperOffsets",enabled:!0,phase:"read",fn:nz,data:{}};function oz(e){return e==="x"?"y":"x"}function sz(e){var t=e.state,n=e.options,r=e.name,o=n.mainAxis,s=o===void 0?!0:o,a=n.altAxis,c=a===void 0?!1:a,d=n.boundary,p=n.rootBoundary,h=n.altBoundary,m=n.padding,v=n.tether,b=v===void 0?!0:v,w=n.tetherOffset,y=w===void 0?0:w,S=rd(t,{boundary:d,rootBoundary:p,padding:m,altBoundary:h}),_=ta(t.placement),k=Sc(t.placement),j=!k,I=$b(_),E=oz(I),O=t.modifiersData.popperOffsets,R=t.rects.reference,M=t.rects.popper,A=typeof y=="function"?y(Object.assign({},t.rects,{placement:t.placement})):y,T=typeof A=="number"?{mainAxis:A,altAxis:A}:Object.assign({mainAxis:0,altAxis:0},A),$=t.modifiersData.offset?t.modifiersData.offset[t.placement]:null,Q={x:0,y:0};if(O){if(s){var 
B,V=I==="y"?_o:Po,q=I==="y"?as:is,G=I==="y"?"height":"width",D=O[I],L=D+S[V],W=D-S[q],Y=b?-M[G]/2:0,ae=k===yc?R[G]:M[G],be=k===yc?-M[G]:-R[G],ie=t.elements.arrow,X=b&&ie?Nb(ie):{width:0,height:0},K=t.modifiersData["arrow#persistent"]?t.modifiersData["arrow#persistent"].padding:o6(),U=K[V],se=K[q],re=Hu(0,R[G],X[G]),oe=j?R[G]/2-Y-re-U-T.mainAxis:ae-re-U-T.mainAxis,pe=j?-R[G]/2+Y+re+se+T.mainAxis:be+re+se+T.mainAxis,le=t.elements.arrow&&Id(t.elements.arrow),ge=le?I==="y"?le.clientTop||0:le.clientLeft||0:0,ke=(B=$==null?void 0:$[I])!=null?B:0,xe=D+oe-ke-ge,de=D+pe-ke,Te=Hu(b?Qp(L,xe):L,D,b?Ji(W,de):W);O[I]=Te,Q[I]=Te-D}if(c){var Oe,$e=I==="x"?_o:Po,kt=I==="x"?as:is,ct=O[E],on=E==="y"?"height":"width",vt=ct+S[$e],bt=ct-S[kt],Se=[_o,Po].indexOf(_)!==-1,Me=(Oe=$==null?void 0:$[E])!=null?Oe:0,Pt=Se?vt:ct-R[on]-M[on]-Me+T.altAxis,Tt=Se?ct+R[on]+M[on]-Me-T.altAxis:bt,we=b&&Se?E$(Pt,ct,Tt):Hu(b?Pt:vt,ct,b?Tt:bt);O[E]=we,Q[E]=we-ct}t.modifiersData[r]=Q}}const az={name:"preventOverflow",enabled:!0,phase:"main",fn:sz,requiresIfExists:["offset"]};function iz(e){return{scrollLeft:e.scrollLeft,scrollTop:e.scrollTop}}function lz(e){return e===Bo(e)||!rs(e)?zb(e):iz(e)}function cz(e){var t=e.getBoundingClientRect(),n=xc(t.width)/e.offsetWidth||1,r=xc(t.height)/e.offsetHeight||1;return n!==1||r!==1}function uz(e,t,n){n===void 0&&(n=!1);var r=rs(t),o=rs(t)&&cz(t),s=ji(t),a=wc(e,o,n),c={scrollLeft:0,scrollTop:0},d={x:0,y:0};return(r||!r&&!n)&&((ra(t)!=="body"||Bb(s))&&(c=lz(t)),rs(t)?(d=wc(t,!0),d.x+=t.clientLeft,d.y+=t.clientTop):s&&(d.x=Lb(s))),{x:a.left+c.scrollLeft-d.x,y:a.top+c.scrollTop-d.y,width:a.width,height:a.height}}function dz(e){var t=new Map,n=new Set,r=[];e.forEach(function(s){t.set(s.name,s)});function o(s){n.add(s.name);var a=[].concat(s.requires||[],s.requiresIfExists||[]);a.forEach(function(c){if(!n.has(c)){var d=t.get(c);d&&o(d)}}),r.push(s)}return e.forEach(function(s){n.has(s.name)||o(s)}),r}function fz(e){var t=dz(e);return C$.reduce(function(n,r){return n.concat(t.filter(function(o){return o.phase===r}))},[])}function pz(e){var t;return function(){return t||(t=new Promise(function(n){Promise.resolve().then(function(){t=void 0,n(e())})})),t}}function hz(e){var t=e.reduce(function(n,r){var o=n[r.name];return n[r.name]=o?Object.assign({},o,r,{options:Object.assign({},o.options,r.options),data:Object.assign({},o.data,r.data)}):r,n},{});return Object.keys(t).map(function(n){return t[n]})}var tS={placement:"bottom",modifiers:[],strategy:"absolute"};function nS(){for(var e=arguments.length,t=new Array(e),n=0;n{}),j=f.useCallback(()=>{var T;!t||!w.current||!y.current||((T=k.current)==null||T.call(k),S.current=vz(w.current,y.current,{placement:_,modifiers:[l$,s$,o$,{...r$,enabled:!!v},{name:"eventListeners",...n$(a)},{name:"arrow",options:{padding:s}},{name:"offset",options:{offset:c??[0,d]}},{name:"flip",enabled:!!p,options:{padding:8}},{name:"preventOverflow",enabled:!!m,options:{boundary:h}},...n??[]],strategy:o}),S.current.forceUpdate(),k.current=S.current.destroy)},[_,t,n,v,a,s,c,d,p,m,h,o]);f.useEffect(()=>()=>{var T;!w.current&&!y.current&&((T=S.current)==null||T.destroy(),S.current=null)},[]);const I=f.useCallback(T=>{w.current=T,j()},[j]),E=f.useCallback((T={},$=null)=>({...T,ref:cn(I,$)}),[I]),O=f.useCallback(T=>{y.current=T,j()},[j]),R=f.useCallback((T={},$=null)=>({...T,ref:cn(O,$),style:{...T.style,position:o,minWidth:v?void 0:"max-content",inset:"0 auto auto 
0"}}),[o,O,v]),M=f.useCallback((T={},$=null)=>{const{size:Q,shadowColor:B,bg:V,style:q,...G}=T;return{...G,ref:$,"data-popper-arrow":"",style:bz(T)}},[]),A=f.useCallback((T={},$=null)=>({...T,ref:$,"data-popper-arrow-inner":""}),[]);return{update(){var T;(T=S.current)==null||T.update()},forceUpdate(){var T;(T=S.current)==null||T.forceUpdate()},transformOrigin:jr.transformOrigin.varRef,referenceRef:I,popperRef:O,getPopperProps:R,getArrowProps:M,getArrowInnerProps:A,getReferenceProps:E}}function bz(e){const{size:t,shadowColor:n,bg:r,style:o}=e,s={...o,position:"absolute"};return t&&(s["--popper-arrow-size"]=t),n&&(s["--popper-arrow-shadow-color"]=n),r&&(s["--popper-arrow-bg"]=r),s}function Hb(e={}){const{onClose:t,onOpen:n,isOpen:r,id:o}=e,s=or(n),a=or(t),[c,d]=f.useState(e.defaultIsOpen||!1),p=r!==void 0?r:c,h=r!==void 0,m=f.useId(),v=o??`disclosure-${m}`,b=f.useCallback(()=>{h||d(!1),a==null||a()},[h,a]),w=f.useCallback(()=>{h||d(!0),s==null||s()},[h,s]),y=f.useCallback(()=>{p?b():w()},[p,w,b]);function S(k={}){return{...k,"aria-expanded":p,"aria-controls":v,onClick(j){var I;(I=k.onClick)==null||I.call(k,j),y()}}}function _(k={}){return{...k,hidden:!p,id:v}}return{isOpen:p,onOpen:w,onClose:b,onToggle:y,isControlled:h,getButtonProps:S,getDisclosureProps:_}}function yz(e){const{ref:t,handler:n,enabled:r=!0}=e,o=or(n),a=f.useRef({isPointerDown:!1,ignoreEmulatedMouseEvents:!1}).current;f.useEffect(()=>{if(!r)return;const c=m=>{R0(m,t)&&(a.isPointerDown=!0)},d=m=>{if(a.ignoreEmulatedMouseEvents){a.ignoreEmulatedMouseEvents=!1;return}a.isPointerDown&&n&&R0(m,t)&&(a.isPointerDown=!1,o(m))},p=m=>{a.ignoreEmulatedMouseEvents=!0,n&&a.isPointerDown&&R0(m,t)&&(a.isPointerDown=!1,o(m))},h=c6(t.current);return h.addEventListener("mousedown",c,!0),h.addEventListener("mouseup",d,!0),h.addEventListener("touchstart",c,!0),h.addEventListener("touchend",p,!0),()=>{h.removeEventListener("mousedown",c,!0),h.removeEventListener("mouseup",d,!0),h.removeEventListener("touchstart",c,!0),h.removeEventListener("touchend",p,!0)}},[n,t,o,a,r])}function R0(e,t){var n;const r=e.target;return r&&!c6(r).contains(r)?!1:!((n=t.current)!=null&&n.contains(r))}function c6(e){var t;return(t=e==null?void 0:e.ownerDocument)!=null?t:document}function u6(e){const{isOpen:t,ref:n}=e,[r,o]=f.useState(t),[s,a]=f.useState(!1);return f.useEffect(()=>{s||(o(t),a(!0))},[t,s,r]),Qi(()=>n.current,"animationend",()=>{o(t)}),{present:!(t?!1:!r),onComplete(){var d;const p=kN(n.current),h=new p.CustomEvent("animationend",{bubbles:!0});(d=n.current)==null||d.dispatchEvent(h)}}}function Wb(e){const{wasSelected:t,enabled:n,isSelected:r,mode:o="unmount"}=e;return!!(!n||r||o==="keepMounted"&&t)}var[xz,wz,Sz,Cz]=db(),[kz,Ed]=Dn({strict:!1,name:"MenuContext"});function _z(e,...t){const n=f.useId(),r=e||n;return f.useMemo(()=>t.map(o=>`${o}-${r}`),[r,t])}function d6(e){var t;return(t=e==null?void 0:e.ownerDocument)!=null?t:document}function rS(e){return d6(e).activeElement===e}function Pz(e={}){const{id:t,closeOnSelect:n=!0,closeOnBlur:r=!0,initialFocusRef:o,autoSelect:s=!0,isLazy:a,isOpen:c,defaultIsOpen:d,onClose:p,onOpen:h,placement:m="bottom-start",lazyBehavior:v="unmount",direction:b,computePositionOnMount:w=!1,...y}=e,S=f.useRef(null),_=f.useRef(null),k=Sz(),j=f.useCallback(()=>{requestAnimationFrame(()=>{var ie;(ie=S.current)==null||ie.focus({preventScroll:!1})})},[]),I=f.useCallback(()=>{const ie=setTimeout(()=>{var X;if(o)(X=o.current)==null||X.focus();else{const 
K=k.firstEnabled();K&&B(K.index)}});L.current.add(ie)},[k,o]),E=f.useCallback(()=>{const ie=setTimeout(()=>{const X=k.lastEnabled();X&&B(X.index)});L.current.add(ie)},[k]),O=f.useCallback(()=>{h==null||h(),s?I():j()},[s,I,j,h]),{isOpen:R,onOpen:M,onClose:A,onToggle:T}=Hb({isOpen:c,defaultIsOpen:d,onClose:p,onOpen:O});yz({enabled:R&&r,ref:S,handler:ie=>{var X;(X=_.current)!=null&&X.contains(ie.target)||A()}});const $=Fb({...y,enabled:R||w,placement:m,direction:b}),[Q,B]=f.useState(-1);Ba(()=>{R||B(-1)},[R]),Z3(S,{focusRef:_,visible:R,shouldFocus:!0});const V=u6({isOpen:R,ref:S}),[q,G]=_z(t,"menu-button","menu-list"),D=f.useCallback(()=>{M(),j()},[M,j]),L=f.useRef(new Set([]));Az(()=>{L.current.forEach(ie=>clearTimeout(ie)),L.current.clear()});const W=f.useCallback(()=>{M(),I()},[I,M]),Y=f.useCallback(()=>{M(),E()},[M,E]),ae=f.useCallback(()=>{var ie,X;const K=d6(S.current),U=(ie=S.current)==null?void 0:ie.contains(K.activeElement);if(!(R&&!U))return;const re=(X=k.item(Q))==null?void 0:X.node;re==null||re.focus()},[R,Q,k]),be=f.useRef(null);return{openAndFocusMenu:D,openAndFocusFirstItem:W,openAndFocusLastItem:Y,onTransitionEnd:ae,unstable__animationState:V,descendants:k,popper:$,buttonId:q,menuId:G,forceUpdate:$.forceUpdate,orientation:"vertical",isOpen:R,onToggle:T,onOpen:M,onClose:A,menuRef:S,buttonRef:_,focusedIndex:Q,closeOnSelect:n,closeOnBlur:r,autoSelect:s,setFocusedIndex:B,isLazy:a,lazyBehavior:v,initialFocusRef:o,rafId:be}}function jz(e={},t=null){const n=Ed(),{onToggle:r,popper:o,openAndFocusFirstItem:s,openAndFocusLastItem:a}=n,c=f.useCallback(d=>{const p=d.key,m={Enter:s,ArrowDown:s,ArrowUp:a}[p];m&&(d.preventDefault(),d.stopPropagation(),m(d))},[s,a]);return{...e,ref:cn(n.buttonRef,t,o.referenceRef),id:n.buttonId,"data-active":Ft(n.isOpen),"aria-expanded":n.isOpen,"aria-haspopup":"menu","aria-controls":n.menuId,onClick:nt(e.onClick,r),onKeyDown:nt(e.onKeyDown,c)}}function Zv(e){var t;return Mz(e)&&!!((t=e==null?void 0:e.getAttribute("role"))!=null&&t.startsWith("menuitem"))}function Iz(e={},t=null){const n=Ed();if(!n)throw new Error("useMenuContext: context is undefined. Seems you forgot to wrap component within