diff --git a/docs/help/FAQ.md b/docs/help/FAQ.md
index 458770e41a..4c297f442a 100644
--- a/docs/help/FAQ.md
+++ b/docs/help/FAQ.md
@@ -40,6 +40,25 @@ Follow the same steps to scan and import the missing models.
- Check the `ram` setting in `invokeai.yaml`. This setting tells Invoke how much of your system RAM can be used to cache models. Having this too high or too low can slow things down. That said, it's generally safest to not set this at all and instead let Invoke manage it.
- Check the `vram` setting in `invokeai.yaml`. This setting tells Invoke how much of your GPU VRAM can be used to cache models. Counter-intuitively, if this setting is too high, Invoke will need to do a lot of shuffling of models as it juggles the VRAM cache and the currently-loaded model. The default value of 0.25 generally works well for GPUs with less than 16GB of VRAM. Even on a 24GB card, the default works well.
- Check that your generations are happening on your GPU (if you have one). InvokeAI will log what is being used for generation upon startup. If your GPU isn't used, re-install to ensure the correct versions of torch get installed.
+- If you are on Windows, you may have exceeded your GPU's VRAM capacity and are using slower [shared GPU memory](#shared-gpu-memory-windows). There's a guide to opt out of this behavior in the linked FAQ entry.
+
+## Shared GPU Memory (Windows)
+
+!!! tip "Nvidia GPUs with driver 536.40"
+
+ This only applies to current Nvidia cards with driver 536.40 or later, released in June 2023.
+
+When the GPU doesn't have enough VRAM for a task, Windows is able to allocate some of its CPU RAM to the GPU. This is much slower than VRAM, but it does allow the system to generate when it otherwise might not have enough VRAM.
+
+When shared GPU memory is used, generation slows down dramatically - but at least it doesn't crash.
+
+If you'd like to opt out of this behavior and instead get an error when you exceed your GPU's VRAM, follow [this guide from Nvidia](https://nvidia.custhelp.com/app/answers/detail/a_id/5490).
+
+Here's how to get the python path required in the linked guide:
+
+- Run `invoke.bat`.
+- Select option 2 for developer console.
+- At least one python path will be printed. Copy the path that includes your invoke installation directory (typically the first).
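+
+If you prefer, you can also print the interpreter path directly from the developer console. This is a standard Python one-liner, not an Invoke-specific command:
+
+```sh
+python -c "import sys; print(sys.executable)"
+```
+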
## Installer cannot find python (Windows)
diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py
index 0ac40e97fb..485414d263 100644
--- a/invokeai/app/invocations/ip_adapter.py
+++ b/invokeai/app/invocations/ip_adapter.py
@@ -67,7 +67,7 @@ class IPAdapterInvocation(BaseInvocation):
)
clip_vision_model: Literal["ViT-H", "ViT-G"] = InputField(
description="CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models.",
- default="auto",
+ default="ViT-H",
ui_order=2,
)
weight: Union[float, List[float]] = InputField(
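
The hunk above appears to fix an invalid default: `"auto"` is not a member of the field's `Literal["ViT-H", "ViT-G"]` type. A minimal pydantic sketch of the mismatch (assuming `InputField` validates like a plain pydantic `Field`; the model name here is made up):

```python
from typing import Literal

from pydantic import BaseModel, ValidationError

class Sketch(BaseModel):
    clip_vision_model: Literal["ViT-H", "ViT-G"] = "ViT-H"

try:
    Sketch(clip_vision_model="auto")  # "auto" is not in the Literal
except ValidationError as err:
    print(err)  # pydantic rejects it: input should be 'ViT-H' or 'ViT-G'
```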
diff --git a/invokeai/app/services/shared/invocation_context.py b/invokeai/app/services/shared/invocation_context.py
index e533baf3bc..9994d663e5 100644
--- a/invokeai/app/services/shared/invocation_context.py
+++ b/invokeai/app/services/shared/invocation_context.py
@@ -245,6 +245,18 @@ class ImagesInterface(InvocationContextInterface):
"""
return self._services.images.get_dto(image_name)
+ def get_path(self, image_name: str, thumbnail: bool = False) -> Path:
+ """Gets the internal path to an image or thumbnail.
+
+ Args:
+ image_name: The name of the image to get the path of.
+ thumbnail: Get the path of the thumbnail instead of the full image.
+
+ Returns:
+ The local path of the image or thumbnail.
+ """
+ return self._services.images.get_path(image_name, thumbnail)
+
class TensorsInterface(InvocationContextInterface):
def save(self, tensor: Tensor) -> str:
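
A sketch of how node code might use the new method (the helper below is hypothetical; `context` is the `InvocationContext` passed to an invocation's `invoke()`):

```python
from pathlib import Path

def sidecar_json_path(context, image_name: str) -> Path:
    """Hypothetical helper: derive a sidecar file next to a stored image."""
    image_path: Path = context.images.get_path(image_name)
    thumb_path: Path = context.images.get_path(image_name, thumbnail=True)
    print(f"image: {image_path}, thumbnail: {thumb_path}")
    return image_path.with_suffix(".json")
```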
diff --git a/invokeai/backend/model_manager/load/load_default.py b/invokeai/backend/model_manager/load/load_default.py
index 60cc1f5e6c..6774fc2989 100644
--- a/invokeai/backend/model_manager/load/load_default.py
+++ b/invokeai/backend/model_manager/load/load_default.py
@@ -37,7 +37,7 @@ class ModelLoader(ModelLoaderBase):
self._logger = logger
self._ram_cache = ram_cache
self._convert_cache = convert_cache
- self._torch_dtype = torch_dtype(choose_torch_device(), app_config)
+ self._torch_dtype = torch_dtype(choose_torch_device())
def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
"""
diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_base.py b/invokeai/backend/model_manager/load/model_cache/model_cache_base.py
index eb82f87cb2..a8c2dd3e92 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_cache_base.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_cache_base.py
@@ -117,7 +117,7 @@ class ModelCacheBase(ABC, Generic[T]):
@property
@abstractmethod
- def stats(self) -> CacheStats:
+ def stats(self) -> Optional[CacheStats]:
"""Return collected CacheStats object."""
pass
diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
index 4d5d09864e..f2e0c01a94 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
@@ -269,9 +269,6 @@ class ModelCache(ModelCacheBase[AnyModel]):
if torch.device(source_device).type == torch.device(target_device).type:
return
- # may raise an exception here if insufficient GPU VRAM
- self._check_free_vram(target_device, cache_entry.size)
-
start_model_to_time = time.time()
snapshot_before = self._capture_memory_snapshot()
cache_entry.model.to(target_device)
@@ -329,11 +326,11 @@ class ModelCache(ModelCacheBase[AnyModel]):
f" {in_ram_models}/{in_vram_models}({locked_in_vram_models})"
)
- def make_room(self, model_size: int) -> None:
+ def make_room(self, size: int) -> None:
"""Make enough room in the cache to accommodate a new model of indicated size."""
# calculate how much memory this model will require
# multiplier = 2 if self.precision==torch.float32 else 1
- bytes_needed = model_size
+ bytes_needed = size
maximum_size = self.max_cache_size * GIG # stored in GB, convert to bytes
current_size = self.cache_size()
@@ -388,7 +385,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
# 1 from onnx runtime object
if not cache_entry.locked and refs <= (3 if "onnx" in model_key else 2):
self.logger.debug(
- f"Removing {model_key} from RAM cache to free at least {(model_size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)"
+ f"Removing {model_key} from RAM cache to free at least {(size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)"
)
current_size -= cache_entry.size
models_cleared += 1
@@ -420,24 +417,3 @@ class ModelCache(ModelCacheBase[AnyModel]):
mps.empty_cache()
self.logger.debug(f"After making room: cached_models={len(self._cached_models)}")
-
- def _free_vram(self, device: torch.device) -> int:
- vram_device = ( # mem_get_info() needs an indexed device
- device if device.index is not None else torch.device(str(device), index=0)
- )
- free_mem, _ = torch.cuda.mem_get_info(vram_device)
- for _, cache_entry in self._cached_models.items():
- if cache_entry.loaded and not cache_entry.locked:
- free_mem += cache_entry.size
- return free_mem
-
- def _check_free_vram(self, target_device: torch.device, needed_size: int) -> None:
- if target_device.type != "cuda":
- return
- free_mem = self._free_vram(target_device)
- if needed_size > free_mem:
- needed_gb = round(needed_size / GIG, 2)
- free_gb = round(free_mem / GIG, 2)
- raise torch.cuda.OutOfMemoryError(
- f"Insufficient VRAM to load model, requested {needed_gb}GB but only had {free_gb}GB free"
- )
diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py
index 0be53c842a..cb6b93eaac 100644
--- a/invokeai/backend/util/devices.py
+++ b/invokeai/backend/util/devices.py
@@ -6,8 +6,7 @@ from typing import Literal, Optional, Union
import torch
from torch import autocast
-from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.app.services.config.config_default import get_config
+from invokeai.app.services.config.config_default import PRECISION, get_config
CPU_DEVICE = torch.device("cpu")
CUDA_DEVICE = torch.device("cuda")
@@ -33,35 +32,34 @@ def get_torch_device_name() -> str:
return torch.cuda.get_device_name(device) if device.type == "cuda" else device.type.upper()
-# We are in transition here from using a single global AppConfig to allowing multiple
-# configurations. It is strongly recommended to pass the app_config to this function.
-def choose_precision(
- device: torch.device, app_config: Optional[InvokeAIAppConfig] = None
-) -> Literal["float32", "float16", "bfloat16"]:
+def choose_precision(device: torch.device) -> Literal["float32", "float16", "bfloat16"]:
"""Return an appropriate precision for the given torch device."""
- app_config = app_config or get_config()
+ app_config = get_config()
if device.type == "cuda":
device_name = torch.cuda.get_device_name(device)
- if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name):
- if app_config.precision == "float32":
- return "float32"
- elif app_config.precision == "bfloat16":
- return "bfloat16"
- else:
- return "float16"
+ if "GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name:
+ # These GPUs have limited support for float16
+ return "float32"
+ elif app_config.precision == "auto" or app_config.precision == "autocast":
+ # Default to float16 for CUDA devices
+ return "float16"
+ else:
+ # Use the user-defined precision
+ return app_config.precision
elif device.type == "mps":
- return "float16"
+ if app_config.precision == "auto" or app_config.precision == "autocast":
+ # Default to float16 for MPS devices
+ return "float16"
+ else:
+ # Use the user-defined precision
+ return app_config.precision
+ # CPU / safe fallback
return "float32"
-# We are in transition here from using a single global AppConfig to allowing multiple
-# configurations. It is strongly recommended to pass the app_config to this function.
-def torch_dtype(
- device: Optional[torch.device] = None,
- app_config: Optional[InvokeAIAppConfig] = None,
-) -> torch.dtype:
+def torch_dtype(device: Optional[torch.device] = None) -> torch.dtype:
device = device or choose_torch_device()
- precision = choose_precision(device, app_config)
+ precision = choose_precision(device)
if precision == "float16":
return torch.float16
if precision == "bfloat16":
@@ -71,7 +69,7 @@ def torch_dtype(
return torch.float32
-def choose_autocast(precision):
+def choose_autocast(precision: PRECISION):
"""Returns an autocast context or nullcontext for the given precision string"""
# float16 currently requires autocast to avoid errors like:
# 'expected scalar type Half but found Float'
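
Restated outside the module, the precision selection after this change behaves roughly as follows (standalone sketch; `config_precision` stands in for `app_config.precision`):

```python
def pick_precision(device_type: str, device_name: str, config_precision: str) -> str:
    """Sketch mirroring the simplified choose_precision() above."""
    if device_type == "cuda":
        if "GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name:
            return "float32"  # limited float16 support on these cards
        return "float16" if config_precision in ("auto", "autocast") else config_precision
    if device_type == "mps":
        return "float16" if config_precision in ("auto", "autocast") else config_precision
    return "float32"  # CPU / safe fallback

assert pick_precision("cuda", "NVIDIA GeForce RTX 4090", "auto") == "float16"
assert pick_precision("cuda", "NVIDIA GeForce GTX 1660", "auto") == "float32"
assert pick_precision("cpu", "", "bfloat16") == "float32"
```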
diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json
index 81fc9c4dd3..a99910c549 100644
--- a/invokeai/frontend/web/package.json
+++ b/invokeai/frontend/web/package.json
@@ -52,6 +52,7 @@
},
"dependencies": {
"@chakra-ui/react-use-size": "^2.1.0",
+ "@dagrejs/dagre": "^1.1.1",
"@dagrejs/graphlib": "^2.2.1",
"@dnd-kit/core": "^6.1.0",
"@dnd-kit/sortable": "^8.0.0",
diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml
index 4be16619ec..bf423c3d46 100644
--- a/invokeai/frontend/web/pnpm-lock.yaml
+++ b/invokeai/frontend/web/pnpm-lock.yaml
@@ -11,6 +11,9 @@ dependencies:
'@chakra-ui/react-use-size':
specifier: ^2.1.0
version: 2.1.0(react@18.2.0)
+ '@dagrejs/dagre':
+ specifier: ^1.1.1
+ version: 1.1.1
'@dagrejs/graphlib':
specifier: ^2.2.1
version: 2.2.1
@@ -3092,6 +3095,12 @@ packages:
dev: true
optional: true
+ /@dagrejs/dagre@1.1.1:
+ resolution: {integrity: sha512-AQfT6pffEuPE32weFzhS/u3UpX+bRXUARIXL7UqLaxz497cN8pjuBlX6axO4IIECE2gBV8eLFQkGCtKX5sDaUA==}
+ dependencies:
+ '@dagrejs/graphlib': 2.2.1
+ dev: false
+
/@dagrejs/graphlib@2.2.1:
resolution: {integrity: sha512-xJsN1v6OAxXk6jmNdM+OS/bBE8nDCwM0yDNprXR18ZNatL6to9ggod9+l2XtiLhXfLm0NkE7+Er/cpdlM+SkUA==}
engines: {node: '>17.0.0'}
diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json
index d5be1b1fce..ee370d1e42 100644
--- a/invokeai/frontend/web/public/locales/ar.json
+++ b/invokeai/frontend/web/public/locales/ar.json
@@ -291,7 +291,6 @@
"canvasMerged": "تم دمج الخط",
"sentToImageToImage": "تم إرسال إلى صورة إلى صورة",
"sentToUnifiedCanvas": "تم إرسال إلى لوحة موحدة",
- "parametersSet": "تم تعيين المعلمات",
"parametersNotSet": "لم يتم تعيين المعلمات",
"metadataLoadFailed": "فشل تحميل البيانات الوصفية"
},
diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json
index 4f18cd0050..033dffdc44 100644
--- a/invokeai/frontend/web/public/locales/de.json
+++ b/invokeai/frontend/web/public/locales/de.json
@@ -480,7 +480,6 @@
"canvasMerged": "Leinwand zusammengeführt",
"sentToImageToImage": "Gesendet an Bild zu Bild",
"sentToUnifiedCanvas": "Gesendet an Leinwand",
- "parametersSet": "Parameter festlegen",
"parametersNotSet": "Parameter nicht festgelegt",
"metadataLoadFailed": "Metadaten konnten nicht geladen werden",
"setCanvasInitialImage": "Ausgangsbild setzen",
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index 623cea64ee..5454c72e68 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -849,6 +849,7 @@
"version": "Version",
"versionUnknown": " Version Unknown",
"workflow": "Workflow",
+ "graph": "Graph",
"workflowAuthor": "Author",
"workflowContact": "Contact",
"workflowDescription": "Short Description",
@@ -1041,10 +1042,10 @@
"metadataLoadFailed": "Failed to load metadata",
"modelAddedSimple": "Model Added to Queue",
"modelImportCanceled": "Model Import Canceled",
+ "parameters": "Parameters",
"parameterNotSet": "{{parameter}} not set",
"parameterSet": "{{parameter}} set",
"parametersNotSet": "Parameters Not Set",
- "parametersSet": "Parameters Set",
"problemCopyingCanvas": "Problem Copying Canvas",
"problemCopyingCanvasDesc": "Unable to export base layer",
"problemCopyingImage": "Unable to Copy Image",
@@ -1482,7 +1483,11 @@
"workflowName": "Workflow Name",
"newWorkflowCreated": "New Workflow Created",
"workflowCleared": "Workflow Cleared",
- "workflowEditorMenu": "Workflow Editor Menu"
+ "workflowEditorMenu": "Workflow Editor Menu",
+ "loadFromGraph": "Load Workflow from Graph",
+ "convertGraph": "Convert Graph",
+ "loadWorkflow": "$t(common.load) Workflow",
+ "autoLayout": "Auto Layout"
},
"app": {
"storeNotInitialized": "Store is not initialized"
diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json
index c7af596556..3037045db5 100644
--- a/invokeai/frontend/web/public/locales/es.json
+++ b/invokeai/frontend/web/public/locales/es.json
@@ -363,7 +363,6 @@
"canvasMerged": "Lienzo consolidado",
"sentToImageToImage": "Enviar hacia Imagen a Imagen",
"sentToUnifiedCanvas": "Enviar hacia Lienzo Consolidado",
- "parametersSet": "Parámetros establecidos",
"parametersNotSet": "Parámetros no establecidos",
"metadataLoadFailed": "Error al cargar metadatos",
"serverError": "Error en el servidor",
diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json
index 095ee5d0d5..b8f560e265 100644
--- a/invokeai/frontend/web/public/locales/fr.json
+++ b/invokeai/frontend/web/public/locales/fr.json
@@ -298,7 +298,6 @@
"canvasMerged": "Canvas fusionné",
"sentToImageToImage": "Envoyé à Image à Image",
"sentToUnifiedCanvas": "Envoyé à Canvas unifié",
- "parametersSet": "Paramètres définis",
"parametersNotSet": "Paramètres non définis",
"metadataLoadFailed": "Échec du chargement des métadonnées"
},
diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json
index efb90f61c7..dbbb3cbec4 100644
--- a/invokeai/frontend/web/public/locales/he.json
+++ b/invokeai/frontend/web/public/locales/he.json
@@ -306,7 +306,6 @@
"canvasMerged": "קנבס מוזג",
"sentToImageToImage": "נשלח לתמונה לתמונה",
"sentToUnifiedCanvas": "נשלח אל קנבס מאוחד",
- "parametersSet": "הגדרת פרמטרים",
"parametersNotSet": "פרמטרים לא הוגדרו",
"metadataLoadFailed": "טעינת מטא-נתונים נכשלה"
},
diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json
index 2b211484aa..ff4e44c487 100644
--- a/invokeai/frontend/web/public/locales/it.json
+++ b/invokeai/frontend/web/public/locales/it.json
@@ -569,7 +569,6 @@
"canvasMerged": "Tela unita",
"sentToImageToImage": "Inviato a Immagine a Immagine",
"sentToUnifiedCanvas": "Inviato a Tela Unificata",
- "parametersSet": "Parametri impostati",
"parametersNotSet": "Parametri non impostati",
"metadataLoadFailed": "Impossibile caricare i metadati",
"serverError": "Errore del Server",
diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json
index 8fd8c96ee4..70adbb371d 100644
--- a/invokeai/frontend/web/public/locales/nl.json
+++ b/invokeai/frontend/web/public/locales/nl.json
@@ -420,7 +420,6 @@
"canvasMerged": "Canvas samengevoegd",
"sentToImageToImage": "Gestuurd naar Afbeelding naar afbeelding",
"sentToUnifiedCanvas": "Gestuurd naar Centraal canvas",
- "parametersSet": "Parameters ingesteld",
"parametersNotSet": "Parameters niet ingesteld",
"metadataLoadFailed": "Fout bij laden metagegevens",
"serverError": "Serverfout",
diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json
index 399417db58..b7592c3fae 100644
--- a/invokeai/frontend/web/public/locales/pl.json
+++ b/invokeai/frontend/web/public/locales/pl.json
@@ -267,7 +267,6 @@
"canvasMerged": "Scalono widoczne warstwy",
"sentToImageToImage": "Wysłano do Obraz na obraz",
"sentToUnifiedCanvas": "Wysłano do trybu uniwersalnego",
- "parametersSet": "Ustawiono parametry",
"parametersNotSet": "Nie ustawiono parametrów",
"metadataLoadFailed": "Błąd wczytywania metadanych"
},
diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json
index 34f99b7075..3003a1732b 100644
--- a/invokeai/frontend/web/public/locales/pt.json
+++ b/invokeai/frontend/web/public/locales/pt.json
@@ -310,7 +310,6 @@
"canvasMerged": "Tela Fundida",
"sentToImageToImage": "Mandar Para Imagem Para Imagem",
"sentToUnifiedCanvas": "Enviada para a Tela Unificada",
- "parametersSet": "Parâmetros Definidos",
"parametersNotSet": "Parâmetros Não Definidos",
"metadataLoadFailed": "Falha ao tentar carregar metadados"
},
diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json
index 2859eb31db..c966c6db50 100644
--- a/invokeai/frontend/web/public/locales/pt_BR.json
+++ b/invokeai/frontend/web/public/locales/pt_BR.json
@@ -307,7 +307,6 @@
"canvasMerged": "Tela Fundida",
"sentToImageToImage": "Mandar Para Imagem Para Imagem",
"sentToUnifiedCanvas": "Enviada para a Tela Unificada",
- "parametersSet": "Parâmetros Definidos",
"parametersNotSet": "Parâmetros Não Definidos",
"metadataLoadFailed": "Falha ao tentar carregar metadados"
},
diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json
index 258bceeb05..4dd2ad895a 100644
--- a/invokeai/frontend/web/public/locales/ru.json
+++ b/invokeai/frontend/web/public/locales/ru.json
@@ -575,7 +575,6 @@
"canvasMerged": "Холст объединен",
"sentToImageToImage": "Отправить в img2img",
"sentToUnifiedCanvas": "Отправлено на Единый холст",
- "parametersSet": "Параметры заданы",
"parametersNotSet": "Параметры не заданы",
"metadataLoadFailed": "Не удалось загрузить метаданные",
"serverError": "Ошибка сервера",
diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json
index f97909525c..9bb38c21b3 100644
--- a/invokeai/frontend/web/public/locales/uk.json
+++ b/invokeai/frontend/web/public/locales/uk.json
@@ -315,7 +315,6 @@
"canvasMerged": "Полотно об'єднане",
"sentToImageToImage": "Надіслати до img2img",
"sentToUnifiedCanvas": "Надіслати на полотно",
- "parametersSet": "Параметри задані",
"parametersNotSet": "Параметри не задані",
"metadataLoadFailed": "Не вдалося завантажити метадані",
"serverError": "Помилка сервера",
diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json
index 77a06ea77b..a88f540990 100644
--- a/invokeai/frontend/web/public/locales/zh_CN.json
+++ b/invokeai/frontend/web/public/locales/zh_CN.json
@@ -487,7 +487,6 @@
"canvasMerged": "画布已合并",
"sentToImageToImage": "已发送到图生图",
"sentToUnifiedCanvas": "已发送到统一画布",
- "parametersSet": "参数已设定",
"parametersNotSet": "参数未设定",
"metadataLoadFailed": "加载元数据失败",
"uploadFailedInvalidUploadDesc": "必须是单张的 PNG 或 JPEG 图片",
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
index 5b9f15c21a..ce75ea62e0 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
@@ -33,6 +33,7 @@ const ImageMetadataActions = (props: Props) => {
+
diff --git a/invokeai/frontend/web/src/features/metadata/util/handlers.ts b/invokeai/frontend/web/src/features/metadata/util/handlers.ts
index b64426b422..2fb840afcb 100644
--- a/invokeai/frontend/web/src/features/metadata/util/handlers.ts
+++ b/invokeai/frontend/web/src/features/metadata/util/handlers.ts
@@ -189,6 +189,12 @@ export const handlers = {
recaller: recallers.cfgScale,
}),
height: buildHandlers({ getLabel: () => t('metadata.height'), parser: parsers.height, recaller: recallers.height }),
+ initialImage: buildHandlers({
+ getLabel: () => t('metadata.initImage'),
+ parser: parsers.initialImage,
+ recaller: recallers.initialImage,
+ renderValue: async (imageDTO) => imageDTO.image_name,
+ }),
negativePrompt: buildHandlers({
getLabel: () => t('metadata.negativePrompt'),
parser: parsers.negativePrompt,
@@ -405,6 +411,6 @@ export const parseAndRecallAllMetadata = async (metadata: unknown, skip: (keyof
})
);
if (results.some((result) => result.status === 'fulfilled')) {
- parameterSetToast(t('toast.parametersSet'));
+ parameterSetToast(t('toast.parameters'));
}
};
diff --git a/invokeai/frontend/web/src/features/metadata/util/parsers.ts b/invokeai/frontend/web/src/features/metadata/util/parsers.ts
index 635a63a8de..9f5c14d94e 100644
--- a/invokeai/frontend/web/src/features/metadata/util/parsers.ts
+++ b/invokeai/frontend/web/src/features/metadata/util/parsers.ts
@@ -1,3 +1,4 @@
+import { getStore } from 'app/store/nanostores/store';
import {
initialControlNet,
initialIPAdapter,
@@ -57,6 +58,8 @@ import {
isParameterWidth,
} from 'features/parameters/types/parameterSchemas';
import { get, isArray, isString } from 'lodash-es';
+import { imagesApi } from 'services/api/endpoints/images';
+import type { ImageDTO } from 'services/api/types';
import {
isControlNetModelConfig,
isIPAdapterModelConfig,
@@ -135,6 +138,14 @@ const parseCFGRescaleMultiplier: MetadataParseFunc = (metadata) =>
getProperty(metadata, 'scheduler', isParameterScheduler);
+const parseInitialImage: MetadataParseFunc<ImageDTO> = async (metadata) => {
+ const imageName = await getProperty(metadata, 'init_image', isString);
+ const imageDTORequest = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName));
+ const imageDTO = await imageDTORequest.unwrap();
+ imageDTORequest.unsubscribe();
+ return imageDTO;
+};
+
const parseWidth: MetadataParseFunc = (metadata) => getProperty(metadata, 'width', isParameterWidth);
const parseHeight: MetadataParseFunc = (metadata) =>
@@ -402,6 +413,7 @@ export const parsers = {
cfgScale: parseCFGScale,
cfgRescaleMultiplier: parseCFGRescaleMultiplier,
scheduler: parseScheduler,
+ initialImage: parseInitialImage,
width: parseWidth,
height: parseHeight,
steps: parseSteps,
diff --git a/invokeai/frontend/web/src/features/metadata/util/recallers.ts b/invokeai/frontend/web/src/features/metadata/util/recallers.ts
index f35399c139..88af390a20 100644
--- a/invokeai/frontend/web/src/features/metadata/util/recallers.ts
+++ b/invokeai/frontend/web/src/features/metadata/util/recallers.ts
@@ -17,6 +17,7 @@ import type {
import { modelSelected } from 'features/parameters/store/actions';
import {
heightRecalled,
+ initialImageChanged,
setCfgRescaleMultiplier,
setCfgScale,
setImg2imgStrength,
@@ -61,6 +62,7 @@ import {
setRefinerStart,
setRefinerSteps,
} from 'features/sdxl/store/sdxlSlice';
+import type { ImageDTO } from 'services/api/types';
const recallPositivePrompt: MetadataRecallFunc = (positivePrompt) => {
getStore().dispatch(setPositivePrompt(positivePrompt));
@@ -94,6 +96,10 @@ const recallScheduler: MetadataRecallFunc = (scheduler) => {
getStore().dispatch(setScheduler(scheduler));
};
+const recallInitialImage: MetadataRecallFunc<ImageDTO> = async (imageDTO) => {
+ getStore().dispatch(initialImageChanged(imageDTO));
+};
+
const recallWidth: MetadataRecallFunc = (width) => {
getStore().dispatch(widthRecalled(width));
};
@@ -235,6 +241,7 @@ export const recallers = {
cfgScale: recallCFGScale,
cfgRescaleMultiplier: recallCFGRescaleMultiplier,
scheduler: recallScheduler,
+ initialImage: recallInitialImage,
width: recallWidth,
height: recallHeight,
steps: recallSteps,
diff --git a/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx b/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx
index 8307997ff9..737adb52e7 100644
--- a/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx
@@ -3,6 +3,7 @@ import 'reactflow/dist/style.css';
import { Flex } from '@invoke-ai/ui-library';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import TopPanel from 'features/nodes/components/flow/panels/TopPanel/TopPanel';
+import { LoadWorkflowFromGraphModal } from 'features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal';
import { SaveWorkflowAsDialog } from 'features/workflowLibrary/components/SaveWorkflowAsDialog/SaveWorkflowAsDialog';
import type { AnimationProps } from 'framer-motion';
import { AnimatePresence, motion } from 'framer-motion';
@@ -61,6 +62,7 @@ const NodeEditor = () => {
+ <LoadWorkflowFromGraphModal />
)}
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/NumberFieldInputComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/NumberFieldInputComponent.tsx
index 0cb250bb22..e3f33d8a45 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/NumberFieldInputComponent.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/NumberFieldInputComponent.tsx
@@ -37,34 +37,50 @@ const NumberFieldInputComponent = (
);
const min = useMemo(() => {
+ let min = -NUMPY_RAND_MAX;
if (!isNil(fieldTemplate.minimum)) {
- return fieldTemplate.minimum;
+ min = fieldTemplate.minimum;
}
if (!isNil(fieldTemplate.exclusiveMinimum)) {
- return fieldTemplate.exclusiveMinimum + 0.01;
+ min = fieldTemplate.exclusiveMinimum + 0.01;
}
- return;
+ return min;
}, [fieldTemplate.exclusiveMinimum, fieldTemplate.minimum]);
const max = useMemo(() => {
+ let max = NUMPY_RAND_MAX;
if (!isNil(fieldTemplate.maximum)) {
- return fieldTemplate.maximum;
+ max = fieldTemplate.maximum;
}
if (!isNil(fieldTemplate.exclusiveMaximum)) {
- return fieldTemplate.exclusiveMaximum - 0.01;
+ max = fieldTemplate.exclusiveMaximum - 0.01;
}
- return;
+ return max;
}, [fieldTemplate.exclusiveMaximum, fieldTemplate.maximum]);
+ const step = useMemo(() => {
+ if (isNil(fieldTemplate.multipleOf)) {
+ return isIntegerField ? 1 : 0.1;
+ }
+ return fieldTemplate.multipleOf;
+ }, [fieldTemplate.multipleOf, isIntegerField]);
+
+ const fineStep = useMemo(() => {
+ if (isNil(fieldTemplate.multipleOf)) {
+ return isIntegerField ? 1 : 0.01;
+ }
+ return fieldTemplate.multipleOf;
+ }, [fieldTemplate.multipleOf, isIntegerField]);
+
return (
);
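
The effect of the min/max hunks above, restated as a small standalone sketch (Python for brevity; the exact value of `NUMPY_RAND_MAX` is an assumption):

```python
NUMPY_RAND_MAX = 2**32 - 1  # assumption about the frontend constant's value

def resolve_bounds(minimum=None, exclusive_minimum=None, maximum=None, exclusive_maximum=None):
    """Always return concrete bounds; exclusive bounds win and are nudged by 0.01."""
    lo = minimum if minimum is not None else -NUMPY_RAND_MAX
    if exclusive_minimum is not None:
        lo = exclusive_minimum + 0.01
    hi = maximum if maximum is not None else NUMPY_RAND_MAX
    if exclusive_maximum is not None:
        hi = exclusive_maximum - 0.01
    return lo, hi

print(resolve_bounds(minimum=0))            # (0, 4294967295)
print(resolve_bounds(exclusive_minimum=0))  # (0.01, 4294967295)
```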
diff --git a/invokeai/frontend/web/src/features/nodes/util/workflow/graphToWorkflow.ts b/invokeai/frontend/web/src/features/nodes/util/workflow/graphToWorkflow.ts
new file mode 100644
index 0000000000..eec9c6cf4b
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/util/workflow/graphToWorkflow.ts
@@ -0,0 +1,148 @@
+import * as dagre from '@dagrejs/dagre';
+import { logger } from 'app/logging/logger';
+import { getStore } from 'app/store/nanostores/store';
+import { NODE_WIDTH } from 'features/nodes/types/constants';
+import type { FieldInputInstance } from 'features/nodes/types/field';
+import type { WorkflowV3 } from 'features/nodes/types/workflow';
+import { buildFieldInputInstance } from 'features/nodes/util/schema/buildFieldInputInstance';
+import { t } from 'i18next';
+import { forEach } from 'lodash-es';
+import type { NonNullableGraph } from 'services/api/types';
+import { v4 as uuidv4 } from 'uuid';
+
+/**
+ * Converts a graph to a workflow. This is a best-effort conversion and may not be perfect.
+ * For example, if a graph references an unknown node type, that node will be skipped.
+ * @param graph The graph to convert to a workflow
+ * @param autoLayout Whether to auto-layout the nodes using `dagre`. If false, nodes will be simply stacked on top of one another with an offset.
+ * @returns The workflow.
+ */
+export const graphToWorkflow = (graph: NonNullableGraph, autoLayout = true): WorkflowV3 => {
+ const invocationTemplates = getStore().getState().nodes.templates;
+
+ if (!invocationTemplates) {
+ throw new Error(t('app.storeNotInitialized'));
+ }
+
+ // Initialize the workflow
+ const workflow: WorkflowV3 = {
+ name: '',
+ author: '',
+ contact: '',
+ description: '',
+ meta: {
+ category: 'user',
+ version: '3.0.0',
+ },
+ notes: '',
+ tags: '',
+ version: '',
+ exposedFields: [],
+ edges: [],
+ nodes: [],
+ };
+
+ // Convert nodes
+ forEach(graph.nodes, (node) => {
+ const template = invocationTemplates[node.type];
+
+ // Skip missing node templates - this is a best-effort
+ if (!template) {
+ logger('nodes').warn(`Node type ${node.type} not found in invocationTemplates`);
+ return;
+ }
+
+ // Build field input instances for each attr
+ const inputs: Record<string, FieldInputInstance> = {};
+
+ forEach(node, (value, key) => {
+ // Ignore the non-input keys - I think this is all of them?
+ if (key === 'id' || key === 'type' || key === 'is_intermediate' || key === 'use_cache') {
+ return;
+ }
+
+ const inputTemplate = template.inputs[key];
+
+ // Skip missing input templates
+ if (!inputTemplate) {
+ logger('nodes').warn(`Input ${key} not found in template for node type ${node.type}`);
+ return;
+ }
+
+ // This _should_ be all we need to do!
+ const inputInstance = buildFieldInputInstance(node.id, inputTemplate);
+ inputInstance.value = value;
+ inputs[key] = inputInstance;
+ });
+
+ workflow.nodes.push({
+ id: node.id,
+ type: 'invocation',
+ position: { x: 0, y: 0 }, // we'll do layout later, just need something here
+ data: {
+ id: node.id,
+ type: node.type,
+ version: template.version,
+ label: '',
+ notes: '',
+ isOpen: true,
+ isIntermediate: node.is_intermediate ?? false,
+ useCache: node.use_cache ?? true,
+ inputs,
+ },
+ });
+ });
+
+ forEach(graph.edges, (edge) => {
+ workflow.edges.push({
+ id: uuidv4(), // we don't have edge IDs in the graph
+ type: 'default',
+ source: edge.source.node_id,
+ sourceHandle: edge.source.field,
+ target: edge.destination.node_id,
+ targetHandle: edge.destination.field,
+ });
+ });
+
+ if (autoLayout) {
+ // Best-effort auto layout via dagre - not perfect but better than nothing
+ const dagreGraph = new dagre.graphlib.Graph();
+ // `rankdir` and `align` could be tweaked, but it's gonna be janky no matter what we choose
+ dagreGraph.setGraph({ rankdir: 'TB', align: 'UL' });
+ dagreGraph.setDefaultEdgeLabel(() => ({}));
+
+ // We don't know the dimensions of the nodes until we load the graph into `reactflow` - use a reasonable value
+ forEach(graph.nodes, (node) => {
+ const width = NODE_WIDTH;
+ const height = NODE_WIDTH * 1.5;
+ dagreGraph.setNode(node.id, { width, height });
+ });
+
+ graph.edges.forEach((edge) => {
+ dagreGraph.setEdge(edge.source.node_id, edge.destination.node_id);
+ });
+
+ // This does the magic
+ dagre.layout(dagreGraph);
+
+ // Update the workflow now that we've got the positions
+ workflow.nodes.forEach((node) => {
+ const nodeWithPosition = dagreGraph.node(node.id);
+ node.position = {
+ x: nodeWithPosition.x - nodeWithPosition.width / 2,
+ y: nodeWithPosition.y - nodeWithPosition.height / 2,
+ };
+ });
+ } else {
+ // Stack nodes with a 50px,50px offset from the previous node
+ let x = 0;
+ let y = 0;
+ workflow.nodes.forEach((node) => {
+ node.position = { x, y };
+ x = x + 50;
+ y = y + 50;
+ });
+ }
+
+ return workflow;
+};
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal.tsx
new file mode 100644
index 0000000000..ecb4aa7dd4
--- /dev/null
+++ b/invokeai/frontend/web/src/features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal.tsx
@@ -0,0 +1,111 @@
+import {
+ Button,
+ Checkbox,
+ Flex,
+ FormControl,
+ FormLabel,
+ Modal,
+ ModalBody,
+ ModalCloseButton,
+ ModalContent,
+ ModalHeader,
+ ModalOverlay,
+ Spacer,
+ Textarea,
+} from '@invoke-ai/ui-library';
+import { useStore } from '@nanostores/react';
+import { useAppDispatch } from 'app/store/storeHooks';
+import { workflowLoadRequested } from 'features/nodes/store/actions';
+import { graphToWorkflow } from 'features/nodes/util/workflow/graphToWorkflow';
+import { atom } from 'nanostores';
+import type { ChangeEvent } from 'react';
+import { useCallback, useState } from 'react';
+import { useTranslation } from 'react-i18next';
+
+const $isOpen = atom(false);
+
+export const useLoadWorkflowFromGraphModal = () => {
+ const isOpen = useStore($isOpen);
+ const onOpen = useCallback(() => {
+ $isOpen.set(true);
+ }, []);
+ const onClose = useCallback(() => {
+ $isOpen.set(false);
+ }, []);
+
+ return { isOpen, onOpen, onClose };
+};
+
+export const LoadWorkflowFromGraphModal = () => {
+ const { t } = useTranslation();
+ const dispatch = useAppDispatch();
+ const { isOpen, onClose } = useLoadWorkflowFromGraphModal();
+ const [graphRaw, setGraphRaw] = useState('');
+ const [workflowRaw, setWorkflowRaw] = useState('');
+ const [shouldAutoLayout, setShouldAutoLayout] = useState(true);
+ const onChangeGraphRaw = useCallback((e: ChangeEvent<HTMLTextAreaElement>) => {
+ setGraphRaw(e.target.value);
+ }, []);
+ const onChangeWorkflowRaw = useCallback((e: ChangeEvent<HTMLTextAreaElement>) => {
+ setWorkflowRaw(e.target.value);
+ }, []);
+ const onChangeShouldAutoLayout = useCallback((e: ChangeEvent<HTMLInputElement>) => {
+ setShouldAutoLayout(e.target.checked);
+ }, []);
+ const parse = useCallback(() => {
+ const graph = JSON.parse(graphRaw);
+ const workflow = graphToWorkflow(graph, shouldAutoLayout);
+ setWorkflowRaw(JSON.stringify(workflow, null, 2));
+ }, [graphRaw, shouldAutoLayout]);
+ const loadWorkflow = useCallback(() => {
+ const workflow = JSON.parse(workflowRaw);
+ dispatch(workflowLoadRequested({ workflow, asCopy: true }));
+ onClose();
+ }, [dispatch, onClose, workflowRaw]);
+ return (
+
+
+
+ {t('workflows.loadFromGraph')}
+
+
+
+
+
+ {t('workflows.autoLayout')}
+
+
+
+
+
+
+ {t('nodes.graph')}
+
+
+
+ {t('nodes.workflow')}
+
+
+
+
+
+ );
+};
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/LoadWorkflowFromGraphMenuItem.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/LoadWorkflowFromGraphMenuItem.tsx
new file mode 100644
index 0000000000..8f3cb0c6f6
--- /dev/null
+++ b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/LoadWorkflowFromGraphMenuItem.tsx
@@ -0,0 +1,18 @@
+import { MenuItem } from '@invoke-ai/ui-library';
+import { useLoadWorkflowFromGraphModal } from 'features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { PiFlaskBold } from 'react-icons/pi';
+
+const LoadWorkflowFromGraphMenuItem = () => {
+ const { t } = useTranslation();
+ const { onOpen } = useLoadWorkflowFromGraphModal();
+
+ return (
+ <MenuItem icon={<PiFlaskBold />} onClick={onOpen}>
+ {t('workflows.loadFromGraph')}
+ </MenuItem>
+ );
+};
+
+export default memo(LoadWorkflowFromGraphMenuItem);
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/WorkflowLibraryMenu.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/WorkflowLibraryMenu.tsx
index 55d8ac2626..73e9f5d4ba 100644
--- a/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/WorkflowLibraryMenu.tsx
+++ b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/WorkflowLibraryMenu.tsx
@@ -6,8 +6,10 @@ import {
MenuList,
useDisclosure,
useGlobalMenuClose,
+ useShiftModifier,
} from '@invoke-ai/ui-library';
import DownloadWorkflowMenuItem from 'features/workflowLibrary/components/WorkflowLibraryMenu/DownloadWorkflowMenuItem';
+import LoadWorkflowFromGraphMenuItem from 'features/workflowLibrary/components/WorkflowLibraryMenu/LoadWorkflowFromGraphMenuItem';
import { NewWorkflowMenuItem } from 'features/workflowLibrary/components/WorkflowLibraryMenu/NewWorkflowMenuItem';
import SaveWorkflowAsMenuItem from 'features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowAsMenuItem';
import SaveWorkflowMenuItem from 'features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem';
@@ -20,6 +22,7 @@ import { PiDotsThreeOutlineFill } from 'react-icons/pi';
const WorkflowLibraryMenu = () => {
const { t } = useTranslation();
const { isOpen, onOpen, onClose } = useDisclosure();
+ const shift = useShiftModifier();
useGlobalMenuClose(onClose);
return (
);
diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts
index 72da0f1f8c..cb222bd497 100644
--- a/invokeai/frontend/web/src/services/api/schema.ts
+++ b/invokeai/frontend/web/src/services/api/schema.ts
@@ -4112,7 +4112,7 @@ export type components = {
* @description The nodes in this graph
*/
nodes: {
- [key: string]: components["schemas"]["ColorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | 
components["schemas"]["MetadataInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"];
+ [key: string]: components["schemas"]["IntegerMathInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartImageProcessorInvocation"];
};
/**
* Edges
@@ -4149,7 +4149,7 @@ export type components = {
* @description The results of node executions
*/
results: {
- [key: string]: components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringOutput"];
+ [key: string]: components["schemas"]["FloatCollectionOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["String2Output"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"];
};
/**
* Errors
@@ -4435,7 +4435,7 @@ export type components = {
/**
* Clip Vision Model
* @description CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models.
- * @default auto
+ * @default ViT-H
* @enum {string}
*/
clip_vision_model?: "ViT-H" | "ViT-G";
diff --git a/invokeai/invocation_api/__init__.py b/invokeai/invocation_api/__init__.py
index 300ecd751b..11f334e24e 100644
--- a/invokeai/invocation_api/__init__.py
+++ b/invokeai/invocation_api/__init__.py
@@ -27,6 +27,7 @@ from invokeai.app.invocations.fields import (
OutputField,
UIComponent,
UIType,
+ WithBoard,
WithMetadata,
WithWorkflow,
)
@@ -105,6 +106,7 @@ __all__ = [
"OutputField",
"UIComponent",
"UIType",
+ "WithBoard",
"WithMetadata",
"WithWorkflow",
# invokeai.app.invocations.latent
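
With `WithBoard` now exported, community nodes can opt into board assignment for their outputs. A rough sketch, assuming the usual v4 node API surface (`@invocation`, `ImageField`, `ImageOutput.build`, `context.images`); the node itself is hypothetical:

```python
from invokeai.invocation_api import (
    BaseInvocation,
    ImageField,
    ImageOutput,
    InputField,
    InvocationContext,
    WithBoard,
    WithMetadata,
    invocation,
)

@invocation("flip_image", title="Flip Image", tags=["image"], version="1.0.0")
class FlipImageInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Hypothetical node: flips an image upside down."""

    image: ImageField = InputField(description="The image to flip")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        pil_image = context.images.get_pil(self.image.image_name)
        image_dto = context.images.save(pil_image.rotate(180))
        return ImageOutput.build(image_dto)
```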
diff --git a/invokeai/version/invokeai_version.py b/invokeai/version/invokeai_version.py
index c7a18d13e8..4b56dfc53e 100644
--- a/invokeai/version/invokeai_version.py
+++ b/invokeai/version/invokeai_version.py
@@ -1 +1 @@
-__version__ = "4.0.3"
+__version__ = "4.0.4"