From 15b33ad50180eef9a155ff03e36767e9bffcfc07 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 11 Oct 2023 13:49:28 +1100 Subject: [PATCH 001/202] feat(nodes): add freeu support Add support for FreeU. See: - https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu - https://github.com/ChenyangSi/FreeU Implementation: - `ModelPatcher.apply_freeu()` handles enabling FreeU (which is very simple with diffusers). - `FreeUConfig` model added to hold the hyperparameters. - `freeu_config` added as optional sub-field on `UNetField`. - `FreeUInvocation` added; works like LoRA: chain it to add the FreeU config to the UNet. - No support for model-dependent presets; this will be a future workflow editor enhancement. Closes #4845 --- invokeai/app/invocations/baseinvocation.py | 4 ++ invokeai/app/invocations/latent.py | 1 + invokeai/app/invocations/model.py | 62 +++++++++++++++++----- invokeai/app/invocations/shared.py | 16 ++++++ invokeai/backend/model_management/lora.py | 21 ++++++++ 5 files changed, 91 insertions(+), 13 deletions(-) create mode 100644 invokeai/app/invocations/shared.py diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 497dafa102..71af414f5b 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -95,6 +95,10 @@ class FieldDescriptions: inclusive_low = "The inclusive low value" exclusive_high = "The exclusive high value" decimal_places = "The number of decimal places to round to" + freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.' + freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.' + freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features." + freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features." 
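Background for the "very simple with diffusers" note above: diffusers' `UNet2DConditionModel` already exposes `enable_freeu()` / `disable_freeu()`, and `ModelPatcher.apply_freeu()` (added in the `lora.py` hunk further down) only wraps those two calls in a context manager keyed on an optional `FreeUConfig`. The following is a minimal sketch of the same toggle against a plain diffusers pipeline, not part of this patch; it assumes a diffusers version with FreeU support and a CUDA device, uses the SD1.5 values suggested in the `FreeUInvocation` docstring, and the model id and prompt are illustrative only.

```python
import torch
from diffusers import StableDiffusionPipeline

# Illustrative model id; any SD1.5-family checkpoint works the same way.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# enable_freeu()/disable_freeu() live on the UNet (UNet2DConditionModel).
# ModelPatcher.apply_freeu() wraps exactly these two calls in a context
# manager that is a no-op when no FreeUConfig is supplied.
pipe.unet.enable_freeu(b1=1.2, b2=1.4, s1=0.9, s2=0.2)  # SD1.5 suggestions
image = pipe("a photo of an astronaut riding a horse").images[0]

pipe.unet.disable_freeu()  # restore the stock UNet behaviour afterwards
```

The same four hyperparameters are what `FreeUConfig` holds, and what `UNetField.freeu_config` hands to the denoise step when a `FreeUInvocation` is chained into the graph.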
class Input(str, Enum): diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index c6bf37bdbc..077f6135da 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -655,6 +655,7 @@ class DenoiseLatentsInvocation(BaseInvocation): with ( ExitStack() as exit_stack, ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()), + ModelPatcher.apply_freeu(unet_info.context.model, self.unet.freeu_config), set_seamless(unet_info.context.model, self.unet.seamless_axes), unet_info as unet, ): diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index 571cb2e730..625d848bce 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -3,6 +3,8 @@ from typing import List, Optional from pydantic import BaseModel, Field +from invokeai.app.invocations.shared import FreeUConfig + from ...backend.model_management import BaseModelType, ModelType, SubModelType from .baseinvocation import ( BaseInvocation, @@ -34,6 +36,7 @@ class UNetField(BaseModel): scheduler: ModelInfo = Field(description="Info to load scheduler submodel") loras: List[LoraInfo] = Field(description="Loras to apply on model loading") seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless') + freeu_config: Optional[FreeUConfig] = Field(default=None, description="FreeU configuration") class ClipField(BaseModel): @@ -49,15 +52,34 @@ class VaeField(BaseModel): seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless') -@invocation_output("model_loader_output") -class ModelLoaderOutput(BaseInvocationOutput): - """Model loader output""" +@invocation_output("unet_output") +class UNetOutput(BaseInvocationOutput): + """Base class for invocations that output a UNet field""" unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet") - clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP") + + +@invocation_output("vae_output") +class VAEOutput(BaseInvocationOutput): + """Base class for invocations that output a VAE field""" + vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE") +@invocation_output("clip_output") +class CLIPOutput(BaseInvocationOutput): + """Base class for invocations that output a CLIP field""" + + clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP") + + +@invocation_output("model_loader_output") +class ModelLoaderOutput(UNetOutput, CLIPOutput, VAEOutput): + """Model loader output""" + + pass + + class MainModelField(BaseModel): """Main model field""" @@ -331,13 +353,6 @@ class VAEModelField(BaseModel): base_model: BaseModelType = Field(description="Base model") -@invocation_output("vae_loader_output") -class VaeLoaderOutput(BaseInvocationOutput): - """VAE output""" - - vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE") - - @invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.0") class VaeLoaderInvocation(BaseInvocation): """Loads a VAE model, outputting a VaeLoaderOutput""" @@ -346,7 +361,7 @@ class VaeLoaderInvocation(BaseInvocation): description=FieldDescriptions.vae_model, input=Input.Direct, ui_type=UIType.VaeModel, title="VAE" ) - def invoke(self, context: InvocationContext) -> VaeLoaderOutput: + def invoke(self, context: InvocationContext) -> VAEOutput: base_model = self.vae_model.base_model model_name = self.vae_model.model_name 
model_type = ModelType.Vae @@ -357,7 +372,7 @@ class VaeLoaderInvocation(BaseInvocation): model_type=model_type, ): raise Exception(f"Unkown vae name: {model_name}!") - return VaeLoaderOutput( + return VAEOutput( vae=VaeField( vae=ModelInfo( model_name=model_name, @@ -407,3 +422,24 @@ class SeamlessModeInvocation(BaseInvocation): vae.seamless_axes = seamless_axes_list return SeamlessModeOutput(unet=unet, vae=vae) + + +@invocation("freeu", title="FreeU", tags=["freeu"], category="unet", version="1.0.0") +class FreeUInvocation(BaseInvocation): + """ + Applies FreeU to the UNet. Suggested values (b1/b2/s1/s2): + + SD1.5: 1.2/1.4/0.9/0.2, + SD2: 1.1/1.2/0.9/0.2, + SDXL: 1.1/1.2/0.6/0.4, + """ + + unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection, title="UNet") + b1: float = InputField(default=1.2, ge=-1, le=3, description=FieldDescriptions.freeu_b1) + b2: float = InputField(default=1.4, ge=-1, le=3, description=FieldDescriptions.freeu_b2) + s1: float = InputField(default=0.9, ge=-1, le=3, description=FieldDescriptions.freeu_s1) + s2: float = InputField(default=0.2, ge=-1, le=3, description=FieldDescriptions.freeu_s2) + + def invoke(self, context: InvocationContext) -> UNetOutput: + self.unet.freeu_config = FreeUConfig(s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) + return UNetOutput(unet=self.unet) diff --git a/invokeai/app/invocations/shared.py b/invokeai/app/invocations/shared.py new file mode 100644 index 0000000000..db742a3433 --- /dev/null +++ b/invokeai/app/invocations/shared.py @@ -0,0 +1,16 @@ +from pydantic import BaseModel, Field + +from invokeai.app.invocations.baseinvocation import FieldDescriptions + + +class FreeUConfig(BaseModel): + """ + Configuration for the FreeU hyperparameters. + - https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu + - https://github.com/ChenyangSi/FreeU + """ + + s1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s1) + s2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s2) + b1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b1) + b2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b2) diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py index bb44455c88..59aeef19ce 100644 --- a/invokeai/backend/model_management/lora.py +++ b/invokeai/backend/model_management/lora.py @@ -12,6 +12,8 @@ from diffusers.models import UNet2DConditionModel from safetensors.torch import load_file from transformers import CLIPTextModel, CLIPTokenizer +from invokeai.app.invocations.shared import FreeUConfig + from .models.lora import LoRAModel """ @@ -231,6 +233,25 @@ class ModelPatcher: while len(skipped_layers) > 0: text_encoder.text_model.encoder.layers.append(skipped_layers.pop()) + @classmethod + @contextmanager + def apply_freeu( + cls, + unet: UNet2DConditionModel, + freeu_config: Optional[FreeUConfig] = None, + ): + did_apply_freeu = False + try: + if freeu_config is not None: + unet.enable_freeu(b1=freeu_config.b1, b2=freeu_config.b2, s1=freeu_config.s1, s2=freeu_config.s2) + did_apply_freeu = True + + yield + + finally: + if did_apply_freeu: + unet.disable_freeu() + class TextualInversionModel: embedding: torch.Tensor # [n, 768]|[n, 1280] From 1a8f9d1ecb59d863fdfb901e12b458ea81ebf4e7 Mon Sep 17 00:00:00 2001 From: gallegonovato Date: Thu, 12 Oct 2023 12:43:45 +0000 Subject: [PATCH 002/202] translationBot(ui): update translation (Spanish) Currently translated at 100.0% (526 of 526 strings) translationBot(ui): 
update translation (Spanish) Currently translated at 100.0% (523 of 523 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (519 of 519 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (515 of 515 strings) Co-authored-by: gallegonovato Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/es/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/es.json | 25 ++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index d17cefaad2..f2dcfa569a 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -19,7 +19,7 @@ "postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.", "training": "Entrenamiento", "trainingDesc1": "Un flujo de trabajo dedicado para el entrenamiento de sus propios -embeddings- y puntos de control utilizando Inversión Textual y Dreambooth desde la interfaz web.", - "trainingDesc2": "InvokeAI ya soporta el entrenamiento de -embeddings- personalizados utilizando la Inversión Textual mediante el script principal.", + "trainingDesc2": "InvokeAI ya admite el entrenamiento de incrustaciones personalizadas mediante la inversión textual mediante el script principal.", "upload": "Subir imagen", "close": "Cerrar", "load": "Cargar", @@ -74,7 +74,12 @@ "txt2img": "De texto a imagen", "accept": "Aceptar", "cancel": "Cancelar", - "linear": "Lineal" + "linear": "Lineal", + "random": "Aleatorio", + "generate": "Generar", + "openInNewTab": "Abrir en una nueva pestaña", + "dontAskMeAgain": "No me preguntes de nuevo", + "areYouSure": "¿Estas seguro?" }, "gallery": { "generations": "Generaciones", @@ -90,7 +95,10 @@ "pinGallery": "Fijar galería", "allImagesLoaded": "Todas las imágenes cargadas", "loadMore": "Cargar más", - "noImagesInGallery": "Sin imágenes en la galería" + "noImagesInGallery": "Sin imágenes en la galería", + "deleteImage": "Eliminar Imagen", + "deleteImageBin": "Las imágenes eliminadas se enviarán a la papelera de tu sistema operativo.", + "deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar." }, "hotkeys": { "keyboardShortcuts": "Atajos de teclado", @@ -500,7 +508,12 @@ "resetWebUIDesc1": "Al restablecer la interfaz web, solo se restablece la caché local del navegador de sus imágenes y la configuración guardada. No se elimina ninguna imagen de su disco duro.", "resetWebUIDesc2": "Si las imágenes no se muestran en la galería o algo más no funciona, intente restablecer antes de reportar un incidente en GitHub.", "resetComplete": "La interfaz web se ha restablecido. 
Actualice la página para recargarla.", - "useSlidersForAll": "Utilice controles deslizantes para todas las opciones" + "useSlidersForAll": "Utilice controles deslizantes para todas las opciones", + "general": "General", + "consoleLogLevel": "Nivel del registro", + "shouldLogToConsole": "Registro de la consola", + "developer": "Desarrollador", + "autoShowProgress": "Mostrar automáticamente el progreso de las imágenes" }, "toast": { "tempFoldersEmptied": "Directorio temporal vaciado", @@ -634,5 +647,9 @@ "showGallery": "Mostrar galería", "showOptionsPanel": "Mostrar el panel de opciones", "menu": "Menú" + }, + "ui": { + "hideProgressImages": "Ocultar el progreso de la imagen", + "showProgressImages": "Mostrar el progreso de la imagen" } } From 3c029eee2958213ddf1fd4028855e69f0589a79d Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Thu, 12 Oct 2023 12:43:47 +0000 Subject: [PATCH 003/202] translationBot(ui): update translation (Italian) Currently translated at 100.0% (526 of 526 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (523 of 523 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (519 of 519 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (515 of 515 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 25 ++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 4ac5b6831d..0022758827 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -19,7 +19,7 @@ "postProcessDesc3": "L'interfaccia da riga di comando di 'Invoke AI' offre varie altre funzionalità tra cui Embiggen.", "training": "Addestramento", "trainingDesc1": "Un flusso di lavoro dedicato per addestrare i tuoi Incorporamenti e Checkpoint utilizzando Inversione Testuale e Dreambooth dall'interfaccia web.", - "trainingDesc2": "InvokeAI supporta già l'addestramento di incorporamenti personalizzati utilizzando l'inversione testuale utilizzando lo script principale.", + "trainingDesc2": "InvokeAI supporta già l'addestramento di incorporamenti personalizzati utilizzando l'inversione testuale tramite lo script principale.", "upload": "Caricamento", "close": "Chiudi", "load": "Carica", @@ -74,7 +74,12 @@ "txt2img": "Testo a Immagine", "accept": "Accetta", "cancel": "Annulla", - "linear": "Lineare" + "linear": "Lineare", + "generate": "Genera", + "random": "Casuale", + "openInNewTab": "Apri in una nuova scheda", + "areYouSure": "Sei sicuro?", + "dontAskMeAgain": "Non chiedermelo più" }, "gallery": { "generations": "Generazioni", @@ -90,7 +95,10 @@ "pinGallery": "Blocca la galleria", "allImagesLoaded": "Tutte le immagini caricate", "loadMore": "Carica di più", - "noImagesInGallery": "Nessuna immagine nella galleria" + "noImagesInGallery": "Nessuna immagine nella galleria", + "deleteImage": "Elimina l'immagine", + "deleteImagePermanent": "Le immagini eliminate non possono essere ripristinate.", + "deleteImageBin": "Le immagini eliminate verranno spostate nel Cestino del tuo sistema operativo." 
}, "hotkeys": { "keyboardShortcuts": "Tasti rapidi", @@ -500,7 +508,12 @@ "resetWebUIDesc1": "Il ripristino dell'interfaccia utente Web reimposta solo la cache locale del browser delle immagini e le impostazioni memorizzate. Non cancella alcuna immagine dal disco.", "resetWebUIDesc2": "Se le immagini non vengono visualizzate nella galleria o qualcos'altro non funziona, prova a reimpostare prima di segnalare un problema su GitHub.", "resetComplete": "L'interfaccia utente Web è stata reimpostata. Aggiorna la pagina per ricaricarla.", - "useSlidersForAll": "Usa i cursori per tutte le opzioni" + "useSlidersForAll": "Usa i cursori per tutte le opzioni", + "general": "Generale", + "consoleLogLevel": "Livello del registro", + "shouldLogToConsole": "Registrazione della console", + "developer": "Sviluppatore", + "autoShowProgress": "Visualizzazione automatica avanzamento immagini" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", @@ -634,5 +647,9 @@ "toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico", "modifyConfig": "Modifica configurazione", "menu": "Menu" + }, + "ui": { + "hideProgressImages": "Nascondi avanzamento immagini", + "showProgressImages": "Mostra avanzamento immagini" } } From 7dee6f51a3b49fae0a23d9e48c3ced7d7fb610c7 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:43:50 +0000 Subject: [PATCH 004/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ar.json | 1 - invokeai/frontend/web/public/locales/de.json | 1 - invokeai/frontend/web/public/locales/es.json | 1 - invokeai/frontend/web/public/locales/fr.json | 1 - invokeai/frontend/web/public/locales/he.json | 1 - invokeai/frontend/web/public/locales/it.json | 1 - invokeai/frontend/web/public/locales/ja.json | 1 - invokeai/frontend/web/public/locales/nl.json | 1 - invokeai/frontend/web/public/locales/pl.json | 1 - invokeai/frontend/web/public/locales/pt.json | 1 - invokeai/frontend/web/public/locales/pt_BR.json | 1 - invokeai/frontend/web/public/locales/ru.json | 1 - invokeai/frontend/web/public/locales/uk.json | 1 - invokeai/frontend/web/public/locales/zh_CN.json | 1 - 14 files changed, 14 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json index e5168da4a8..cd015ec99e 100644 --- a/invokeai/frontend/web/public/locales/ar.json +++ b/invokeai/frontend/web/public/locales/ar.json @@ -393,7 +393,6 @@ "useAll": "استخدام الكل", "useInitImg": "استخدام الصورة الأولية", "info": "معلومات", - "deleteImage": "حذف الصورة", "initialImage": "الصورة الأولية", "showOptionsPanel": "إظهار لوحة الخيارات" }, diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index cff09d46bb..e4fd732bf4 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -394,7 +394,6 @@ "useSeed": "Seed verwenden", "useAll": "Alle verwenden", "useInitImg": "Ausgangsbild verwenden", - "deleteImage": "Bild löschen", "initialImage": "Ursprüngliches Bild", "showOptionsPanel": "Optionsleiste zeigen", "cancel": { diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index f2dcfa569a..68a35ca445 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ 
b/invokeai/frontend/web/public/locales/es.json @@ -475,7 +475,6 @@ "useAll": "Usar Todo", "useInitImg": "Usar Imagen Inicial", "info": "Información", - "deleteImage": "Eliminar Imagen", "initialImage": "Imagen Inicial", "showOptionsPanel": "Mostrar panel de opciones", "symmetry": "Simetría", diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index cf215d7d06..83be25d789 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -407,7 +407,6 @@ "useAll": "Tout utiliser", "useInitImg": "Utiliser l'image initiale", "info": "Info", - "deleteImage": "Supprimer l'image", "initialImage": "Image initiale", "showOptionsPanel": "Afficher le panneau d'options" }, diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json index c9b4ff3b17..2e7303b4cc 100644 --- a/invokeai/frontend/web/public/locales/he.json +++ b/invokeai/frontend/web/public/locales/he.json @@ -465,7 +465,6 @@ "img2imgStrength": "חוזק תמונה לתמונה", "initialImage": "תמונה ראשונית", "copyImageToLink": "העתקת תמונה לקישור", - "deleteImage": "מחיקת תמונה", "promptPlaceholder": "הקלד בקשה כאן. [אסימונים שליליים], (העלאת משקל)++ , (הורדת משקל)--, החלפה ומיזוג זמינים (ראה מסמכים)" }, "settings": { diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 0022758827..97f309bac6 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -475,7 +475,6 @@ "useAll": "Usa Tutto", "useInitImg": "Usa l'immagine iniziale", "info": "Informazioni", - "deleteImage": "Elimina immagine", "initialImage": "Immagine iniziale", "showOptionsPanel": "Mostra pannello opzioni", "general": "Generale", diff --git a/invokeai/frontend/web/public/locales/ja.json b/invokeai/frontend/web/public/locales/ja.json index 007aa9b491..bb899da814 100644 --- a/invokeai/frontend/web/public/locales/ja.json +++ b/invokeai/frontend/web/public/locales/ja.json @@ -355,7 +355,6 @@ "useSeed": "シード値を使用", "useAll": "すべてを使用", "info": "情報", - "deleteImage": "画像を削除", "showOptionsPanel": "オプションパネルを表示" }, "settings": { diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 230e3b5b64..95100fe9b5 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -466,7 +466,6 @@ "useAll": "Hergebruik alles", "useInitImg": "Gebruik initiële afbeelding", "info": "Info", - "deleteImage": "Verwijder afbeelding", "initialImage": "Initiële afbeelding", "showOptionsPanel": "Toon deelscherm Opties", "symmetry": "Symmetrie", diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index 246271658a..eff6faaa4b 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -324,7 +324,6 @@ "useAll": "Skopiuj wszystko", "useInitImg": "Użyj oryginalnego obrazu", "info": "Informacje", - "deleteImage": "Usuń obraz", "initialImage": "Oryginalny obraz", "showOptionsPanel": "Pokaż panel ustawień" }, diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index 6d19e3ad92..db4bc41fbb 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -441,7 +441,6 @@ "openInViewer": "Abrir No Visualizador", "closeViewer": "Fechar Visualizador", "usePrompt": "Usar Prompt", 
- "deleteImage": "Apagar Imagem", "initialImage": "Imagem inicial", "showOptionsPanel": "Mostrar Painel de Opções", "strength": "Força", diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index e77ef14719..060e9e4351 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -452,7 +452,6 @@ "useAll": "Usar Todos", "useInitImg": "Usar Imagem Inicial", "info": "Informações", - "deleteImage": "Apagar Imagem", "initialImage": "Imagem inicial", "showOptionsPanel": "Mostrar Painel de Opções", "vSymmetryStep": "V Passo de Simetria", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 822389d78a..d347feae5f 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -467,7 +467,6 @@ "useAll": "Использовать все", "useInitImg": "Использовать как исходное", "info": "Метаданные", - "deleteImage": "Удалить изображение", "initialImage": "Исходное изображение", "showOptionsPanel": "Показать панель настроек", "vSymmetryStep": "Шаг верт. симметрии", diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index 8261aa82e0..fbccdb630e 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ b/invokeai/frontend/web/public/locales/uk.json @@ -467,7 +467,6 @@ "useAll": "Використати все", "useInitImg": "Використати як початкове", "info": "Метадані", - "deleteImage": "Видалити зображення", "initialImage": "Початкове зображення", "showOptionsPanel": "Показати панель налаштувань", "general": "Основне", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index d4d7746926..2cf8baa825 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -374,7 +374,6 @@ "useAll": "使用所有参数", "useInitImg": "使用原图像", "info": "信息", - "deleteImage": "删除图像", "initialImage": "原图像", "showOptionsPanel": "显示选项浮窗" }, From 7a672bd2b22d8a5be6d91f8c0e2af8e65bc35a3e Mon Sep 17 00:00:00 2001 From: pand4z31 Date: Thu, 12 Oct 2023 12:43:51 +0000 Subject: [PATCH 005/202] translationBot(ui): update translation (French) Currently translated at 80.7% (419 of 519 strings) Co-authored-by: pand4z31 Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/fr/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/fr.json | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index 83be25d789..a60b12afcd 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -537,6 +537,10 @@ "useThisParameter": "Utiliser ce paramètre", "zoomIn": "Zoom avant", "zoomOut": "Zoom arrière", - "showOptionsPanel": "Montrer la page d'options" + "showOptionsPanel": "Montrer la page d'options", + "modelSelect": "Choix du modèle", + "invokeProgressBar": "Barre de Progression Invoke", + "copyMetadataJson": "Copie des métadonnées JSON", + "menu": "Menu" } } From 133ab91c8d0e77ae0722064dd11aeb6f5e2e3d02 Mon Sep 17 00:00:00 2001 From: System X - Files Date: Thu, 12 Oct 2023 12:43:56 +0000 Subject: [PATCH 006/202] translationBot(ui): update translation (Russian) Currently translated at 100.0% (526 of 526 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (519 of 519 
strings) Co-authored-by: System X - Files Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ru/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ru.json | 23 +++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index d347feae5f..93c30025c3 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -74,7 +74,12 @@ "langPortuguese": "Португальский", "txt2img": "Текст в изображение (txt2img)", "langBrPortuguese": "Португальский (Бразилия)", - "linear": "Линейная обработка" + "linear": "Линейная обработка", + "dontAskMeAgain": "Больше не спрашивать", + "areYouSure": "Вы уверены?", + "random": "Случайное", + "generate": "Сгенерировать", + "openInNewTab": "Открыть в новой вкладке" }, "gallery": { "generations": "Генерации", @@ -90,7 +95,10 @@ "pinGallery": "Закрепить галерею", "allImagesLoaded": "Все изображения загружены", "loadMore": "Показать больше", - "noImagesInGallery": "Изображений нет" + "noImagesInGallery": "Изображений нет", + "deleteImagePermanent": "Удаленные изображения невозможно восстановить.", + "deleteImageBin": "Удаленные изображения будут отправлены в корзину вашей операционной системы.", + "deleteImage": "Удалить изображение" }, "hotkeys": { "keyboardShortcuts": "Горячие клавиши", @@ -499,7 +507,12 @@ "resetWebUIDesc1": "Сброс настроек веб-интерфейса удаляет только локальный кэш браузера с вашими изображениями и настройками. Он не удаляет изображения с диска.", "resetWebUIDesc2": "Если изображения не отображаются в галерее или не работает что-то еще, пожалуйста, попробуйте сбросить настройки, прежде чем сообщать о проблеме на GitHub.", "resetComplete": "Интерфейс сброшен. Обновите эту страницу.", - "useSlidersForAll": "Использовать ползунки для всех параметров" + "useSlidersForAll": "Использовать ползунки для всех параметров", + "consoleLogLevel": "Уровень логирования", + "shouldLogToConsole": "Логи в консоль", + "developer": "Разработчик", + "autoShowProgress": "Автопоказ изображений прогресса", + "general": "Основное" }, "toast": { "tempFoldersEmptied": "Временная папка очищена", @@ -633,5 +646,9 @@ "copyMetadataJson": "Скопировать метаданные JSON", "exitViewer": "Закрыть просмотрщик", "menu": "Меню" + }, + "ui": { + "showProgressImages": "Показывать промежуточный итог", + "hideProgressImages": "Не показывать промежуточный итог" } } From 0a48c5a712868ce7b2e584eba2cd41fb35c079ab Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:44:01 +0000 Subject: [PATCH 007/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. 
Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ar.json | 2 -- invokeai/frontend/web/public/locales/de.json | 2 -- invokeai/frontend/web/public/locales/es.json | 5 +---- invokeai/frontend/web/public/locales/fr.json | 2 -- invokeai/frontend/web/public/locales/he.json | 2 -- invokeai/frontend/web/public/locales/it.json | 5 +---- invokeai/frontend/web/public/locales/ja.json | 1 - invokeai/frontend/web/public/locales/nl.json | 2 -- invokeai/frontend/web/public/locales/pl.json | 2 -- invokeai/frontend/web/public/locales/pt.json | 2 -- invokeai/frontend/web/public/locales/pt_BR.json | 2 -- invokeai/frontend/web/public/locales/ru.json | 3 --- invokeai/frontend/web/public/locales/uk.json | 2 -- invokeai/frontend/web/public/locales/zh_CN.json | 2 -- 14 files changed, 2 insertions(+), 32 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json index cd015ec99e..4810b8afff 100644 --- a/invokeai/frontend/web/public/locales/ar.json +++ b/invokeai/frontend/web/public/locales/ar.json @@ -342,7 +342,6 @@ "cfgScale": "مقياس الإعداد الذاتي للجملة", "width": "عرض", "height": "ارتفاع", - "sampler": "مزج", "seed": "بذرة", "randomizeSeed": "تبديل بذرة", "shuffle": "تشغيل", @@ -412,7 +411,6 @@ "toast": { "tempFoldersEmptied": "تم تفريغ مجلد المؤقت", "uploadFailed": "فشل التحميل", - "uploadFailedMultipleImagesDesc": "تم الصق صور متعددة، قد يتم تحميل صورة واحدة فقط في الوقت الحالي", "uploadFailedUnableToLoadDesc": "تعذر تحميل الملف", "downloadImageStarted": "بدأ تنزيل الصورة", "imageCopied": "تم نسخ الصورة", diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index e4fd732bf4..c1d54ac199 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -346,7 +346,6 @@ "cfgScale": "CFG-Skala", "width": "Breite", "height": "Höhe", - "sampler": "Sampler", "randomizeSeed": "Zufälliger Seed", "shuffle": "Mischen", "noiseThreshold": "Rausch-Schwellenwert", @@ -415,7 +414,6 @@ "toast": { "tempFoldersEmptied": "Temp-Ordner geleert", "uploadFailed": "Hochladen fehlgeschlagen", - "uploadFailedMultipleImagesDesc": "Mehrere Bilder eingefügt, es kann nur ein Bild auf einmal hochgeladen werden", "uploadFailedUnableToLoadDesc": "Datei kann nicht geladen werden", "downloadImageStarted": "Bild wird heruntergeladen", "imageCopied": "Bild kopiert", diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 68a35ca445..8c39e3d207 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -425,7 +425,6 @@ "cfgScale": "Escala CFG", "width": "Ancho", "height": "Alto", - "sampler": "Muestreo", "seed": "Semilla", "randomizeSeed": "Semilla aleatoria", "shuffle": "Aleatorizar", @@ -511,13 +510,11 @@ "general": "General", "consoleLogLevel": "Nivel del registro", "shouldLogToConsole": "Registro de la consola", - "developer": "Desarrollador", - "autoShowProgress": "Mostrar automáticamente el progreso de las imágenes" + "developer": "Desarrollador" }, "toast": { "tempFoldersEmptied": "Directorio temporal vaciado", "uploadFailed": "Error al subir archivo", - "uploadFailedMultipleImagesDesc": "Únicamente se puede subir una imágen a la vez", "uploadFailedUnableToLoadDesc": "No se pudo cargar la imágen", "downloadImageStarted": "Descargando imágen", "imageCopied": 
"Imágen copiada", diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index a60b12afcd..9e78c5b9ed 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -356,7 +356,6 @@ "cfgScale": "CFG Echelle", "width": "Largeur", "height": "Hauteur", - "sampler": "Echantillonneur", "seed": "Graine", "randomizeSeed": "Graine Aléatoire", "shuffle": "Mélanger", @@ -426,7 +425,6 @@ "toast": { "tempFoldersEmptied": "Dossiers temporaires vidés", "uploadFailed": "Téléchargement échoué", - "uploadFailedMultipleImagesDesc": "Plusieurs images collées, peut uniquement télécharger une image à la fois", "uploadFailedUnableToLoadDesc": "Impossible de charger le fichier", "downloadImageStarted": "Téléchargement de l'image démarré", "imageCopied": "Image copiée", diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json index 2e7303b4cc..b485a7d2be 100644 --- a/invokeai/frontend/web/public/locales/he.json +++ b/invokeai/frontend/web/public/locales/he.json @@ -399,7 +399,6 @@ "cfgScale": "סולם CFG", "width": "רוחב", "height": "גובה", - "sampler": "דוגם", "seed": "זרע", "imageToImage": "תמונה לתמונה", "randomizeSeed": "זרע אקראי", @@ -483,7 +482,6 @@ }, "toast": { "uploadFailed": "העלאה נכשלה", - "uploadFailedMultipleImagesDesc": "תמונות מרובות הודבקו, ניתן להעלות תמונה אחת בלבד בכל פעם", "imageCopied": "התמונה הועתקה", "imageLinkCopied": "קישור תמונה הועתק", "imageNotLoadedDesc": "לא נמצאה תמונה לשליחה למודול תמונה לתמונה", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 97f309bac6..3313e26c45 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -425,7 +425,6 @@ "cfgScale": "Scala CFG", "width": "Larghezza", "height": "Altezza", - "sampler": "Campionatore", "seed": "Seme", "randomizeSeed": "Seme randomizzato", "shuffle": "Casuale", @@ -511,13 +510,11 @@ "general": "Generale", "consoleLogLevel": "Livello del registro", "shouldLogToConsole": "Registrazione della console", - "developer": "Sviluppatore", - "autoShowProgress": "Visualizzazione automatica avanzamento immagini" + "developer": "Sviluppatore" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", "uploadFailed": "Caricamento fallito", - "uploadFailedMultipleImagesDesc": "Più immagini incollate, si può caricare solo un'immagine alla volta", "uploadFailedUnableToLoadDesc": "Impossibile caricare il file", "downloadImageStarted": "Download dell'immagine avviato", "imageCopied": "Immagine copiata", diff --git a/invokeai/frontend/web/public/locales/ja.json b/invokeai/frontend/web/public/locales/ja.json index bb899da814..d7f4510789 100644 --- a/invokeai/frontend/web/public/locales/ja.json +++ b/invokeai/frontend/web/public/locales/ja.json @@ -372,7 +372,6 @@ }, "toast": { "uploadFailed": "アップロード失敗", - "uploadFailedMultipleImagesDesc": "一度にアップロードできる画像は1枚のみです。", "uploadFailedUnableToLoadDesc": "ファイルを読み込むことができません。", "downloadImageStarted": "画像ダウンロード開始", "imageCopied": "画像をコピー", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 95100fe9b5..8ffda36441 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -416,7 +416,6 @@ "cfgScale": "CFG-schaal", "width": "Breedte", "height": "Hoogte", - "sampler": "Sampler", "seed": "Seed", "randomizeSeed": "Willekeurige seed", 
"shuffle": "Meng", @@ -501,7 +500,6 @@ "toast": { "tempFoldersEmptied": "Tijdelijke map geleegd", "uploadFailed": "Upload mislukt", - "uploadFailedMultipleImagesDesc": "Meerdere afbeeldingen geplakt, slechts een afbeelding per keer toegestaan", "uploadFailedUnableToLoadDesc": "Kan bestand niet laden", "downloadImageStarted": "Afbeeldingsdownload gestart", "imageCopied": "Afbeelding gekopieerd", diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index eff6faaa4b..533bdca861 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -274,7 +274,6 @@ "cfgScale": "Skala CFG", "width": "Szerokość", "height": "Wysokość", - "sampler": "Próbkowanie", "seed": "Inicjator", "randomizeSeed": "Losowy inicjator", "shuffle": "Losuj", @@ -343,7 +342,6 @@ "toast": { "tempFoldersEmptied": "Wyczyszczono folder tymczasowy", "uploadFailed": "Błąd przesyłania obrazu", - "uploadFailedMultipleImagesDesc": "Możliwe jest przesłanie tylko jednego obrazu na raz", "uploadFailedUnableToLoadDesc": "Błąd wczytywania obrazu", "downloadImageStarted": "Rozpoczęto pobieranie", "imageCopied": "Skopiowano obraz", diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index db4bc41fbb..f45a02381a 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -453,7 +453,6 @@ "steps": "Passos", "cfgScale": "Escala CFG", "height": "Altura", - "sampler": "Amostrador", "imageToImage": "Imagem para Imagem", "variationAmount": "Quntidade de Variatções", "scaledWidth": "L Escalada", @@ -493,7 +492,6 @@ }, "toast": { "uploadFailed": "Envio Falhou", - "uploadFailedMultipleImagesDesc": "Várias imagens copiadas, só é permitido uma imagem de cada vez", "uploadFailedUnableToLoadDesc": "Não foj possível carregar o ficheiro", "downloadImageStarted": "Download de Imagem Começou", "imageNotLoadedDesc": "Nenhuma imagem encontrada a enviar para o módulo de imagem para imagem", diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index 060e9e4351..42e7709d75 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -402,7 +402,6 @@ "cfgScale": "Escala CFG", "width": "Largura", "height": "Altura", - "sampler": "Amostrador", "seed": "Seed", "randomizeSeed": "Seed Aleatório", "shuffle": "Embaralhar", @@ -487,7 +486,6 @@ "toast": { "tempFoldersEmptied": "Pasta de Arquivos Temporários Esvaziada", "uploadFailed": "Envio Falhou", - "uploadFailedMultipleImagesDesc": "Várias imagens copiadas, só é permitido uma imagem de cada vez", "uploadFailedUnableToLoadDesc": "Não foj possível carregar o arquivo", "downloadImageStarted": "Download de Imagem Começou", "imageCopied": "Imagem Copiada", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 93c30025c3..d823b71a9b 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -425,7 +425,6 @@ "cfgScale": "Уровень CFG", "width": "Ширина", "height": "Высота", - "sampler": "Семплер", "seed": "Сид", "randomizeSeed": "Случайный сид", "shuffle": "Обновить", @@ -511,13 +510,11 @@ "consoleLogLevel": "Уровень логирования", "shouldLogToConsole": "Логи в консоль", "developer": "Разработчик", - "autoShowProgress": "Автопоказ изображений прогресса", "general": "Основное" }, "toast": { 
"tempFoldersEmptied": "Временная папка очищена", "uploadFailed": "Загрузка не удалась", - "uploadFailedMultipleImagesDesc": "Можно вставить только одно изображение (вы попробовали вставить несколько)", "uploadFailedUnableToLoadDesc": "Невозможно загрузить файл", "downloadImageStarted": "Скачивание изображения началось", "imageCopied": "Изображение скопировано", diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index fbccdb630e..00d710b5e4 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ b/invokeai/frontend/web/public/locales/uk.json @@ -417,7 +417,6 @@ "cfgScale": "Рівень CFG", "width": "Ширина", "height": "Висота", - "sampler": "Семплер", "seed": "Сід", "randomizeSeed": "Випадковий сид", "shuffle": "Оновити", @@ -504,7 +503,6 @@ "toast": { "tempFoldersEmptied": "Тимчасова папка очищена", "uploadFailed": "Не вдалося завантажити", - "uploadFailedMultipleImagesDesc": "Можна вставити лише одне зображення (ви спробували вставити декілька)", "uploadFailedUnableToLoadDesc": "Неможливо завантажити файл", "downloadImageStarted": "Завантаження зображення почалося", "imageCopied": "Зображення скопійоване", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 2cf8baa825..e2ffa5797c 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -324,7 +324,6 @@ "cfgScale": "CFG 等级", "width": "宽度", "height": "高度", - "sampler": "采样算法", "seed": "种子", "randomizeSeed": "随机化种子", "shuffle": "随机化", @@ -393,7 +392,6 @@ "toast": { "tempFoldersEmptied": "临时文件夹已清空", "uploadFailed": "上传失败", - "uploadFailedMultipleImagesDesc": "多张图像被粘贴,同时只能上传一张图像", "uploadFailedUnableToLoadDesc": "无法加载文件", "downloadImageStarted": "图像下载已开始", "imageCopied": "图像已复制", From ba38aa56a5c0aecb3a854ede08c53fa701844402 Mon Sep 17 00:00:00 2001 From: gallegonovato Date: Thu, 12 Oct 2023 12:44:04 +0000 Subject: [PATCH 008/202] translationBot(ui): update translation (Spanish) Currently translated at 100.0% (591 of 591 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (586 of 586 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (578 of 578 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (563 of 563 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (550 of 550 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (550 of 550 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (548 of 548 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (546 of 546 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (544 of 544 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (543 of 543 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (542 of 542 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (542 of 542 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (540 of 540 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (536 of 536 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (536 of 536 strings) translationBot(ui): update translation (Spanish) 
Currently translated at 100.0% (533 of 533 strings) translationBot(ui): update translation (Spanish) Currently translated at 99.8% (532 of 533 strings) Co-authored-by: gallegonovato Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/es/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/es.json | 118 ++++++++++++++++--- 1 file changed, 101 insertions(+), 17 deletions(-) diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 8c39e3d207..8aa610653b 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -10,7 +10,7 @@ "greenTheme": "Verde", "img2img": "Imagen a Imagen", "unifiedCanvas": "Lienzo Unificado", - "nodes": "Nodos", + "nodes": "Editor de nodos", "langSpanish": "Español", "nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.", "postProcessing": "Post-procesamiento", @@ -79,7 +79,14 @@ "generate": "Generar", "openInNewTab": "Abrir en una nueva pestaña", "dontAskMeAgain": "No me preguntes de nuevo", - "areYouSure": "¿Estas seguro?" + "areYouSure": "¿Estas seguro?", + "imagePrompt": "Indicación de imagen", + "batch": "Administrador de lotes", + "modelmanager": "Administrador de modelos", + "darkMode": "Modo oscuro", + "lightMode": "Modo claro", + "modelManager": "Administrador de modelos", + "clearNodes": "¿Estás seguro de que deseas borrar todos los nodos?" }, "gallery": { "generations": "Generaciones", @@ -95,10 +102,12 @@ "pinGallery": "Fijar galería", "allImagesLoaded": "Todas las imágenes cargadas", "loadMore": "Cargar más", - "noImagesInGallery": "Sin imágenes en la galería", + "noImagesInGallery": "No hay imágenes para mostrar", "deleteImage": "Eliminar Imagen", "deleteImageBin": "Las imágenes eliminadas se enviarán a la papelera de tu sistema operativo.", - "deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar." + "deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.", + "images": "Imágenes", + "assets": "Activos" }, "hotkeys": { "keyboardShortcuts": "Atajos de teclado", @@ -357,8 +366,8 @@ "delete": "Eliminar", "deleteModel": "Eliminar Modelo", "deleteConfig": "Eliminar Configuración", - "deleteMsg1": "¿Estás seguro de querer eliminar esta entrada de modelo de InvokeAI?", - "deleteMsg2": "El checkpoint del modelo no se eliminará de tu disco. Puedes volver a añadirlo si lo deseas.", + "deleteMsg1": "¿Estás seguro de que deseas eliminar este modelo de InvokeAI?", + "deleteMsg2": "Esto eliminará el modelo del disco si está en la carpeta raíz de InvokeAI. Si está utilizando una ubicación personalizada, el modelo NO se eliminará del disco.", "safetensorModels": "SafeTensors", "addDiffuserModel": "Añadir difusores", "inpainting": "v1 Repintado", @@ -377,8 +386,8 @@ "convertToDiffusers": "Convertir en difusores", "convertToDiffusersHelpText1": "Este modelo se convertirá al formato 🧨 Difusores.", "convertToDiffusersHelpText2": "Este proceso sustituirá su entrada del Gestor de Modelos por la versión de Difusores del mismo modelo.", - "convertToDiffusersHelpText3": "Su archivo de puntos de control en el disco NO será borrado ni modificado de ninguna manera. Puede volver a añadir su punto de control al Gestor de Modelos si lo desea.", - "convertToDiffusersHelpText5": "Asegúrese de que dispone de suficiente espacio en disco. 
Los modelos suelen variar entre 4 GB y 7 GB de tamaño.", + "convertToDiffusersHelpText3": "Tu archivo del punto de control en el disco se eliminará si está en la carpeta raíz de InvokeAI. Si está en una ubicación personalizada, NO se eliminará.", + "convertToDiffusersHelpText5": "Por favor, asegúrate de tener suficiente espacio en el disco. Los modelos generalmente varían entre 2 GB y 7 GB de tamaño.", "convertToDiffusersHelpText6": "¿Desea transformar este modelo?", "convertToDiffusersSaveLocation": "Guardar ubicación", "v1": "v1", @@ -417,7 +426,24 @@ "pickModelType": "Elige el tipo de modelo", "v2_768": "v2 (768px)", "addDifference": "Añadir una diferencia", - "scanForModels": "Buscar modelos" + "scanForModels": "Buscar modelos", + "vae": "VAE", + "variant": "Variante", + "baseModel": "Modelo básico", + "modelConversionFailed": "Conversión al modelo fallida", + "selectModel": "Seleccionar un modelo", + "modelUpdateFailed": "Error al actualizar el modelo", + "modelsMergeFailed": "Fusión del modelo fallida", + "convertingModelBegin": "Convirtiendo el modelo. Por favor, espere.", + "modelDeleted": "Modelo eliminado", + "modelDeleteFailed": "Error al borrar el modelo", + "noCustomLocationProvided": "‐No se proporcionó una ubicación personalizada", + "importModels": "Importar los modelos", + "settings": "Ajustes", + "syncModels": "Sincronizar las plantillas", + "syncModelsDesc": "Si tus plantillas no están sincronizados con el backend, puedes actualizarlas usando esta opción. Esto suele ser útil en los casos en los que actualizas manualmente tu archivo models.yaml o añades plantillas a la carpeta raíz de InvokeAI después de que la aplicación haya arrancado.", + "modelsSynced": "Plantillas sincronizadas", + "modelSyncFailed": "La sincronización de la plantilla falló" }, "parameters": { "images": "Imágenes", @@ -427,7 +453,7 @@ "height": "Alto", "seed": "Semilla", "randomizeSeed": "Semilla aleatoria", - "shuffle": "Aleatorizar", + "shuffle": "Semilla aleatoria", "noiseThreshold": "Umbral de Ruido", "perlinNoise": "Ruido Perlin", "variations": "Variaciones", @@ -492,11 +518,22 @@ "denoisingStrength": "Intensidad de la eliminación del ruido", "hiresStrength": "Alta resistencia", "showPreview": "Mostrar la vista previa", - "hidePreview": "Ocultar la vista previa" + "hidePreview": "Ocultar la vista previa", + "noiseSettings": "Ruido", + "seamlessXAxis": "Eje x", + "seamlessYAxis": "Eje y", + "scheduler": "Programador", + "boundingBoxWidth": "Anchura del recuadro", + "boundingBoxHeight": "Altura del recuadro", + "positivePromptPlaceholder": "Prompt Positivo", + "negativePromptPlaceholder": "Prompt Negativo", + "controlNetControlMode": "Modo de control", + "clipSkip": "Omitir el CLIP", + "aspectRatio": "Relación" }, "settings": { "models": "Modelos", - "displayInProgress": "Mostrar imágenes en progreso", + "displayInProgress": "Mostrar las imágenes del progreso", "saveSteps": "Guardar imágenes cada n pasos", "confirmOnDelete": "Confirmar antes de eliminar", "displayHelpIcons": "Mostrar iconos de ayuda", @@ -510,7 +547,19 @@ "general": "General", "consoleLogLevel": "Nivel del registro", "shouldLogToConsole": "Registro de la consola", - "developer": "Desarrollador" + "developer": "Desarrollador", + "antialiasProgressImages": "Imágenes del progreso de Antialias", + "showProgressInViewer": "Mostrar las imágenes del progreso en el visor", + "availableSchedulers": "Programadores disponibles", + "ui": "Interfaz del usuario", + "generation": "Generación", + "favoriteSchedulers": "Programadores favoritos", + 
"favoriteSchedulersPlaceholder": "No hay programadores favoritos", + "showAdvancedOptions": "Mostrar las opciones avanzadas", + "alternateCanvasLayout": "Diseño alternativo del lienzo", + "beta": "Beta", + "enableNodesEditor": "Activar el editor de nodos", + "experimental": "Experimental" }, "toast": { "tempFoldersEmptied": "Directorio temporal vaciado", @@ -520,7 +569,7 @@ "imageCopied": "Imágen copiada", "imageLinkCopied": "Enlace de imágen copiado", "imageNotLoaded": "No se cargó la imágen", - "imageNotLoadedDesc": "No se encontró imagen para enviar al módulo Imagen a Imagen", + "imageNotLoadedDesc": "No se pudo encontrar la imagen", "imageSavedToGallery": "Imágen guardada en la galería", "canvasMerged": "Lienzo consolidado", "sentToImageToImage": "Enviar hacia Imagen a Imagen", @@ -545,7 +594,21 @@ "serverError": "Error en el servidor", "disconnected": "Desconectado del servidor", "canceled": "Procesando la cancelación", - "connected": "Conectado al servidor" + "connected": "Conectado al servidor", + "problemCopyingImageLink": "No se puede copiar el enlace de la imagen", + "uploadFailedInvalidUploadDesc": "Debe ser una sola imagen PNG o JPEG", + "parameterSet": "Conjunto de parámetros", + "parameterNotSet": "Parámetro no configurado", + "nodesSaved": "Nodos guardados", + "nodesLoadedFailed": "Error al cargar los nodos", + "nodesLoaded": "Nodos cargados", + "nodesCleared": "Nodos borrados", + "problemCopyingImage": "No se puede copiar la imagen", + "nodesNotValidJSON": "JSON no válido", + "nodesCorruptedGraph": "No se puede cargar. El gráfico parece estar dañado.", + "nodesUnrecognizedTypes": "No se puede cargar. El gráfico tiene tipos no reconocidos", + "nodesNotValidGraph": "Gráfico del nodo InvokeAI no válido", + "nodesBrokenConnections": "No se puede cargar. Algunas conexiones están rotas." 
}, "tooltip": { "feature": { @@ -619,7 +682,8 @@ "betaClear": "Limpiar", "betaDarkenOutside": "Oscurecer fuera", "betaLimitToBox": "Limitar a caja", - "betaPreserveMasked": "Preservar área enmascarada" + "betaPreserveMasked": "Preservar área enmascarada", + "antialiasing": "Suavizado" }, "accessibility": { "invokeProgressBar": "Activar la barra de progreso", @@ -646,6 +710,26 @@ }, "ui": { "hideProgressImages": "Ocultar el progreso de la imagen", - "showProgressImages": "Mostrar el progreso de la imagen" + "showProgressImages": "Mostrar el progreso de la imagen", + "swapSizes": "Cambiar los tamaños" + }, + "nodes": { + "reloadSchema": "Esquema de recarga", + "loadNodes": "Nodos de carga", + "clearNodes": "Borrar los nodos", + "saveNodes": "Guardar los nodos", + "showGraphNodes": "Mostrar la superposición de los gráficos", + "zoomInNodes": "Acercar", + "hideMinimapnodes": "Ocultar el minimapa", + "fitViewportNodes": "Ajustar la vista", + "zoomOutNodes": "Alejar", + "hideGraphNodes": "Ocultar la superposición de los gráficos", + "hideLegendNodes": "Ocultar la leyenda del tipo de campo", + "showLegendNodes": "Mostrar la leyenda del tipo de campo", + "showMinimapnodes": "Mostrar el minimapa", + "saveGraph": "Guardar el gráfico", + "clearGraph": "Borrar el gráfico", + "clearGraphDesc": "¿Estás seguro de que deseas borrar todos los nodos?", + "loadGraph": "Cargar el gráfico (guardado desde el Editor de nodos) (No copiar y pegar los metadatos)" } } From 732ab38ca6bf1717036075bebef9e7f8246a942f Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Thu, 12 Oct 2023 12:44:07 +0000 Subject: [PATCH 009/202] translationBot(ui): update translation (Italian) Currently translated at 100.0% (542 of 542 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (542 of 542 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (540 of 540 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (538 of 538 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (536 of 536 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (536 of 536 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (536 of 536 strings) translationBot(ui): update translation (Italian) Currently translated at 99.8% (535 of 536 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (533 of 533 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (533 of 533 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 43 +++++++++++++++----- 1 file changed, 32 insertions(+), 11 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 3313e26c45..251bae38f0 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -2,7 +2,7 @@ "common": { "hotkeysLabel": "Tasti di scelta rapida", "themeLabel": "Tema", - "languagePickerLabel": "Seleziona lingua", + "languagePickerLabel": "Lingua", "reportBugLabel": "Segnala un errore", "settingsLabel": "Impostazioni", "darkTheme": "Scuro", @@ -10,7 +10,7 @@ "greenTheme": "Verde", "img2img": "Immagine a Immagine", "unifiedCanvas": "Tela unificata", - "nodes": "Nodi", + "nodes": "Editor 
dei Nodi", "langItalian": "Italiano", "nodesDesc": "Attualmente è in fase di sviluppo un sistema basato su nodi per la generazione di immagini. Resta sintonizzato per gli aggiornamenti su questa fantastica funzionalità.", "postProcessing": "Post-elaborazione", @@ -79,7 +79,8 @@ "random": "Casuale", "openInNewTab": "Apri in una nuova scheda", "areYouSure": "Sei sicuro?", - "dontAskMeAgain": "Non chiedermelo più" + "dontAskMeAgain": "Non chiedermelo più", + "imagePrompt": "Prompt Immagine" }, "gallery": { "generations": "Generazioni", @@ -98,7 +99,9 @@ "noImagesInGallery": "Nessuna immagine nella galleria", "deleteImage": "Elimina l'immagine", "deleteImagePermanent": "Le immagini eliminate non possono essere ripristinate.", - "deleteImageBin": "Le immagini eliminate verranno spostate nel Cestino del tuo sistema operativo." + "deleteImageBin": "Le immagini eliminate verranno spostate nel Cestino del tuo sistema operativo.", + "images": "Immagini", + "assets": "Risorse" }, "hotkeys": { "keyboardShortcuts": "Tasti rapidi", @@ -427,7 +430,7 @@ "height": "Altezza", "seed": "Seme", "randomizeSeed": "Seme randomizzato", - "shuffle": "Casuale", + "shuffle": "Mescola il seme", "noiseThreshold": "Soglia del rumore", "perlinNoise": "Rumore Perlin", "variations": "Variazioni", @@ -492,11 +495,19 @@ "vSymmetryStep": "Passi Simmetria Verticale", "symmetry": "Simmetria", "hidePreview": "Nascondi l'anteprima", - "showPreview": "Mostra l'anteprima" + "showPreview": "Mostra l'anteprima", + "noiseSettings": "Rumore", + "seamlessXAxis": "Asse X", + "seamlessYAxis": "Asse Y", + "scheduler": "Campionatore", + "boundingBoxWidth": "Larghezza riquadro di delimitazione", + "boundingBoxHeight": "Altezza riquadro di delimitazione", + "positivePromptPlaceholder": "Prompt Positivo", + "negativePromptPlaceholder": "Prompt Negativo" }, "settings": { "models": "Modelli", - "displayInProgress": "Visualizza immagini in corso", + "displayInProgress": "Visualizza le immagini di avanzamento", "saveSteps": "Salva le immagini ogni n passaggi", "confirmOnDelete": "Conferma l'eliminazione", "displayHelpIcons": "Visualizza le icone della Guida", @@ -510,7 +521,12 @@ "general": "Generale", "consoleLogLevel": "Livello del registro", "shouldLogToConsole": "Registrazione della console", - "developer": "Sviluppatore" + "developer": "Sviluppatore", + "antialiasProgressImages": "Anti aliasing delle immagini di avanzamento", + "showProgressInViewer": "Mostra le immagini di avanzamento nel visualizzatore", + "generation": "Generazione", + "ui": "Interfaccia Utente", + "availableSchedulers": "Campionatori disponibili" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", @@ -520,7 +536,7 @@ "imageCopied": "Immagine copiata", "imageLinkCopied": "Collegamento immagine copiato", "imageNotLoaded": "Nessuna immagine caricata", - "imageNotLoadedDesc": "Nessuna immagine trovata da inviare al modulo da Immagine a Immagine", + "imageNotLoadedDesc": "Impossibile trovare l'immagine", "imageSavedToGallery": "Immagine salvata nella Galleria", "canvasMerged": "Tela unita", "sentToImageToImage": "Inviato a da Immagine a Immagine", @@ -545,7 +561,11 @@ "serverError": "Errore del Server", "disconnected": "Disconnesso dal Server", "connected": "Connesso al Server", - "canceled": "Elaborazione annullata" + "canceled": "Elaborazione annullata", + "problemCopyingImageLink": "Impossibile copiare il collegamento dell'immagine", + "uploadFailedInvalidUploadDesc": "Deve essere una singola immagine PNG o JPEG", + "parameterSet": "Parametro impostato", + 
"parameterNotSet": "Parametro non impostato" }, "tooltip": { "feature": { @@ -619,7 +639,8 @@ "betaClear": "Svuota", "betaDarkenOutside": "Oscura all'esterno", "betaLimitToBox": "Limita al rettangolo", - "betaPreserveMasked": "Conserva quanto mascherato" + "betaPreserveMasked": "Conserva quanto mascherato", + "antialiasing": "Anti aliasing" }, "accessibility": { "modelSelect": "Seleziona modello", From 494bde785e18ebe1bab9e38a70e4c5a2295bef79 Mon Sep 17 00:00:00 2001 From: System X - Files Date: Thu, 12 Oct 2023 12:44:08 +0000 Subject: [PATCH 010/202] translationBot(ui): update translation (Russian) Currently translated at 100.0% (542 of 542 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (542 of 542 strings) translationBot(ui): update translation (Russian) Currently translated at 98.8% (536 of 542 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (536 of 536 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (533 of 533 strings) Co-authored-by: System X - Files Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ru/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ru.json | 39 +++++++++++++++----- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index d823b71a9b..a0d3a26ca6 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -10,7 +10,7 @@ "greenTheme": "Зеленая", "img2img": "Изображение в изображение (img2img)", "unifiedCanvas": "Единый холст", - "nodes": "Ноды", + "nodes": "Редактор нод", "langRussian": "Русский", "nodesDesc": "Cистема генерации изображений на основе нодов (узлов) уже разрабатывается. 
Следите за новостями об этой замечательной функции.", "postProcessing": "Постобработка", @@ -79,7 +79,8 @@ "areYouSure": "Вы уверены?", "random": "Случайное", "generate": "Сгенерировать", - "openInNewTab": "Открыть в новой вкладке" + "openInNewTab": "Открыть в новой вкладке", + "imagePrompt": "Запрос" }, "gallery": { "generations": "Генерации", @@ -98,7 +99,9 @@ "noImagesInGallery": "Изображений нет", "deleteImagePermanent": "Удаленные изображения невозможно восстановить.", "deleteImageBin": "Удаленные изображения будут отправлены в корзину вашей операционной системы.", - "deleteImage": "Удалить изображение" + "deleteImage": "Удалить изображение", + "images": "Изображения", + "assets": "Ресурсы" }, "hotkeys": { "keyboardShortcuts": "Горячие клавиши", @@ -427,7 +430,7 @@ "height": "Высота", "seed": "Сид", "randomizeSeed": "Случайный сид", - "shuffle": "Обновить", + "shuffle": "Обновить сид", "noiseThreshold": "Порог шума", "perlinNoise": "Шум Перлина", "variations": "Вариации", @@ -492,7 +495,15 @@ "denoisingStrength": "Сила шумоподавления", "copyImage": "Скопировать изображение", "negativePrompts": "Исключающий запрос", - "showPreview": "Показать предпросмотр" + "showPreview": "Показать предпросмотр", + "noiseSettings": "Шум", + "seamlessXAxis": "Ось X", + "seamlessYAxis": "Ось Y", + "scheduler": "Планировщик", + "boundingBoxWidth": "Ширина ограничивающей рамки", + "boundingBoxHeight": "Высота ограничивающей рамки", + "positivePromptPlaceholder": "Запрос", + "negativePromptPlaceholder": "Исключающий запрос" }, "settings": { "models": "Модели", @@ -510,7 +521,12 @@ "consoleLogLevel": "Уровень логирования", "shouldLogToConsole": "Логи в консоль", "developer": "Разработчик", - "general": "Основное" + "general": "Основное", + "showProgressInViewer": "Показывать процесс генерации в Просмотрщике", + "antialiasProgressImages": "Сглаживать предпоказ процесса генерации", + "generation": "Поколение", + "ui": "Пользовательский интерфейс", + "availableSchedulers": "Доступные планировщики" }, "toast": { "tempFoldersEmptied": "Временная папка очищена", @@ -520,7 +536,7 @@ "imageCopied": "Изображение скопировано", "imageLinkCopied": "Ссылка на изображение скопирована", "imageNotLoaded": "Изображение не загружено", - "imageNotLoadedDesc": "Не найдены изображения для отправки в img2img", + "imageNotLoadedDesc": "Не удалось найти изображение", "imageSavedToGallery": "Изображение сохранено в галерею", "canvasMerged": "Холст объединен", "sentToImageToImage": "Отправить в img2img", @@ -545,7 +561,11 @@ "serverError": "Ошибка сервера", "disconnected": "Отключено от сервера", "connected": "Подключено к серверу", - "canceled": "Обработка отменена" + "canceled": "Обработка отменена", + "problemCopyingImageLink": "Не удалось скопировать ссылку на изображение", + "uploadFailedInvalidUploadDesc": "Должно быть одно изображение в формате PNG или JPEG", + "parameterNotSet": "Параметр не задан", + "parameterSet": "Параметр задан" }, "tooltip": { "feature": { @@ -619,7 +639,8 @@ "betaClear": "Очистить", "betaDarkenOutside": "Затемнить снаружи", "betaLimitToBox": "Ограничить выделением", - "betaPreserveMasked": "Сохранять маскируемую область" + "betaPreserveMasked": "Сохранять маскируемую область", + "antialiasing": "Не удалось скопировать ссылку на изображение" }, "accessibility": { "modelSelect": "Выбор модели", From 6905c61912cdb35a7d0d8304e3fde7a5beb4dece Mon Sep 17 00:00:00 2001 From: nekowaiz Date: Thu, 12 Oct 2023 12:44:09 +0000 Subject: [PATCH 011/202] translationBot(ui): update translation (Chinese (Traditional)) 
Currently translated at 8.9% (48 of 536 strings) Co-authored-by: nekowaiz Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hant/ Translation: InvokeAI/Web UI --- .../frontend/web/public/locales/zh_Hant.json | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/zh_Hant.json b/invokeai/frontend/web/public/locales/zh_Hant.json index 98b4882018..f69e1c7f39 100644 --- a/invokeai/frontend/web/public/locales/zh_Hant.json +++ b/invokeai/frontend/web/public/locales/zh_Hant.json @@ -33,6 +33,24 @@ "langBrPortuguese": "巴西葡萄牙語", "langRussian": "俄語", "langSpanish": "西班牙語", - "unifiedCanvas": "統一畫布" + "unifiedCanvas": "統一畫布", + "cancel": "取消", + "langHebrew": "希伯來語", + "txt2img": "文字轉圖片" + }, + "accessibility": { + "modelSelect": "選擇模型", + "invokeProgressBar": "Invoke 進度條", + "uploadImage": "上傳圖片", + "reset": "重設", + "nextImage": "下一張圖片", + "previousImage": "上一張圖片", + "flipHorizontally": "水平翻轉", + "useThisParameter": "使用此參數", + "zoomIn": "放大", + "zoomOut": "縮小", + "flipVertically": "垂直翻轉", + "modifyConfig": "修改配置", + "menu": "選單" } } From 320ef15ee9c7c2e40ff1cf928673fbc337a10a0e Mon Sep 17 00:00:00 2001 From: Dennis Date: Thu, 12 Oct 2023 12:44:11 +0000 Subject: [PATCH 012/202] translationBot(ui): update translation (Dutch) Currently translated at 100.0% (538 of 538 strings) Co-authored-by: Dennis Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/nl/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/nl.json | 63 ++++++++++++++++---- 1 file changed, 51 insertions(+), 12 deletions(-) diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 8ffda36441..10babac1c5 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -2,7 +2,7 @@ "common": { "hotkeysLabel": "Sneltoetsen", "themeLabel": "Thema", - "languagePickerLabel": "Taalkeuze", + "languagePickerLabel": "Taal", "reportBugLabel": "Meld bug", "settingsLabel": "Instellingen", "darkTheme": "Donker", @@ -10,7 +10,7 @@ "greenTheme": "Groen", "img2img": "Afbeelding naar afbeelding", "unifiedCanvas": "Centraal canvas", - "nodes": "Knooppunten", + "nodes": "Knooppunt-editor", "langDutch": "Nederlands", "nodesDesc": "Een op knooppunten gebaseerd systeem voor het genereren van afbeeldingen is momenteel in ontwikkeling. 
Blijf op de hoogte voor nieuws over deze verbluffende functie.", "postProcessing": "Naverwerking", @@ -73,7 +73,14 @@ "langHebrew": "עברית", "langKorean": "한국어", "txt2img": "Tekst naar afbeelding", - "postprocessing": "Nabewerking" + "postprocessing": "Nabewerking", + "dontAskMeAgain": "Vraag niet opnieuw", + "imagePrompt": "Afbeeldingsprompt", + "random": "Willekeurig", + "generate": "Genereer", + "openInNewTab": "Open in nieuw tabblad", + "areYouSure": "Weet je het zeker?", + "linear": "Lineair" }, "gallery": { "generations": "Gegenereerde afbeeldingen", @@ -89,7 +96,12 @@ "pinGallery": "Zet galerij vast", "allImagesLoaded": "Alle afbeeldingen geladen", "loadMore": "Laad meer", - "noImagesInGallery": "Geen afbeeldingen in galerij" + "noImagesInGallery": "Geen afbeeldingen in galerij", + "deleteImage": "Wis afbeelding", + "deleteImageBin": "Gewiste afbeeldingen worden naar de prullenbak van je besturingssysteem gestuurd.", + "deleteImagePermanent": "Gewiste afbeeldingen kunnen niet worden hersteld.", + "assets": "Eigen onderdelen", + "images": "Afbeeldingen" }, "hotkeys": { "keyboardShortcuts": "Sneltoetsen", @@ -418,7 +430,7 @@ "height": "Hoogte", "seed": "Seed", "randomizeSeed": "Willekeurige seed", - "shuffle": "Meng", + "shuffle": "Mengseed", "noiseThreshold": "Drempelwaarde ruis", "perlinNoise": "Perlinruis", "variations": "Variaties", @@ -481,11 +493,17 @@ "copyImage": "Kopieer afbeelding", "imageToImage": "Afbeelding naar afbeelding", "denoisingStrength": "Sterkte ontruisen", - "hiresStrength": "Sterkte hogere resolutie" + "hiresStrength": "Sterkte hogere resolutie", + "scheduler": "Planner", + "noiseSettings": "Ruis", + "seamlessXAxis": "X-as", + "seamlessYAxis": "Y-as", + "hidePreview": "Verberg voorvertoning", + "showPreview": "Toon voorvertoning" }, "settings": { "models": "Modellen", - "displayInProgress": "Toon afbeeldingen gedurende verwerking", + "displayInProgress": "Toon voortgangsafbeeldingen", "saveSteps": "Bewaar afbeeldingen elke n stappen", "confirmOnDelete": "Bevestig bij verwijderen", "displayHelpIcons": "Toon hulppictogrammen", @@ -495,7 +513,16 @@ "resetWebUIDesc1": "Herstel web-UI herstelt alleen de lokale afbeeldingscache en de onthouden instellingen van je browser. Het verwijdert geen afbeeldingen van schijf.", "resetWebUIDesc2": "Als afbeeldingen niet getoond worden in de galerij of iets anders werkt niet, probeer dan eerst deze herstelfunctie voordat je een fout aanmeldt op GitHub.", "resetComplete": "Webgebruikersinterface is hersteld. 
Vernieuw de pasgina om opnieuw te laden.", - "useSlidersForAll": "Gebruik schuifbalken voor alle opties" + "useSlidersForAll": "Gebruik schuifbalken voor alle opties", + "consoleLogLevel": "Logboekniveau", + "shouldLogToConsole": "Schrijf logboek naar console", + "developer": "Ontwikkelaar", + "general": "Algemeen", + "showProgressInViewer": "Toon voortgangsafbeeldingen in viewer", + "generation": "Generatie", + "ui": "Gebruikersinterface", + "availableSchedulers": "Beschikbare planners", + "antialiasProgressImages": "Voer anti-aliasing uit op voortgangsafbeeldingen" }, "toast": { "tempFoldersEmptied": "Tijdelijke map geleegd", @@ -505,7 +532,7 @@ "imageCopied": "Afbeelding gekopieerd", "imageLinkCopied": "Afbeeldingskoppeling gekopieerd", "imageNotLoaded": "Geen afbeelding geladen", - "imageNotLoadedDesc": "Geen afbeelding gevonden om te sturen naar de module Afbeelding naar afbeelding", + "imageNotLoadedDesc": "Geen afbeeldingen gevonden", "imageSavedToGallery": "Afbeelding opgeslagen naar galerij", "canvasMerged": "Canvas samengevoegd", "sentToImageToImage": "Gestuurd naar Afbeelding naar afbeelding", @@ -526,7 +553,13 @@ "metadataLoadFailed": "Fout bij laden metagegevens", "initialImageSet": "Initiële afbeelding ingesteld", "initialImageNotSet": "Initiële afbeelding niet ingesteld", - "initialImageNotSetDesc": "Kan initiële afbeelding niet laden" + "initialImageNotSetDesc": "Kan initiële afbeelding niet laden", + "serverError": "Serverfout", + "disconnected": "Verbinding met server verbroken", + "connected": "Verbonden met server", + "canceled": "Verwerking geannuleerd", + "uploadFailedInvalidUploadDesc": "Moet een enkele PNG- of JPEG-afbeelding zijn", + "problemCopyingImageLink": "Kan afbeeldingslink niet kopiëren" }, "tooltip": { "feature": { @@ -600,7 +633,8 @@ "betaClear": "Wis", "betaDarkenOutside": "Verduister buiten tekenvak", "betaLimitToBox": "Beperk tot tekenvak", - "betaPreserveMasked": "Behoud masker" + "betaPreserveMasked": "Behoud masker", + "antialiasing": "Anti-aliasing" }, "accessibility": { "exitViewer": "Stop viewer", @@ -622,6 +656,11 @@ "toggleAutoscroll": "Autom. scrollen aan/uit", "toggleLogViewer": "Logboekviewer aan/uit", "showGallery": "Toon galerij", - "showOptionsPanel": "Toon deelscherm Opties" + "showOptionsPanel": "Toon deelscherm Opties", + "menu": "Menu" + }, + "ui": { + "showProgressImages": "Toon voortgangsafbeeldingen", + "hideProgressImages": "Verberg voortgangsafbeeldingen" } } From 83b123f1f6e8d97a2cf36d917d6b8f3202cad19f Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:44:15 +0000 Subject: [PATCH 013/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. 
Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ar.json | 1 - invokeai/frontend/web/public/locales/de.json | 1 - invokeai/frontend/web/public/locales/es.json | 2 -- invokeai/frontend/web/public/locales/fr.json | 1 - invokeai/frontend/web/public/locales/he.json | 4 +--- invokeai/frontend/web/public/locales/it.json | 2 -- invokeai/frontend/web/public/locales/nl.json | 2 -- invokeai/frontend/web/public/locales/pl.json | 1 - invokeai/frontend/web/public/locales/pt.json | 2 -- invokeai/frontend/web/public/locales/pt_BR.json | 2 -- invokeai/frontend/web/public/locales/ru.json | 2 -- invokeai/frontend/web/public/locales/uk.json | 4 +--- invokeai/frontend/web/public/locales/zh_CN.json | 1 - 13 files changed, 2 insertions(+), 23 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json index 4810b8afff..47a4169ef2 100644 --- a/invokeai/frontend/web/public/locales/ar.json +++ b/invokeai/frontend/web/public/locales/ar.json @@ -378,7 +378,6 @@ "img2imgStrength": "قوة صورة إلى صورة", "toggleLoopback": "تبديل الإعادة", "invoke": "إطلاق", - "promptPlaceholder": "اكتب المحث هنا. [العلامات السلبية], (زيادة الوزن) ++, (نقص الوزن)--, التبديل و الخلط متاحة (انظر الوثائق)", "sendTo": "أرسل إلى", "sendToImg2Img": "أرسل إلى صورة إلى صورة", "sendToUnifiedCanvas": "أرسل إلى الخطوط الموحدة", diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index c1d54ac199..15278a8905 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -381,7 +381,6 @@ "img2imgStrength": "Bild-zu-Bild-Stärke", "toggleLoopback": "Toggle Loopback", "invoke": "Invoke", - "promptPlaceholder": "Prompt hier eingeben. [negative Token], (mehr Gewicht)++, (geringeres Gewicht)--, Tausch und Überblendung sind verfügbar (siehe Dokumente)", "sendTo": "Senden an", "sendToImg2Img": "Senden an Bild zu Bild", "sendToUnifiedCanvas": "Senden an Unified Canvas", diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 8aa610653b..fdd4ec3a51 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -487,7 +487,6 @@ "img2imgStrength": "Peso de Imagen a Imagen", "toggleLoopback": "Alternar Retroalimentación", "invoke": "Invocar", - "promptPlaceholder": "Ingrese la entrada aquí. [símbolos negativos], (subir peso)++, (bajar peso)--, también disponible alternado y mezclado (ver documentación)", "sendTo": "Enviar a", "sendToImg2Img": "Enviar a Imagen a Imagen", "sendToUnifiedCanvas": "Enviar a Lienzo Unificado", @@ -513,7 +512,6 @@ }, "copyImage": "Copiar la imagen", "general": "General", - "negativePrompts": "Preguntas negativas", "imageToImage": "Imagen a imagen", "denoisingStrength": "Intensidad de la eliminación del ruido", "hiresStrength": "Alta resistencia", diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index 9e78c5b9ed..31c092a9d7 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -392,7 +392,6 @@ "img2imgStrength": "Force de l'Image à l'Image", "toggleLoopback": "Activer/Désactiver la Boucle", "invoke": "Invoker", - "promptPlaceholder": "Tapez le prompt ici. 
[tokens négatifs], (poids positif)++, (poids négatif)--, swap et blend sont disponibles (voir les docs)", "sendTo": "Envoyer à", "sendToImg2Img": "Envoyer à Image à Image", "sendToUnifiedCanvas": "Envoyer au Canvas Unifié", diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json index b485a7d2be..2b07762b9a 100644 --- a/invokeai/frontend/web/public/locales/he.json +++ b/invokeai/frontend/web/public/locales/he.json @@ -438,7 +438,6 @@ "immediate": "ביטול מיידי", "setType": "הגדר סוג ביטול" }, - "negativePrompts": "בקשות שליליות", "sendTo": "שליחה אל", "copyImage": "העתקת תמונה", "downloadImage": "הורדת תמונה", @@ -463,8 +462,7 @@ "seamlessTiling": "ריצוף חלק", "img2imgStrength": "חוזק תמונה לתמונה", "initialImage": "תמונה ראשונית", - "copyImageToLink": "העתקת תמונה לקישור", - "promptPlaceholder": "הקלד בקשה כאן. [אסימונים שליליים], (העלאת משקל)++ , (הורדת משקל)--, החלפה ומיזוג זמינים (ראה מסמכים)" + "copyImageToLink": "העתקת תמונה לקישור" }, "settings": { "models": "מודלים", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 251bae38f0..a7098f0370 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -464,7 +464,6 @@ "img2imgStrength": "Forza da Immagine a Immagine", "toggleLoopback": "Attiva/disattiva elaborazione ricorsiva", "invoke": "Invoke", - "promptPlaceholder": "Digita qui il prompt usando termini in lingua inglese. [token negativi], (aumenta il peso)++, (diminuisci il peso)--, scambia e fondi sono disponibili (consulta la documentazione)", "sendTo": "Invia a", "sendToImg2Img": "Invia a da Immagine a Immagine", "sendToUnifiedCanvas": "Invia a Tela Unificata", @@ -483,7 +482,6 @@ "denoisingStrength": "Forza riduzione rumore", "copyImage": "Copia immagine", "hiresStrength": "Forza Alta Risoluzione", - "negativePrompts": "Prompt Negativi", "imageToImage": "Immagine a Immagine", "cancel": { "schedule": "Annulla dopo l'iterazione corrente", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 10babac1c5..f392116fe4 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -464,7 +464,6 @@ "img2imgStrength": "Sterkte Afbeelding naar afbeelding", "toggleLoopback": "Zet recursieve verwerking aan/uit", "invoke": "Genereer", - "promptPlaceholder": "Voer invoertekst hier in. [negatieve trefwoorden], (verhoogdgewicht)++, (verlaagdgewicht)--, swap (wisselen) en blend (mengen) zijn beschikbaar (zie documentatie)", "sendTo": "Stuur naar", "sendToImg2Img": "Stuur naar Afbeelding naar afbeelding", "sendToUnifiedCanvas": "Stuur naar Centraal canvas", @@ -488,7 +487,6 @@ "setType": "Stel annuleervorm in", "schedule": "Annuleer na huidige iteratie" }, - "negativePrompts": "Negatieve invoer", "general": "Algemeen", "copyImage": "Kopieer afbeelding", "imageToImage": "Afbeelding naar afbeelding", diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index 533bdca861..42c0d95ba7 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -310,7 +310,6 @@ "img2imgStrength": "Wpływ sugestii na obraz", "toggleLoopback": "Wł/wył sprzężenie zwrotne", "invoke": "Wywołaj", - "promptPlaceholder": "W tym miejscu wprowadź swoje sugestie. [negatywne sugestie], (wzmocnienie), (osłabienie)--, po więcej opcji (np. 
swap lub blend) zajrzyj do dokumentacji", "sendTo": "Wyślij do", "sendToImg2Img": "Użyj w trybie \"Obraz na obraz\"", "sendToUnifiedCanvas": "Użyj w trybie uniwersalnym", diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index f45a02381a..2ea05b2d8f 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -407,7 +407,6 @@ "width": "Largura", "seed": "Seed", "hiresStrength": "Força da Alta Resolução", - "negativePrompts": "Indicações negativas", "general": "Geral", "randomizeSeed": "Seed Aleatório", "shuffle": "Embaralhar", @@ -436,7 +435,6 @@ "img2imgStrength": "Força de Imagem Para Imagem", "toggleLoopback": "Ativar Loopback", "symmetry": "Simetria", - "promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)", "sendTo": "Mandar para", "openInViewer": "Abrir No Visualizador", "closeViewer": "Fechar Visualizador", diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index 42e7709d75..0288fba521 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -438,7 +438,6 @@ "img2imgStrength": "Força de Imagem Para Imagem", "toggleLoopback": "Ativar Loopback", "invoke": "Invoke", - "promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)", "sendTo": "Mandar para", "sendToImg2Img": "Mandar para Imagem Para Imagem", "sendToUnifiedCanvas": "Mandar para Tela Unificada", @@ -457,7 +456,6 @@ "hSymmetryStep": "H Passo de Simetria", "symmetry": "Simetria", "copyImage": "Copiar imagem", - "negativePrompts": "Indicações negativas", "hiresStrength": "Força da Alta Resolução", "denoisingStrength": "A força de remoção de ruído", "imageToImage": "Imagem para Imagem", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index a0d3a26ca6..19659ae412 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -464,7 +464,6 @@ "img2imgStrength": "Сила обработки img2img", "toggleLoopback": "Зациклить обработку", "invoke": "Invoke", - "promptPlaceholder": "Введите запрос здесь (на английском). [исключенные токены], (более значимые)++, (менее значимые)--, swap и blend тоже доступны (смотрите Github)", "sendTo": "Отправить", "sendToImg2Img": "Отправить в img2img", "sendToUnifiedCanvas": "Отправить на Единый холст", @@ -494,7 +493,6 @@ "imageToImage": "Изображение в изображение", "denoisingStrength": "Сила шумоподавления", "copyImage": "Скопировать изображение", - "negativePrompts": "Исключающий запрос", "showPreview": "Показать предпросмотр", "noiseSettings": "Шум", "seamlessXAxis": "Ось X", diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index 00d710b5e4..64f9e50ec9 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ b/invokeai/frontend/web/public/locales/uk.json @@ -453,7 +453,6 @@ "img2imgStrength": "Сила обробки img2img", "toggleLoopback": "Зациклити обробку", "invoke": "Викликати", - "promptPlaceholder": "Введіть запит тут (англійською). 
[видалені токени], (більш вагомі)++, (менш вагомі)--, swap и blend також доступні (дивіться Github)", "sendTo": "Надіслати", "sendToImg2Img": "Надіслати у img2img", "sendToUnifiedCanvas": "Надіслати на полотно", @@ -483,8 +482,7 @@ "denoisingStrength": "Сила шумоподавлення", "copyImage": "Копіювати зображення", "symmetry": "Симетрія", - "hSymmetryStep": "Крок гор. симетрії", - "negativePrompts": "Виключний запит" + "hSymmetryStep": "Крок гор. симетрії" }, "settings": { "models": "Моделі", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index e2ffa5797c..5b800fe5ba 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -360,7 +360,6 @@ "img2imgStrength": "图像到图像强度", "toggleLoopback": "切换环回", "invoke": "Invoke", - "promptPlaceholder": "在这里输入提示。可以使用[反提示]、(加权)++、(减权)--、交换和混合(见文档)", "sendTo": "发送到", "sendToImg2Img": "发送到图像到图像", "sendToUnifiedCanvas": "发送到统一画布", From 227046bdb0d008da7b0ced62ad03c552bc4642ff Mon Sep 17 00:00:00 2001 From: "Song, Pengcheng" <17528592@qq.com> Date: Thu, 12 Oct 2023 12:44:17 +0000 Subject: [PATCH 014/202] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 100.0% (542 of 542 strings) translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 88.0% (477 of 542 strings) Co-authored-by: Song, Pengcheng <17528592@qq.com> Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- .../frontend/web/public/locales/zh_CN.json | 220 ++++++++++++++++-- 1 file changed, 196 insertions(+), 24 deletions(-) diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 5b800fe5ba..4cf15ce618 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -8,9 +8,9 @@ "darkTheme": "暗色", "lightTheme": "亮色", "greenTheme": "绿色", - "img2img": "图像到图像", + "img2img": "图生图", "unifiedCanvas": "统一画布", - "nodes": "节点", + "nodes": "节点编辑器", "langSimplifiedChinese": "简体中文", "nodesDesc": "一个基于节点的图像生成系统目前正在开发中。请持续关注关于这一功能的更新。", "postProcessing": "后期处理", @@ -43,7 +43,44 @@ "statusUpscaling": "放大中", "statusUpscalingESRGAN": "放大中 (ESRGAN)", "statusLoadingModel": "模型加载中", - "statusModelChanged": "模型已切换" + "statusModelChanged": "模型已切换", + "accept": "同意", + "cancel": "取消", + "dontAskMeAgain": "不要再次询问", + "areYouSure": "你确认吗?", + "imagePrompt": "图片提示词", + "langKorean": "朝鲜语", + "langPortuguese": "葡萄牙语", + "random": "随机", + "generate": "生成", + "openInNewTab": "在新的标签页打开", + "langUkranian": "乌克兰语", + "back": "返回", + "statusMergedModels": "模型合并完成", + "statusConvertingModel": "模型变换", + "statusModelConverted": "模型变换完成", + "statusMergingModels": "合并模型", + "githubLabel": "Github", + "discordLabel": "Discord", + "langPolish": "波兰语", + "langBrPortuguese": "葡萄牙语(巴西)", + "langDutch": "荷兰语", + "langFrench": "法语", + "langRussian": "俄语", + "langGerman": "德语", + "langHebrew": "希伯来语", + "langItalian": "意大利语", + "langJapanese": "日语", + "langSpanish": "西班牙语", + "langEnglish": "英语", + "langArabic": "阿拉伯语", + "txt2img": "文生图", + "postprocessing": "后期处理", + "oceanTheme": "海洋蓝", + "loading": "加载中", + "loadingInvokeAI": "Invoke AI 加载中", + "linear": "线性的", + "pinOptionsPanel": "固定选项面板" }, "gallery": { "generations": "生成的图像", @@ -59,7 +96,12 @@ "pinGallery": "保持图库常开", "allImagesLoaded": "所有图像加载完成", "loadMore": "加载更多", - "noImagesInGallery": "图库中无图像" + "noImagesInGallery": "图库中无图像", + 
"deleteImage": "删除图片", + "deleteImageBin": "被删除的图片会发送到你操作系统的回收站。", + "deleteImagePermanent": "无法恢复删除的图片。", + "images": "图片", + "assets": "素材" }, "hotkeys": { "keyboardShortcuts": "快捷方式", @@ -284,15 +326,15 @@ "description": "描述", "descriptionValidationMsg": "添加模型的描述", "config": "配置", - "configValidationMsg": "模型配置文件的路径", + "configValidationMsg": "模型配置文件的路径。", "modelLocation": "模型位置", - "modelLocationValidationMsg": "模型文件的路径", + "modelLocationValidationMsg": "模型文件的本地路径。", "vaeLocation": "VAE 位置", - "vaeLocationValidationMsg": "VAE 文件的路径", + "vaeLocationValidationMsg": "VAE 文件的路径。", "width": "宽度", - "widthValidationMsg": "模型的默认宽度", + "widthValidationMsg": "模型的默认宽度。", "height": "高度", - "heightValidationMsg": "模型的默认高度", + "heightValidationMsg": "模型的默认高度。", "addModel": "添加模型", "updateModel": "更新模型", "availableModels": "可用模型", @@ -316,7 +358,69 @@ "deleteModel": "删除模型", "deleteConfig": "删除配置", "deleteMsg1": "您确定要将这个模型从 InvokeAI 删除吗?", - "deleteMsg2": "这不会从磁盘中删除模型检查点文件。如果您愿意,可以重新添加它们。" + "deleteMsg2": "这不会从磁盘中删除模型检查点文件。如果您愿意,可以重新添加它们。", + "convertToDiffusersHelpText1": "模型会被转换成Diffusers格式。", + "convertToDiffusersHelpText2": "这个过程会替换你的模型管理器的入口中相同Diffusers版本的模型。", + "mergedModelSaveLocation": "保存路径", + "mergedModelCustomSaveLocation": "自定义路径", + "checkpointModels": "检查点(Checkpoints)", + "formMessageDiffusersVAELocation": "VAE 路径", + "convertToDiffusersHelpText4": "这是一次性的处理过程。根据你电脑的配置不同耗时30-60秒。", + "convertToDiffusersHelpText6": "你希望转换这个模型吗?", + "interpolationType": "插值类型", + "modelTwo": "模型2", + "modelThree": "模型3", + "v2_768": "版本2(768px)", + "mergedModelName": "合并的模型名称", + "alpha": "透明度", + "allModels": "全部模型", + "convertToDiffusers": "转换为Diffusers", + "formMessageDiffusersModelLocation": "Diffusers 模型路径", + "custom": "自定义", + "formMessageDiffusersVAELocationDesc": "如果没有特别指定,InvokeAI会从上面指定的模型路径中寻找VAE文件。", + "safetensorModels": "安全张量(SafeTensors)", + "modelsMerged": "模型合并完成", + "mergeModels": "合并模型", + "modelOne": "模型1", + "diffusersModels": "扩散器(Diffusers)", + "scanForModels": "扫描模型", + "repo_id": "项目 ID", + "repoIDValidationMsg": "你的模型的在线项目地址", + "v1": "版本1", + "invokeRoot": "InvokeAI 文件夹", + "inpainting": "版本1(Inpainting)", + "customSaveLocation": "自定义保存路径", + "scanAgain": "重新扫描", + "customConfig": "个性化配置", + "pathToCustomConfig": "个性化配置路径", + "modelConverted": "模型已转换", + "statusConverting": "转换中", + "sameFolder": "相同文件夹", + "invokeAIFolder": "Invoke AI 文件夹", + "ignoreMismatch": "忽略所选模型之间的不匹配", + "modelMergeHeaderHelp1": "您可以合并最多三种不同的模型,以创建符合您需求的混合模型。", + "modelMergeHeaderHelp2": "只有扩散器(Diffusers)可以用于模型合并。如果您想要合并一个检查点模型,请先将其转换为扩散器。", + "addCheckpointModel": "添加 Checkpoint / Safetensor 模型", + "addDiffuserModel": "添加 Diffusers 模型", + "vaeRepoID": "VAE 项目 ID", + "vaeRepoIDValidationMsg": "你的模型的在线VAE项目地址", + "selectAndAdd": "选择下表中的模型并添加", + "noModelsFound": "没有找到模型", + "formMessageDiffusersModelLocationDesc": "请至少输入一个。", + "convertToDiffusersSaveLocation": "保存路径", + "convertToDiffusersHelpText3": "您在磁盘上的检查点(checkpoint)文件不会被删除或修改。如果需要,您可以再次将检查点添加到模型管理器中。", + "v2_base": "版本2(512px)", + "convertToDiffusersHelpText5": "请确认你有足够的此版空间,模型大小通常在4GB-7GB之间。", + "convert": "转换", + "merge": "合并", + "pickModelType": "选择模型类型", + "addDifference": "增加差异", + "none": "无", + "inverseSigmoid": "反Sigmoid函数", + "weightedSum": "加权求和", + "modelMergeAlphaHelp": "透明度参数控制模型的混合强度。较低的透明度值会导致第二个模型的影响减弱。", + "sigmoid": "Sigmoid函数", + "modelMergeInterpAddDifferenceHelp": "在这种模式下,首先从模型2中减去模型3,得到的版本再用上述值的透明度与模型1进行混合。" }, "parameters": { "images": "图像", @@ -326,7 +430,7 @@ "height": "高度", "seed": "种子", "randomizeSeed": "随机化种子", - 
"shuffle": "随机化", + "shuffle": "随机生成种子", "noiseThreshold": "噪声阈值", "perlinNoise": "Perlin 噪声", "variations": "变种", @@ -373,7 +477,31 @@ "useInitImg": "使用原图像", "info": "信息", "initialImage": "原图像", - "showOptionsPanel": "显示选项浮窗" + "showOptionsPanel": "显示选项浮窗", + "seamlessYAxis": "Y轴", + "seamlessXAxis": "X轴", + "boundingBoxWidth": "边界框宽度", + "boundingBoxHeight": "边界框高度", + "denoisingStrength": "降噪强度", + "vSymmetryStep": "纵向对称步数", + "cancel": { + "immediate": "立即取消", + "isScheduled": "取消中", + "schedule": "当前步骤后取消", + "setType": "设置取消类型" + }, + "copyImage": "复制图片", + "showPreview": "显示预览", + "symmetry": "对称性", + "positivePromptPlaceholder": "正向提示词", + "negativePromptPlaceholder": "负向提示词", + "scheduler": "计划表", + "general": "通用", + "hiresStrength": "高分辨强度", + "hidePreview": "影藏预览", + "hSymmetryStep": "横向对称步数", + "imageToImage": "图生图", + "noiseSettings": "噪音" }, "settings": { "models": "模型", @@ -386,7 +514,17 @@ "resetWebUI": "重置网页界面", "resetWebUIDesc1": "重置网页只会重置浏览器中缓存的图像和设置,不会删除任何图像。", "resetWebUIDesc2": "如果图像没有显示在图库中,或者其他东西不工作,请在GitHub上提交问题之前尝试重置。", - "resetComplete": "网页界面已重置。刷新页面以重新加载。" + "resetComplete": "网页界面已重置。刷新页面以重新加载。", + "showProgressInViewer": "在视口中展示过程图片", + "antialiasProgressImages": "对过程图片抗锯齿", + "generation": "生成", + "ui": "用户界面", + "availableSchedulers": "可用的计划表", + "useSlidersForAll": "对所有参数使用滑动条设置", + "general": "通用", + "consoleLogLevel": "日志等级", + "shouldLogToConsole": "终端日志", + "developer": "开发者" }, "toast": { "tempFoldersEmptied": "临时文件夹已清空", @@ -396,28 +534,36 @@ "imageCopied": "图像已复制", "imageLinkCopied": "图像链接已复制", "imageNotLoaded": "没有加载图像", - "imageNotLoadedDesc": "没有图像可供送往图像到图像界面", + "imageNotLoadedDesc": "找不到图片", "imageSavedToGallery": "图像已保存到图库", "canvasMerged": "画布已合并", "sentToImageToImage": "已送往图像到图像", "sentToUnifiedCanvas": "已送往统一画布", "parametersSet": "参数已设定", "parametersNotSet": "参数未设定", - "parametersNotSetDesc": "此图像不存在元数据", + "parametersNotSetDesc": "此图像不存在元数据。", "parametersFailed": "加载参数失败", - "parametersFailedDesc": "加载初始图像失败", + "parametersFailedDesc": "加载初始图像失败。", "seedSet": "种子已设定", "seedNotSet": "种子未设定", - "seedNotSetDesc": "无法找到该图像的种子", + "seedNotSetDesc": "无法找到该图像的种子。", "promptSet": "提示已设定", "promptNotSet": "提示未设定", - "promptNotSetDesc": "无法找到该图像的提示", + "promptNotSetDesc": "无法找到该图像的提示。", "upscalingFailed": "放大失败", "faceRestoreFailed": "脸部修复失败", "metadataLoadFailed": "加载元数据失败", "initialImageSet": "初始图像已设定", "initialImageNotSet": "初始图像未设定", - "initialImageNotSetDesc": "无法加载初始图像" + "initialImageNotSetDesc": "无法加载初始图像", + "problemCopyingImageLink": "无法复制图片链接", + "uploadFailedInvalidUploadDesc": "必须是单张的 PNG 或 JPEG 图片", + "disconnected": "服务器断开", + "connected": "服务器连接", + "parameterSet": "参数已设置", + "parameterNotSet": "参数未设置", + "serverError": "服务器错误", + "canceled": "处理取消" }, "unifiedCanvas": { "layer": "图层", @@ -451,10 +597,10 @@ "autoSaveToGallery": "自动保存至图库", "saveBoxRegionOnly": "只保存框内区域", "limitStrokesToBox": "限制画笔在框内", - "showCanvasDebugInfo": "显示画布调试信息", + "showCanvasDebugInfo": "显示附加画布信息", "clearCanvasHistory": "清除画布历史", "clearHistory": "清除历史", - "clearCanvasHistoryMessage": "清除画布历史不会影响当前画布,但会不可撤销地清除所有撤销/重做历史!", + "clearCanvasHistoryMessage": "清除画布历史不会影响当前画布,但会不可撤销地清除所有撤销/重做历史。", "clearCanvasHistoryConfirm": "确认清除所有画布历史?", "emptyTempImageFolder": "清除临时文件夹", "emptyFolder": "清除文件夹", @@ -476,7 +622,8 @@ "betaClear": "清除", "betaDarkenOutside": "暗化外部区域", "betaLimitToBox": "限制在框内", - "betaPreserveMasked": "保留遮罩层" + "betaPreserveMasked": "保留遮罩层", + "antialiasing": "抗锯齿" }, "accessibility": { "modelSelect": "模型选择", @@ -487,12 +634,37 @@ "uploadImage": 
"上传图片", "previousImage": "上一张图片", "copyMetadataJson": "复制JSON元数据", - "exitViewer": "退出视口(ExitViewer)", + "exitViewer": "退出视口", "zoomIn": "放大", "zoomOut": "缩小", "rotateCounterClockwise": "逆时针旋转", "rotateClockwise": "顺时针旋转", "flipHorizontally": "水平翻转", - "flipVertically": "垂直翻转" + "flipVertically": "垂直翻转", + "showGallery": "显示图库", + "showOptionsPanel": "显示选项面板", + "toggleLogViewer": "切换日志浏览器", + "modifyConfig": "修改设置", + "toggleAutoscroll": "切换自动缩放", + "menu": "菜单" + }, + "ui": { + "showProgressImages": "显示处理中的图片", + "hideProgressImages": "隐藏处理中的图片" + }, + "tooltip": { + "feature": { + "prompt": "这是提示词区域。提示词包括生成对象和风格术语。您也可以在提示中添加权重(Token重要性),但命令行命令和参数不起作用。", + "imageToImage": "图生图模式加载任何图像作为初始图像,然后与提示一起用于生成新图像。值越高,结果图像的变化就越大。可能的值为0.0到1.0,建议的范围是0.25到0.75", + "upscale": "使用 ESRGAN可以在图片生成后立即放大图片。", + "variations": "尝试将变化值设置在0.1到1.0之间,以更改给定种子的结果。种子的有趣变化在0.1到0.3之间。", + "boundingBox": "边界框的高和宽的设定对文生图和图生图模式是一样的,只有边界框中的区域会被处理。", + "other": "这些选项将为Invoke启用替代处理模式。 \"无缝平铺\"将在输出中创建重复图案。 \"高分辨率\"是通过img2img进行两步生成:当您想要更大、更连贯且不带伪影的图像时,请使用此设置。这将比通常的txt2img需要更长的时间。", + "faceCorrection": "使用GFPGAN或Codeformer进行人脸校正:该算法会检测图像中的人脸并纠正任何缺陷。较高的值将更改图像,并产生更有吸引力的人脸。在保留较高保真度的情况下使用Codeformer将导致更强的人脸校正,同时也会保留原始图像。", + "gallery": "图片库展示输出文件夹中的图片,设置和文件一起储存,可以通过内容菜单访问。", + "seed": "种子值影响形成图像的初始噪声。您可以使用以前图像中已存在的种子。 “噪声阈值”用于减轻在高CFG值(尝试0-10范围)下的伪像,并使用Perlin在生成过程中添加Perlin噪声:这两者都可以为您的输出添加变化。", + "seamCorrection": "控制在画布上生成的图像之间出现的可见接缝的处理方式。", + "infillAndScaling": "管理填充方法(用于画布的掩模或擦除区域)和缩放(对于较小的边界框大小非常有用)。" + } } } From 4c93202ee4611b3bdbd52179e23ec225d2dd0648 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:44:20 +0000 Subject: [PATCH 015/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. 
Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ar.json | 4 ---- invokeai/frontend/web/public/locales/de.json | 5 ----- invokeai/frontend/web/public/locales/es.json | 6 ------ invokeai/frontend/web/public/locales/fi.json | 5 ----- invokeai/frontend/web/public/locales/fr.json | 4 ---- invokeai/frontend/web/public/locales/he.json | 4 ---- invokeai/frontend/web/public/locales/it.json | 8 +------- invokeai/frontend/web/public/locales/ja.json | 5 ----- invokeai/frontend/web/public/locales/ko.json | 4 ---- invokeai/frontend/web/public/locales/nl.json | 6 ------ invokeai/frontend/web/public/locales/pl.json | 4 ---- invokeai/frontend/web/public/locales/pt.json | 5 ----- invokeai/frontend/web/public/locales/pt_BR.json | 4 ---- invokeai/frontend/web/public/locales/ru.json | 8 +------- invokeai/frontend/web/public/locales/sv.json | 5 ----- invokeai/frontend/web/public/locales/tr.json | 5 ----- invokeai/frontend/web/public/locales/uk.json | 5 ----- invokeai/frontend/web/public/locales/zh_CN.json | 6 ------ invokeai/frontend/web/public/locales/zh_Hant.json | 3 --- 19 files changed, 2 insertions(+), 94 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json index 47a4169ef2..0283d3fafb 100644 --- a/invokeai/frontend/web/public/locales/ar.json +++ b/invokeai/frontend/web/public/locales/ar.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "مفاتيح الأختصار", - "themeLabel": "الموضوع", "languagePickerLabel": "منتقي اللغة", "reportBugLabel": "بلغ عن خطأ", "settingsLabel": "إعدادات", - "darkTheme": "داكن", - "lightTheme": "فاتح", - "greenTheme": "أخضر", "img2img": "صورة إلى صورة", "unifiedCanvas": "لوحة موحدة", "nodes": "عقد", diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 15278a8905..5ae0a4b519 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -1,12 +1,8 @@ { "common": { - "themeLabel": "Thema", "languagePickerLabel": "Sprachauswahl", "reportBugLabel": "Fehler melden", "settingsLabel": "Einstellungen", - "darkTheme": "Dunkel", - "lightTheme": "Hell", - "greenTheme": "Grün", "img2img": "Bild zu Bild", "nodes": "Knoten", "langGerman": "Deutsch", @@ -48,7 +44,6 @@ "langEnglish": "Englisch", "langDutch": "Niederländisch", "langFrench": "Französisch", - "oceanTheme": "Ozean", "langItalian": "Italienisch", "langPortuguese": "Portogisisch", "langRussian": "Russisch", diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index fdd4ec3a51..f880270edd 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "Atajos de teclado", - "themeLabel": "Tema", "languagePickerLabel": "Selector de idioma", "reportBugLabel": "Reportar errores", "settingsLabel": "Ajustes", - "darkTheme": "Oscuro", - "lightTheme": "Claro", - "greenTheme": "Verde", "img2img": "Imagen a Imagen", "unifiedCanvas": "Lienzo Unificado", "nodes": "Editor de nodos", @@ -63,7 +59,6 @@ "statusConvertingModel": "Convertir el modelo", "statusModelConverted": "Modelo adaptado", "statusMergingModels": "Fusionar modelos", - "oceanTheme": "Océano", "langPortuguese": "Portugués", "langKorean": "Coreano", "langHebrew": "Hebreo", @@ -548,7 +543,6 @@ "developer": "Desarrollador", "antialiasProgressImages": "Imágenes 
del progreso de Antialias", "showProgressInViewer": "Mostrar las imágenes del progreso en el visor", - "availableSchedulers": "Programadores disponibles", "ui": "Interfaz del usuario", "generation": "Generación", "favoriteSchedulers": "Programadores favoritos", diff --git a/invokeai/frontend/web/public/locales/fi.json b/invokeai/frontend/web/public/locales/fi.json index a6edd6d8b8..790c63fc85 100644 --- a/invokeai/frontend/web/public/locales/fi.json +++ b/invokeai/frontend/web/public/locales/fi.json @@ -34,18 +34,13 @@ "hotkeysLabel": "Pikanäppäimet", "reportBugLabel": "Raportoi Bugista", "langPolish": "Puola", - "themeLabel": "Teema", "langDutch": "Hollanti", "settingsLabel": "Asetukset", "githubLabel": "Github", - "darkTheme": "Tumma", - "lightTheme": "Vaalea", - "greenTheme": "Vihreä", "langGerman": "Saksa", "langPortuguese": "Portugali", "discordLabel": "Discord", "langEnglish": "Englanti", - "oceanTheme": "Meren sininen", "langRussian": "Venäjä", "langUkranian": "Ukraina", "langSpanish": "Espanja", diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index 31c092a9d7..c9def33979 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "Raccourcis clavier", - "themeLabel": "Thème", "languagePickerLabel": "Sélecteur de langue", "reportBugLabel": "Signaler un bug", "settingsLabel": "Paramètres", - "darkTheme": "Sombre", - "lightTheme": "Clair", - "greenTheme": "Vert", "img2img": "Image en image", "unifiedCanvas": "Canvas unifié", "nodes": "Nœuds", diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json index 2b07762b9a..229a82b1f4 100644 --- a/invokeai/frontend/web/public/locales/he.json +++ b/invokeai/frontend/web/public/locales/he.json @@ -107,13 +107,10 @@ }, "common": { "nodesDesc": "מערכת מבוססת צמתים עבור יצירת תמונות עדיין תחת פיתוח. 
השארו קשובים לעדכונים עבור הפיצ׳ר המדהים הזה.", - "themeLabel": "ערכת נושא", "languagePickerLabel": "בחירת שפה", "githubLabel": "גיטהאב", "discordLabel": "דיסקורד", "settingsLabel": "הגדרות", - "darkTheme": "חשוך", - "lightTheme": "מואר", "langEnglish": "אנגלית", "langDutch": "הולנדית", "langArabic": "ערבית", @@ -155,7 +152,6 @@ "statusMergedModels": "מודלים מוזגו", "hotkeysLabel": "מקשים חמים", "reportBugLabel": "דווח באג", - "greenTheme": "ירוק", "langItalian": "איטלקית", "upload": "העלאה", "langPolish": "פולנית", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index a7098f0370..81251585de 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "Tasti di scelta rapida", - "themeLabel": "Tema", "languagePickerLabel": "Lingua", "reportBugLabel": "Segnala un errore", "settingsLabel": "Impostazioni", - "darkTheme": "Scuro", - "lightTheme": "Chiaro", - "greenTheme": "Verde", "img2img": "Immagine a Immagine", "unifiedCanvas": "Tela unificata", "nodes": "Editor dei Nodi", @@ -67,7 +63,6 @@ "langPortuguese": "Portoghese", "pinOptionsPanel": "Blocca il pannello Opzioni", "loading": "Caricamento in corso", - "oceanTheme": "Oceano", "langHebrew": "Ebraico", "loadingInvokeAI": "Caricamento Invoke AI", "postprocessing": "Post Elaborazione", @@ -523,8 +518,7 @@ "antialiasProgressImages": "Anti aliasing delle immagini di avanzamento", "showProgressInViewer": "Mostra le immagini di avanzamento nel visualizzatore", "generation": "Generazione", - "ui": "Interfaccia Utente", - "availableSchedulers": "Campionatori disponibili" + "ui": "Interfaccia Utente" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", diff --git a/invokeai/frontend/web/public/locales/ja.json b/invokeai/frontend/web/public/locales/ja.json index d7f4510789..190862b0d1 100644 --- a/invokeai/frontend/web/public/locales/ja.json +++ b/invokeai/frontend/web/public/locales/ja.json @@ -1,12 +1,8 @@ { "common": { - "themeLabel": "テーマ", "languagePickerLabel": "言語選択", "reportBugLabel": "バグ報告", "settingsLabel": "設定", - "darkTheme": "ダーク", - "lightTheme": "ライト", - "greenTheme": "緑", "langJapanese": "日本語", "nodesDesc": "現在、画像生成のためのノードベースシステムを開発中です。機能についてのアップデートにご期待ください。", "postProcessing": "後処理", @@ -63,7 +59,6 @@ "discordLabel": "Discord", "langItalian": "Italiano", "langEnglish": "English", - "oceanTheme": "オーシャン", "langArabic": "アラビア語", "langDutch": "Nederlands", "langFrench": "Français", diff --git a/invokeai/frontend/web/public/locales/ko.json b/invokeai/frontend/web/public/locales/ko.json index 47cde5fec3..8baab54ac9 100644 --- a/invokeai/frontend/web/public/locales/ko.json +++ b/invokeai/frontend/web/public/locales/ko.json @@ -1,13 +1,9 @@ { "common": { - "themeLabel": "테마 설정", "languagePickerLabel": "언어 설정", "reportBugLabel": "버그 리포트", "githubLabel": "Github", "settingsLabel": "설정", - "darkTheme": "다크 모드", - "lightTheme": "라이트 모드", - "greenTheme": "그린 모드", "langArabic": "العربية", "langEnglish": "English", "langDutch": "Nederlands", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index f392116fe4..8410edb998 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "Sneltoetsen", - "themeLabel": "Thema", "languagePickerLabel": "Taal", "reportBugLabel": "Meld bug", "settingsLabel": "Instellingen", - "darkTheme": "Donker", 
- "lightTheme": "Licht", - "greenTheme": "Groen", "img2img": "Afbeelding naar afbeelding", "unifiedCanvas": "Centraal canvas", "nodes": "Knooppunt-editor", @@ -69,7 +65,6 @@ "pinOptionsPanel": "Zet deelscherm Opties vast", "loading": "Bezig met laden", "loadingInvokeAI": "Bezig met laden van Invoke AI", - "oceanTheme": "Oceaan", "langHebrew": "עברית", "langKorean": "한국어", "txt2img": "Tekst naar afbeelding", @@ -519,7 +514,6 @@ "showProgressInViewer": "Toon voortgangsafbeeldingen in viewer", "generation": "Generatie", "ui": "Gebruikersinterface", - "availableSchedulers": "Beschikbare planners", "antialiasProgressImages": "Voer anti-aliasing uit op voortgangsafbeeldingen" }, "toast": { diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index 42c0d95ba7..02b1bc36b0 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "Skróty klawiszowe", - "themeLabel": "Motyw", "languagePickerLabel": "Wybór języka", "reportBugLabel": "Zgłoś błąd", "settingsLabel": "Ustawienia", - "darkTheme": "Ciemny", - "lightTheme": "Jasny", - "greenTheme": "Zielony", "img2img": "Obraz na obraz", "unifiedCanvas": "Tryb uniwersalny", "nodes": "Węzły", diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index 2ea05b2d8f..b1fe68c6ab 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -1,11 +1,8 @@ { "common": { - "greenTheme": "Verde", "langArabic": "العربية", - "themeLabel": "Tema", "reportBugLabel": "Reportar Bug", "settingsLabel": "Configurações", - "lightTheme": "Claro", "langBrPortuguese": "Português do Brasil", "languagePickerLabel": "Seletor de Idioma", "langDutch": "Nederlands", @@ -57,13 +54,11 @@ "statusModelChanged": "Modelo Alterado", "githubLabel": "Github", "discordLabel": "Discord", - "darkTheme": "Escuro", "training": "Treinando", "statusGeneratingOutpainting": "Geração de Ampliação", "statusGenerationComplete": "Geração Completa", "statusMergingModels": "Mesclando Modelos", "statusMergedModels": "Modelos Mesclados", - "oceanTheme": "Oceano", "pinOptionsPanel": "Fixar painel de opções", "loading": "A carregar", "loadingInvokeAI": "A carregar Invoke AI", diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index 0288fba521..3fb9e76acb 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "Teclas de atalho", - "themeLabel": "Tema", "languagePickerLabel": "Seletor de Idioma", "reportBugLabel": "Relatar Bug", "settingsLabel": "Configurações", - "darkTheme": "Noite", - "lightTheme": "Dia", - "greenTheme": "Verde", "img2img": "Imagem Para Imagem", "unifiedCanvas": "Tela Unificada", "nodes": "Nódulos", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 19659ae412..44310dd084 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "Горячие клавиши", - "themeLabel": "Тема", "languagePickerLabel": "Язык", "reportBugLabel": "Сообщить об ошибке", "settingsLabel": "Настройки", - "darkTheme": "Темная", - "lightTheme": "Светлая", - "greenTheme": "Зеленая", "img2img": "Изображение в изображение (img2img)", "unifiedCanvas": "Единый холст", 
"nodes": "Редактор нод", @@ -56,7 +52,6 @@ "statusConvertingModel": "Конвертация модели", "cancel": "Отменить", "accept": "Принять", - "oceanTheme": "Океан", "langUkranian": "Украинский", "langEnglish": "Английский", "postprocessing": "Постобработка", @@ -523,8 +518,7 @@ "showProgressInViewer": "Показывать процесс генерации в Просмотрщике", "antialiasProgressImages": "Сглаживать предпоказ процесса генерации", "generation": "Поколение", - "ui": "Пользовательский интерфейс", - "availableSchedulers": "Доступные планировщики" + "ui": "Пользовательский интерфейс" }, "toast": { "tempFoldersEmptied": "Временная папка очищена", diff --git a/invokeai/frontend/web/public/locales/sv.json b/invokeai/frontend/web/public/locales/sv.json index da2266135d..6030f57256 100644 --- a/invokeai/frontend/web/public/locales/sv.json +++ b/invokeai/frontend/web/public/locales/sv.json @@ -27,10 +27,6 @@ "githubLabel": "Github", "discordLabel": "Discord", "settingsLabel": "Inställningar", - "darkTheme": "Mörk", - "lightTheme": "Ljus", - "greenTheme": "Grön", - "oceanTheme": "Hav", "langEnglish": "Engelska", "langDutch": "Nederländska", "langFrench": "Franska", @@ -68,7 +64,6 @@ "loadingInvokeAI": "Laddar Invoke AI", "statusRestoringFaces": "Återskapar ansikten", "languagePickerLabel": "Språkväljare", - "themeLabel": "Tema", "txt2img": "Text till bild", "nodes": "Noder", "img2img": "Bild till bild", diff --git a/invokeai/frontend/web/public/locales/tr.json b/invokeai/frontend/web/public/locales/tr.json index 316908b4a9..1f285f956b 100644 --- a/invokeai/frontend/web/public/locales/tr.json +++ b/invokeai/frontend/web/public/locales/tr.json @@ -24,16 +24,11 @@ }, "common": { "hotkeysLabel": "Kısayol Tuşları", - "themeLabel": "Tema", "languagePickerLabel": "Dil Seçimi", "reportBugLabel": "Hata Bildir", "githubLabel": "Github", "discordLabel": "Discord", "settingsLabel": "Ayarlar", - "darkTheme": "Karanlık Tema", - "lightTheme": "Aydınlık Tema", - "greenTheme": "Yeşil Tema", - "oceanTheme": "Okyanus Tema", "langArabic": "Arapça", "langEnglish": "İngilizce", "langDutch": "Hollandaca", diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index 64f9e50ec9..efc3b60ee3 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ b/invokeai/frontend/web/public/locales/uk.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "Гарячi клавіші", - "themeLabel": "Тема", "languagePickerLabel": "Мова", "reportBugLabel": "Повідомити про помилку", "settingsLabel": "Налаштування", - "darkTheme": "Темна", - "lightTheme": "Світла", - "greenTheme": "Зелена", "img2img": "Зображення із зображення (img2img)", "unifiedCanvas": "Універсальне полотно", "nodes": "Вузли", @@ -56,7 +52,6 @@ "langKorean": "Корейська", "langPortuguese": "Португальська", "pinOptionsPanel": "Закріпити панель налаштувань", - "oceanTheme": "Океан", "langArabic": "Арабська", "langSimplifiedChinese": "Китайська (спрощена)", "langSpanish": "Іспанська", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 4cf15ce618..5b3b2f1039 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -1,13 +1,9 @@ { "common": { "hotkeysLabel": "快捷键", - "themeLabel": "主题", "languagePickerLabel": "语言", "reportBugLabel": "提交错误报告", "settingsLabel": "设置", - "darkTheme": "暗色", - "lightTheme": "亮色", - "greenTheme": "绿色", "img2img": "图生图", "unifiedCanvas": "统一画布", "nodes": "节点编辑器", @@ -76,7 +72,6 @@ "langArabic": "阿拉伯语", 
"txt2img": "文生图", "postprocessing": "后期处理", - "oceanTheme": "海洋蓝", "loading": "加载中", "loadingInvokeAI": "Invoke AI 加载中", "linear": "线性的", @@ -519,7 +514,6 @@ "antialiasProgressImages": "对过程图片抗锯齿", "generation": "生成", "ui": "用户界面", - "availableSchedulers": "可用的计划表", "useSlidersForAll": "对所有参数使用滑动条设置", "general": "通用", "consoleLogLevel": "日志等级", diff --git a/invokeai/frontend/web/public/locales/zh_Hant.json b/invokeai/frontend/web/public/locales/zh_Hant.json index f69e1c7f39..fe51856117 100644 --- a/invokeai/frontend/web/public/locales/zh_Hant.json +++ b/invokeai/frontend/web/public/locales/zh_Hant.json @@ -13,9 +13,6 @@ "settingsLabel": "設定", "upload": "上傳", "langArabic": "阿拉伯語", - "greenTheme": "綠色", - "lightTheme": "淺色", - "darkTheme": "深色", "discordLabel": "Discord", "nodesDesc": "使用Node生成圖像的系統正在開發中。敬請期待有關於這項功能的更新。", "reportBugLabel": "回報錯誤", From 035f1e12e14837b642f60fb42306be4d596778f0 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Thu, 12 Oct 2023 12:44:23 +0000 Subject: [PATCH 016/202] translationBot(ui): update translation (Italian) Currently translated at 100.0% (550 of 550 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (548 of 548 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (546 of 546 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (541 of 541 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (544 of 544 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (543 of 543 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 24 ++++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 81251585de..b73bf07afb 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -75,7 +75,11 @@ "openInNewTab": "Apri in una nuova scheda", "areYouSure": "Sei sicuro?", "dontAskMeAgain": "Non chiedermelo più", - "imagePrompt": "Prompt Immagine" + "imagePrompt": "Prompt Immagine", + "darkMode": "Modalità scura", + "lightMode": "Modalità chiara", + "batch": "Gestione Lotto", + "modelmanager": "Gestione Modello" }, "gallery": { "generations": "Generazioni", @@ -415,7 +419,10 @@ "none": "niente", "addDifference": "Aggiungi differenza", "pickModelType": "Scegli il tipo di modello", - "scanForModels": "Cerca modelli" + "scanForModels": "Cerca modelli", + "variant": "Variante", + "baseModel": "Modello Base", + "vae": "VAE" }, "parameters": { "images": "Immagini", @@ -496,7 +503,10 @@ "boundingBoxWidth": "Larghezza riquadro di delimitazione", "boundingBoxHeight": "Altezza riquadro di delimitazione", "positivePromptPlaceholder": "Prompt Positivo", - "negativePromptPlaceholder": "Prompt Negativo" + "negativePromptPlaceholder": "Prompt Negativo", + "controlNetControlMode": "Modalità di controllo", + "clipSkip": "Salta CLIP", + "aspectRatio": "Proporzioni" }, "settings": { "models": "Modelli", @@ -518,7 +528,10 @@ "antialiasProgressImages": "Anti aliasing delle immagini di avanzamento", "showProgressInViewer": "Mostra le immagini di avanzamento nel visualizzatore", "generation": "Generazione", - "ui": "Interfaccia Utente" + "ui": "Interfaccia Utente", + "favoriteSchedulersPlaceholder": 
"Nessun campionatore preferito", + "favoriteSchedulers": "Campionatori preferiti", + "showAdvancedOptions": "Mostra Opzioni Avanzate" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", @@ -659,6 +672,7 @@ }, "ui": { "hideProgressImages": "Nascondi avanzamento immagini", - "showProgressImages": "Mostra avanzamento immagini" + "showProgressImages": "Mostra avanzamento immagini", + "swapSizes": "Scambia dimensioni" } } From 7c2aa1dc203379f8bfe227061606a66a2d6596d9 Mon Sep 17 00:00:00 2001 From: System X - Files Date: Thu, 12 Oct 2023 12:44:25 +0000 Subject: [PATCH 017/202] translationBot(ui): update translation (Russian) Currently translated at 99.5% (602 of 605 strings) translationBot(ui): update translation (Russian) Currently translated at 99.8% (605 of 606 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (596 of 596 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (595 of 595 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (593 of 593 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (592 of 592 strings) translationBot(ui): update translation (Russian) Currently translated at 90.2% (534 of 592 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (543 of 543 strings) Co-authored-by: System X - Files Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ru/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ru.json | 103 +++++++++++++++++-- 1 file changed, 92 insertions(+), 11 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 44310dd084..cccf770682 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -75,7 +75,12 @@ "random": "Случайное", "generate": "Сгенерировать", "openInNewTab": "Открыть в новой вкладке", - "imagePrompt": "Запрос" + "imagePrompt": "Запрос", + "communityLabel": "Сообщество", + "lightMode": "Светлая тема", + "batch": "Пакетный менеджер", + "modelManager": "Менеджер моделей", + "darkMode": "Темная тема" }, "gallery": { "generations": "Генерации", @@ -96,7 +101,8 @@ "deleteImageBin": "Удаленные изображения будут отправлены в корзину вашей операционной системы.", "deleteImage": "Удалить изображение", "images": "Изображения", - "assets": "Ресурсы" + "assets": "Ресурсы", + "autoAssignBoardOnClick": "Авто-назначение доски по клику" }, "hotkeys": { "keyboardShortcuts": "Горячие клавиши", @@ -303,7 +309,12 @@ "acceptStagingImage": { "title": "Принять изображение", "desc": "Принять текущее изображение" - } + }, + "addNodes": { + "desc": "Открывает меню добавления узла", + "title": "Добавление узлов" + }, + "nodesHotkeys": "Горячие клавиши узлов" }, "modelManager": { "modelManager": "Менеджер моделей", @@ -356,14 +367,14 @@ "deleteModel": "Удалить модель", "deleteConfig": "Удалить конфигурацию", "deleteMsg1": "Вы точно хотите удалить модель из InvokeAI?", - "deleteMsg2": "Это не удалит файл модели с диска. Позже вы можете добавить его снова.", + "deleteMsg2": "Это приведет К УДАЛЕНИЮ модели С ДИСКА, если она находится в корневой папке Invoke. Если вы используете пользовательское расположение, то модель НЕ будет удалена с диска.", "repoIDValidationMsg": "Онлайн-репозиторий модели", - "convertToDiffusersHelpText5": "Пожалуйста, убедитесь, что у вас достаточно места на диске. 
Модели обычно занимают 4 – 7 Гб.", + "convertToDiffusersHelpText5": "Пожалуйста, убедитесь, что у вас достаточно места на диске. Модели обычно занимают 2–7 Гб.", "invokeAIFolder": "Каталог InvokeAI", "ignoreMismatch": "Игнорировать несоответствия между выбранными моделями", "addCheckpointModel": "Добавить модель Checkpoint/Safetensor", "formMessageDiffusersModelLocationDesc": "Укажите хотя бы одно.", - "convertToDiffusersHelpText3": "Файл модели на диске НЕ будет удалён или изменён. Вы сможете заново добавить его в Model Manager при необходимости.", + "convertToDiffusersHelpText3": "Ваш файл контрольной точки НА ДИСКЕ будет УДАЛЕН, если он находится в корневой папке InvokeAI. Если он находится в пользовательском расположении, то он НЕ будет удален.", "vaeRepoID": "ID репозитория VAE", "mergedModelName": "Название объединенной модели", "checkpointModels": "Checkpoints", @@ -415,7 +426,27 @@ "weightedSum": "Взвешенная сумма", "safetensorModels": "SafeTensors", "v2_768": "v2 (768px)", - "v2_base": "v2 (512px)" + "v2_base": "v2 (512px)", + "modelDeleted": "Модель удалена", + "importModels": "Импорт Моделей", + "variant": "Вариант", + "baseModel": "Базовая модель", + "modelsSynced": "Модели синхронизированы", + "modelSyncFailed": "Не удалось синхронизировать модели", + "vae": "VAE", + "modelDeleteFailed": "Не удалось удалить модель", + "noCustomLocationProvided": "Пользовательское местоположение не указано", + "convertingModelBegin": "Конвертация модели. Пожалуйста, подождите.", + "settings": "Настройки", + "selectModel": "Выберите модель", + "syncModels": "Синхронизация моделей", + "syncModelsDesc": "Если ваши модели не синхронизированы с серверной частью, вы можете обновить их, используя эту опцию. Обычно это удобно в тех случаях, когда вы вручную обновляете свой файл \"models.yaml\" или добавляете модели в корневую папку InvokeAI после загрузки приложения.", + "modelUpdateFailed": "Не удалось обновить модель", + "modelConversionFailed": "Не удалось сконвертировать модель", + "modelsMergeFailed": "Не удалось выполнить слияние моделей", + "loraModels": "LoRAs", + "onnxModels": "Onnx", + "oliveModels": "Olives" }, "parameters": { "images": "Изображения", @@ -496,7 +527,17 @@ "boundingBoxWidth": "Ширина ограничивающей рамки", "boundingBoxHeight": "Высота ограничивающей рамки", "positivePromptPlaceholder": "Запрос", - "negativePromptPlaceholder": "Исключающий запрос" + "negativePromptPlaceholder": "Исключающий запрос", + "controlNetControlMode": "Режим управления", + "clipSkip": "CLIP Пропуск", + "aspectRatio": "Соотношение", + "maskAdjustmentsHeader": "Настройка маски", + "maskBlur": "Размытие маски", + "maskBlurMethod": "Метод размытия маски", + "seamPaintingHeader": "Окрашивание швов", + "seamThreshold": "Порог шва", + "seamLowThreshold": "Низкий", + "seamHighThreshold": "Высокий" }, "settings": { "models": "Модели", @@ -518,7 +559,14 @@ "showProgressInViewer": "Показывать процесс генерации в Просмотрщике", "antialiasProgressImages": "Сглаживать предпоказ процесса генерации", "generation": "Поколение", - "ui": "Пользовательский интерфейс" + "ui": "Пользовательский интерфейс", + "favoriteSchedulers": "Избранные планировщики", + "favoriteSchedulersPlaceholder": "Нет избранных планировщиков", + "enableNodesEditor": "Включить редактор узлов", + "experimental": "Экспериментальные", + "beta": "Бета", + "alternateCanvasLayout": "Альтернативный слой холста", + "showAdvancedOptions": "Показать доп. 
параметры" }, "toast": { "tempFoldersEmptied": "Временная папка очищена", @@ -557,7 +605,17 @@ "problemCopyingImageLink": "Не удалось скопировать ссылку на изображение", "uploadFailedInvalidUploadDesc": "Должно быть одно изображение в формате PNG или JPEG", "parameterNotSet": "Параметр не задан", - "parameterSet": "Параметр задан" + "parameterSet": "Параметр задан", + "nodesLoaded": "Узлы загружены", + "problemCopyingImage": "Не удается скопировать изображение", + "nodesLoadedFailed": "Не удалось загрузить Узлы", + "nodesCleared": "Узлы очищены", + "nodesBrokenConnections": "Не удается загрузить. Некоторые соединения повреждены.", + "nodesUnrecognizedTypes": "Не удается загрузить. Граф имеет нераспознанные типы", + "nodesNotValidJSON": "Недопустимый JSON", + "nodesCorruptedGraph": "Не удается загрузить. Граф, похоже, поврежден.", + "nodesSaved": "Узлы сохранены", + "nodesNotValidGraph": "Недопустимый граф узлов InvokeAI" }, "tooltip": { "feature": { @@ -659,6 +717,29 @@ }, "ui": { "showProgressImages": "Показывать промежуточный итог", - "hideProgressImages": "Не показывать промежуточный итог" + "hideProgressImages": "Не показывать промежуточный итог", + "swapSizes": "Поменять местами размеры" + }, + "nodes": { + "reloadSchema": "Перезагрузить схему", + "saveGraph": "Сохранить граф", + "clearGraph": "Очистить граф", + "zoomInNodes": "Увеличьте масштаб", + "zoomOutNodes": "Уменьшите масштаб", + "fitViewportNodes": "Уместить вид", + "hideGraphNodes": "Скрыть оверлей графа", + "showGraphNodes": "Показать оверлей графа", + "showLegendNodes": "Показать тип поля", + "hideMinimapnodes": "Скрыть миникарту", + "loadGraph": "Загрузить граф (сохраненный из Редактора узлов) (Не копировать и не вставлять метаданные)", + "clearGraphDesc": "Вы уверены, что хотите очистить все узлы?", + "hideLegendNodes": "Скрыть тип поля", + "showMinimapnodes": "Показать миникарту", + "saveWorkflow": "Сохранить рабочий процесс", + "loadWorkflow": "Загрузить рабочий процесс", + "resetWorkflowDesc2": "Сброс рабочего процесса очистит все узлы, ребра и детали рабочего процесса.", + "resetWorkflow": "Сбросить рабочий процесс", + "resetWorkflowDesc": "Вы уверены, что хотите сбросить этот рабочий процесс?", + "reloadNodeTemplates": "Перезагрузить шаблоны узлов" } } From 516cc258f9a2bcb20fd105b27b3a8e35876bde48 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:44:28 +0000 Subject: [PATCH 018/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. 
Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ar.json | 1 - invokeai/frontend/web/public/locales/de.json | 1 - invokeai/frontend/web/public/locales/es.json | 2 -- invokeai/frontend/web/public/locales/fr.json | 1 - invokeai/frontend/web/public/locales/he.json | 1 - invokeai/frontend/web/public/locales/it.json | 4 +--- invokeai/frontend/web/public/locales/ja.json | 1 - invokeai/frontend/web/public/locales/nl.json | 1 - invokeai/frontend/web/public/locales/pl.json | 1 - invokeai/frontend/web/public/locales/pt.json | 1 - invokeai/frontend/web/public/locales/pt_BR.json | 1 - invokeai/frontend/web/public/locales/ru.json | 1 - invokeai/frontend/web/public/locales/uk.json | 1 - invokeai/frontend/web/public/locales/zh_CN.json | 1 - 14 files changed, 1 insertion(+), 17 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json index 0283d3fafb..7ec6126e23 100644 --- a/invokeai/frontend/web/public/locales/ar.json +++ b/invokeai/frontend/web/public/locales/ar.json @@ -396,7 +396,6 @@ "saveSteps": "حفظ الصور كل n خطوات", "confirmOnDelete": "تأكيد عند الحذف", "displayHelpIcons": "عرض أيقونات المساعدة", - "useCanvasBeta": "استخدام مخطط الأزرار بيتا", "enableImageDebugging": "تمكين التصحيح عند التصوير", "resetWebUI": "إعادة تعيين واجهة الويب", "resetWebUIDesc1": "إعادة تعيين واجهة الويب يعيد فقط ذاكرة التخزين المؤقت للمتصفح لصورك وإعداداتك المذكورة. لا يحذف أي صور من القرص.", diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 5ae0a4b519..0ce584c656 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -398,7 +398,6 @@ "saveSteps": "Speichern der Bilder alle n Schritte", "confirmOnDelete": "Bestätigen beim Löschen", "displayHelpIcons": "Hilfesymbole anzeigen", - "useCanvasBeta": "Canvas Beta Layout verwenden", "enableImageDebugging": "Bild-Debugging aktivieren", "resetWebUI": "Web-Oberfläche zurücksetzen", "resetWebUIDesc1": "Das Zurücksetzen der Web-Oberfläche setzt nur den lokalen Cache des Browsers mit Ihren Bildern und gespeicherten Einstellungen zurück. Es werden keine Bilder von der Festplatte gelöscht.", diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index f880270edd..ec140836d2 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -77,7 +77,6 @@ "areYouSure": "¿Estas seguro?", "imagePrompt": "Indicación de imagen", "batch": "Administrador de lotes", - "modelmanager": "Administrador de modelos", "darkMode": "Modo oscuro", "lightMode": "Modo claro", "modelManager": "Administrador de modelos", @@ -530,7 +529,6 @@ "saveSteps": "Guardar imágenes cada n pasos", "confirmOnDelete": "Confirmar antes de eliminar", "displayHelpIcons": "Mostrar iconos de ayuda", - "useCanvasBeta": "Usar versión beta del Lienzo", "enableImageDebugging": "Habilitar depuración de imágenes", "resetWebUI": "Restablecer interfaz web", "resetWebUIDesc1": "Al restablecer la interfaz web, solo se restablece la caché local del navegador de sus imágenes y la configuración guardada. 
No se elimina ninguna imagen de su disco duro.", diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index c9def33979..0a99845f03 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -410,7 +410,6 @@ "saveSteps": "Enregistrer les images tous les n étapes", "confirmOnDelete": "Confirmer la suppression", "displayHelpIcons": "Afficher les icônes d'aide", - "useCanvasBeta": "Utiliser la mise en page bêta de Canvas", "enableImageDebugging": "Activer le débogage d'image", "resetWebUI": "Réinitialiser l'interface Web", "resetWebUIDesc1": "Réinitialiser l'interface Web ne réinitialise que le cache local du navigateur de vos images et de vos paramètres enregistrés. Cela n'efface pas les images du disque.", diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json index 229a82b1f4..323be9d8b6 100644 --- a/invokeai/frontend/web/public/locales/he.json +++ b/invokeai/frontend/web/public/locales/he.json @@ -464,7 +464,6 @@ "models": "מודלים", "displayInProgress": "הצגת תמונות בתהליך", "confirmOnDelete": "אישור בעת המחיקה", - "useCanvasBeta": "שימוש בגרסת ביתא של תצוגת הקנבס", "useSlidersForAll": "שימוש במחוונים לכל האפשרויות", "resetWebUI": "איפוס ממשק משתמש", "resetWebUIDesc1": "איפוס ממשק המשתמש האינטרנטי מאפס רק את המטמון המקומי של הדפדפן של התמונות וההגדרות שנשמרו. זה לא מוחק תמונות מהדיסק.", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index b73bf07afb..ea8fb710d3 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -78,8 +78,7 @@ "imagePrompt": "Prompt Immagine", "darkMode": "Modalità scura", "lightMode": "Modalità chiara", - "batch": "Gestione Lotto", - "modelmanager": "Gestione Modello" + "batch": "Gestione Lotto" }, "gallery": { "generations": "Generazioni", @@ -514,7 +513,6 @@ "saveSteps": "Salva le immagini ogni n passaggi", "confirmOnDelete": "Conferma l'eliminazione", "displayHelpIcons": "Visualizza le icone della Guida", - "useCanvasBeta": "Utilizza il layout beta di Canvas", "enableImageDebugging": "Abilita il debug dell'immagine", "resetWebUI": "Reimposta l'interfaccia utente Web", "resetWebUIDesc1": "Il ripristino dell'interfaccia utente Web reimposta solo la cache locale del browser delle immagini e le impostazioni memorizzate. 
Non cancella alcuna immagine dal disco.", diff --git a/invokeai/frontend/web/public/locales/ja.json b/invokeai/frontend/web/public/locales/ja.json index 190862b0d1..3672a224ad 100644 --- a/invokeai/frontend/web/public/locales/ja.json +++ b/invokeai/frontend/web/public/locales/ja.json @@ -358,7 +358,6 @@ "saveSteps": "nステップごとに画像を保存", "confirmOnDelete": "削除時に確認", "displayHelpIcons": "ヘルプアイコンを表示", - "useCanvasBeta": "キャンバスレイアウト(Beta)を使用する", "enableImageDebugging": "画像のデバッグを有効化", "resetWebUI": "WebUIをリセット", "resetWebUIDesc1": "WebUIのリセットは、画像と保存された設定のキャッシュをリセットするだけです。画像を削除するわけではありません。", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 8410edb998..8458f5e21b 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -500,7 +500,6 @@ "saveSteps": "Bewaar afbeeldingen elke n stappen", "confirmOnDelete": "Bevestig bij verwijderen", "displayHelpIcons": "Toon hulppictogrammen", - "useCanvasBeta": "Gebruik bètavormgeving van canvas", "enableImageDebugging": "Schakel foutopsporing afbeelding in", "resetWebUI": "Herstel web-UI", "resetWebUIDesc1": "Herstel web-UI herstelt alleen de lokale afbeeldingscache en de onthouden instellingen van je browser. Het verwijdert geen afbeeldingen van schijf.", diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index 02b1bc36b0..9a1b0cf3fb 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -327,7 +327,6 @@ "saveSteps": "Zapisuj obrazy co X kroków", "confirmOnDelete": "Potwierdzaj usuwanie", "displayHelpIcons": "Wyświetlaj ikony pomocy", - "useCanvasBeta": "Nowy układ trybu uniwersalnego", "enableImageDebugging": "Włącz debugowanie obrazu", "resetWebUI": "Zresetuj interfejs", "resetWebUIDesc1": "Resetowanie interfejsu wyczyści jedynie dane i ustawienia zapisane w pamięci przeglądarki. Nie usunie żadnych obrazów z dysku.", diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index b1fe68c6ab..c57a005779 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -472,7 +472,6 @@ "settings": { "confirmOnDelete": "Confirmar Antes de Apagar", "displayHelpIcons": "Mostrar Ícones de Ajuda", - "useCanvasBeta": "Usar Layout de Telas Beta", "enableImageDebugging": "Ativar Depuração de Imagem", "useSlidersForAll": "Usar deslizadores para todas as opções", "resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do broswer para imagens e configurações lembradas. Não apaga nenhuma imagem do disco.", diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index 3fb9e76acb..f76703e676 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -469,7 +469,6 @@ "saveSteps": "Salvar imagens a cada n passos", "confirmOnDelete": "Confirmar Antes de Apagar", "displayHelpIcons": "Mostrar Ícones de Ajuda", - "useCanvasBeta": "Usar Layout de Telas Beta", "enableImageDebugging": "Ativar Depuração de Imagem", "resetWebUI": "Reiniciar Interface", "resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do broswer para imagens e configurações lembradas. 
Não apaga nenhuma imagem do disco.", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index cccf770682..8b1a7d3d25 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -545,7 +545,6 @@ "saveSteps": "Сохранять каждые n щагов", "confirmOnDelete": "Подтверждать удаление", "displayHelpIcons": "Показывать значки подсказок", - "useCanvasBeta": "Показывать инструменты слева (Beta UI)", "enableImageDebugging": "Включить отладку", "resetWebUI": "Сброс настроек Web UI", "resetWebUIDesc1": "Сброс настроек веб-интерфейса удаляет только локальный кэш браузера с вашими изображениями и настройками. Он не удаляет изображения с диска.", diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index efc3b60ee3..b874cdc7eb 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ b/invokeai/frontend/web/public/locales/uk.json @@ -485,7 +485,6 @@ "saveSteps": "Зберігати кожні n кроків", "confirmOnDelete": "Підтверджувати видалення", "displayHelpIcons": "Показувати значки підказок", - "useCanvasBeta": "Показувати інструменты зліва (Beta UI)", "enableImageDebugging": "Увімкнути налагодження", "resetWebUI": "Повернути початкові", "resetWebUIDesc1": "Скидання настройок веб-інтерфейсу видаляє лише локальний кеш браузера з вашими зображеннями та налаштуваннями. Це не призводить до видалення зображень з диску.", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 5b3b2f1039..749bd2b5b1 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -504,7 +504,6 @@ "saveSteps": "每n步保存图像", "confirmOnDelete": "删除时确认", "displayHelpIcons": "显示帮助按钮", - "useCanvasBeta": "使用测试版画布视图", "enableImageDebugging": "开启图像调试", "resetWebUI": "重置网页界面", "resetWebUIDesc1": "重置网页只会重置浏览器中缓存的图像和设置,不会删除任何图像。", From 1047d08835d33ca84a3b4844d627445c61cd7093 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Thu, 12 Oct 2023 12:44:30 +0000 Subject: [PATCH 019/202] translationBot(ui): update translation (Italian) Currently translated at 100.0% (591 of 591 strings) translationBot(ui): update translation (Italian) Currently translated at 99.3% (587 of 591 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (586 of 586 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (578 of 578 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (563 of 563 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (559 of 559 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (559 of 559 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (551 of 551 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 65 +++++++++++++++++--- 1 file changed, 57 insertions(+), 8 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index ea8fb710d3..7e56c76c2d 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -78,7 +78,9 @@ "imagePrompt": "Prompt Immagine", "darkMode": "Modalità scura", "lightMode": "Modalità 
chiara", - "batch": "Gestione Lotto" + "batch": "Gestione Lotto", + "modelManager": "Gestione del modello", + "clearNodes": "Sei sicuro di voler cancellare tutti i nodi?" }, "gallery": { "generations": "Generazioni", @@ -94,7 +96,7 @@ "pinGallery": "Blocca la galleria", "allImagesLoaded": "Tutte le immagini caricate", "loadMore": "Carica di più", - "noImagesInGallery": "Nessuna immagine nella galleria", + "noImagesInGallery": "Nessuna immagine da visualizzare", "deleteImage": "Elimina l'immagine", "deleteImagePermanent": "Le immagini eliminate non possono essere ripristinate.", "deleteImageBin": "Le immagini eliminate verranno spostate nel Cestino del tuo sistema operativo.", @@ -369,7 +371,7 @@ "deleteModel": "Elimina modello", "deleteConfig": "Elimina configurazione", "deleteMsg1": "Sei sicuro di voler eliminare questo modello da InvokeAI?", - "deleteMsg2": "Questo non eliminerà il file Checkpoint del modello dal tuo disco. Puoi aggiungerlo nuovamente se lo desideri.", + "deleteMsg2": "Questo eliminerà il modello dal disco se si trova nella cartella principale di InvokeAI. Se utilizzi una cartella personalizzata, il modello NON verrà eliminato dal disco.", "formMessageDiffusersModelLocation": "Ubicazione modelli diffusori", "formMessageDiffusersModelLocationDesc": "Inseriscine almeno uno.", "formMessageDiffusersVAELocation": "Ubicazione file VAE", @@ -378,7 +380,7 @@ "convertToDiffusers": "Converti in Diffusori", "convertToDiffusersHelpText2": "Questo processo sostituirà la voce in Gestione Modelli con la versione Diffusori dello stesso modello.", "convertToDiffusersHelpText4": "Questo è un processo una tantum. Potrebbero essere necessari circa 30-60 secondi a seconda delle specifiche del tuo computer.", - "convertToDiffusersHelpText5": "Assicurati di avere spazio su disco sufficiente. I modelli generalmente variano tra 4 GB e 7 GB di dimensioni.", + "convertToDiffusersHelpText5": "Assicurati di avere spazio su disco sufficiente. I modelli generalmente variano tra 2 GB e 7 GB di dimensioni.", "convertToDiffusersHelpText6": "Vuoi convertire questo modello?", "convertToDiffusersSaveLocation": "Ubicazione salvataggio", "inpainting": "v1 Inpainting", @@ -403,7 +405,7 @@ "mergedModelSaveLocation": "Ubicazione salvataggio", "convertToDiffusersHelpText1": "Questo modello verrà convertito nel formato 🧨 Diffusore.", "custom": "Personalizzata", - "convertToDiffusersHelpText3": "Il tuo file checkpoint sul disco NON verrà comunque cancellato o modificato. Se lo desideri, puoi aggiungerlo di nuovo in Gestione Modelli.", + "convertToDiffusersHelpText3": "Il file checkpoint su disco SARÀ eliminato se si trova nella cartella principale di InvokeAI. Se si trova in una posizione personalizzata, NON verrà eliminato.", "v1": "v1", "pathToCustomConfig": "Percorso alla configurazione personalizzata", "modelThree": "Modello 3", @@ -421,7 +423,21 @@ "scanForModels": "Cerca modelli", "variant": "Variante", "baseModel": "Modello Base", - "vae": "VAE" + "vae": "VAE", + "modelUpdateFailed": "Aggiornamento del modello non riuscito", + "modelConversionFailed": "Conversione del modello non riuscita", + "modelsMergeFailed": "Unione modelli non riuscita", + "selectModel": "Seleziona Modello", + "modelDeleted": "Modello cancellato", + "modelDeleteFailed": "Impossibile eliminare il modello", + "noCustomLocationProvided": "Nessuna posizione personalizzata fornita", + "convertingModelBegin": "Conversione del modello. 
Attendere prego.", + "importModels": "Importa modelli", + "modelsSynced": "Modelli sincronizzati", + "modelSyncFailed": "Sincronizzazione del modello non riuscita", + "settings": "Impostazioni", + "syncModels": "Sincronizza Modelli", + "syncModelsDesc": "Se i tuoi modelli non sono sincronizzati con il back-end, puoi aggiornarli utilizzando questa opzione. Questo è generalmente utile nei casi in cui aggiorni manualmente il tuo file models.yaml o aggiungi modelli alla cartella principale di InvokeAI dopo l'avvio dell'applicazione." }, "parameters": { "images": "Immagini", @@ -529,7 +545,11 @@ "ui": "Interfaccia Utente", "favoriteSchedulersPlaceholder": "Nessun campionatore preferito", "favoriteSchedulers": "Campionatori preferiti", - "showAdvancedOptions": "Mostra Opzioni Avanzate" + "showAdvancedOptions": "Mostra Opzioni Avanzate", + "alternateCanvasLayout": "Layout alternativo della tela", + "beta": "Beta", + "enableNodesEditor": "Abilita l'editor dei nodi", + "experimental": "Sperimentale" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", @@ -568,7 +588,17 @@ "problemCopyingImageLink": "Impossibile copiare il collegamento dell'immagine", "uploadFailedInvalidUploadDesc": "Deve essere una singola immagine PNG o JPEG", "parameterSet": "Parametro impostato", - "parameterNotSet": "Parametro non impostato" + "parameterNotSet": "Parametro non impostato", + "nodesLoadedFailed": "Impossibile caricare i nodi", + "nodesSaved": "Nodi salvati", + "nodesLoaded": "Nodi caricati", + "nodesCleared": "Nodi cancellati", + "problemCopyingImage": "Impossibile copiare l'immagine", + "nodesNotValidGraph": "Grafico del nodo InvokeAI non valido", + "nodesCorruptedGraph": "Impossibile caricare. Il grafico sembra essere danneggiato.", + "nodesUnrecognizedTypes": "Impossibile caricare. Il grafico ha tipi di dati non riconosciuti", + "nodesNotValidJSON": "JSON non valido", + "nodesBrokenConnections": "Impossibile caricare. Alcune connessioni sono interrotte." }, "tooltip": { "feature": { @@ -672,5 +702,24 @@ "hideProgressImages": "Nascondi avanzamento immagini", "showProgressImages": "Mostra avanzamento immagini", "swapSizes": "Scambia dimensioni" + }, + "nodes": { + "reloadSchema": "Ricarica lo schema", + "clearNodes": "Cancella nodi", + "saveNodes": "Salva nodi", + "loadNodes": "Carica nodi", + "zoomOutNodes": "Rimpicciolire", + "hideGraphNodes": "Nascondi sovrapposizione grafico", + "hideLegendNodes": "Nascondi la legenda del tipo di campo", + "showLegendNodes": "Mostra legenda del tipo di campo", + "hideMinimapnodes": "Nascondi minimappa", + "showMinimapnodes": "Mostra minimappa", + "zoomInNodes": "Ingrandire", + "fitViewportNodes": "Adatta vista", + "showGraphNodes": "Mostra sovrapposizione grafico", + "saveGraph": "Salva grafico", + "loadGraph": "Carica grafico (salvato dal Editor dei Nodi) (non copiare e incollare i metadati)", + "clearGraph": "Cancella il grafico", + "clearGraphDesc": "Sei sicuro di voler cancellare tutti i nodi?" 
} } From 5d3ab4f333ea959fa30a239d8127a1b6793bb808 Mon Sep 17 00:00:00 2001 From: Dennis Date: Thu, 12 Oct 2023 12:44:32 +0000 Subject: [PATCH 020/202] translationBot(ui): update translation (Dutch) Currently translated at 100.0% (563 of 563 strings) translationBot(ui): update translation (Dutch) Currently translated at 100.0% (563 of 563 strings) Co-authored-by: Dennis Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/nl/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/nl.json | 57 ++++++++++++++++---- 1 file changed, 46 insertions(+), 11 deletions(-) diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 8458f5e21b..74d46d728b 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -68,14 +68,19 @@ "langHebrew": "עברית", "langKorean": "한국어", "txt2img": "Tekst naar afbeelding", - "postprocessing": "Nabewerking", + "postprocessing": "Naverwerking", "dontAskMeAgain": "Vraag niet opnieuw", "imagePrompt": "Afbeeldingsprompt", "random": "Willekeurig", "generate": "Genereer", "openInNewTab": "Open in nieuw tabblad", "areYouSure": "Weet je het zeker?", - "linear": "Lineair" + "linear": "Lineair", + "batch": "Seriebeheer", + "modelManager": "Modelbeheer", + "clearNodes": "Weet je zeker dat je alle knooppunten wilt weghalen?", + "darkMode": "Donkere modus", + "lightMode": "Lichte modus" }, "gallery": { "generations": "Gegenereerde afbeeldingen", @@ -91,7 +96,7 @@ "pinGallery": "Zet galerij vast", "allImagesLoaded": "Alle afbeeldingen geladen", "loadMore": "Laad meer", - "noImagesInGallery": "Geen afbeeldingen in galerij", + "noImagesInGallery": "Geen afbeeldingen om te tonen", "deleteImage": "Wis afbeelding", "deleteImageBin": "Gewiste afbeeldingen worden naar de prullenbak van je besturingssysteem gestuurd.", "deleteImagePermanent": "Gewiste afbeeldingen kunnen niet worden hersteld.", @@ -355,8 +360,8 @@ "delete": "Verwijder", "deleteModel": "Verwijder model", "deleteConfig": "Verwijder configuratie", - "deleteMsg1": "Weet je zeker dat je deze modelregel wilt verwijderen uit InvokeAI?", - "deleteMsg2": "Hiermee wordt het checkpointbestand niet van je schijf verwijderd. Je kunt deze opnieuw toevoegen als je dat wilt.", + "deleteMsg1": "Weet je zeker dat je dit model wilt verwijderen uit InvokeAI?", + "deleteMsg2": "Hiermee ZAL het model van schijf worden verwijderd als het zich bevindt in de InvokeAI-beginmap. Als je het model vanaf een eigen locatie gebruikt, dan ZAL het model NIET van schijf worden verwijderd.", "formMessageDiffusersVAELocationDesc": "Indien niet opgegeven, dan zal InvokeAI kijken naar het VAE-bestand in de hierboven gegeven modellocatie.", "repoIDValidationMsg": "Online repository van je model", "formMessageDiffusersModelLocation": "Locatie Diffusers-model", @@ -378,7 +383,7 @@ "convertToDiffusersHelpText1": "Dit model wordt omgezet naar de🧨 Diffusers-indeling.", "convertToDiffusersHelpText2": "Dit proces vervangt het onderdeel in Modelonderhoud met de Diffusers-versie van hetzelfde model.", "convertToDiffusersHelpText4": "Dit is een eenmalig proces. Dit neemt ongeveer 30 tot 60 sec. in beslag, afhankelijk van de specificaties van je computer.", - "convertToDiffusersHelpText5": "Zorg ervoor dat je genoeg schijfruimte hebt. Modellen nemen gewoonlijk ongeveer 4 - 7 GB ruimte in beslag.", + "convertToDiffusersHelpText5": "Zorg ervoor dat je genoeg schijfruimte hebt. 
Modellen nemen gewoonlijk ongeveer 2 tot 7 GB ruimte in beslag.", "convertToDiffusersSaveLocation": "Bewaarlocatie", "v1": "v1", "inpainting": "v1-inpainting", @@ -415,7 +420,14 @@ "none": "geen", "addDifference": "Voeg verschil toe", "scanForModels": "Scan naar modellen", - "pickModelType": "Kies modelsoort" + "pickModelType": "Kies modelsoort", + "baseModel": "Basismodel", + "vae": "VAE", + "variant": "Variant", + "modelConversionFailed": "Omzetten model mislukt", + "modelUpdateFailed": "Bijwerken model mislukt", + "modelsMergeFailed": "Samenvoegen model mislukt", + "selectModel": "Kies model" }, "parameters": { "images": "Afbeeldingen", @@ -492,7 +504,14 @@ "seamlessXAxis": "X-as", "seamlessYAxis": "Y-as", "hidePreview": "Verberg voorvertoning", - "showPreview": "Toon voorvertoning" + "showPreview": "Toon voorvertoning", + "boundingBoxWidth": "Kaderbreedte", + "boundingBoxHeight": "Kaderhoogte", + "clipSkip": "Overslaan CLIP", + "aspectRatio": "Verhouding", + "negativePromptPlaceholder": "Negatieve prompt", + "controlNetControlMode": "Aansturingsmodus", + "positivePromptPlaceholder": "Positieve prompt" }, "settings": { "models": "Modellen", @@ -513,7 +532,10 @@ "showProgressInViewer": "Toon voortgangsafbeeldingen in viewer", "generation": "Generatie", "ui": "Gebruikersinterface", - "antialiasProgressImages": "Voer anti-aliasing uit op voortgangsafbeeldingen" + "antialiasProgressImages": "Voer anti-aliasing uit op voortgangsafbeeldingen", + "showAdvancedOptions": "Toon uitgebreide opties", + "favoriteSchedulers": "Favoriete planners", + "favoriteSchedulersPlaceholder": "Geen favoriete planners ingesteld" }, "toast": { "tempFoldersEmptied": "Tijdelijke map geleegd", @@ -550,7 +572,13 @@ "connected": "Verbonden met server", "canceled": "Verwerking geannuleerd", "uploadFailedInvalidUploadDesc": "Moet een enkele PNG- of JPEG-afbeelding zijn", - "problemCopyingImageLink": "Kan afbeeldingslink niet kopiëren" + "problemCopyingImageLink": "Kan afbeeldingslink niet kopiëren", + "parameterNotSet": "Parameter niet ingesteld", + "parameterSet": "Instellen parameters", + "nodesSaved": "Knooppunten bewaard", + "nodesLoaded": "Knooppunten geladen", + "nodesCleared": "Knooppunten weggehaald", + "nodesLoadedFailed": "Laden knooppunten mislukt" }, "tooltip": { "feature": { @@ -652,6 +680,13 @@ }, "ui": { "showProgressImages": "Toon voortgangsafbeeldingen", - "hideProgressImages": "Verberg voortgangsafbeeldingen" + "hideProgressImages": "Verberg voortgangsafbeeldingen", + "swapSizes": "Wissel afmetingen om" + }, + "nodes": { + "reloadSchema": "Laad schema opnieuw in", + "loadNodes": "Laad knooppunten", + "saveNodes": "Bewaar knooppunten", + "clearNodes": "Haal knooppunten weg" } } From 8e943b2ce142c6820fbdb9ed89451c67d337ceed Mon Sep 17 00:00:00 2001 From: Simona Liliac Date: Thu, 12 Oct 2023 12:44:33 +0000 Subject: [PATCH 021/202] translationBot(ui): update translation (Polish) Currently translated at 58.4% (338 of 578 strings) Co-authored-by: Simona Liliac Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/pl/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/pl.json | 29 +++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index 9a1b0cf3fb..2bde4c95c0 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -39,7 +39,11 @@ "statusUpscaling": "Powiększanie obrazu", "statusUpscalingESRGAN": 
"Powiększanie (ESRGAN)", "statusLoadingModel": "Wczytywanie modelu", - "statusModelChanged": "Zmieniono model" + "statusModelChanged": "Zmieniono model", + "githubLabel": "GitHub", + "discordLabel": "Discord", + "darkMode": "Tryb ciemny", + "lightMode": "Tryb jasny" }, "gallery": { "generations": "Wygenerowane", @@ -437,5 +441,28 @@ "betaDarkenOutside": "Przyciemnienie", "betaLimitToBox": "Ogranicz do zaznaczenia", "betaPreserveMasked": "Zachowaj obszar" + }, + "accessibility": { + "zoomIn": "Przybliż", + "exitViewer": "Wyjdź z podglądu", + "modelSelect": "Wybór modelu", + "invokeProgressBar": "Pasek postępu", + "reset": "Zerowanie", + "useThisParameter": "Użyj tego parametru", + "copyMetadataJson": "Kopiuj metadane JSON", + "uploadImage": "Wgrywanie obrazu", + "previousImage": "Poprzedni obraz", + "nextImage": "Następny obraz", + "zoomOut": "Oddal", + "rotateClockwise": "Obróć zgodnie ze wskazówkami zegara", + "rotateCounterClockwise": "Obróć przeciwnie do wskazówek zegara", + "flipHorizontally": "Odwróć horyzontalnie", + "flipVertically": "Odwróć wertykalnie", + "modifyConfig": "Modyfikuj ustawienia", + "toggleAutoscroll": "Przełącz autoprzewijanie", + "toggleLogViewer": "Przełącz podgląd logów", + "showGallery": "Pokaż galerię", + "showOptionsPanel": "Pokaż panel opcji", + "menu": "Menu" } } From 2bad8b9f29c2ef335583449e09c62e7aef05c192 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:44:36 +0000 Subject: [PATCH 022/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/es.json | 6 +----- invokeai/frontend/web/public/locales/it.json | 6 +----- invokeai/frontend/web/public/locales/nl.json | 6 +----- 3 files changed, 3 insertions(+), 15 deletions(-) diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index ec140836d2..5c182692b5 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -79,8 +79,7 @@ "batch": "Administrador de lotes", "darkMode": "Modo oscuro", "lightMode": "Modo claro", - "modelManager": "Administrador de modelos", - "clearNodes": "¿Estás seguro de que deseas borrar todos los nodos?" + "modelManager": "Administrador de modelos" }, "gallery": { "generations": "Generaciones", @@ -705,9 +704,6 @@ }, "nodes": { "reloadSchema": "Esquema de recarga", - "loadNodes": "Nodos de carga", - "clearNodes": "Borrar los nodos", - "saveNodes": "Guardar los nodos", "showGraphNodes": "Mostrar la superposición de los gráficos", "zoomInNodes": "Acercar", "hideMinimapnodes": "Ocultar el minimapa", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 7e56c76c2d..74663d95f4 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -79,8 +79,7 @@ "darkMode": "Modalità scura", "lightMode": "Modalità chiara", "batch": "Gestione Lotto", - "modelManager": "Gestione del modello", - "clearNodes": "Sei sicuro di voler cancellare tutti i nodi?" 
+ "modelManager": "Gestione del modello" }, "gallery": { "generations": "Generazioni", @@ -705,9 +704,6 @@ }, "nodes": { "reloadSchema": "Ricarica lo schema", - "clearNodes": "Cancella nodi", - "saveNodes": "Salva nodi", - "loadNodes": "Carica nodi", "zoomOutNodes": "Rimpicciolire", "hideGraphNodes": "Nascondi sovrapposizione grafico", "hideLegendNodes": "Nascondi la legenda del tipo di campo", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 74d46d728b..045ace3dbf 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -78,7 +78,6 @@ "linear": "Lineair", "batch": "Seriebeheer", "modelManager": "Modelbeheer", - "clearNodes": "Weet je zeker dat je alle knooppunten wilt weghalen?", "darkMode": "Donkere modus", "lightMode": "Lichte modus" }, @@ -684,9 +683,6 @@ "swapSizes": "Wissel afmetingen om" }, "nodes": { - "reloadSchema": "Laad schema opnieuw in", - "loadNodes": "Laad knooppunten", - "saveNodes": "Bewaar knooppunten", - "clearNodes": "Haal knooppunten weg" + "reloadSchema": "Laad schema opnieuw in" } } From 77aeb9a421e6d8851d0562ece2a653afb2a8ca9c Mon Sep 17 00:00:00 2001 From: gallegonovato Date: Thu, 12 Oct 2023 12:44:38 +0000 Subject: [PATCH 023/202] translationBot(ui): update translation (Spanish) Currently translated at 99.6% (601 of 603 strings) translationBot(ui): update translation (Spanish) Currently translated at 99.5% (600 of 603 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (599 of 599 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (596 of 596 strings) translationBot(ui): update translation (Spanish) Currently translated at 99.8% (594 of 595 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (593 of 593 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (592 of 592 strings) Co-authored-by: gallegonovato Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/es/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/es.json | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 5c182692b5..d30ee3c188 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -79,7 +79,8 @@ "batch": "Administrador de lotes", "darkMode": "Modo oscuro", "lightMode": "Modo claro", - "modelManager": "Administrador de modelos" + "modelManager": "Administrador de modelos", + "communityLabel": "Comunidad" }, "gallery": { "generations": "Generaciones", @@ -100,7 +101,8 @@ "deleteImageBin": "Las imágenes eliminadas se enviarán a la papelera de tu sistema operativo.", "deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.", "images": "Imágenes", - "assets": "Activos" + "assets": "Activos", + "autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic" }, "hotkeys": { "keyboardShortcuts": "Atajos de teclado", @@ -436,7 +438,10 @@ "syncModels": "Sincronizar las plantillas", "syncModelsDesc": "Si tus plantillas no están sincronizados con el backend, puedes actualizarlas usando esta opción. 
Esto suele ser útil en los casos en los que actualizas manualmente tu archivo models.yaml o añades plantillas a la carpeta raíz de InvokeAI después de que la aplicación haya arrancado.", "modelsSynced": "Plantillas sincronizadas", - "modelSyncFailed": "La sincronización de la plantilla falló" + "modelSyncFailed": "La sincronización de la plantilla falló", + "loraModels": "LoRA", + "onnxModels": "Onnx", + "oliveModels": "Olives" }, "parameters": { "images": "Imágenes", @@ -520,7 +525,12 @@ "negativePromptPlaceholder": "Prompt Negativo", "controlNetControlMode": "Modo de control", "clipSkip": "Omitir el CLIP", - "aspectRatio": "Relación" + "aspectRatio": "Relación", + "maskAdjustmentsHeader": "Ajustes de la máscara", + "maskBlur": "Máscara de Desenfoque", + "maskBlurMethod": "Método del desenfoque de la máscara", + "seamHighThreshold": "Alto", + "seamPaintingHeader": "Pintando las uniones" }, "settings": { "models": "Modelos", From 9940cbfa870ad528f685e3e9e78ccaa98725f5ff Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Thu, 12 Oct 2023 12:44:40 +0000 Subject: [PATCH 024/202] translationBot(ui): update translation (Italian) Currently translated at 97.8% (589 of 602 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (603 of 603 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (599 of 599 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (596 of 596 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (595 of 595 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (595 of 595 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (593 of 593 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (592 of 592 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 24 ++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 74663d95f4..4fb45990aa 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -79,7 +79,8 @@ "darkMode": "Modalità scura", "lightMode": "Modalità chiara", "batch": "Gestione Lotto", - "modelManager": "Gestione del modello" + "modelManager": "Gestione del modello", + "communityLabel": "Comunità" }, "gallery": { "generations": "Generazioni", @@ -100,7 +101,8 @@ "deleteImagePermanent": "Le immagini eliminate non possono essere ripristinate.", "deleteImageBin": "Le immagini eliminate verranno spostate nel Cestino del tuo sistema operativo.", "images": "Immagini", - "assets": "Risorse" + "assets": "Risorse", + "autoAssignBoardOnClick": "Assegna automaticamente la bacheca al clic" }, "hotkeys": { "keyboardShortcuts": "Tasti rapidi", @@ -307,6 +309,10 @@ "acceptStagingImage": { "title": "Accetta l'immagine della sessione", "desc": "Accetta l'immagine dell'area della sessione corrente" + }, + "nodesHotkeys": "Tasti di scelta rapida dei Nodi", + "addNodes": { + "title": "Aggiungi Nodi" } }, "modelManager": { @@ -436,7 +442,10 @@ "modelSyncFailed": "Sincronizzazione del modello non riuscita", "settings": "Impostazioni", "syncModels": "Sincronizza Modelli", - "syncModelsDesc": "Se i tuoi modelli non sono 
sincronizzati con il back-end, puoi aggiornarli utilizzando questa opzione. Questo è generalmente utile nei casi in cui aggiorni manualmente il tuo file models.yaml o aggiungi modelli alla cartella principale di InvokeAI dopo l'avvio dell'applicazione." + "syncModelsDesc": "Se i tuoi modelli non sono sincronizzati con il back-end, puoi aggiornarli utilizzando questa opzione. Questo è generalmente utile nei casi in cui aggiorni manualmente il tuo file models.yaml o aggiungi modelli alla cartella principale di InvokeAI dopo l'avvio dell'applicazione.", + "loraModels": "LoRA", + "oliveModels": "Olive", + "onnxModels": "ONNX" }, "parameters": { "images": "Immagini", @@ -520,7 +529,14 @@ "negativePromptPlaceholder": "Prompt Negativo", "controlNetControlMode": "Modalità di controllo", "clipSkip": "Salta CLIP", - "aspectRatio": "Proporzioni" + "aspectRatio": "Proporzioni", + "maskAdjustmentsHeader": "Regolazioni della maschera", + "maskBlur": "Sfocatura maschera", + "maskBlurMethod": "Metodo di sfocatura della maschera", + "seamPaintingHeader": "Pittura della cucitura", + "seamThreshold": "Soglia di cucitura", + "seamLowThreshold": "Basso", + "seamHighThreshold": "Alto" }, "settings": { "models": "Modelli", From bd97c6b70813f697eaad15e4d7a9b2bb093b32ec Mon Sep 17 00:00:00 2001 From: Arnold Cordewiner Date: Thu, 12 Oct 2023 12:44:41 +0000 Subject: [PATCH 025/202] translationBot(ui): update translation (Dutch) Currently translated at 99.6% (591 of 593 strings) Co-authored-by: Arnold Cordewiner Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/nl/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/nl.json | 53 ++++++++++++++++---- 1 file changed, 44 insertions(+), 9 deletions(-) diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 045ace3dbf..c0b260e70b 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -61,7 +61,7 @@ "statusMergedModels": "Modellen samengevoegd", "cancel": "Annuleer", "accept": "Akkoord", - "langPortuguese": "Português", + "langPortuguese": "Portugees", "pinOptionsPanel": "Zet deelscherm Opties vast", "loading": "Bezig met laden", "loadingInvokeAI": "Bezig met laden van Invoke AI", @@ -79,7 +79,8 @@ "batch": "Seriebeheer", "modelManager": "Modelbeheer", "darkMode": "Donkere modus", - "lightMode": "Lichte modus" + "lightMode": "Lichte modus", + "communityLabel": "Gemeenschap" }, "gallery": { "generations": "Gegenereerde afbeeldingen", @@ -364,7 +365,7 @@ "formMessageDiffusersVAELocationDesc": "Indien niet opgegeven, dan zal InvokeAI kijken naar het VAE-bestand in de hierboven gegeven modellocatie.", "repoIDValidationMsg": "Online repository van je model", "formMessageDiffusersModelLocation": "Locatie Diffusers-model", - "convertToDiffusersHelpText3": "Je Checkpoint-bestand op schijf zal NIET worden verwijderd of gewijzigd. Je kunt je Checkpoint opnieuw toevoegen aan Modelonderhoud als je dat wilt.", + "convertToDiffusersHelpText3": "Je checkpoint-bestand op schijf ZAL worden verwijderd als het zich in de InvokeAI root map bevindt. 
Het zal NIET worden verwijderd als het zich in een andere locatie bevindt.", "convertToDiffusersHelpText6": "Wil je dit model omzetten?", "allModels": "Alle modellen", "checkpointModels": "Checkpoints", @@ -426,7 +427,18 @@ "modelConversionFailed": "Omzetten model mislukt", "modelUpdateFailed": "Bijwerken model mislukt", "modelsMergeFailed": "Samenvoegen model mislukt", - "selectModel": "Kies model" + "selectModel": "Kies model", + "settings": "Instellingen", + "modelDeleted": "Model verwijderd", + "noCustomLocationProvided": "Geen Aangepaste Locatie Opgegeven", + "syncModels": "Synchroniseer Modellen", + "modelsSynced": "Modellen Gesynchroniseerd", + "modelSyncFailed": "Synchronisatie Modellen Gefaald", + "modelDeleteFailed": "Model kon niet verwijderd worden", + "convertingModelBegin": "Model aan het converteren. Even geduld.", + "importModels": "Importeer Modellen", + "syncModelsDesc": "Als je modellen niet meer synchroon zijn met de backend, kan je ze met deze optie verversen. Dit wordt typisch gebruikt in het geval je het models.yaml bestand met de hand bewerkt of als je modellen aan de InvokeAI root map toevoegt nadat de applicatie gestart werd.", + "loraModels": "LoRA's" }, "parameters": { "images": "Afbeeldingen", @@ -504,8 +516,8 @@ "seamlessYAxis": "Y-as", "hidePreview": "Verberg voorvertoning", "showPreview": "Toon voorvertoning", - "boundingBoxWidth": "Kaderbreedte", - "boundingBoxHeight": "Kaderhoogte", + "boundingBoxWidth": "Tekenvak breedte", + "boundingBoxHeight": "Tekenvak hoogte", "clipSkip": "Overslaan CLIP", "aspectRatio": "Verhouding", "negativePromptPlaceholder": "Negatieve prompt", @@ -534,7 +546,11 @@ "antialiasProgressImages": "Voer anti-aliasing uit op voortgangsafbeeldingen", "showAdvancedOptions": "Toon uitgebreide opties", "favoriteSchedulers": "Favoriete planners", - "favoriteSchedulersPlaceholder": "Geen favoriete planners ingesteld" + "favoriteSchedulersPlaceholder": "Geen favoriete planners ingesteld", + "beta": "Bèta", + "experimental": "Experimenteel", + "alternateCanvasLayout": "Omwisselen Canvas Layout", + "enableNodesEditor": "Knopen Editor Inschakelen" }, "toast": { "tempFoldersEmptied": "Tijdelijke map geleegd", @@ -577,7 +593,13 @@ "nodesSaved": "Knooppunten bewaard", "nodesLoaded": "Knooppunten geladen", "nodesCleared": "Knooppunten weggehaald", - "nodesLoadedFailed": "Laden knooppunten mislukt" + "nodesLoadedFailed": "Laden knooppunten mislukt", + "problemCopyingImage": "Kan Afbeelding Niet Kopiëren", + "nodesNotValidJSON": "Ongeldige JSON", + "nodesCorruptedGraph": "Kan niet laden. Graph lijkt corrupt.", + "nodesUnrecognizedTypes": "Laden mislukt. Graph heeft onherkenbare types", + "nodesBrokenConnections": "Laden mislukt. 
Sommige verbindingen zijn verbroken.", + "nodesNotValidGraph": "Geen geldige knooppunten graph" }, "tooltip": { "feature": { @@ -683,6 +705,19 @@ "swapSizes": "Wissel afmetingen om" }, "nodes": { - "reloadSchema": "Laad schema opnieuw in" + "reloadSchema": "Laad schema opnieuw in", + "zoomOutNodes": "Uitzoomen", + "fitViewportNodes": "Aanpassen aan beeld", + "hideMinimapnodes": "Minimap verbergen", + "showLegendNodes": "Typelegende veld tonen", + "zoomInNodes": "Inzoomen", + "hideGraphNodes": "Graph overlay verbergen", + "clearGraph": "Graph verwijderen", + "showGraphNodes": "Graph overlay tonen", + "showMinimapnodes": "Minimap tonen", + "clearGraphDesc": "Ben je zeker dat je alle knooppunten wil verwijderen?", + "saveGraph": "Graph opslaan", + "loadGraph": "Graph laden (bewaard van knooppunten editor) (Metadata niet kopiëren-plakken)", + "hideLegendNodes": "Typelegende veld verbergen" } } From be52eb153ce5a99fd1acf9b6e5b69ad7bc5078de Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:44:44 +0000 Subject: [PATCH 026/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ar.json | 1 - invokeai/frontend/web/public/locales/de.json | 1 - invokeai/frontend/web/public/locales/es.json | 9 +-------- invokeai/frontend/web/public/locales/fi.json | 2 -- invokeai/frontend/web/public/locales/fr.json | 2 -- invokeai/frontend/web/public/locales/he.json | 1 - invokeai/frontend/web/public/locales/it.json | 9 +-------- invokeai/frontend/web/public/locales/ja.json | 2 -- invokeai/frontend/web/public/locales/nl.json | 7 ------- invokeai/frontend/web/public/locales/pl.json | 1 - invokeai/frontend/web/public/locales/pt.json | 2 -- invokeai/frontend/web/public/locales/pt_BR.json | 2 -- invokeai/frontend/web/public/locales/ru.json | 7 ------- invokeai/frontend/web/public/locales/sv.json | 2 -- invokeai/frontend/web/public/locales/uk.json | 2 -- invokeai/frontend/web/public/locales/zh_CN.json | 4 +--- 16 files changed, 3 insertions(+), 51 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json index 7ec6126e23..fd0f524b73 100644 --- a/invokeai/frontend/web/public/locales/ar.json +++ b/invokeai/frontend/web/public/locales/ar.json @@ -53,7 +53,6 @@ "maintainAspectRatio": "الحفاظ على نسبة الأبعاد", "autoSwitchNewImages": "التبديل التلقائي إلى الصور الجديدة", "singleColumnLayout": "تخطيط عمود واحد", - "pinGallery": "تثبيت المعرض", "allImagesLoaded": "تم تحميل جميع الصور", "loadMore": "تحميل المزيد", "noImagesInGallery": "لا توجد صور في المعرض" diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 0ce584c656..3601fc0053 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -71,7 +71,6 @@ "maintainAspectRatio": "Seitenverhältnis beibehalten", "autoSwitchNewImages": "Automatisch zu neuen Bildern wechseln", "singleColumnLayout": "Einspaltiges Layout", - "pinGallery": "Galerie anpinnen", "allImagesLoaded": "Alle Bilder geladen", "loadMore": "Mehr laden", "noImagesInGallery": "Keine Bilder in der Galerie" diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index d30ee3c188..8ecce18263 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ 
b/invokeai/frontend/web/public/locales/es.json @@ -62,7 +62,6 @@ "langPortuguese": "Portugués", "langKorean": "Coreano", "langHebrew": "Hebreo", - "pinOptionsPanel": "Pin del panel de opciones", "loading": "Cargando", "loadingInvokeAI": "Cargando invocar a la IA", "postprocessing": "Tratamiento posterior", @@ -93,7 +92,6 @@ "maintainAspectRatio": "Mantener relación de aspecto", "autoSwitchNewImages": "Auto seleccionar Imágenes nuevas", "singleColumnLayout": "Diseño de una columna", - "pinGallery": "Fijar galería", "allImagesLoaded": "Todas las imágenes cargadas", "loadMore": "Cargar más", "noImagesInGallery": "No hay imágenes para mostrar", @@ -713,7 +711,6 @@ "swapSizes": "Cambiar los tamaños" }, "nodes": { - "reloadSchema": "Esquema de recarga", "showGraphNodes": "Mostrar la superposición de los gráficos", "zoomInNodes": "Acercar", "hideMinimapnodes": "Ocultar el minimapa", @@ -722,10 +719,6 @@ "hideGraphNodes": "Ocultar la superposición de los gráficos", "hideLegendNodes": "Ocultar la leyenda del tipo de campo", "showLegendNodes": "Mostrar la leyenda del tipo de campo", - "showMinimapnodes": "Mostrar el minimapa", - "saveGraph": "Guardar el gráfico", - "clearGraph": "Borrar el gráfico", - "clearGraphDesc": "¿Estás seguro de que deseas borrar todos los nodos?", - "loadGraph": "Cargar el gráfico (guardado desde el Editor de nodos) (No copiar y pegar los metadatos)" + "showMinimapnodes": "Mostrar el minimapa" } } diff --git a/invokeai/frontend/web/public/locales/fi.json b/invokeai/frontend/web/public/locales/fi.json index 790c63fc85..f257344cf1 100644 --- a/invokeai/frontend/web/public/locales/fi.json +++ b/invokeai/frontend/web/public/locales/fi.json @@ -74,7 +74,6 @@ "statusGeneratingInpainting": "Generoidaan sisällemaalausta", "statusGeneratingOutpainting": "Generoidaan ulosmaalausta", "statusRestoringFaces": "Korjataan kasvoja", - "pinOptionsPanel": "Kiinnitä asetukset -paneeli", "loadingInvokeAI": "Ladataan Invoke AI:ta", "loading": "Ladataan", "statusGenerating": "Generoidaan", @@ -90,7 +89,6 @@ "galleryImageResetSize": "Resetoi koko", "maintainAspectRatio": "Säilytä kuvasuhde", "galleryImageSize": "Kuvan koko", - "pinGallery": "Kiinnitä galleria", "showGenerations": "Näytä generaatiot", "singleColumnLayout": "Yhden sarakkeen asettelu", "generations": "Generoinnit", diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index 0a99845f03..9c5d9e634e 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -51,7 +51,6 @@ "statusConvertingModel": "Conversion du modèle", "statusModelConverted": "Modèle converti", "loading": "Chargement", - "pinOptionsPanel": "Épingler la page d'options", "statusMergedModels": "Modèles mélangés", "txt2img": "Texte vers image", "postprocessing": "Post-Traitement" @@ -67,7 +66,6 @@ "maintainAspectRatio": "Maintenir le rapport d'aspect", "autoSwitchNewImages": "Basculer automatiquement vers de nouvelles images", "singleColumnLayout": "Mise en page en colonne unique", - "pinGallery": "Épingler la galerie", "allImagesLoaded": "Toutes les images chargées", "loadMore": "Charger plus", "noImagesInGallery": "Aucune image dans la galerie" diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json index 323be9d8b6..f8065e16dd 100644 --- a/invokeai/frontend/web/public/locales/he.json +++ b/invokeai/frontend/web/public/locales/he.json @@ -380,7 +380,6 @@ "maintainAspectRatio": "שמור על יחס רוחב-גובה", 
"autoSwitchNewImages": "החלף אוטומטית לתמונות חדשות", "singleColumnLayout": "תצוגת עמודה אחת", - "pinGallery": "הצמד גלריה", "allImagesLoaded": "כל התמונות נטענו", "loadMore": "טען עוד", "noImagesInGallery": "אין תמונות בגלריה", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 4fb45990aa..881d27195f 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -61,7 +61,6 @@ "statusConvertingModel": "Conversione Modello", "langKorean": "Coreano", "langPortuguese": "Portoghese", - "pinOptionsPanel": "Blocca il pannello Opzioni", "loading": "Caricamento in corso", "langHebrew": "Ebraico", "loadingInvokeAI": "Caricamento Invoke AI", @@ -93,7 +92,6 @@ "maintainAspectRatio": "Mantenere le proporzioni", "autoSwitchNewImages": "Passaggio automatico a nuove immagini", "singleColumnLayout": "Layout a colonna singola", - "pinGallery": "Blocca la galleria", "allImagesLoaded": "Tutte le immagini caricate", "loadMore": "Carica di più", "noImagesInGallery": "Nessuna immagine da visualizzare", @@ -719,7 +717,6 @@ "swapSizes": "Scambia dimensioni" }, "nodes": { - "reloadSchema": "Ricarica lo schema", "zoomOutNodes": "Rimpicciolire", "hideGraphNodes": "Nascondi sovrapposizione grafico", "hideLegendNodes": "Nascondi la legenda del tipo di campo", @@ -728,10 +725,6 @@ "showMinimapnodes": "Mostra minimappa", "zoomInNodes": "Ingrandire", "fitViewportNodes": "Adatta vista", - "showGraphNodes": "Mostra sovrapposizione grafico", - "saveGraph": "Salva grafico", - "loadGraph": "Carica grafico (salvato dal Editor dei Nodi) (non copiare e incollare i metadati)", - "clearGraph": "Cancella il grafico", - "clearGraphDesc": "Sei sicuro di voler cancellare tutti i nodi?" + "showGraphNodes": "Mostra sovrapposizione grafico" } } diff --git a/invokeai/frontend/web/public/locales/ja.json b/invokeai/frontend/web/public/locales/ja.json index 3672a224ad..cc4dbbdde3 100644 --- a/invokeai/frontend/web/public/locales/ja.json +++ b/invokeai/frontend/web/public/locales/ja.json @@ -52,7 +52,6 @@ "loadingInvokeAI": "Invoke AIをロード中", "statusConvertingModel": "モデルの変換", "statusMergedModels": "マージ済モデル", - "pinOptionsPanel": "オプションパネルを固定", "githubLabel": "Github", "hotkeysLabel": "ホットキー", "langHebrew": "עברית", @@ -78,7 +77,6 @@ "gallerySettings": "ギャラリーの設定", "maintainAspectRatio": "アスペクト比を維持", "singleColumnLayout": "1カラムレイアウト", - "pinGallery": "ギャラリーにピン留め", "allImagesLoaded": "すべての画像を読み込む", "loadMore": "さらに読み込む", "noImagesInGallery": "ギャラリーに画像がありません", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index c0b260e70b..4d284e4d5d 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -62,7 +62,6 @@ "cancel": "Annuleer", "accept": "Akkoord", "langPortuguese": "Portugees", - "pinOptionsPanel": "Zet deelscherm Opties vast", "loading": "Bezig met laden", "loadingInvokeAI": "Bezig met laden van Invoke AI", "langHebrew": "עברית", @@ -93,7 +92,6 @@ "maintainAspectRatio": "Behoud beeldverhoiding", "autoSwitchNewImages": "Wissel autom. 
naar nieuwe afbeeldingen", "singleColumnLayout": "Eenkolomsindeling", - "pinGallery": "Zet galerij vast", "allImagesLoaded": "Alle afbeeldingen geladen", "loadMore": "Laad meer", "noImagesInGallery": "Geen afbeeldingen om te tonen", @@ -705,19 +703,14 @@ "swapSizes": "Wissel afmetingen om" }, "nodes": { - "reloadSchema": "Laad schema opnieuw in", "zoomOutNodes": "Uitzoomen", "fitViewportNodes": "Aanpassen aan beeld", "hideMinimapnodes": "Minimap verbergen", "showLegendNodes": "Typelegende veld tonen", "zoomInNodes": "Inzoomen", "hideGraphNodes": "Graph overlay verbergen", - "clearGraph": "Graph verwijderen", "showGraphNodes": "Graph overlay tonen", "showMinimapnodes": "Minimap tonen", - "clearGraphDesc": "Ben je zeker dat je alle knooppunten wil verwijderen?", - "saveGraph": "Graph opslaan", - "loadGraph": "Graph laden (bewaard van knooppunten editor) (Metadata niet kopiëren-plakken)", "hideLegendNodes": "Typelegende veld verbergen" } } diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index 2bde4c95c0..2958ef982c 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -56,7 +56,6 @@ "maintainAspectRatio": "Zachowaj proporcje", "autoSwitchNewImages": "Przełączaj na nowe obrazy", "singleColumnLayout": "Układ jednokolumnowy", - "pinGallery": "Przypnij galerię", "allImagesLoaded": "Koniec listy", "loadMore": "Wczytaj więcej", "noImagesInGallery": "Brak obrazów w galerii" diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index c57a005779..a605f9c49d 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -59,7 +59,6 @@ "statusGenerationComplete": "Geração Completa", "statusMergingModels": "Mesclando Modelos", "statusMergedModels": "Modelos Mesclados", - "pinOptionsPanel": "Fixar painel de opções", "loading": "A carregar", "loadingInvokeAI": "A carregar Invoke AI", "langPortuguese": "Português" @@ -69,7 +68,6 @@ "gallerySettings": "Configurações de Galeria", "maintainAspectRatio": "Mater Proporções", "autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente", - "pinGallery": "Fixar Galeria", "singleColumnLayout": "Disposição em Coluna Única", "allImagesLoaded": "Todas as Imagens Carregadas", "loadMore": "Carregar Mais", diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index f76703e676..d11b0758e6 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -59,7 +59,6 @@ "statusMergedModels": "Modelos Mesclados", "langRussian": "Russo", "langSpanish": "Espanhol", - "pinOptionsPanel": "Fixar painel de opções", "loadingInvokeAI": "Carregando Invoke AI", "loading": "Carregando" }, @@ -74,7 +73,6 @@ "maintainAspectRatio": "Mater Proporções", "autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente", "singleColumnLayout": "Disposição em Coluna Única", - "pinGallery": "Fixar Galeria", "allImagesLoaded": "Todas as Imagens Carregadas", "loadMore": "Carregar Mais", "noImagesInGallery": "Sem Imagens na Galeria" diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 8b1a7d3d25..17494bdd57 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -45,7 +45,6 @@ "statusMergingModels": "Слияние моделей", "statusModelConverted": "Модель 
сконвертирована", "statusMergedModels": "Модели объединены", - "pinOptionsPanel": "Закрепить панель настроек", "loading": "Загрузка", "loadingInvokeAI": "Загрузка Invoke AI", "back": "Назад", @@ -93,7 +92,6 @@ "maintainAspectRatio": "Сохранять пропорции", "autoSwitchNewImages": "Автоматически выбирать новые", "singleColumnLayout": "Одна колонка", - "pinGallery": "Закрепить галерею", "allImagesLoaded": "Все изображения загружены", "loadMore": "Показать больше", "noImagesInGallery": "Изображений нет", @@ -720,9 +718,6 @@ "swapSizes": "Поменять местами размеры" }, "nodes": { - "reloadSchema": "Перезагрузить схему", - "saveGraph": "Сохранить граф", - "clearGraph": "Очистить граф", "zoomInNodes": "Увеличьте масштаб", "zoomOutNodes": "Уменьшите масштаб", "fitViewportNodes": "Уместить вид", @@ -730,8 +725,6 @@ "showGraphNodes": "Показать оверлей графа", "showLegendNodes": "Показать тип поля", "hideMinimapnodes": "Скрыть миникарту", - "loadGraph": "Загрузить граф (сохраненный из Редактора узлов) (Не копировать и не вставлять метаданные)", - "clearGraphDesc": "Вы уверены, что хотите очистить все узлы?", "hideLegendNodes": "Скрыть тип поля", "showMinimapnodes": "Показать миникарту", "saveWorkflow": "Сохранить рабочий процесс", diff --git a/invokeai/frontend/web/public/locales/sv.json b/invokeai/frontend/web/public/locales/sv.json index 6030f57256..c3f25e65d8 100644 --- a/invokeai/frontend/web/public/locales/sv.json +++ b/invokeai/frontend/web/public/locales/sv.json @@ -59,7 +59,6 @@ "statusGenerationComplete": "Generering klar", "statusModelConverted": "Modell konverterad", "statusMergingModels": "Sammanfogar modeller", - "pinOptionsPanel": "Nåla fast inställningspanelen", "loading": "Laddar", "loadingInvokeAI": "Laddar Invoke AI", "statusRestoringFaces": "Återskapar ansikten", @@ -103,7 +102,6 @@ "galleryImageResetSize": "Återställ storlek", "gallerySettings": "Galleriinställningar", "maintainAspectRatio": "Behåll bildförhållande", - "pinGallery": "Nåla fast galleri", "noImagesInGallery": "Inga bilder i galleriet", "autoSwitchNewImages": "Ändra automatiskt till nya bilder", "singleColumnLayout": "Enkolumnslayout" diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index b874cdc7eb..e5e2d66b6d 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ b/invokeai/frontend/web/public/locales/uk.json @@ -51,7 +51,6 @@ "langHebrew": "Іврит", "langKorean": "Корейська", "langPortuguese": "Португальська", - "pinOptionsPanel": "Закріпити панель налаштувань", "langArabic": "Арабська", "langSimplifiedChinese": "Китайська (спрощена)", "langSpanish": "Іспанська", @@ -82,7 +81,6 @@ "maintainAspectRatio": "Зберігати пропорції", "autoSwitchNewImages": "Автоматично вибирати нові", "singleColumnLayout": "Одна колонка", - "pinGallery": "Закріпити галерею", "allImagesLoaded": "Всі зображення завантажені", "loadMore": "Завантажити більше", "noImagesInGallery": "Зображень немає" diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 749bd2b5b1..e260d6b188 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -74,8 +74,7 @@ "postprocessing": "后期处理", "loading": "加载中", "loadingInvokeAI": "Invoke AI 加载中", - "linear": "线性的", - "pinOptionsPanel": "固定选项面板" + "linear": "线性的" }, "gallery": { "generations": "生成的图像", @@ -88,7 +87,6 @@ "maintainAspectRatio": "保持比例", "autoSwitchNewImages": "自动切换到新图像", "singleColumnLayout": "单列布局", - "pinGallery": 
"保持图库常开", "allImagesLoaded": "所有图像加载完成", "loadMore": "加载更多", "noImagesInGallery": "图库中无图像", From ffcf6dfde6cb98869678da54ef3d42348462ea5d Mon Sep 17 00:00:00 2001 From: gallegonovato Date: Thu, 12 Oct 2023 12:44:46 +0000 Subject: [PATCH 027/202] translationBot(ui): update translation (Spanish) Currently translated at 100.0% (605 of 605 strings) Co-authored-by: gallegonovato Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/es/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/es.json | 25 +++++++++++++++----- 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 8ecce18263..8c79353b85 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -6,7 +6,7 @@ "settingsLabel": "Ajustes", "img2img": "Imagen a Imagen", "unifiedCanvas": "Lienzo Unificado", - "nodes": "Editor de nodos", + "nodes": "Editor del flujo de trabajo", "langSpanish": "Español", "nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.", "postProcessing": "Post-procesamiento", @@ -307,7 +307,12 @@ "acceptStagingImage": { "title": "Aceptar imagen", "desc": "Aceptar la imagen actual en el área de preparación" - } + }, + "addNodes": { + "title": "Añadir Nodos", + "desc": "Abre el menú para añadir nodos" + }, + "nodesHotkeys": "Teclas de acceso rápido a los nodos" }, "modelManager": { "modelManager": "Gestor de Modelos", @@ -528,7 +533,9 @@ "maskBlur": "Máscara de Desenfoque", "maskBlurMethod": "Método del desenfoque de la máscara", "seamHighThreshold": "Alto", - "seamPaintingHeader": "Pintando las uniones" + "seamPaintingHeader": "Pintando las uniones", + "seamThreshold": "Umbral de la junta", + "seamLowThreshold": "Bajo" }, "settings": { "models": "Modelos", @@ -540,7 +547,7 @@ "resetWebUI": "Restablecer interfaz web", "resetWebUIDesc1": "Al restablecer la interfaz web, solo se restablece la caché local del navegador de sus imágenes y la configuración guardada. No se elimina ninguna imagen de su disco duro.", "resetWebUIDesc2": "Si las imágenes no se muestran en la galería o algo más no funciona, intente restablecer antes de reportar un incidente en GitHub.", - "resetComplete": "La interfaz web se ha restablecido. 
Actualice la página para recargarla.", + "resetComplete": "Se ha restablecido la interfaz web.", "useSlidersForAll": "Utilice controles deslizantes para todas las opciones", "general": "General", "consoleLogLevel": "Nivel del registro", @@ -702,7 +709,7 @@ "toggleAutoscroll": "Activar el autodesplazamiento", "toggleLogViewer": "Alternar el visor de registros", "showGallery": "Mostrar galería", - "showOptionsPanel": "Mostrar el panel de opciones", + "showOptionsPanel": "Mostrar el panel lateral", "menu": "Menú" }, "ui": { @@ -719,6 +726,12 @@ "hideGraphNodes": "Ocultar la superposición de los gráficos", "hideLegendNodes": "Ocultar la leyenda del tipo de campo", "showLegendNodes": "Mostrar la leyenda del tipo de campo", - "showMinimapnodes": "Mostrar el minimapa" + "showMinimapnodes": "Mostrar el minimapa", + "reloadNodeTemplates": "Recargar las plantillas de nodos", + "saveWorkflow": "Guardar el flujo de trabajo", + "loadWorkflow": "Cargar el flujo de trabajo", + "resetWorkflow": "Reiniciar e flujo de trabajo", + "resetWorkflowDesc": "¿Está seguro de que deseas restablecer este flujo de trabajo?", + "resetWorkflowDesc2": "Al reiniciar el flujo de trabajo se borrarán todos los nodos, aristas y detalles del flujo de trabajo." } } From 44932923eb78fcca5816f21eb8484ce6465ba973 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:44:50 +0000 Subject: [PATCH 028/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ar.json | 4 ---- invokeai/frontend/web/public/locales/de.json | 4 ---- invokeai/frontend/web/public/locales/es.json | 6 ------ invokeai/frontend/web/public/locales/fr.json | 4 ---- invokeai/frontend/web/public/locales/he.json | 4 ---- invokeai/frontend/web/public/locales/it.json | 6 ------ invokeai/frontend/web/public/locales/nl.json | 4 ---- invokeai/frontend/web/public/locales/pl.json | 4 ---- invokeai/frontend/web/public/locales/pt.json | 4 ---- invokeai/frontend/web/public/locales/pt_BR.json | 4 ---- invokeai/frontend/web/public/locales/ru.json | 6 ------ invokeai/frontend/web/public/locales/uk.json | 4 ---- invokeai/frontend/web/public/locales/zh_CN.json | 4 ---- 13 files changed, 58 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json index fd0f524b73..8ff402dee3 100644 --- a/invokeai/frontend/web/public/locales/ar.json +++ b/invokeai/frontend/web/public/locales/ar.json @@ -358,10 +358,6 @@ "hiresOptim": "تحسين الدقة العالية", "imageFit": "ملائمة الصورة الأولية لحجم الخرج", "codeformerFidelity": "الوثوقية", - "seamSize": "حجم التشقق", - "seamBlur": "ضباب التشقق", - "seamStrength": "قوة التشقق", - "seamSteps": "خطوات التشقق", "scaleBeforeProcessing": "تحجيم قبل المعالجة", "scaledWidth": "العرض المحجوب", "scaledHeight": "الارتفاع المحجوب", diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 3601fc0053..b712fa30bc 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -360,10 +360,6 @@ "hiresOptim": "High-Res-Optimierung", "imageFit": "Ausgangsbild an Ausgabegröße anpassen", "codeformerFidelity": "Glaubwürdigkeit", - "seamSize": "Nahtgröße", - "seamBlur": "Nahtunschärfe", - "seamStrength": "Stärke der Naht", - "seamSteps": "Nahtstufen", "scaleBeforeProcessing": 
"Skalieren vor der Verarbeitung", "scaledWidth": "Skaliert W", "scaledHeight": "Skaliert H", diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 8c79353b85..6500244bba 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -473,10 +473,6 @@ "hiresOptim": "Optimización de Alta Resolución", "imageFit": "Ajuste tamaño de imagen inicial al tamaño objetivo", "codeformerFidelity": "Fidelidad", - "seamSize": "Tamaño del parche", - "seamBlur": "Desenfoque del parche", - "seamStrength": "Fuerza del parche", - "seamSteps": "Pasos del parche", "scaleBeforeProcessing": "Redimensionar antes de procesar", "scaledWidth": "Ancho escalado", "scaledHeight": "Alto escalado", @@ -533,8 +529,6 @@ "maskBlur": "Máscara de Desenfoque", "maskBlurMethod": "Método del desenfoque de la máscara", "seamHighThreshold": "Alto", - "seamPaintingHeader": "Pintando las uniones", - "seamThreshold": "Umbral de la junta", "seamLowThreshold": "Bajo" }, "settings": { diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index 9c5d9e634e..ca2227dbe6 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -371,10 +371,6 @@ "hiresOptim": "Optimisation Haute Résolution", "imageFit": "Ajuster Image Initiale à la Taille de Sortie", "codeformerFidelity": "Fidélité", - "seamSize": "Taille des Joints", - "seamBlur": "Flou des Joints", - "seamStrength": "Force des Joints", - "seamSteps": "Etapes des Joints", "scaleBeforeProcessing": "Echelle Avant Traitement", "scaledWidth": "Larg. Échelle", "scaledHeight": "Haut. Échelle", diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json index f8065e16dd..49e42062e8 100644 --- a/invokeai/frontend/web/public/locales/he.json +++ b/invokeai/frontend/web/public/locales/he.json @@ -410,10 +410,6 @@ "hiresOptim": "אופטימיזצית רזולוציה גבוהה", "hiresStrength": "חוזק רזולוציה גבוהה", "codeformerFidelity": "דבקות", - "seamSize": "גודל תפר", - "seamBlur": "טשטוש תפר", - "seamStrength": "חוזק תפר", - "seamSteps": "שלבי תפר", "scaleBeforeProcessing": "שנה קנה מידה לפני עיבוד", "scaledWidth": "קנה מידה לאחר שינוי W", "scaledHeight": "קנה מידה לאחר שינוי H", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 881d27195f..9112643647 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -472,10 +472,6 @@ "hiresOptim": "Ottimizzazione alta risoluzione", "imageFit": "Adatta l'immagine iniziale alle dimensioni di output", "codeformerFidelity": "Fedeltà", - "seamSize": "Dimensione della cucitura", - "seamBlur": "Sfocatura cucitura", - "seamStrength": "Forza della cucitura", - "seamSteps": "Passaggi di cucitura", "scaleBeforeProcessing": "Scala prima dell'elaborazione", "scaledWidth": "Larghezza ridimensionata", "scaledHeight": "Altezza ridimensionata", @@ -531,8 +527,6 @@ "maskAdjustmentsHeader": "Regolazioni della maschera", "maskBlur": "Sfocatura maschera", "maskBlurMethod": "Metodo di sfocatura della maschera", - "seamPaintingHeader": "Pittura della cucitura", - "seamThreshold": "Soglia di cucitura", "seamLowThreshold": "Basso", "seamHighThreshold": "Alto" }, diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 4d284e4d5d..a78a073319 100644 --- 
a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -465,10 +465,6 @@ "hiresOptim": "Hogeresolutie-optimalisatie", "imageFit": "Pas initiële afbeelding in uitvoergrootte", "codeformerFidelity": "Getrouwheid", - "seamSize": "Grootte naad", - "seamBlur": "Vervaging naad", - "seamStrength": "Sterkte naad", - "seamSteps": "Stappen naad", "scaleBeforeProcessing": "Schalen voor verwerking", "scaledWidth": "Geschaalde B", "scaledHeight": "Geschaalde H", diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index 2958ef982c..f7bd0f1d60 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -294,10 +294,6 @@ "hiresOptim": "Optymalizacja wys. rozdzielczości", "imageFit": "Przeskaluj oryginalny obraz", "codeformerFidelity": "Dokładność", - "seamSize": "Rozmiar", - "seamBlur": "Rozmycie", - "seamStrength": "Siła", - "seamSteps": "Kroki", "scaleBeforeProcessing": "Tryb skalowania", "scaledWidth": "Sk. do szer.", "scaledHeight": "Sk. do wys.", diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index a605f9c49d..f4bfed272c 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -417,10 +417,6 @@ "hiresOptim": "Otimização de Alta Res", "imageFit": "Caber Imagem Inicial No Tamanho de Saída", "codeformerFidelity": "Fidelidade", - "seamSize": "Tamanho da Fronteira", - "seamBlur": "Desfoque da Fronteira", - "seamStrength": "Força da Fronteira", - "seamSteps": "Passos da Fronteira", "tileSize": "Tamanho do Ladrilho", "boundingBoxHeader": "Caixa Delimitadora", "seamCorrectionHeader": "Correção de Fronteira", diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index d11b0758e6..d263bf6251 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -417,10 +417,6 @@ "hiresOptim": "Otimização de Alta Res", "imageFit": "Caber Imagem Inicial No Tamanho de Saída", "codeformerFidelity": "Fidelidade", - "seamSize": "Tamanho da Fronteira", - "seamBlur": "Desfoque da Fronteira", - "seamStrength": "Força da Fronteira", - "seamSteps": "Passos da Fronteira", "scaleBeforeProcessing": "Escala Antes do Processamento", "scaledWidth": "L Escalada", "scaledHeight": "A Escalada", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 17494bdd57..fe2805e5fe 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -473,10 +473,6 @@ "hiresOptim": "Оптимизация High Res", "imageFit": "Уместить изображение", "codeformerFidelity": "Точность", - "seamSize": "Размер шва", - "seamBlur": "Размытие шва", - "seamStrength": "Сила шва", - "seamSteps": "Шаги шва", "scaleBeforeProcessing": "Масштабировать", "scaledWidth": "Масштаб Ш", "scaledHeight": "Масштаб В", @@ -532,8 +528,6 @@ "maskAdjustmentsHeader": "Настройка маски", "maskBlur": "Размытие маски", "maskBlurMethod": "Метод размытия маски", - "seamPaintingHeader": "Окрашивание швов", - "seamThreshold": "Порог шва", "seamLowThreshold": "Низкий", "seamHighThreshold": "Высокий" }, diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index e5e2d66b6d..5c02693a76 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ 
b/invokeai/frontend/web/public/locales/uk.json @@ -431,10 +431,6 @@ "hiresOptim": "Оптимізація High Res", "imageFit": "Вмістити зображення", "codeformerFidelity": "Точність", - "seamSize": "Размір шву", - "seamBlur": "Розмиття шву", - "seamStrength": "Сила шву", - "seamSteps": "Кроки шву", "scaleBeforeProcessing": "Масштабувати", "scaledWidth": "Масштаб Ш", "scaledHeight": "Масштаб В", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index e260d6b188..e4ea8f3cf7 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -442,10 +442,6 @@ "hiresOptim": "高清优化", "imageFit": "使生成图像长宽适配原图像", "codeformerFidelity": "保真", - "seamSize": "接缝尺寸", - "seamBlur": "接缝模糊", - "seamStrength": "接缝强度", - "seamSteps": "接缝步数", "scaleBeforeProcessing": "处理前缩放", "scaledWidth": "缩放宽度", "scaledHeight": "缩放长度", From 962e51320b99c2011d0a454243351095118e224e Mon Sep 17 00:00:00 2001 From: nemuruibai Date: Thu, 12 Oct 2023 12:44:52 +0000 Subject: [PATCH 029/202] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 96.1% (579 of 602 strings) Co-authored-by: nemuruibai Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- .../frontend/web/public/locales/zh_CN.json | 78 +++++++++++++++++-- 1 file changed, 71 insertions(+), 7 deletions(-) diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index e4ea8f3cf7..bd3949fde2 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -74,7 +74,10 @@ "postprocessing": "后期处理", "loading": "加载中", "loadingInvokeAI": "Invoke AI 加载中", - "linear": "线性的" + "linear": "线性的", + "batch": "批次管理器", + "communityLabel": "社区", + "modelManager": "模型管理器" }, "gallery": { "generations": "生成的图像", @@ -94,7 +97,8 @@ "deleteImageBin": "被删除的图片会发送到你操作系统的回收站。", "deleteImagePermanent": "无法恢复删除的图片。", "images": "图片", - "assets": "素材" + "assets": "素材", + "autoAssignBoardOnClick": "点击后自动分配情景板" }, "hotkeys": { "keyboardShortcuts": "快捷方式", @@ -301,6 +305,11 @@ "acceptStagingImage": { "title": "接受暂存图像", "desc": "接受当前暂存区中的图像" + }, + "nodesHotkeys": "节点快捷键", + "addNodes": { + "title": "添加节点", + "desc": "打开添加节点菜单" } }, "modelManager": { @@ -413,7 +422,22 @@ "weightedSum": "加权求和", "modelMergeAlphaHelp": "透明度参数控制模型的混合强度。较低的透明度值会导致第二个模型的影响减弱。", "sigmoid": "Sigmoid函数", - "modelMergeInterpAddDifferenceHelp": "在这种模式下,首先从模型2中减去模型3,得到的版本再用上述值的透明度与模型1进行混合。" + "modelMergeInterpAddDifferenceHelp": "在这种模式下,首先从模型2中减去模型3,得到的版本再用上述值的透明度与模型1进行混合。", + "modelsSynced": "模型已同步", + "modelSyncFailed": "模型同步失败", + "modelDeleteFailed": "模型删除失败", + "syncModelsDesc": "如果您的模型与后端不同步, 您可以使用此选项刷新它们. 便于您在应用程序启动的情况下手动更新models.yaml文件或将模型添加到InvokeAI根文件夹.", + "selectModel": "选择模型", + "importModels": "导入模型", + "settings": "设置", + "syncModels": "同步模型", + "noCustomLocationProvided": "未提供自定义路径", + "modelDeleted": "模型已删除", + "modelUpdateFailed": "模型更新失败", + "modelConversionFailed": "模型转换失败", + "modelsMergeFailed": "模型融合失败", + "baseModel": "基底模型", + "convertingModelBegin": "模型转换中. 请稍候." 
}, "parameters": { "images": "图像", @@ -490,7 +514,14 @@ "hidePreview": "影藏预览", "hSymmetryStep": "横向对称步数", "imageToImage": "图生图", - "noiseSettings": "噪音" + "noiseSettings": "噪音", + "controlNetControlMode": "控制模式", + "maskAdjustmentsHeader": "调整遮罩", + "maskBlur": "模糊遮罩", + "maskBlurMethod": "遮罩模糊方式", + "aspectRatio": "比率", + "seamLowThreshold": "降低", + "seamHighThreshold": "提升" }, "settings": { "models": "模型", @@ -511,7 +542,12 @@ "general": "通用", "consoleLogLevel": "日志等级", "shouldLogToConsole": "终端日志", - "developer": "开发者" + "developer": "开发者", + "alternateCanvasLayout": "切换统一画布布局", + "enableNodesEditor": "启用节点编辑器", + "favoriteSchedulersPlaceholder": "没有偏好的采样算法", + "showAdvancedOptions": "显示进阶选项", + "favoriteSchedulers": "采样算法偏好" }, "toast": { "tempFoldersEmptied": "临时文件夹已清空", @@ -550,7 +586,17 @@ "parameterSet": "参数已设置", "parameterNotSet": "参数未设置", "serverError": "服务器错误", - "canceled": "处理取消" + "canceled": "处理取消", + "nodesLoaded": "节点图已加载", + "nodesSaved": "节点图已保存", + "problemCopyingImage": "无法复制图像", + "nodesCorruptedGraph": "无法加载. 节点图似乎已损坏.", + "nodesBrokenConnections": "无法加载. 部分链接已断开.", + "nodesUnrecognizedTypes": "无法加载. 节点图有无法识别的节点类型", + "nodesNotValidJSON": "无效的 JSON", + "nodesNotValidGraph": "无效的 InvokeAi 节点图", + "nodesCleared": "节点图已清空", + "nodesLoadedFailed": "节点图加载失败" }, "unifiedCanvas": { "layer": "图层", @@ -637,7 +683,8 @@ }, "ui": { "showProgressImages": "显示处理中的图片", - "hideProgressImages": "隐藏处理中的图片" + "hideProgressImages": "隐藏处理中的图片", + "swapSizes": "XY尺寸互换" }, "tooltip": { "feature": { @@ -653,5 +700,22 @@ "seamCorrection": "控制在画布上生成的图像之间出现的可见接缝的处理方式。", "infillAndScaling": "管理填充方法(用于画布的掩模或擦除区域)和缩放(对于较小的边界框大小非常有用)。" } + }, + "nodes": { + "zoomInNodes": "放大", + "resetWorkflowDesc": "是否确定要清空节点图?", + "resetWorkflow": "清空节点图", + "loadWorkflow": "读取节点图", + "zoomOutNodes": "缩小", + "resetWorkflowDesc2": "重置节点图将清除所有节点、边际和节点图详情", + "saveWorkflow": "保存节点图", + "reloadNodeTemplates": "重载节点模板", + "hideGraphNodes": "隐藏节点图信息", + "fitViewportNodes": "自适应视图", + "showMinimapnodes": "显示缩略图", + "hideMinimapnodes": "隐藏缩略图", + "showLegendNodes": "显示字段类型图例", + "hideLegendNodes": "隐藏字段类型图例", + "showGraphNodes": "显示节点图信息" } } From 9fb624f3904779e5cb44d2a078b1deceeefd1627 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Thu, 12 Oct 2023 12:44:53 +0000 Subject: [PATCH 030/202] translationBot(ui): update translation (Italian) Currently translated at 81.2% (958 of 1179 strings) translationBot(ui): update translation (Italian) Currently translated at 81.2% (958 of 1179 strings) translationBot(ui): update translation (Italian) Currently translated at 76.6% (904 of 1179 strings) translationBot(ui): update translation (Italian) Currently translated at 76.5% (903 of 1179 strings) translationBot(ui): update translation (Italian) Currently translated at 71.9% (848 of 1179 strings) translationBot(ui): update translation (Italian) Currently translated at 71.7% (845 of 1177 strings) translationBot(ui): update translation (Italian) Currently translated at 71.7% (845 of 1177 strings) translationBot(ui): update translation (Italian) Currently translated at 67.8% (799 of 1177 strings) translationBot(ui): update translation (Italian) Currently translated at 58.5% (689 of 1177 strings) translationBot(ui): update translation (Italian) Currently translated at 59.8% (640 of 1069 strings) translationBot(ui): update translation (Italian) Currently translated at 57.2% (612 of 1069 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (607 of 607 strings) translationBot(ui): update 
translation (Italian) Currently translated at 100.0% (605 of 605 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (605 of 605 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (602 of 602 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 590 ++++++++++++++++++- 1 file changed, 558 insertions(+), 32 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 9112643647..200f3c902b 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -6,7 +6,7 @@ "settingsLabel": "Impostazioni", "img2img": "Immagine a Immagine", "unifiedCanvas": "Tela unificata", - "nodes": "Editor dei Nodi", + "nodes": "Editor del flusso di lavoro", "langItalian": "Italiano", "nodesDesc": "Attualmente è in fase di sviluppo un sistema basato su nodi per la generazione di immagini. Resta sintonizzato per gli aggiornamenti su questa fantastica funzionalità.", "postProcessing": "Post-elaborazione", @@ -78,8 +78,15 @@ "darkMode": "Modalità scura", "lightMode": "Modalità chiara", "batch": "Gestione Lotto", - "modelManager": "Gestione del modello", - "communityLabel": "Comunità" + "modelManager": "Gestore modello", + "communityLabel": "Comunità", + "nodeEditor": "Editor dei nodi", + "controlNet": "ControlNet", + "statusProcessing": "Elaborazione in corso", + "advanced": "Avanzate", + "imageFailedToLoad": "Impossibile caricare l'immagine", + "learnMore": "Per saperne di più", + "ipAdapter": "Adattatore IP" }, "gallery": { "generations": "Generazioni", @@ -93,14 +100,21 @@ "autoSwitchNewImages": "Passaggio automatico a nuove immagini", "singleColumnLayout": "Layout a colonna singola", "allImagesLoaded": "Tutte le immagini caricate", - "loadMore": "Carica di più", + "loadMore": "Carica altro", "noImagesInGallery": "Nessuna immagine da visualizzare", "deleteImage": "Elimina l'immagine", "deleteImagePermanent": "Le immagini eliminate non possono essere ripristinate.", "deleteImageBin": "Le immagini eliminate verranno spostate nel Cestino del tuo sistema operativo.", "images": "Immagini", "assets": "Risorse", - "autoAssignBoardOnClick": "Assegna automaticamente la bacheca al clic" + "autoAssignBoardOnClick": "Assegna automaticamente la bacheca al clic", + "featuresWillReset": "Se elimini questa immagine, quelle funzionalità verranno immediatamente ripristinate.", + "loading": "Caricamento in corso", + "unableToLoad": "Impossibile caricare la Galleria", + "currentlyInUse": "Questa immagine è attualmente utilizzata nelle seguenti funzionalità:", + "copy": "Copia", + "download": "Scarica", + "setCurrentImage": "Imposta come immagine corrente" }, "hotkeys": { "keyboardShortcuts": "Tasti rapidi", @@ -310,7 +324,8 @@ }, "nodesHotkeys": "Tasti di scelta rapida dei Nodi", "addNodes": { - "title": "Aggiungi Nodi" + "title": "Aggiungi Nodi", + "desc": "Apre il menu Aggiungi Nodi" } }, "modelManager": { @@ -322,7 +337,7 @@ "safetensorModels": "SafeTensor", "modelAdded": "Modello Aggiunto", "modelUpdated": "Modello Aggiornato", - "modelEntryDeleted": "Modello Rimosso", + "modelEntryDeleted": "Voce del modello eliminata", "cannotUseSpaces": "Impossibile utilizzare gli spazi", "addNew": "Aggiungi nuovo", "addNewModel": "Aggiungi nuovo Modello", @@ -337,7 +352,7 @@ "config": "Configurazione", 
"configValidationMsg": "Percorso del file di configurazione del modello.", "modelLocation": "Posizione del modello", - "modelLocationValidationMsg": "Percorso dove si trova il modello.", + "modelLocationValidationMsg": "Fornisci il percorso di una cartella locale in cui è archiviato il tuo modello di diffusori", "repo_id": "Repo ID", "repoIDValidationMsg": "Repository online del modello", "vaeLocation": "Posizione file VAE", @@ -392,12 +407,12 @@ "modelConverted": "Modello convertito", "sameFolder": "Stessa cartella", "invokeRoot": "Cartella InvokeAI", - "merge": "Fondere", - "modelsMerged": "Modelli fusi", - "mergeModels": "Fondi Modelli", + "merge": "Unisci", + "modelsMerged": "Modelli uniti", + "mergeModels": "Unisci Modelli", "modelOne": "Modello 1", "modelTwo": "Modello 2", - "mergedModelName": "Nome del modello fuso", + "mergedModelName": "Nome del modello unito", "alpha": "Alpha", "interpolationType": "Tipo di interpolazione", "mergedModelCustomSaveLocation": "Percorso personalizzato", @@ -420,7 +435,7 @@ "inverseSigmoid": "Sigmoide inverso", "v2_base": "v2 (512px)", "v2_768": "v2 (768px)", - "none": "niente", + "none": "nessuno", "addDifference": "Aggiungi differenza", "pickModelType": "Scegli il tipo di modello", "scanForModels": "Cerca modelli", @@ -437,13 +452,22 @@ "convertingModelBegin": "Conversione del modello. Attendere prego.", "importModels": "Importa modelli", "modelsSynced": "Modelli sincronizzati", - "modelSyncFailed": "Sincronizzazione del modello non riuscita", + "modelSyncFailed": "Sincronizzazione modello non riuscita", "settings": "Impostazioni", "syncModels": "Sincronizza Modelli", "syncModelsDesc": "Se i tuoi modelli non sono sincronizzati con il back-end, puoi aggiornarli utilizzando questa opzione. Questo è generalmente utile nei casi in cui aggiorni manualmente il tuo file models.yaml o aggiungi modelli alla cartella principale di InvokeAI dopo l'avvio dell'applicazione.", "loraModels": "LoRA", "oliveModels": "Olive", - "onnxModels": "ONNX" + "onnxModels": "ONNX", + "noModels": "Nessun modello trovato", + "predictionType": "Tipo di previsione (per modelli Stable Diffusion 2.x ed alcuni modelli Stable Diffusion 1.x)", + "quickAdd": "Aggiunta rapida", + "simpleModelDesc": "Fornire un percorso a un modello diffusori locale, un modello checkpoint/safetensor locale, un ID repository HuggingFace o un URL del modello checkpoint/diffusori.", + "advanced": "Avanzate", + "useCustomConfig": "Utilizza configurazione personalizzata", + "closeAdvanced": "Chiudi Avanzate", + "modelType": "Tipo di modello", + "customConfigFileLocation": "Posizione del file di configurazione personalizzato" }, "parameters": { "images": "Immagini", @@ -464,7 +488,7 @@ "type": "Tipo", "strength": "Forza", "upscaling": "Ampliamento", - "upscale": "Amplia", + "upscale": "Amplia (Shift + U)", "upscaleImage": "Amplia Immagine", "scale": "Scala", "otherOptions": "Altre opzioni", @@ -496,9 +520,9 @@ "useInitImg": "Usa l'immagine iniziale", "info": "Informazioni", "initialImage": "Immagine iniziale", - "showOptionsPanel": "Mostra pannello opzioni", + "showOptionsPanel": "Mostra il pannello laterale (O o T)", "general": "Generale", - "denoisingStrength": "Forza riduzione rumore", + "denoisingStrength": "Forza di riduzione del rumore", "copyImage": "Copia immagine", "hiresStrength": "Forza Alta Risoluzione", "imageToImage": "Immagine a Immagine", @@ -506,7 +530,8 @@ "schedule": "Annulla dopo l'iterazione corrente", "isScheduled": "Annullamento", "setType": "Imposta il tipo di annullamento", - 
"immediate": "Annulla immediatamente" + "immediate": "Annulla immediatamente", + "cancel": "Annulla" }, "hSymmetryStep": "Passi Simmetria Orizzontale", "vSymmetryStep": "Passi Simmetria Verticale", @@ -522,13 +547,50 @@ "positivePromptPlaceholder": "Prompt Positivo", "negativePromptPlaceholder": "Prompt Negativo", "controlNetControlMode": "Modalità di controllo", - "clipSkip": "Salta CLIP", + "clipSkip": "CLIP Skip", "aspectRatio": "Proporzioni", "maskAdjustmentsHeader": "Regolazioni della maschera", - "maskBlur": "Sfocatura maschera", - "maskBlurMethod": "Metodo di sfocatura della maschera", + "maskBlur": "Sfocatura", + "maskBlurMethod": "Metodo di sfocatura", "seamLowThreshold": "Basso", - "seamHighThreshold": "Alto" + "seamHighThreshold": "Alto", + "coherencePassHeader": "Passaggio di coerenza", + "coherenceSteps": "Passi", + "coherenceStrength": "Forza", + "compositingSettingsHeader": "Impostazioni di composizione", + "patchmatchDownScaleSize": "Ridimensiona", + "coherenceMode": "Modalità", + "invoke": { + "noNodesInGraph": "Nessun nodo nel grafico", + "noModelSelected": "Nessun modello selezionato", + "noPrompts": "Nessun prompt generato", + "noInitialImageSelected": "Nessuna immagine iniziale selezionata", + "readyToInvoke": "Pronto per invocare", + "addingImagesTo": "Aggiungi immagini a", + "systemBusy": "Sistema occupato", + "unableToInvoke": "Impossibile invocare", + "systemDisconnected": "Sistema disconnesso", + "noControlImageForControlNet": "ControlNet {{index}} non ha un'immagine di controllo", + "noModelForControlNet": "ControlNet {{index}} non ha alcun modello selezionato." + }, + "enableNoiseSettings": "Abilita le impostazioni del rumore", + "cpuNoise": "Rumore CPU", + "gpuNoise": "Rumore GPU", + "useCpuNoise": "Usa la CPU per generare rumore", + "manualSeed": "Seme manuale", + "randomSeed": "Seme casuale", + "iterations": "Iterazioni", + "iterationsWithCount_one": "{{count}} Iterazione", + "iterationsWithCount_many": "{{count}} Iterazioni", + "iterationsWithCount_other": "", + "seamlessX&Y": "Senza cuciture X & Y", + "isAllowedToUpscale": { + "useX2Model": "L'immagine è troppo grande per l'ampliamento con il modello x4, utilizza il modello x2", + "tooLarge": "L'immagine è troppo grande per l'ampliamento, seleziona un'immagine più piccola" + }, + "seamlessX": "Senza cuciture X", + "seamlessY": "Senza cuciture Y", + "imageActions": "Azioni Immagine" }, "settings": { "models": "Modelli", @@ -540,7 +602,7 @@ "resetWebUI": "Reimposta l'interfaccia utente Web", "resetWebUIDesc1": "Il ripristino dell'interfaccia utente Web reimposta solo la cache locale del browser delle immagini e le impostazioni memorizzate. Non cancella alcuna immagine dal disco.", "resetWebUIDesc2": "Se le immagini non vengono visualizzate nella galleria o qualcos'altro non funziona, prova a reimpostare prima di segnalare un problema su GitHub.", - "resetComplete": "L'interfaccia utente Web è stata reimpostata. 
Aggiorna la pagina per ricaricarla.", + "resetComplete": "L'interfaccia utente Web è stata reimpostata.", "useSlidersForAll": "Usa i cursori per tutte le opzioni", "general": "Generale", "consoleLogLevel": "Livello del registro", @@ -556,7 +618,8 @@ "alternateCanvasLayout": "Layout alternativo della tela", "beta": "Beta", "enableNodesEditor": "Abilita l'editor dei nodi", - "experimental": "Sperimentale" + "experimental": "Sperimentale", + "autoChangeDimensions": "Aggiorna L/A alle impostazioni predefinite del modello in caso di modifica" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", @@ -569,7 +632,7 @@ "imageNotLoadedDesc": "Impossibile trovare l'immagine", "imageSavedToGallery": "Immagine salvata nella Galleria", "canvasMerged": "Tela unita", - "sentToImageToImage": "Inviato a da Immagine a Immagine", + "sentToImageToImage": "Inviato a Immagine a Immagine", "sentToUnifiedCanvas": "Inviato a Tela Unificata", "parametersSet": "Parametri impostati", "parametersNotSet": "Parametri non impostati", @@ -605,7 +668,42 @@ "nodesCorruptedGraph": "Impossibile caricare. Il grafico sembra essere danneggiato.", "nodesUnrecognizedTypes": "Impossibile caricare. Il grafico ha tipi di dati non riconosciuti", "nodesNotValidJSON": "JSON non valido", - "nodesBrokenConnections": "Impossibile caricare. Alcune connessioni sono interrotte." + "nodesBrokenConnections": "Impossibile caricare. Alcune connessioni sono interrotte.", + "baseModelChangedCleared": "Modello base modificato, cancellato", + "imageSavingFailed": "Salvataggio dell'immagine non riuscito", + "canvasSentControlnetAssets": "Tela inviata a ControlNet & Risorse", + "problemCopyingCanvasDesc": "Impossibile copiare la tela", + "loadedWithWarnings": "Flusso di lavoro caricato con avvisi", + "canvasCopiedClipboard": "Tela copiata negli appunti", + "maskSavedAssets": "Maschera salvata nelle risorse", + "modelAddFailed": "Aggiunta del modello non riuscita", + "problemDownloadingCanvas": "Problema durante il download della tela", + "problemMergingCanvas": "Problema nell'unione delle tele", + "imageUploaded": "Immagine caricata", + "addedToBoard": "Aggiunto alla bacheca", + "modelAddedSimple": "Modello aggiunto", + "problemImportingMaskDesc": "Impossibile importare la maschera", + "problemCopyingCanvas": "Problema durante la copia della tela", + "incompatibleSubmodel": "sottomodello incompatibile", + "problemSavingCanvas": "Problema nel salvataggio della tela", + "canvasDownloaded": "Tela scaricata", + "problemMergingCanvasDesc": "Impossibile unire le tele", + "problemDownloadingCanvasDesc": "Impossibile scaricare la tela", + "imageSaved": "Immagine salvata", + "maskSentControlnetAssets": "Maschera inviata a ControlNet & Risorse", + "canvasSavedGallery": "Tela salvata nella Galleria", + "imageUploadFailed": "Caricamento immagine non riuscito", + "modelAdded": "Modello aggiunto: {{modelName}}", + "problemImportingMask": "Problema durante l'importazione della maschera", + "setInitialImage": "Imposta come immagine iniziale", + "setControlImage": "Imposta come immagine di controllo", + "setNodeField": "Imposta come campo nodo", + "problemSavingMask": "Problema nel salvataggio della maschera", + "problemSavingCanvasDesc": "Impossibile salvare la tela", + "setCanvasInitialImage": "Imposta come immagine iniziale della tela", + "workflowLoaded": "Flusso di lavoro caricato", + "setIPAdapterImage": "Imposta come immagine per l'Adattatore IP", + "problemSavingMaskDesc": "Impossibile salvare la maschera" }, "tooltip": { "feature": { @@ -680,7 
+778,9 @@ "betaDarkenOutside": "Oscura all'esterno", "betaLimitToBox": "Limita al rettangolo", "betaPreserveMasked": "Conserva quanto mascherato", - "antialiasing": "Anti aliasing" + "antialiasing": "Anti aliasing", + "showResultsOn": "Mostra i risultati (attivato)", + "showResultsOff": "Mostra i risultati (disattivato)" }, "accessibility": { "modelSelect": "Seleziona modello", @@ -699,16 +799,19 @@ "flipHorizontally": "Capovolgi orizzontalmente", "toggleLogViewer": "Attiva/disattiva visualizzatore registro", "showGallery": "Mostra la galleria immagini", - "showOptionsPanel": "Mostra il pannello opzioni", + "showOptionsPanel": "Mostra il pannello laterale", "flipVertically": "Capovolgi verticalmente", "toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico", "modifyConfig": "Modifica configurazione", - "menu": "Menu" + "menu": "Menu", + "showGalleryPanel": "Mostra il pannello Galleria", + "loadMore": "Carica altro" }, "ui": { "hideProgressImages": "Nascondi avanzamento immagini", "showProgressImages": "Mostra avanzamento immagini", - "swapSizes": "Scambia dimensioni" + "swapSizes": "Scambia dimensioni", + "lockRatio": "Blocca le proporzioni" }, "nodes": { "zoomOutNodes": "Rimpicciolire", @@ -719,6 +822,429 @@ "showMinimapnodes": "Mostra minimappa", "zoomInNodes": "Ingrandire", "fitViewportNodes": "Adatta vista", - "showGraphNodes": "Mostra sovrapposizione grafico" + "showGraphNodes": "Mostra sovrapposizione grafico", + "resetWorkflowDesc2": "Reimpostare il flusso di lavoro cancellerà tutti i nodi, i bordi e i dettagli del flusso di lavoro.", + "reloadNodeTemplates": "Ricarica i modelli di nodo", + "saveWorkflow": "Salva il flusso di lavoro", + "loadWorkflow": "Carica il flusso di lavoro", + "resetWorkflow": "Reimposta flusso di lavoro", + "resetWorkflowDesc": "Sei sicuro di voler reimpostare questo flusso di lavoro?", + "downloadWorkflow": "Scarica il flusso di lavoro JSON", + "scheduler": "Campionatore", + "addNode": "Aggiungi nodo", + "sDXLMainModelFieldDescription": "Campo del modello SDXL.", + "boardField": "Bacheca", + "animatedEdgesHelp": "Anima i bordi selezionati e i bordi collegati ai nodi selezionati", + "sDXLMainModelField": "Modello SDXL", + "executionStateInProgress": "In corso", + "executionStateError": "Errore", + "executionStateCompleted": "Completato", + "boardFieldDescription": "Una bacheca della galleria", + "addNodeToolTip": "Aggiungi nodo (Shift+A, Space)", + "sDXLRefinerModelField": "Modello Refiner" + }, + "boards": { + "autoAddBoard": "Aggiungi automaticamente bacheca", + "menuItemAutoAdd": "Aggiungi automaticamente a questa Bacheca", + "cancel": "Annulla", + "addBoard": "Aggiungi Bacheca", + "bottomMessage": "L'eliminazione di questa bacheca e delle sue immagini ripristinerà tutte le funzionalità che le stanno attualmente utilizzando.", + "changeBoard": "Cambia Bacheca", + "loading": "Caricamento in corso ...", + "clearSearch": "Cancella Ricerca", + "topMessage": "Questa bacheca contiene immagini utilizzate nelle seguenti funzionalità:", + "move": "Sposta", + "myBoard": "Bacheca", + "searchBoard": "Cerca bacheche ...", + "noMatching": "Nessuna bacheca corrispondente", + "selectBoard": "Seleziona una Bacheca", + "uncategorized": "Non categorizzato" + }, + "controlnet": { + "contentShuffleDescription": "Rimescola il contenuto di un'immagine", + "contentShuffle": "Rimescola contenuto", + "beginEndStepPercent": "Percentuale passi Inizio / Fine", + "duplicate": "Duplica", + "balanced": "Bilanciato", + "depthMidasDescription": "Generazione di mappe di profondità 
usando Midas", + "control": "Controllo", + "crop": "Ritaglia", + "depthMidas": "Profondità (Midas)", + "enableControlnet": "Abilita ControlNet", + "detectResolution": "Rileva risoluzione", + "controlMode": "Modalità Controllo", + "cannyDescription": "Canny rilevamento bordi", + "depthZoe": "Profondità (Zoe)", + "autoConfigure": "Configura automaticamente il processore", + "delete": "Elimina", + "depthZoeDescription": "Generazione di mappe di profondità usando Zoe", + "resize": "Ridimensiona", + "showAdvanced": "Mostra opzioni Avanzate", + "bgth": "Soglia rimozione sfondo", + "importImageFromCanvas": "Importa immagine dalla Tela", + "lineartDescription": "Converte l'immagine in lineart", + "importMaskFromCanvas": "Importa maschera dalla Tela", + "hideAdvanced": "Nascondi opzioni avanzate", + "ipAdapterModel": "Modello Adattatore", + "resetControlImage": "Reimposta immagine di controllo", + "f": "F", + "h": "H", + "prompt": "Prompt", + "openPoseDescription": "Stima della posa umana utilizzando Openpose", + "resizeMode": "Modalità ridimensionamento", + "weight": "Peso", + "selectModel": "Seleziona un modello", + "w": "W", + "processor": "Processore", + "none": "Nessuno", + "incompatibleBaseModel": "Modello base incompatibile:", + "pidiDescription": "Elaborazione immagini PIDI", + "fill": "Riempire", + "colorMapDescription": "Genera una mappa dei colori dall'immagine", + "lineartAnimeDescription": "Elaborazione lineart in stile anime", + "imageResolution": "Risoluzione dell'immagine", + "colorMap": "Colore", + "lowThreshold": "Soglia inferiore", + "highThreshold": "Soglia superiore", + "normalBaeDescription": "Elaborazione BAE normale", + "noneDescription": "Nessuna elaborazione applicata", + "saveControlImage": "Salva immagine di controllo", + "toggleControlNet": "Attiva/disattiva questa ControlNet", + "safe": "Sicuro", + "colorMapTileSize": "Dimensione piastrella", + "ipAdapterImageFallback": "Nessuna immagine dell'Adattatore IP selezionata", + "mediapipeFaceDescription": "Rilevamento dei volti tramite Mediapipe", + "hedDescription": "Rilevamento dei bordi nidificati olisticamente", + "setControlImageDimensions": "Imposta le dimensioni dell'immagine di controllo su L/A", + "resetIPAdapterImage": "Reimposta immagine Adattatore IP", + "handAndFace": "Mano e faccia", + "enableIPAdapter": "Abilita Adattatore IP", + "maxFaces": "Numero massimo di volti" + }, + "queue": { + "queueFront": "Aggiungi all'inizio della coda", + "queueBack": "Aggiungi alla coda", + "queueCountPrediction": "Aggiungi {{predicted}} alla coda", + "queue": "Coda", + "status": "Stato", + "pruneSucceeded": "Rimossi {{item_count}} elementi completati dalla coda", + "cancelTooltip": "Annulla l'elemento corrente", + "queueEmpty": "Coda vuota", + "pauseSucceeded": "Elaborazione sospesa", + "in_progress": "In corso", + "notReady": "Impossibile mettere in coda", + "batchFailedToQueue": "Impossibile mettere in coda il lotto", + "completed": "Completati", + "batchValues": "Valori del lotto", + "cancelFailed": "Problema durante l'annullamento dell'elemento", + "batchQueued": "Lotto aggiunto alla coda", + "pauseFailed": "Problema durante la sospensione dell'elaborazione", + "clearFailed": "Problema nella cancellazione la coda", + "queuedCount": "{{pending}} In attesa", + "front": "inizio", + "clearSucceeded": "Coda cancellata", + "pause": "Sospendi", + "pruneTooltip": "Rimuovi {{item_count}} elementi completati", + "cancelSucceeded": "Elemento annullato", + "batchQueuedDesc": "Aggiunte {{item_count}} sessioni a {{direction}} della 
coda", + "graphQueued": "Grafico in coda", + "batch": "Lotto", + "clearQueueAlertDialog": "Lo svuotamento della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda.", + "pending": "In attesa", + "completedIn": "Completato in", + "resumeFailed": "Problema nel riavvio dell'elaborazione", + "clear": "Cancella", + "prune": "Rimuovi", + "total": "Totale", + "canceled": "Annullati", + "pruneFailed": "Problema nel rimuovere la coda", + "cancelBatchSucceeded": "Lotto annullato", + "clearTooltip": "Annulla e cancella tutti gli elementi", + "current": "Attuale", + "pauseTooltip": "Sospende l'elaborazione", + "failed": "Falliti", + "cancelItem": "Annulla l'elemento", + "next": "Prossimo", + "cancelBatch": "Annulla lotto", + "back": "fine", + "cancel": "Annulla", + "session": "Sessione", + "queueTotal": "{{total}} Totale", + "resumeSucceeded": "Elaborazione ripresa", + "enqueueing": "Lotto in coda", + "resumeTooltip": "Riprendi l'elaborazione", + "resume": "Riprendi", + "cancelBatchFailed": "Problema durante l'annullamento del lotto", + "clearQueueAlertDialog2": "Sei sicuro di voler cancellare la coda?", + "item": "Elemento", + "graphFailedToQueue": "Impossibile mettere in coda il grafico", + "queueMaxExceeded": "È stato superato il limite massimo di {{max_queue_size}} e {{skip}} elementi verrebbero saltati" + }, + "embedding": { + "noMatchingEmbedding": "Nessun Incorporamento corrispondente", + "addEmbedding": "Aggiungi Incorporamento", + "incompatibleModel": "Modello base incompatibile:" + }, + "models": { + "noMatchingModels": "Nessun modello corrispondente", + "loading": "caricamento", + "noMatchingLoRAs": "Nessun LoRA corrispondente", + "noLoRAsAvailable": "Nessun LoRA disponibile", + "noModelsAvailable": "Nessun modello disponibile", + "selectModel": "Seleziona un modello", + "selectLoRA": "Seleziona un LoRA" + }, + "invocationCache": { + "disable": "Disabilita", + "misses": "Non trovati in cache", + "enableFailed": "Problema nell'abilitazione della cache delle invocazioni", + "invocationCache": "Cache delle invocazioni", + "clearSucceeded": "Cache delle invocazioni svuotata", + "enableSucceeded": "Cache delle invocazioni abilitata", + "clearFailed": "Problema durante lo svuotamento della cache delle invocazioni", + "hits": "Trovati in cache", + "disableSucceeded": "Cache delle invocazioni disabilitata", + "disableFailed": "Problema durante la disabilitazione della cache delle invocazioni", + "enable": "Abilita", + "clear": "Svuota", + "maxCacheSize": "Dimensione max cache", + "cacheSize": "Dimensione cache" + }, + "dynamicPrompts": { + "seedBehaviour": { + "perPromptDesc": "Utilizza un seme diverso per ogni immagine", + "perIterationLabel": "Per iterazione", + "perIterationDesc": "Utilizza un seme diverso per ogni iterazione", + "perPromptLabel": "Per immagine", + "label": "Comportamento del seme" + }, + "enableDynamicPrompts": "Abilita prompt dinamici", + "combinatorial": "Generazione combinatoria", + "maxPrompts": "Numero massimo di prompt", + "promptsWithCount_one": "{{count}} Prompt", + "promptsWithCount_many": "{{count}} Prompt", + "promptsWithCount_other": "", + "dynamicPrompts": "Prompt dinamici" + }, + "popovers": { + "paramScheduler": { + "paragraphs": [ + "Il campionatore definisce come aggiungere in modo iterativo il rumore a un'immagine o come aggiornare un campione in base all'output di un modello." 
+ ], + "heading": "Campionatore" + }, + "compositingMaskAdjustments": { + "heading": "Regolazioni della maschera", + "paragraphs": [ + "Regola la maschera." + ] + }, + "compositingCoherenceSteps": { + "heading": "Passi", + "paragraphs": [ + "Numero di passi di riduzione del rumore utilizzati nel Passaggio di Coerenza.", + "Uguale al parametro principale Passi." + ] + }, + "compositingBlur": { + "heading": "Sfocatura", + "paragraphs": [ + "Il raggio di sfocatura della maschera." + ] + }, + "compositingCoherenceMode": { + "heading": "Modalità", + "paragraphs": [ + "La modalità del Passaggio di Coerenza." + ] + }, + "clipSkip": { + "paragraphs": [ + "Scegli quanti livelli del modello CLIP saltare.", + "Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip.", + "Un valore più alto in genere produce un'immagine meno dettagliata." + ] + }, + "compositingCoherencePass": { + "heading": "Passaggio di Coerenza", + "paragraphs": [ + "Un secondo ciclo di riduzione del rumore aiuta a comporre l'immagine Inpaint/Outpaint." + ] + }, + "compositingStrength": { + "heading": "Forza", + "paragraphs": [ + "Intensità di riduzione del rumore per il passaggio di coerenza.", + "Uguale al parametro intensità di riduzione del rumore da immagine a immagine." + ] + }, + "paramNegativeConditioning": { + "paragraphs": [ + "Il processo di generazione evita i concetti nel prompt negativo. Utilizzatelo per escludere qualità o oggetti dall'output.", + "Supporta la sintassi e gli incorporamenti di Compel." + ], + "heading": "Prompt negativo" + }, + "compositingBlurMethod": { + "heading": "Metodo di sfocatura", + "paragraphs": [ + "Il metodo di sfocatura applicato all'area mascherata." + ] + }, + "paramPositiveConditioning": { + "heading": "Prompt positivo", + "paragraphs": [ + "Guida il processo di generazione. Puoi usare qualsiasi parola o frase.", + "Supporta sintassi e incorporamenti di Compel e Prompt Dinamici." + ] + }, + "controlNetBeginEnd": { + "heading": "Percentuale passi Inizio / Fine", + "paragraphs": [ + "A quali passi del processo di rimozione del rumore verrà applicato ControlNet.", + "I ControlNet applicati all'inizio del processo guidano la composizione, mentre i ControlNet applicati alla fine guidano i dettagli." + ] + }, + "noiseUseCPU": { + "paragraphs": [ + "Controlla se viene generato rumore sulla CPU o sulla GPU.", + "Con il rumore della CPU abilitato, un seme particolare produrrà la stessa immagine su qualsiasi macchina.", + "Non vi è alcun impatto sulle prestazioni nell'abilitare il rumore della CPU." + ], + "heading": "Usa la CPU per generare rumore" + }, + "scaleBeforeProcessing": { + "paragraphs": [ + "Ridimensiona l'area selezionata alla dimensione più adatta al modello prima del processo di generazione dell'immagine." + ], + "heading": "Scala prima dell'elaborazione" + }, + "paramRatio": { + "heading": "Proporzioni", + "paragraphs": [ + "Le proporzioni delle dimensioni dell'immagine generata.", + "Per i modelli SD1.5 si consiglia una dimensione dell'immagine (in numero di pixel) equivalente a 512x512 mentre per i modelli SDXL si consiglia una dimensione equivalente a 1024x1024." + ] + }, + "dynamicPrompts": { + "paragraphs": [ + "Prompt Dinamici crea molte variazioni a partire da un singolo prompt.", + "La sintassi di base è \"a {red|green|blue} ball\". 
Ciò produrrà tre prompt: \"a red ball\", \"a green ball\" e \"a blue ball\".", + "Puoi utilizzare la sintassi quante volte vuoi in un singolo prompt, ma assicurati di tenere sotto controllo il numero di prompt generati con l'impostazione \"Numero massimo di prompt\"." + ], + "heading": "Prompt Dinamici" + }, + "paramVAE": { + "paragraphs": [ + "Modello utilizzato per tradurre l'output dell'intelligenza artificiale nell'immagine finale." + ], + "heading": "VAE" + }, + "paramIterations": { + "paragraphs": [ + "Il numero di immagini da generare.", + "Se i prompt dinamici sono abilitati, ciascuno dei prompt verrà generato questo numero di volte." + ], + "heading": "Iterazioni" + }, + "paramVAEPrecision": { + "heading": "Precisione VAE", + "paragraphs": [ + "La precisione utilizzata durante la codifica e decodifica VAE. FP16/mezza precisione è più efficiente, a scapito di minori variazioni dell'immagine." + ] + }, + "paramSeed": { + "paragraphs": [ + "Controlla il rumore iniziale utilizzato per la generazione.", + "Disabilita seme \"Casuale\" per produrre risultati identici con le stesse impostazioni di generazione." + ], + "heading": "Seme" + }, + "controlNetResizeMode": { + "heading": "Modalità ridimensionamento", + "paragraphs": [ + "Come l'immagine ControlNet verrà adattata alle dimensioni di output dell'immagine." + ] + }, + "dynamicPromptsSeedBehaviour": { + "paragraphs": [ + "Controlla il modo in cui viene utilizzato il seme durante la generazione dei prompt.", + "Per iterazione utilizzerà un seme univoco per ogni iterazione. Usalo per esplorare variazioni del prompt su un singolo seme.", + "Ad esempio, se hai 5 prompt, ogni immagine utilizzerà lo stesso seme.", + "Per immagine utilizzerà un seme univoco per ogni immagine. Ciò fornisce più variazione." + ], + "heading": "Comportamento del seme" + }, + "paramModel": { + "heading": "Modello", + "paragraphs": [ + "Modello utilizzato per i passaggi di riduzione del rumore.", + "Diversi modelli sono generalmente addestrati per specializzarsi nella produzione di particolari risultati e contenuti estetici." + ] + }, + "paramDenoisingStrength": { + "paragraphs": [ + "Quanto rumore viene aggiunto all'immagine in ingresso.", + "0 risulterà in un'immagine identica, mentre 1 risulterà in un'immagine completamente nuova." + ], + "heading": "Forza di riduzione del rumore" + }, + "dynamicPromptsMaxPrompts": { + "heading": "Numero massimo di prompt", + "paragraphs": [ + "Limita il numero di prompt che possono essere generati da Prompt Dinamici." + ] + }, + "infillMethod": { + "paragraphs": [ + "Metodo per riempire l'area selezionata." + ], + "heading": "Metodo di riempimento" + }, + "controlNetWeight": { + "heading": "Peso", + "paragraphs": [ + "Quanto forte sarà l'impatto di ControlNet sull'immagine generata." + ] + }, + "paramCFGScale": { + "heading": "Scala CFG", + "paragraphs": [ + "Controlla quanto il tuo prompt influenza il processo di generazione." + ] + }, + "controlNetControlMode": { + "paragraphs": [ + "Attribuisce più peso al prompt o a ControlNet." + ], + "heading": "Modalità di controllo" + }, + "paramSteps": { + "heading": "Passi", + "paragraphs": [ + "Numero di passi che verranno eseguiti in ogni generazione.", + "Un numero di passi più elevato generalmente creerà immagini migliori ma richiederà più tempo di generazione." + ] + }, + "lora": { + "heading": "Peso LoRA", + "paragraphs": [ + "Un peso LoRA più elevato porterà a impatti maggiori sull'immagine finale." 
+ ] + }, + "controlNet": { + "paragraphs": [ + "ControlNet fornisce una guida al processo di generazione, aiutando a creare immagini con composizione, struttura o stile controllati, a seconda del modello selezionato." + ] + } + }, + "sdxl": { + "selectAModel": "Seleziona un modello", + "scheduler": "Campionatore", + "noModelsAvailable": "Nessun modello disponibile", + "denoisingStrength": "Forza di riduzione del rumore", + "concatPromptStyle": "Concatena Prompt & Stile", + "loading": "Caricamento...", + "steps": "Passi" } } From 45f9aca7e50daaf39455485baf93083a4c9a567a Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:44:56 +0000 Subject: [PATCH 031/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ar.json | 1 - invokeai/frontend/web/public/locales/de.json | 1 - invokeai/frontend/web/public/locales/es.json | 2 -- invokeai/frontend/web/public/locales/fr.json | 1 - invokeai/frontend/web/public/locales/he.json | 1 - invokeai/frontend/web/public/locales/it.json | 2 -- invokeai/frontend/web/public/locales/nl.json | 1 - invokeai/frontend/web/public/locales/pl.json | 1 - invokeai/frontend/web/public/locales/pt.json | 1 - invokeai/frontend/web/public/locales/pt_BR.json | 1 - invokeai/frontend/web/public/locales/ru.json | 2 -- invokeai/frontend/web/public/locales/uk.json | 1 - invokeai/frontend/web/public/locales/zh_CN.json | 2 -- 13 files changed, 17 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json index 8ff402dee3..7354b21ea0 100644 --- a/invokeai/frontend/web/public/locales/ar.json +++ b/invokeai/frontend/web/public/locales/ar.json @@ -368,7 +368,6 @@ "infillScalingHeader": "التعبئة والتحجيم", "img2imgStrength": "قوة صورة إلى صورة", "toggleLoopback": "تبديل الإعادة", - "invoke": "إطلاق", "sendTo": "أرسل إلى", "sendToImg2Img": "أرسل إلى صورة إلى صورة", "sendToUnifiedCanvas": "أرسل إلى الخطوط الموحدة", diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index b712fa30bc..61cef2637e 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -370,7 +370,6 @@ "infillScalingHeader": "Infill und Skalierung", "img2imgStrength": "Bild-zu-Bild-Stärke", "toggleLoopback": "Toggle Loopback", - "invoke": "Invoke", "sendTo": "Senden an", "sendToImg2Img": "Senden an Bild zu Bild", "sendToUnifiedCanvas": "Senden an Unified Canvas", diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 6500244bba..863d2cc3cf 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -483,7 +483,6 @@ "infillScalingHeader": "Remplazo y escalado", "img2imgStrength": "Peso de Imagen a Imagen", "toggleLoopback": "Alternar Retroalimentación", - "invoke": "Invocar", "sendTo": "Enviar a", "sendToImg2Img": "Enviar a Imagen a Imagen", "sendToUnifiedCanvas": "Enviar a Lienzo Unificado", @@ -722,7 +721,6 @@ "showLegendNodes": "Mostrar la leyenda del tipo de campo", "showMinimapnodes": "Mostrar el minimapa", "reloadNodeTemplates": "Recargar las plantillas de nodos", - "saveWorkflow": "Guardar el flujo de trabajo", 
"loadWorkflow": "Cargar el flujo de trabajo", "resetWorkflow": "Reiniciar e flujo de trabajo", "resetWorkflowDesc": "¿Está seguro de que deseas restablecer este flujo de trabajo?", diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index ca2227dbe6..b7ab932fcc 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -381,7 +381,6 @@ "infillScalingHeader": "Remplissage et Mise à l'Échelle", "img2imgStrength": "Force de l'Image à l'Image", "toggleLoopback": "Activer/Désactiver la Boucle", - "invoke": "Invoker", "sendTo": "Envoyer à", "sendToImg2Img": "Envoyer à Image à Image", "sendToUnifiedCanvas": "Envoyer au Canvas Unifié", diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json index 49e42062e8..dfb5ea0360 100644 --- a/invokeai/frontend/web/public/locales/he.json +++ b/invokeai/frontend/web/public/locales/he.json @@ -422,7 +422,6 @@ "symmetry": "סימטריה", "vSymmetryStep": "צעד סימטריה V", "hSymmetryStep": "צעד סימטריה H", - "invoke": "הפעלה", "cancel": { "schedule": "ביטול לאחר האיטרציה הנוכחית", "isScheduled": "מבטל", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 200f3c902b..47f5be5943 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -506,7 +506,6 @@ "infillScalingHeader": "Riempimento e ridimensionamento", "img2imgStrength": "Forza da Immagine a Immagine", "toggleLoopback": "Attiva/disattiva elaborazione ricorsiva", - "invoke": "Invoke", "sendTo": "Invia a", "sendToImg2Img": "Invia a da Immagine a Immagine", "sendToUnifiedCanvas": "Invia a Tela Unificata", @@ -825,7 +824,6 @@ "showGraphNodes": "Mostra sovrapposizione grafico", "resetWorkflowDesc2": "Reimpostare il flusso di lavoro cancellerà tutti i nodi, i bordi e i dettagli del flusso di lavoro.", "reloadNodeTemplates": "Ricarica i modelli di nodo", - "saveWorkflow": "Salva il flusso di lavoro", "loadWorkflow": "Carica il flusso di lavoro", "resetWorkflow": "Reimposta flusso di lavoro", "resetWorkflowDesc": "Sei sicuro di voler reimpostare questo flusso di lavoro?", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index a78a073319..c5cdec159b 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -475,7 +475,6 @@ "infillScalingHeader": "Infill en schaling", "img2imgStrength": "Sterkte Afbeelding naar afbeelding", "toggleLoopback": "Zet recursieve verwerking aan/uit", - "invoke": "Genereer", "sendTo": "Stuur naar", "sendToImg2Img": "Stuur naar Afbeelding naar afbeelding", "sendToUnifiedCanvas": "Stuur naar Centraal canvas", diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index f7bd0f1d60..ee6ee2d9a9 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -304,7 +304,6 @@ "infillScalingHeader": "Wypełnienie i skalowanie", "img2imgStrength": "Wpływ sugestii na obraz", "toggleLoopback": "Wł/wył sprzężenie zwrotne", - "invoke": "Wywołaj", "sendTo": "Wyślij do", "sendToImg2Img": "Użyj w trybie \"Obraz na obraz\"", "sendToUnifiedCanvas": "Użyj w trybie uniwersalnym", diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index f4bfed272c..8a4cac3d87 100644 --- 
a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -435,7 +435,6 @@ "upscale": "Redimensionar", "upscaleImage": "Redimensionar Imagem", "scaleBeforeProcessing": "Escala Antes do Processamento", - "invoke": "Invocar", "images": "Imagems", "steps": "Passos", "cfgScale": "Escala CFG", diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index d263bf6251..3b45dbbbf3 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -427,7 +427,6 @@ "infillScalingHeader": "Preencimento e Escala", "img2imgStrength": "Força de Imagem Para Imagem", "toggleLoopback": "Ativar Loopback", - "invoke": "Invoke", "sendTo": "Mandar para", "sendToImg2Img": "Mandar para Imagem Para Imagem", "sendToUnifiedCanvas": "Mandar para Tela Unificada", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index fe2805e5fe..9cc29c25db 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -483,7 +483,6 @@ "infillScalingHeader": "Заполнение и масштабирование", "img2imgStrength": "Сила обработки img2img", "toggleLoopback": "Зациклить обработку", - "invoke": "Invoke", "sendTo": "Отправить", "sendToImg2Img": "Отправить в img2img", "sendToUnifiedCanvas": "Отправить на Единый холст", @@ -721,7 +720,6 @@ "hideMinimapnodes": "Скрыть миникарту", "hideLegendNodes": "Скрыть тип поля", "showMinimapnodes": "Показать миникарту", - "saveWorkflow": "Сохранить рабочий процесс", "loadWorkflow": "Загрузить рабочий процесс", "resetWorkflowDesc2": "Сброс рабочего процесса очистит все узлы, ребра и детали рабочего процесса.", "resetWorkflow": "Сбросить рабочий процесс", diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index 5c02693a76..77768f2793 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ b/invokeai/frontend/web/public/locales/uk.json @@ -441,7 +441,6 @@ "infillScalingHeader": "Заповнення і масштабування", "img2imgStrength": "Сила обробки img2img", "toggleLoopback": "Зациклити обробку", - "invoke": "Викликати", "sendTo": "Надіслати", "sendToImg2Img": "Надіслати у img2img", "sendToUnifiedCanvas": "Надіслати на полотно", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index bd3949fde2..6087e65ebb 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -476,7 +476,6 @@ "infillScalingHeader": "内填充和缩放", "img2imgStrength": "图像到图像强度", "toggleLoopback": "切换环回", - "invoke": "Invoke", "sendTo": "发送到", "sendToImg2Img": "发送到图像到图像", "sendToUnifiedCanvas": "发送到统一画布", @@ -708,7 +707,6 @@ "loadWorkflow": "读取节点图", "zoomOutNodes": "缩小", "resetWorkflowDesc2": "重置节点图将清除所有节点、边际和节点图详情", - "saveWorkflow": "保存节点图", "reloadNodeTemplates": "重载节点模板", "hideGraphNodes": "隐藏节点图信息", "fitViewportNodes": "自适应视图", From 74c666aaa2a256e55fed60a89ef063d5dde293f5 Mon Sep 17 00:00:00 2001 From: System X - Files Date: Thu, 12 Oct 2023 12:44:58 +0000 Subject: [PATCH 032/202] translationBot(ui): update translation (Russian) Currently translated at 65.5% (643 of 981 strings) translationBot(ui): update translation (Russian) Currently translated at 100.0% (605 of 605 strings) Co-authored-by: System X - Files Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ru/ Translation: InvokeAI/Web UI --- 
invokeai/frontend/web/public/locales/ru.json | 71 +++++++++++++++++--- 1 file changed, 61 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 9cc29c25db..2523949e31 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -6,7 +6,7 @@ "settingsLabel": "Настройки", "img2img": "Изображение в изображение (img2img)", "unifiedCanvas": "Единый холст", - "nodes": "Редактор нод", + "nodes": "Редактор рабочего процесса", "langRussian": "Русский", "nodesDesc": "Cистема генерации изображений на основе нодов (узлов) уже разрабатывается. Следите за новостями об этой замечательной функции.", "postProcessing": "Постобработка", @@ -79,7 +79,10 @@ "lightMode": "Светлая тема", "batch": "Пакетный менеджер", "modelManager": "Менеджер моделей", - "darkMode": "Темная тема" + "darkMode": "Темная тема", + "nodeEditor": "Редактор Нодов (Узлов)", + "controlNet": "Controlnet", + "advanced": "Расширенные" }, "gallery": { "generations": "Генерации", @@ -525,10 +528,14 @@ "clipSkip": "CLIP Пропуск", "aspectRatio": "Соотношение", "maskAdjustmentsHeader": "Настройка маски", - "maskBlur": "Размытие маски", - "maskBlurMethod": "Метод размытия маски", + "maskBlur": "Размытие", + "maskBlurMethod": "Метод размытия", "seamLowThreshold": "Низкий", - "seamHighThreshold": "Высокий" + "seamHighThreshold": "Высокий", + "coherenceSteps": "Шагов", + "coherencePassHeader": "Порог Coherence", + "coherenceStrength": "Сила", + "compositingSettingsHeader": "Настройки компоновки" }, "settings": { "models": "Модели", @@ -540,7 +547,7 @@ "resetWebUI": "Сброс настроек Web UI", "resetWebUIDesc1": "Сброс настроек веб-интерфейса удаляет только локальный кэш браузера с вашими изображениями и настройками. Он не удаляет изображения с диска.", "resetWebUIDesc2": "Если изображения не отображаются в галерее или не работает что-то еще, пожалуйста, попробуйте сбросить настройки, прежде чем сообщать о проблеме на GitHub.", - "resetComplete": "Интерфейс сброшен. Обновите эту страницу.", + "resetComplete": "Настройки веб-интерфейса были сброшены.", "useSlidersForAll": "Использовать ползунки для всех параметров", "consoleLogLevel": "Уровень логирования", "shouldLogToConsole": "Логи в консоль", @@ -556,7 +563,8 @@ "experimental": "Экспериментальные", "beta": "Бета", "alternateCanvasLayout": "Альтернативный слой холста", - "showAdvancedOptions": "Показать доп. параметры" + "showAdvancedOptions": "Показать доп. 
параметры", + "autoChangeDimensions": "Обновить Ш/В на стандартные для модели при изменении" }, "toast": { "tempFoldersEmptied": "Временная папка очищена", @@ -695,7 +703,7 @@ "flipHorizontally": "Отразить горизонтально", "toggleAutoscroll": "Включить автопрокрутку", "toggleLogViewer": "Показать или скрыть просмотрщик логов", - "showOptionsPanel": "Показать опции", + "showOptionsPanel": "Показать боковую панель", "showGallery": "Показать галерею", "invokeProgressBar": "Индикатор выполнения", "reset": "Сброс", @@ -708,7 +716,8 @@ "ui": { "showProgressImages": "Показывать промежуточный итог", "hideProgressImages": "Не показывать промежуточный итог", - "swapSizes": "Поменять местами размеры" + "swapSizes": "Поменять местами размеры", + "lockRatio": "Зафиксировать пропорции" }, "nodes": { "zoomInNodes": "Увеличьте масштаб", @@ -724,6 +733,48 @@ "resetWorkflowDesc2": "Сброс рабочего процесса очистит все узлы, ребра и детали рабочего процесса.", "resetWorkflow": "Сбросить рабочий процесс", "resetWorkflowDesc": "Вы уверены, что хотите сбросить этот рабочий процесс?", - "reloadNodeTemplates": "Перезагрузить шаблоны узлов" + "reloadNodeTemplates": "Перезагрузить шаблоны узлов", + "downloadWorkflow": "Скачать JSON рабочего процесса" + }, + "controlnet": { + "amult": "a_mult", + "contentShuffleDescription": "Перетасовывает содержимое изображения", + "bgth": "bg_th", + "contentShuffle": "Перетасовка содержимого", + "beginEndStepPercent": "Процент начала/конца шага", + "duplicate": "Дублировать", + "balanced": "Сбалансированный", + "f": "F", + "depthMidasDescription": "Генерация карты глубины с использованием Midas", + "control": "Контроль", + "coarse": "Грубость обработки", + "crop": "Обрезка", + "depthMidas": "Глубина (Midas)", + "enableControlnet": "Включить ControlNet", + "detectResolution": "Определить разрешение", + "controlMode": "Режим контроля", + "cannyDescription": "Детектор границ Canny", + "depthZoe": "Глубина (Zoe)", + "autoConfigure": "Автонастройка процессора", + "delete": "Удалить", + "canny": "Canny", + "depthZoeDescription": "Генерация карты глубины с использованием Zoe" + }, + "boards": { + "autoAddBoard": "Авто добавление Доски", + "topMessage": "Эта доска содержит изображения, используемые в следующих функциях:", + "move": "Перемещение", + "menuItemAutoAdd": "Авто добавление на эту доску", + "myBoard": "Моя Доска", + "searchBoard": "Поиск Доски...", + "noMatching": "Нет подходящих Досок", + "selectBoard": "Выбрать Доску", + "cancel": "Отменить", + "addBoard": "Добавить Доску", + "bottomMessage": "Удаление этой доски и ее изображений приведет к сбросу всех функций, использующихся их в данный момент.", + "uncategorized": "Без категории", + "changeBoard": "Изменить Доску", + "loading": "Загрузка...", + "clearSearch": "Очистить поиск" } } From 65af7dd8f8a84a0eeab8c2ed686af730748c2599 Mon Sep 17 00:00:00 2001 From: nemuruibai Date: Thu, 12 Oct 2023 12:44:59 +0000 Subject: [PATCH 033/202] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 95.7% (579 of 605 strings) Co-authored-by: nemuruibai Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/zh_CN.json | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 6087e65ebb..6c2331a346 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ 
b/invokeai/frontend/web/public/locales/zh_CN.json @@ -683,7 +683,8 @@ "ui": { "showProgressImages": "显示处理中的图片", "hideProgressImages": "隐藏处理中的图片", - "swapSizes": "XY尺寸互换" + "swapSizes": "XY尺寸互换", + "lockRatio": "锁定比率" }, "tooltip": { "feature": { @@ -706,7 +707,7 @@ "resetWorkflow": "清空节点图", "loadWorkflow": "读取节点图", "zoomOutNodes": "缩小", - "resetWorkflowDesc2": "重置节点图将清除所有节点、边际和节点图详情", + "resetWorkflowDesc2": "重置节点图将清除所有节点、边际和节点图详情.", "reloadNodeTemplates": "重载节点模板", "hideGraphNodes": "隐藏节点图信息", "fitViewportNodes": "自适应视图", @@ -714,6 +715,7 @@ "hideMinimapnodes": "隐藏缩略图", "showLegendNodes": "显示字段类型图例", "hideLegendNodes": "隐藏字段类型图例", - "showGraphNodes": "显示节点图信息" + "showGraphNodes": "显示节点图信息", + "downloadWorkflow": "下载节点图 JSON" } } From f0bf7333090001fb728bd98634852b254a29f08b Mon Sep 17 00:00:00 2001 From: gallegonovato Date: Thu, 12 Oct 2023 12:45:00 +0000 Subject: [PATCH 034/202] translationBot(ui): update translation (Spanish) Currently translated at 100.0% (607 of 607 strings) translationBot(ui): update translation (Spanish) Currently translated at 100.0% (605 of 605 strings) Co-authored-by: gallegonovato Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/es/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/es.json | 21 ++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 863d2cc3cf..1695cbd099 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -525,10 +525,16 @@ "clipSkip": "Omitir el CLIP", "aspectRatio": "Relación", "maskAdjustmentsHeader": "Ajustes de la máscara", - "maskBlur": "Máscara de Desenfoque", - "maskBlurMethod": "Método del desenfoque de la máscara", + "maskBlur": "Difuminar", + "maskBlurMethod": "Método del desenfoque", "seamHighThreshold": "Alto", - "seamLowThreshold": "Bajo" + "seamLowThreshold": "Bajo", + "coherencePassHeader": "Parámetros de la coherencia", + "compositingSettingsHeader": "Ajustes de la composición", + "coherenceSteps": "Pasos", + "coherenceStrength": "Fuerza", + "patchmatchDownScaleSize": "Reducir a escala", + "coherenceMode": "Modo" }, "settings": { "models": "Modelos", @@ -556,7 +562,8 @@ "alternateCanvasLayout": "Diseño alternativo del lienzo", "beta": "Beta", "enableNodesEditor": "Activar el editor de nodos", - "experimental": "Experimental" + "experimental": "Experimental", + "autoChangeDimensions": "Actualiza W/H a los valores predeterminados del modelo cuando se modifica" }, "toast": { "tempFoldersEmptied": "Directorio temporal vaciado", @@ -708,7 +715,8 @@ "ui": { "hideProgressImages": "Ocultar el progreso de la imagen", "showProgressImages": "Mostrar el progreso de la imagen", - "swapSizes": "Cambiar los tamaños" + "swapSizes": "Cambiar los tamaños", + "lockRatio": "Proporción del bloqueo" }, "nodes": { "showGraphNodes": "Mostrar la superposición de los gráficos", @@ -724,6 +732,7 @@ "loadWorkflow": "Cargar el flujo de trabajo", "resetWorkflow": "Reiniciar e flujo de trabajo", "resetWorkflowDesc": "¿Está seguro de que deseas restablecer este flujo de trabajo?", - "resetWorkflowDesc2": "Al reiniciar el flujo de trabajo se borrarán todos los nodos, aristas y detalles del flujo de trabajo." 
+ "resetWorkflowDesc2": "Al reiniciar el flujo de trabajo se borrarán todos los nodos, aristas y detalles del flujo de trabajo.", + "downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON" } } From d6310885663c1164776aaaad08cf4d297f3dbf9c Mon Sep 17 00:00:00 2001 From: Dennis Date: Thu, 12 Oct 2023 12:45:01 +0000 Subject: [PATCH 035/202] translationBot(ui): update translation (Dutch) Currently translated at 100.0% (605 of 605 strings) Co-authored-by: Dennis Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/nl/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/nl.json | 45 +++++++++++++++----- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index c5cdec159b..ceb3d869e6 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -6,7 +6,7 @@ "settingsLabel": "Instellingen", "img2img": "Afbeelding naar afbeelding", "unifiedCanvas": "Centraal canvas", - "nodes": "Knooppunt-editor", + "nodes": "Werkstroom-editor", "langDutch": "Nederlands", "nodesDesc": "Een op knooppunten gebaseerd systeem voor het genereren van afbeeldingen is momenteel in ontwikkeling. Blijf op de hoogte voor nieuws over deze verbluffende functie.", "postProcessing": "Naverwerking", @@ -99,7 +99,8 @@ "deleteImageBin": "Gewiste afbeeldingen worden naar de prullenbak van je besturingssysteem gestuurd.", "deleteImagePermanent": "Gewiste afbeeldingen kunnen niet worden hersteld.", "assets": "Eigen onderdelen", - "images": "Afbeeldingen" + "images": "Afbeeldingen", + "autoAssignBoardOnClick": "Ken automatisch bord toe bij klikken" }, "hotkeys": { "keyboardShortcuts": "Sneltoetsen", @@ -306,7 +307,12 @@ "acceptStagingImage": { "title": "Accepteer sessie-afbeelding", "desc": "Accepteert de huidige sessie-afbeelding" - } + }, + "addNodes": { + "title": "Voeg knooppunten toe", + "desc": "Opent het menu Voeg knooppunt toe" + }, + "nodesHotkeys": "Sneltoetsen knooppunten" }, "modelManager": { "modelManager": "Modelonderhoud", @@ -436,7 +442,9 @@ "convertingModelBegin": "Model aan het converteren. Even geduld.", "importModels": "Importeer Modellen", "syncModelsDesc": "Als je modellen niet meer synchroon zijn met de backend, kan je ze met deze optie verversen. Dit wordt typisch gebruikt in het geval je het models.yaml bestand met de hand bewerkt of als je modellen aan de InvokeAI root map toevoegt nadat de applicatie gestart werd.", - "loraModels": "LoRA's" + "loraModels": "LoRA's", + "onnxModels": "Onnx", + "oliveModels": "Olives" }, "parameters": { "images": "Afbeeldingen", @@ -515,7 +523,16 @@ "aspectRatio": "Verhouding", "negativePromptPlaceholder": "Negatieve prompt", "controlNetControlMode": "Aansturingsmodus", - "positivePromptPlaceholder": "Positieve prompt" + "positivePromptPlaceholder": "Positieve prompt", + "maskAdjustmentsHeader": "Maskeraanpassingen", + "compositingSettingsHeader": "Instellingen afbeeldingsopbouw", + "coherencePassHeader": "Coherentiestap", + "maskBlur": "Vervaag", + "maskBlurMethod": "Vervagingsmethode", + "coherenceSteps": "Stappen", + "coherenceStrength": "Sterkte", + "seamHighThreshold": "Hoog", + "seamLowThreshold": "Laag" }, "settings": { "models": "Modellen", @@ -527,7 +544,7 @@ "resetWebUI": "Herstel web-UI", "resetWebUIDesc1": "Herstel web-UI herstelt alleen de lokale afbeeldingscache en de onthouden instellingen van je browser. 
Het verwijdert geen afbeeldingen van schijf.", "resetWebUIDesc2": "Als afbeeldingen niet getoond worden in de galerij of iets anders werkt niet, probeer dan eerst deze herstelfunctie voordat je een fout aanmeldt op GitHub.", - "resetComplete": "Webgebruikersinterface is hersteld. Vernieuw de pasgina om opnieuw te laden.", + "resetComplete": "Webgebruikersinterface is hersteld.", "useSlidersForAll": "Gebruik schuifbalken voor alle opties", "consoleLogLevel": "Logboekniveau", "shouldLogToConsole": "Schrijf logboek naar console", @@ -543,7 +560,8 @@ "beta": "Bèta", "experimental": "Experimenteel", "alternateCanvasLayout": "Omwisselen Canvas Layout", - "enableNodesEditor": "Knopen Editor Inschakelen" + "enableNodesEditor": "Knopen Editor Inschakelen", + "autoChangeDimensions": "Werk bij wijziging afmetingen bij naar modelstandaard" }, "toast": { "tempFoldersEmptied": "Tijdelijke map geleegd", @@ -689,13 +707,14 @@ "toggleAutoscroll": "Autom. scrollen aan/uit", "toggleLogViewer": "Logboekviewer aan/uit", "showGallery": "Toon galerij", - "showOptionsPanel": "Toon deelscherm Opties", + "showOptionsPanel": "Toon zijscherm", "menu": "Menu" }, "ui": { "showProgressImages": "Toon voortgangsafbeeldingen", "hideProgressImages": "Verberg voortgangsafbeeldingen", - "swapSizes": "Wissel afmetingen om" + "swapSizes": "Wissel afmetingen om", + "lockRatio": "Zet verhouding vast" }, "nodes": { "zoomOutNodes": "Uitzoomen", @@ -706,6 +725,12 @@ "hideGraphNodes": "Graph overlay verbergen", "showGraphNodes": "Graph overlay tonen", "showMinimapnodes": "Minimap tonen", - "hideLegendNodes": "Typelegende veld verbergen" + "hideLegendNodes": "Typelegende veld verbergen", + "reloadNodeTemplates": "Herlaad knooppuntsjablonen", + "loadWorkflow": "Laad werkstroom", + "resetWorkflow": "Herstel werkstroom", + "resetWorkflowDesc": "Weet je zeker dat je deze werkstroom wilt herstellen?", + "resetWorkflowDesc2": "Herstel van een werkstroom haalt alle knooppunten, randen en werkstroomdetails weg.", + "downloadWorkflow": "Download JSON van werkstroom" } } From 851ce36250e7baee2c90d41925bb71e3d0d118fd Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:45:04 +0000 Subject: [PATCH 036/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. 
Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/de.json | 1 - invokeai/frontend/web/public/locales/es.json | 1 - invokeai/frontend/web/public/locales/fi.json | 1 - invokeai/frontend/web/public/locales/it.json | 6 +----- invokeai/frontend/web/public/locales/ja.json | 1 - invokeai/frontend/web/public/locales/nl.json | 1 - invokeai/frontend/web/public/locales/pl.json | 1 - invokeai/frontend/web/public/locales/pt.json | 1 - invokeai/frontend/web/public/locales/ru.json | 1 - invokeai/frontend/web/public/locales/sv.json | 1 - invokeai/frontend/web/public/locales/tr.json | 3 +-- invokeai/frontend/web/public/locales/uk.json | 1 - invokeai/frontend/web/public/locales/zh_CN.json | 1 - 13 files changed, 2 insertions(+), 18 deletions(-) diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 61cef2637e..9baa6eb6a2 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -516,7 +516,6 @@ "modifyConfig": "Optionen einstellen", "toggleAutoscroll": "Auroscroll ein/ausschalten", "toggleLogViewer": "Log Betrachter ein/ausschalten", - "showGallery": "Zeige Galerie", "showOptionsPanel": "Zeige Optionen", "reset": "Zurücksetzen", "nextImage": "Nächstes Bild", diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index 1695cbd099..8ff4c53165 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -708,7 +708,6 @@ "modifyConfig": "Modificar la configuración", "toggleAutoscroll": "Activar el autodesplazamiento", "toggleLogViewer": "Alternar el visor de registros", - "showGallery": "Mostrar galería", "showOptionsPanel": "Mostrar el panel lateral", "menu": "Menú" }, diff --git a/invokeai/frontend/web/public/locales/fi.json b/invokeai/frontend/web/public/locales/fi.json index f257344cf1..cf7fc6701b 100644 --- a/invokeai/frontend/web/public/locales/fi.json +++ b/invokeai/frontend/web/public/locales/fi.json @@ -15,7 +15,6 @@ "rotateCounterClockwise": "Kierrä vastapäivään", "rotateClockwise": "Kierrä myötäpäivään", "flipVertically": "Käännä pystysuoraan", - "showGallery": "Näytä galleria", "modifyConfig": "Muokkaa konfiguraatiota", "toggleAutoscroll": "Kytke automaattinen vieritys", "toggleLogViewer": "Kytke lokin katselutila", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 47f5be5943..480ceceb42 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -568,9 +568,7 @@ "addingImagesTo": "Aggiungi immagini a", "systemBusy": "Sistema occupato", "unableToInvoke": "Impossibile invocare", - "systemDisconnected": "Sistema disconnesso", - "noControlImageForControlNet": "ControlNet {{index}} non ha un'immagine di controllo", - "noModelForControlNet": "ControlNet {{index}} non ha alcun modello selezionato." 
+ "systemDisconnected": "Sistema disconnesso" }, "enableNoiseSettings": "Abilita le impostazioni del rumore", "cpuNoise": "Rumore CPU", @@ -683,7 +681,6 @@ "modelAddedSimple": "Modello aggiunto", "problemImportingMaskDesc": "Impossibile importare la maschera", "problemCopyingCanvas": "Problema durante la copia della tela", - "incompatibleSubmodel": "sottomodello incompatibile", "problemSavingCanvas": "Problema nel salvataggio della tela", "canvasDownloaded": "Tela scaricata", "problemMergingCanvasDesc": "Impossibile unire le tele", @@ -797,7 +794,6 @@ "rotateClockwise": "Ruotare in senso orario", "flipHorizontally": "Capovolgi orizzontalmente", "toggleLogViewer": "Attiva/disattiva visualizzatore registro", - "showGallery": "Mostra la galleria immagini", "showOptionsPanel": "Mostra il pannello laterale", "flipVertically": "Capovolgi verticalmente", "toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico", diff --git a/invokeai/frontend/web/public/locales/ja.json b/invokeai/frontend/web/public/locales/ja.json index cc4dbbdde3..a53ea50b46 100644 --- a/invokeai/frontend/web/public/locales/ja.json +++ b/invokeai/frontend/web/public/locales/ja.json @@ -461,7 +461,6 @@ "toggleAutoscroll": "自動スクロールの切替", "modifyConfig": "Modify Config", "toggleLogViewer": "Log Viewerの切替", - "showGallery": "ギャラリーを表示", "showOptionsPanel": "オプションパネルを表示" } } diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index ceb3d869e6..f682886dae 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -706,7 +706,6 @@ "modifyConfig": "Wijzig configuratie", "toggleAutoscroll": "Autom. scrollen aan/uit", "toggleLogViewer": "Logboekviewer aan/uit", - "showGallery": "Toon galerij", "showOptionsPanel": "Toon zijscherm", "menu": "Menu" }, diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index ee6ee2d9a9..f77c0c4710 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -455,7 +455,6 @@ "modifyConfig": "Modyfikuj ustawienia", "toggleAutoscroll": "Przełącz autoprzewijanie", "toggleLogViewer": "Przełącz podgląd logów", - "showGallery": "Pokaż galerię", "showOptionsPanel": "Pokaż panel opcji", "menu": "Menu" } diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index 8a4cac3d87..ac9dd50b4d 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -593,7 +593,6 @@ "flipVertically": "Espelhar verticalmente", "modifyConfig": "Modificar config", "toggleAutoscroll": "Alternar rolagem automática", - "showGallery": "Mostrar galeria", "showOptionsPanel": "Mostrar painel de opções", "uploadImage": "Enviar imagem", "previousImage": "Imagem anterior", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 2523949e31..808db9e803 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -704,7 +704,6 @@ "toggleAutoscroll": "Включить автопрокрутку", "toggleLogViewer": "Показать или скрыть просмотрщик логов", "showOptionsPanel": "Показать боковую панель", - "showGallery": "Показать галерею", "invokeProgressBar": "Индикатор выполнения", "reset": "Сброс", "modifyConfig": "Изменить конфиг", diff --git a/invokeai/frontend/web/public/locales/sv.json b/invokeai/frontend/web/public/locales/sv.json index 
c3f25e65d8..eef46c4513 100644 --- a/invokeai/frontend/web/public/locales/sv.json +++ b/invokeai/frontend/web/public/locales/sv.json @@ -15,7 +15,6 @@ "reset": "Starta om", "previousImage": "Föregående bild", "useThisParameter": "Använd denna parametern", - "showGallery": "Visa galleri", "rotateCounterClockwise": "Rotera moturs", "rotateClockwise": "Rotera medurs", "modifyConfig": "Ändra konfiguration", diff --git a/invokeai/frontend/web/public/locales/tr.json b/invokeai/frontend/web/public/locales/tr.json index 1f285f956b..0c222eecf7 100644 --- a/invokeai/frontend/web/public/locales/tr.json +++ b/invokeai/frontend/web/public/locales/tr.json @@ -19,8 +19,7 @@ "reset": "Sıfırla", "uploadImage": "Resim Yükle", "previousImage": "Önceki Resim", - "menu": "Menü", - "showGallery": "Galeriyi Göster" + "menu": "Menü" }, "common": { "hotkeysLabel": "Kısayol Tuşları", diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index 77768f2793..a85faee727 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ b/invokeai/frontend/web/public/locales/uk.json @@ -608,7 +608,6 @@ "rotateClockwise": "Обертати за годинниковою стрілкою", "toggleAutoscroll": "Увімкнути автопрокручування", "toggleLogViewer": "Показати або приховати переглядач журналів", - "showGallery": "Показати галерею", "previousImage": "Попереднє зображення", "copyMetadataJson": "Скопіювати метадані JSON", "flipVertically": "Перевернути по вертикалі", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 6c2331a346..1e545eb4b3 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -673,7 +673,6 @@ "rotateClockwise": "顺时针旋转", "flipHorizontally": "水平翻转", "flipVertically": "垂直翻转", - "showGallery": "显示图库", "showOptionsPanel": "显示选项面板", "toggleLogViewer": "切换日志浏览器", "modifyConfig": "修改设置", From 7bc6c23dfa63e8b50c7cc3ac00d2617dab781f69 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Thu, 12 Oct 2023 12:45:05 +0000 Subject: [PATCH 037/202] translationBot(ui): update translation (Italian) Currently translated at 87.1% (1054 of 1210 strings) translationBot(ui): update translation (Italian) Currently translated at 85.5% (1026 of 1199 strings) translationBot(ui): update translation (Italian) Currently translated at 84.7% (1016 of 1199 strings) translationBot(ui): update translation (Italian) Currently translated at 84.7% (1016 of 1199 strings) translationBot(ui): update translation (Italian) Currently translated at 84.4% (1012 of 1199 strings) translationBot(ui): update translation (Italian) Currently translated at 84.3% (1011 of 1199 strings) translationBot(ui): update translation (Italian) Currently translated at 83.5% (1002 of 1199 strings) translationBot(ui): update translation (Italian) Currently translated at 81.5% (978 of 1199 strings) translationBot(ui): update translation (Italian) Currently translated at 80.8% (969 of 1199 strings) translationBot(ui): update translation (Italian) Currently translated at 80.7% (968 of 1199 strings) translationBot(ui): update translation (Italian) Currently translated at 81.3% (959 of 1179 strings) translationBot(ui): update translation (Italian) Currently translated at 81.3% (959 of 1179 strings) translationBot(ui): update translation (Italian) Currently translated at 81.3% (959 of 1179 strings) translationBot(ui): update translation (Italian) Currently translated at 81.3% (959 of 1179 strings) Co-authored-by: Riccardo 
Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 149 ++++++++++++++++--- 1 file changed, 129 insertions(+), 20 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 480ceceb42..c219e0f7f7 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -27,14 +27,14 @@ "statusProcessingCanceled": "Elaborazione annullata", "statusProcessingComplete": "Elaborazione completata", "statusGenerating": "Generazione in corso", - "statusGeneratingTextToImage": "Generazione da Testo a Immagine", + "statusGeneratingTextToImage": "Generazione Testo a Immagine", "statusGeneratingImageToImage": "Generazione da Immagine a Immagine", "statusGeneratingInpainting": "Generazione Inpainting", "statusGeneratingOutpainting": "Generazione Outpainting", "statusGenerationComplete": "Generazione completata", "statusIterationComplete": "Iterazione completata", "statusSavingImage": "Salvataggio dell'immagine", - "statusRestoringFaces": "Restaura i volti", + "statusRestoringFaces": "Restaura volti", "statusRestoringFacesGFPGAN": "Restaura volti (GFPGAN)", "statusRestoringFacesCodeFormer": "Restaura volti (CodeFormer)", "statusUpscaling": "Ampliamento", @@ -81,12 +81,15 @@ "modelManager": "Gestore modello", "communityLabel": "Comunità", "nodeEditor": "Editor dei nodi", - "controlNet": "ControlNet", + "controlNet": "", "statusProcessing": "Elaborazione in corso", "advanced": "Avanzate", "imageFailedToLoad": "Impossibile caricare l'immagine", "learnMore": "Per saperne di più", - "ipAdapter": "Adattatore IP" + "ipAdapter": "Adattatore IP", + "t2iAdapter": "Adattatore T2I", + "controlAdapter": "Adattatore di Controllo", + "controlNet": "" }, "gallery": { "generations": "Generazioni", @@ -114,7 +117,10 @@ "currentlyInUse": "Questa immagine è attualmente utilizzata nelle seguenti funzionalità:", "copy": "Copia", "download": "Scarica", - "setCurrentImage": "Imposta come immagine corrente" + "setCurrentImage": "Imposta come immagine corrente", + "preparingDownload": "Preparazione del download", + "preparingDownloadFailed": "Problema durante la preparazione del download", + "downloadSelection": "Scarica gli elementi selezionati" }, "hotkeys": { "keyboardShortcuts": "Tasti rapidi", @@ -187,7 +193,7 @@ "desc": "Mostra le informazioni sui metadati dell'immagine corrente" }, "sendToImageToImage": { - "title": "Invia a da Immagine a Immagine", + "title": "Invia a Immagine a Immagine", "desc": "Invia l'immagine corrente a da Immagine a Immagine" }, "deleteImage": { @@ -446,7 +452,7 @@ "modelConversionFailed": "Conversione del modello non riuscita", "modelsMergeFailed": "Unione modelli non riuscita", "selectModel": "Seleziona Modello", - "modelDeleted": "Modello cancellato", + "modelDeleted": "Modello eliminato", "modelDeleteFailed": "Impossibile eliminare il modello", "noCustomLocationProvided": "Nessuna posizione personalizzata fornita", "convertingModelBegin": "Conversione del modello. 
Attendere prego.", @@ -507,7 +513,7 @@ "img2imgStrength": "Forza da Immagine a Immagine", "toggleLoopback": "Attiva/disattiva elaborazione ricorsiva", "sendTo": "Invia a", - "sendToImg2Img": "Invia a da Immagine a Immagine", + "sendToImg2Img": "Invia a Immagine a Immagine", "sendToUnifiedCanvas": "Invia a Tela Unificata", "copyImageToLink": "Copia l'immagine nel collegamento", "downloadImage": "Scarica l'immagine", @@ -568,7 +574,11 @@ "addingImagesTo": "Aggiungi immagini a", "systemBusy": "Sistema occupato", "unableToInvoke": "Impossibile invocare", - "systemDisconnected": "Sistema disconnesso" + "systemDisconnected": "Sistema disconnesso", + "noControlImageForControlAdapter": "L'adattatore di controllo {{number}} non ha un'immagine di controllo", + "noModelForControlAdapter": "Nessun modello selezionato per l'adattatore di controllo {{number}}.", + "incompatibleBaseModelForControlAdapter": "Il modello dell'adattatore di controllo {{number}} non è compatibile con il modello principale.", + "missingNodeTemplate": "Modello di nodo mancante" }, "enableNoiseSettings": "Abilita le impostazioni del rumore", "cpuNoise": "Rumore CPU", @@ -616,7 +626,16 @@ "beta": "Beta", "enableNodesEditor": "Abilita l'editor dei nodi", "experimental": "Sperimentale", - "autoChangeDimensions": "Aggiorna L/A alle impostazioni predefinite del modello in caso di modifica" + "autoChangeDimensions": "Aggiorna L/A alle impostazioni predefinite del modello in caso di modifica", + "clearIntermediates": "Cancella le immagini intermedie", + "clearIntermediatesDesc3": "Le immagini della galleria non verranno eliminate.", + "clearIntermediatesDesc2": "Le immagini intermedie sono sottoprodotti della generazione, diversi dalle immagini risultanti nella galleria. La cancellazione degli intermedi libererà spazio su disco.", + "intermediatesCleared_one": "Cancellata 1 immagine intermedia", + "intermediatesCleared_many": "Cancellate {{number}} immagini intermedie", + "intermediatesCleared_other": "", + "clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato di Tela Unificata e ControlNet.", + "intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie", + "noIntermediates": "Nessuna immagine intermedia da cancellare" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", @@ -666,7 +685,9 @@ "nodesUnrecognizedTypes": "Impossibile caricare. Il grafico ha tipi di dati non riconosciuti", "nodesNotValidJSON": "JSON non valido", "nodesBrokenConnections": "Impossibile caricare. 
Alcune connessioni sono interrotte.", - "baseModelChangedCleared": "Modello base modificato, cancellato", + "baseModelChangedCleared_one": "Il modello base è stato modificato, cancellato o disabilitato {{number}} sotto-modello incompatibile", + "baseModelChangedCleared_many": "", + "baseModelChangedCleared_other": "", "imageSavingFailed": "Salvataggio dell'immagine non riuscito", "canvasSentControlnetAssets": "Tela inviata a ControlNet & Risorse", "problemCopyingCanvasDesc": "Impossibile copiare la tela", @@ -820,10 +841,10 @@ "showGraphNodes": "Mostra sovrapposizione grafico", "resetWorkflowDesc2": "Reimpostare il flusso di lavoro cancellerà tutti i nodi, i bordi e i dettagli del flusso di lavoro.", "reloadNodeTemplates": "Ricarica i modelli di nodo", - "loadWorkflow": "Carica il flusso di lavoro", + "loadWorkflow": "Importa flusso di lavoro JSON", "resetWorkflow": "Reimposta flusso di lavoro", "resetWorkflowDesc": "Sei sicuro di voler reimpostare questo flusso di lavoro?", - "downloadWorkflow": "Scarica il flusso di lavoro JSON", + "downloadWorkflow": "Esporta flusso di lavoro JSON", "scheduler": "Campionatore", "addNode": "Aggiungi nodo", "sDXLMainModelFieldDescription": "Campo del modello SDXL.", @@ -835,7 +856,51 @@ "executionStateCompleted": "Completato", "boardFieldDescription": "Una bacheca della galleria", "addNodeToolTip": "Aggiungi nodo (Shift+A, Space)", - "sDXLRefinerModelField": "Modello Refiner" + "sDXLRefinerModelField": "Modello Refiner", + "problemReadingMetadata": "Problema durante la lettura dei metadati dall'immagine", + "colorCodeEdgesHelp": "Bordi con codice colore in base ai campi collegati", + "animatedEdges": "Bordi animati", + "snapToGrid": "Aggancia alla griglia", + "validateConnections": "Convalida connessioni e grafico", + "validateConnectionsHelp": "Impedisce che vengano effettuate connessioni non valide e che vengano \"invocati\" grafici non validi", + "fullyContainNodesHelp": "I nodi devono essere completamente all'interno della casella di selezione per essere selezionati", + "fullyContainNodes": "Contenere completamente i nodi da selezionare", + "snapToGridHelp": "Aggancia i nodi alla griglia quando vengono spostati", + "workflowSettings": "Impostazioni Editor del flusso di lavoro", + "colorCodeEdges": "Bordi con codice colore", + "mainModelField": "Modello", + "noOutputRecorded": "Nessun output registrato", + "noFieldsLinearview": "Nessun campo aggiunto alla vista lineare", + "removeLinearView": "Rimuovi dalla vista lineare", + "workflowDescription": "Breve descrizione", + "workflowContact": "Contatto", + "workflowVersion": "Versione", + "workflow": "Flusso di lavoro", + "noWorkflow": "Nessun flusso di lavoro", + "workflowTags": "Tag", + "workflowValidation": "Errore di convalida del flusso di lavoro", + "workflowAuthor": "Autore", + "workflowName": "Nome", + "workflowNotes": "Note", + "unhandledInputProperty": "Proprietà di input non gestita", + "versionUnknown": " Versione sconosciuta", + "unableToValidateWorkflow": "Impossibile convalidare il flusso di lavoro", + "updateApp": "Aggiorna App", + "problemReadingWorkflow": "Problema durante la lettura del flusso di lavoro dall'immagine", + "unableToLoadWorkflow": "Impossibile caricare il flusso di lavoro", + "updateNode": "Aggiorna nodo", + "version": "Versione", + "notes": "Note", + "problemSettingTitle": "Problema nell'impostazione del titolo", + "unkownInvocation": "Tipo di invocazione sconosciuta", + "unknownTemplate": "Modello sconosciuto", + "nodeType": "Tipo di nodo", + "vaeField": "VAE", + 
"unhandledOutputProperty": "Proprietà di output non gestita", + "notesDescription": "Aggiunge note sul tuo flusso di lavoro", + "unknownField": "Campo sconosciuto", + "unknownNode": "Nodo sconosciuto", + "vaeFieldDescription": "Sotto modello VAE." }, "boards": { "autoAddBoard": "Aggiungi automaticamente bacheca", @@ -852,7 +917,8 @@ "searchBoard": "Cerca bacheche ...", "noMatching": "Nessuna bacheca corrispondente", "selectBoard": "Seleziona una Bacheca", - "uncategorized": "Non categorizzato" + "uncategorized": "Non categorizzato", + "downloadBoard": "Scarica la bacheca" }, "controlnet": { "contentShuffleDescription": "Rimescola il contenuto di un'immagine", @@ -861,7 +927,7 @@ "duplicate": "Duplica", "balanced": "Bilanciato", "depthMidasDescription": "Generazione di mappe di profondità usando Midas", - "control": "Controllo", + "control": "ControlNet", "crop": "Ritaglia", "depthMidas": "Profondità (Midas)", "enableControlnet": "Abilita ControlNet", @@ -903,7 +969,7 @@ "normalBaeDescription": "Elaborazione BAE normale", "noneDescription": "Nessuna elaborazione applicata", "saveControlImage": "Salva immagine di controllo", - "toggleControlNet": "Attiva/disattiva questa ControlNet", + "toggleControlNet": "Attiva/disattiva questo ControlNet", "safe": "Sicuro", "colorMapTileSize": "Dimensione piastrella", "ipAdapterImageFallback": "Nessuna immagine dell'Adattatore IP selezionata", @@ -913,7 +979,15 @@ "resetIPAdapterImage": "Reimposta immagine Adattatore IP", "handAndFace": "Mano e faccia", "enableIPAdapter": "Abilita Adattatore IP", - "maxFaces": "Numero massimo di volti" + "maxFaces": "Numero massimo di volti", + "addT2IAdapter": "Aggiungi $t(common.t2iAdapter)", + "controlNetEnabledT2IDisabled": "$t(common.controlNet) abilitato, $t(common.t2iAdapter) disabilitati", + "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) abilitato, $t(common.controlNet) disabilitati", + "addControlNet": "Aggiungi $t(common.controlNet)", + "controlNetT2IMutexDesc": "$t(common.controlNet) e $t(common.t2iAdapter) contemporaneamente non sono attualmente supportati.", + "addIPAdapter": "Aggiungi $t(common.ipAdapter)", + "controlAdapter": "Adattatore di Controllo", + "megaControl": "Mega ControlNet" }, "queue": { "queueFront": "Aggiungi all'inizio della coda", @@ -933,7 +1007,7 @@ "cancelFailed": "Problema durante l'annullamento dell'elemento", "batchQueued": "Lotto aggiunto alla coda", "pauseFailed": "Problema durante la sospensione dell'elaborazione", - "clearFailed": "Problema nella cancellazione la coda", + "clearFailed": "Problema nella cancellazione della coda", "queuedCount": "{{pending}} In attesa", "front": "inizio", "clearSucceeded": "Coda cancellata", @@ -1239,6 +1313,41 @@ "denoisingStrength": "Forza di riduzione del rumore", "concatPromptStyle": "Concatena Prompt & Stile", "loading": "Caricamento...", - "steps": "Passi" + "steps": "Passi", + "refinerStart": "Inizio Affinamento", + "cfgScale": "Scala CFG", + "negStylePrompt": "Prompt Stile negativo", + "refiner": "Affinatore", + "negAestheticScore": "Punteggio estetico negativo", + "useRefiner": "Utilizza l'affinatore", + "refinermodel": "Modello Affinatore", + "posAestheticScore": "Punteggio estetico positivo", + "posStylePrompt": "Prompt Stile positivo" + }, + "metadata": { + "initImage": "Immagine iniziale", + "seamless": "Senza giunture", + "positivePrompt": "Prompt positivo", + "negativePrompt": "Prompt negativo", + "generationMode": "Modalità generazione", + "Threshold": "Livello di soglia del rumore", + "metadata": "Metadati", + "strength": 
"Forza Immagine a Immagine", + "seed": "Seme", + "imageDetails": "Dettagli dell'immagine", + "perlin": "Rumore Perlin", + "model": "Modello", + "noImageDetails": "Nessun dettaglio dell'immagine trovato", + "hiresFix": "Ottimizzazione Alta Risoluzione", + "cfgScale": "Scala CFG", + "fit": "Adatta Immagine a Immagine", + "height": "Altezza", + "variations": "Coppie Peso-Seme", + "noMetaData": "Nessun metadato trovato", + "width": "Larghezza", + "createdBy": "Creato da", + "workflow": "Flusso di lavoro", + "steps": "Passi", + "scheduler": "Campionatore" } } From c0534d65192f6c8f629cb0d5e8a4aaecdd0e6e4d Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:45:07 +0000 Subject: [PATCH 038/202] translationBot(ui): update translation files Updated by "Remove blank strings" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index c219e0f7f7..baa70c26be 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -81,7 +81,6 @@ "modelManager": "Gestore modello", "communityLabel": "Comunità", "nodeEditor": "Editor dei nodi", - "controlNet": "", "statusProcessing": "Elaborazione in corso", "advanced": "Avanzate", "imageFailedToLoad": "Impossibile caricare l'immagine", From 670f3aa165cd8462ce3e28cfc3a91b30594a3ef2 Mon Sep 17 00:00:00 2001 From: Surisen Date: Thu, 12 Oct 2023 12:45:09 +0000 Subject: [PATCH 039/202] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 92.0% (1104 of 1199 strings) translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 92.1% (1105 of 1199 strings) translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 83.2% (998 of 1199 strings) translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 83.0% (996 of 1199 strings) translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 67.5% (810 of 1199 strings) Co-authored-by: Surisen Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- .../frontend/web/public/locales/zh_CN.json | 996 +++++++++++++++--- 1 file changed, 840 insertions(+), 156 deletions(-) diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 1e545eb4b3..6924a86d89 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -2,20 +2,20 @@ "common": { "hotkeysLabel": "快捷键", "languagePickerLabel": "语言", - "reportBugLabel": "提交错误报告", + "reportBugLabel": "反馈错误", "settingsLabel": "设置", "img2img": "图生图", "unifiedCanvas": "统一画布", - "nodes": "节点编辑器", + "nodes": "工作流编辑器", "langSimplifiedChinese": "简体中文", "nodesDesc": "一个基于节点的图像生成系统目前正在开发中。请持续关注关于这一功能的更新。", "postProcessing": "后期处理", - "postProcessDesc1": "Invoke AI 提供各种各样的后期处理功能。图像放大和面部修复在网页界面中已经可用。你可以从文本到图像和图像到图像页面的高级选项菜单中访问它们。你也可以直接使用图像显示上方或查看器中的图像操作按钮处理图像。", + "postProcessDesc1": "Invoke AI 提供各种各样的后期处理功能。图像放大和面部修复在网页界面中已经可用。你可以从文生图和图生图页面的高级选项菜单中访问它们。你也可以直接使用图像显示上方或查看器中的图像操作按钮处理图像。", "postProcessDesc2": "一个专门的界面将很快发布,新的界面能够处理更复杂的后期处理流程。", - "postProcessDesc3": "Invoke AI 命令行界面提供例如Embiggen的各种其他功能。", + "postProcessDesc3": "Invoke AI 命令行界面提供例如 Embiggen 的各种其他功能。", 
"training": "训练", - "trainingDesc1": "一个专门用于从网络UI使用Textual Inversion和Dreambooth训练自己的嵌入模型和检查点的工作流程。", - "trainingDesc2": "InvokeAI已经支持使用主脚本中的Textual Inversion来训练自定义的嵌入模型。", + "trainingDesc1": "一个专门用于从 Web UI 使用 Textual Inversion 和 Dreambooth 训练自己的 Embedding 和 checkpoint 的工作流。", + "trainingDesc2": "InvokeAI 已经支持使用主脚本中的 Textual Inversion 来训练自定义 embeddouring。", "upload": "上传", "close": "关闭", "load": "加载", @@ -23,19 +23,19 @@ "statusDisconnected": "未连接", "statusError": "错误", "statusPreparing": "准备中", - "statusProcessingCanceled": "处理取消", + "statusProcessingCanceled": "处理已取消", "statusProcessingComplete": "处理完成", "statusGenerating": "生成中", - "statusGeneratingTextToImage": "文字到图像生成中", - "statusGeneratingImageToImage": "图像到图像生成中", - "statusGeneratingInpainting": "生成内画中", - "statusGeneratingOutpainting": "生成外画中", + "statusGeneratingTextToImage": "文生图生成中", + "statusGeneratingImageToImage": "图生图生成中", + "statusGeneratingInpainting": "(Inpainting) 内补生成中", + "statusGeneratingOutpainting": "(Outpainting) 外扩生成中", "statusGenerationComplete": "生成完成", "statusIterationComplete": "迭代完成", "statusSavingImage": "图像保存中", - "statusRestoringFaces": "脸部修复中", - "statusRestoringFacesGFPGAN": "脸部修复中 (GFPGAN)", - "statusRestoringFacesCodeFormer": "脸部修复中 (CodeFormer)", + "statusRestoringFaces": "面部修复中", + "statusRestoringFacesGFPGAN": "面部修复中 (GFPGAN)", + "statusRestoringFacesCodeFormer": "面部修复中 (CodeFormer)", "statusUpscaling": "放大中", "statusUpscalingESRGAN": "放大中 (ESRGAN)", "statusLoadingModel": "模型加载中", @@ -52,11 +52,11 @@ "openInNewTab": "在新的标签页打开", "langUkranian": "乌克兰语", "back": "返回", - "statusMergedModels": "模型合并完成", - "statusConvertingModel": "模型变换", - "statusModelConverted": "模型变换完成", + "statusMergedModels": "模型已合并", + "statusConvertingModel": "转换模型中", + "statusModelConverted": "模型转换完成", "statusMergingModels": "合并模型", - "githubLabel": "Github", + "githubLabel": "GitHub", "discordLabel": "Discord", "langPolish": "波兰语", "langBrPortuguese": "葡萄牙语(巴西)", @@ -77,7 +77,14 @@ "linear": "线性的", "batch": "批次管理器", "communityLabel": "社区", - "modelManager": "模型管理器" + "modelManager": "模型管理器", + "nodeEditor": "节点编辑器", + "statusProcessing": "处理中", + "imageFailedToLoad": "无法加载图像", + "lightMode": "浅色模式", + "learnMore": "了解更多", + "darkMode": "深色模式", + "advanced": "高级" }, "gallery": { "generations": "生成的图像", @@ -87,25 +94,32 @@ "galleryImageSize": "预览大小", "galleryImageResetSize": "重置预览大小", "gallerySettings": "预览设置", - "maintainAspectRatio": "保持比例", + "maintainAspectRatio": "保持纵横比", "autoSwitchNewImages": "自动切换到新图像", "singleColumnLayout": "单列布局", - "allImagesLoaded": "所有图像加载完成", + "allImagesLoaded": "所有图像已加载", "loadMore": "加载更多", - "noImagesInGallery": "图库中无图像", + "noImagesInGallery": "无图像可用于显示", "deleteImage": "删除图片", "deleteImageBin": "被删除的图片会发送到你操作系统的回收站。", - "deleteImagePermanent": "无法恢复删除的图片。", + "deleteImagePermanent": "删除的图片无法被恢复。", "images": "图片", "assets": "素材", - "autoAssignBoardOnClick": "点击后自动分配情景板" + "autoAssignBoardOnClick": "点击后自动分配面板", + "featuresWillReset": "如果您删除该图像,这些功能会立即被重置。", + "loading": "加载中", + "unableToLoad": "无法加载图库", + "currentlyInUse": "该图像目前在以下功能中使用:", + "copy": "复制", + "download": "下载", + "setCurrentImage": "设为当前图像" }, "hotkeys": { - "keyboardShortcuts": "快捷方式", - "appHotkeys": "应用快捷方式", - "generalHotkeys": "一般快捷方式", - "galleryHotkeys": "图库快捷方式", - "unifiedCanvasHotkeys": "统一画布快捷方式", + "keyboardShortcuts": "键盘快捷键", + "appHotkeys": "应用快捷键", + "generalHotkeys": "一般快捷键", + "galleryHotkeys": "图库快捷键", + "unifiedCanvasHotkeys": "统一画布快捷键", "invoke": { "title": "Invoke", "desc": "生成图像" @@ -115,31 +129,31 @@ 
"desc": "取消图像生成" }, "focusPrompt": { - "title": "打开提示框", - "desc": "打开提示文本框" + "title": "打开提示词框", + "desc": "打开提示词文本框" }, "toggleOptions": { "title": "切换选项卡", - "desc": "打开或关闭选项卡" + "desc": "打开或关闭选项浮窗" }, "pinOptions": { "title": "常开选项卡", - "desc": "保持选项卡常开" + "desc": "保持选项浮窗常开" }, "toggleViewer": { - "title": "切换图像视图", - "desc": "打开或关闭图像视图" + "title": "切换图像查看器", + "desc": "打开或关闭图像查看器" }, "toggleGallery": { "title": "切换图库", "desc": "打开或关闭图库" }, "maximizeWorkSpace": { - "title": "工作台最大化", + "title": "工作区最大化", "desc": "关闭所有浮窗,将工作区域最大化" }, "changeTabs": { - "title": "切换卡片", + "title": "切换选项卡", "desc": "切换到另一个工作区" }, "consoleToggle": { @@ -147,7 +161,7 @@ "desc": "打开或关闭命令行" }, "setPrompt": { - "title": "使用提示", + "title": "使用当前提示词", "desc": "使用当前图像的提示词" }, "setSeed": { @@ -155,12 +169,12 @@ "desc": "使用当前图像的种子" }, "setParameters": { - "title": "使用所有参数", + "title": "使用当前参数", "desc": "使用当前图像的所有参数" }, "restoreFaces": { - "title": "脸部修复", - "desc": "对当前图像进行脸部修复" + "title": "面部修复", + "desc": "对当前图像进行面部修复" }, "upscale": { "title": "放大", @@ -171,8 +185,8 @@ "desc": "显示当前图像的元数据" }, "sendToImageToImage": { - "title": "送往图像到图像", - "desc": "将当前图像送往图像到图像" + "title": "发送到图生图", + "desc": "发送当前图像到图生图" }, "deleteImage": { "title": "删除图像", @@ -184,23 +198,23 @@ }, "previousImage": { "title": "上一张图像", - "desc": "显示相册中的上一张图像" + "desc": "显示图库中的上一张图像" }, "nextImage": { "title": "下一张图像", - "desc": "显示相册中的下一张图像" + "desc": "显示图库中的下一张图像" }, "toggleGalleryPin": { "title": "切换图库常开", "desc": "开关图库在界面中的常开模式" }, "increaseGalleryThumbSize": { - "title": "增大预览大小", - "desc": "增大图库中预览的大小" + "title": "增大预览尺寸", + "desc": "增大图库中预览的尺寸" }, "decreaseGalleryThumbSize": { - "title": "减小预览大小", - "desc": "减小图库中预览的大小" + "title": "缩小预览尺寸", + "desc": "缩小图库中预览的尺寸" }, "selectBrush": { "title": "选择刷子", @@ -228,19 +242,19 @@ }, "moveTool": { "title": "移动工具", - "desc": "在画布上移动" + "desc": "画布允许导航" }, "fillBoundingBox": { "title": "填充选择区域", "desc": "在选择区域中填充刷子颜色" }, "eraseBoundingBox": { - "title": "取消选择区域", - "desc": "将选择区域抹除" + "title": "擦除选择框", + "desc": "将选择区域擦除" }, "colorPicker": { - "title": "颜色提取工具", - "desc": "选择颜色提取工具" + "title": "选择颜色拾取工具", + "desc": "选择画布颜色拾取工具" }, "toggleSnap": { "title": "切换网格对齐", @@ -256,7 +270,7 @@ }, "clearMask": { "title": "清除遮罩", - "desc": "清除整个遮罩层" + "desc": "清除整个遮罩" }, "hideMask": { "title": "隐藏遮罩", @@ -272,7 +286,7 @@ }, "saveToGallery": { "title": "保存至图库", - "desc": "将画板当前内容保存至图库" + "desc": "将画布当前内容保存至图库" }, "copyToClipboard": { "title": "复制到剪贴板", @@ -292,7 +306,7 @@ }, "resetView": { "title": "重置视图", - "desc": "重置画板视图" + "desc": "重置画布视图" }, "previousStagingImage": { "title": "上一张暂存图像", @@ -315,7 +329,7 @@ "modelManager": { "modelManager": "模型管理器", "model": "模型", - "modelAdded": "模型已添加", + "modelAdded": "已添加模型", "modelUpdated": "模型已更新", "modelEntryDeleted": "模型已删除", "cannotUseSpaces": "不能使用空格", @@ -330,7 +344,7 @@ "config": "配置", "configValidationMsg": "模型配置文件的路径。", "modelLocation": "模型位置", - "modelLocationValidationMsg": "模型文件的本地路径。", + "modelLocationValidationMsg": "提供 Diffusers 模型文件的本地存储路径", "vaeLocation": "VAE 位置", "vaeLocationValidationMsg": "VAE 文件的路径。", "width": "宽度", @@ -340,18 +354,18 @@ "addModel": "添加模型", "updateModel": "更新模型", "availableModels": "可用模型", - "search": "搜索", + "search": "检索", "load": "加载", "active": "活跃", "notLoaded": "未加载", "cached": "缓存", "checkpointFolder": "模型检查点文件夹", - "clearCheckpointFolder": "清除模型检查点文件夹", + "clearCheckpointFolder": "清除 Checkpoint 模型文件夹", "findModels": "寻找模型", "modelsFound": "找到的模型", "selectFolder": "选择文件夹", "selected": "已选择", - "selectAll": "选择所有", + 
"selectAll": "全选", "deselectAll": "取消选择所有", "showExisting": "显示已存在", "addSelected": "添加选择", @@ -359,38 +373,38 @@ "delete": "删除", "deleteModel": "删除模型", "deleteConfig": "删除配置", - "deleteMsg1": "您确定要将这个模型从 InvokeAI 删除吗?", - "deleteMsg2": "这不会从磁盘中删除模型检查点文件。如果您愿意,可以重新添加它们。", - "convertToDiffusersHelpText1": "模型会被转换成Diffusers格式。", - "convertToDiffusersHelpText2": "这个过程会替换你的模型管理器的入口中相同Diffusers版本的模型。", + "deleteMsg1": "您确定要将该模型从 InvokeAI 删除吗?", + "deleteMsg2": "磁盘中放置在 InvokeAI 根文件夹的 checkpoint 文件会被删除。若你正在使用自定义目录,则不会从磁盘中删除他们。", + "convertToDiffusersHelpText1": "模型会被转换成 🧨 Diffusers 格式。", + "convertToDiffusersHelpText2": "这个过程会替换你的模型管理器的入口中相同 Diffusers 版本的模型。", "mergedModelSaveLocation": "保存路径", "mergedModelCustomSaveLocation": "自定义路径", - "checkpointModels": "检查点(Checkpoints)", + "checkpointModels": "Checkpoints", "formMessageDiffusersVAELocation": "VAE 路径", - "convertToDiffusersHelpText4": "这是一次性的处理过程。根据你电脑的配置不同耗时30-60秒。", + "convertToDiffusersHelpText4": "这是一次性的处理过程。根据你电脑的配置不同耗时 30 - 60 秒。", "convertToDiffusersHelpText6": "你希望转换这个模型吗?", "interpolationType": "插值类型", - "modelTwo": "模型2", - "modelThree": "模型3", - "v2_768": "版本2(768px)", + "modelTwo": "模型 2", + "modelThree": "模型 3", + "v2_768": "v2 (768px)", "mergedModelName": "合并的模型名称", - "alpha": "透明度", + "alpha": "", "allModels": "全部模型", - "convertToDiffusers": "转换为Diffusers", + "convertToDiffusers": "转换为 Diffusers", "formMessageDiffusersModelLocation": "Diffusers 模型路径", "custom": "自定义", - "formMessageDiffusersVAELocationDesc": "如果没有特别指定,InvokeAI会从上面指定的模型路径中寻找VAE文件。", - "safetensorModels": "安全张量(SafeTensors)", + "formMessageDiffusersVAELocationDesc": "如果没有特别指定,InvokeAI 会从上面指定的模型路径中寻找 VAE 文件。", + "safetensorModels": "SafeTensors", "modelsMerged": "模型合并完成", "mergeModels": "合并模型", - "modelOne": "模型1", - "diffusersModels": "扩散器(Diffusers)", + "modelOne": "模型 1", + "diffusersModels": "Diffusers", "scanForModels": "扫描模型", "repo_id": "项目 ID", "repoIDValidationMsg": "你的模型的在线项目地址", - "v1": "版本1", + "v1": "v1", "invokeRoot": "InvokeAI 文件夹", - "inpainting": "版本1(Inpainting)", + "inpainting": "v1 Inpainting", "customSaveLocation": "自定义保存路径", "scanAgain": "重新扫描", "customConfig": "个性化配置", @@ -405,28 +419,28 @@ "addCheckpointModel": "添加 Checkpoint / Safetensor 模型", "addDiffuserModel": "添加 Diffusers 模型", "vaeRepoID": "VAE 项目 ID", - "vaeRepoIDValidationMsg": "你的模型的在线VAE项目地址", + "vaeRepoIDValidationMsg": "VAE 模型在线仓库地址", "selectAndAdd": "选择下表中的模型并添加", - "noModelsFound": "没有找到模型", + "noModelsFound": "未有找到模型", "formMessageDiffusersModelLocationDesc": "请至少输入一个。", "convertToDiffusersSaveLocation": "保存路径", - "convertToDiffusersHelpText3": "您在磁盘上的检查点(checkpoint)文件不会被删除或修改。如果需要,您可以再次将检查点添加到模型管理器中。", - "v2_base": "版本2(512px)", - "convertToDiffusersHelpText5": "请确认你有足够的此版空间,模型大小通常在4GB-7GB之间。", + "convertToDiffusersHelpText3": "磁盘中放置在 InvokeAI 根文件夹的 checkpoint 文件会被删除. 
若位于自定义目录, 则不会受影响.", + "v2_base": "v2 (512px)", + "convertToDiffusersHelpText5": "请确认你有足够的磁盘空间,模型大小通常在 2 GB - 7 GB 之间。", "convert": "转换", "merge": "合并", "pickModelType": "选择模型类型", "addDifference": "增加差异", "none": "无", - "inverseSigmoid": "反Sigmoid函数", + "inverseSigmoid": "反 Sigmoid 函数", "weightedSum": "加权求和", - "modelMergeAlphaHelp": "透明度参数控制模型的混合强度。较低的透明度值会导致第二个模型的影响减弱。", - "sigmoid": "Sigmoid函数", - "modelMergeInterpAddDifferenceHelp": "在这种模式下,首先从模型2中减去模型3,得到的版本再用上述值的透明度与模型1进行混合。", + "modelMergeAlphaHelp": "Alpha 参数控制模型的混合强度。较低的 Alpha 值会导致第二个模型的影响减弱。", + "sigmoid": "Sigmoid 函数", + "modelMergeInterpAddDifferenceHelp": "在这种模式下,首先从模型 2 中减去模型 3,得到的版本再用上述的 Alpha 值与模型1进行混合。", "modelsSynced": "模型已同步", "modelSyncFailed": "模型同步失败", "modelDeleteFailed": "模型删除失败", - "syncModelsDesc": "如果您的模型与后端不同步, 您可以使用此选项刷新它们. 便于您在应用程序启动的情况下手动更新models.yaml文件或将模型添加到InvokeAI根文件夹.", + "syncModelsDesc": "如果您的模型与后端不同步,您可以使用此选项刷新它们。便于您在应用程序启动的情况下手动更新 models.yaml 文件或将模型添加到 InvokeAI 根文件夹。", "selectModel": "选择模型", "importModels": "导入模型", "settings": "设置", @@ -437,7 +451,18 @@ "modelConversionFailed": "模型转换失败", "modelsMergeFailed": "模型融合失败", "baseModel": "基底模型", - "convertingModelBegin": "模型转换中. 请稍候." + "convertingModelBegin": "模型转换中. 请稍候.", + "vae": "", + "noModels": "未找到模型", + "predictionType": "预测类型(适用于 Stable Diffusion 2.x 模型和部分 Stable Diffusion 1.x 模型)", + "quickAdd": "快速添加", + "simpleModelDesc": "提供一个指向本地 Diffusers 模型的路径,本地 checkpoint / safetensors 模型或一个HuggingFace 项目 ID,又或者一个 checkpoint/diffusers 模型链接。", + "advanced": "高级", + "useCustomConfig": "使用自定义配置", + "closeAdvanced": "关闭高级", + "modelType": "模型类别", + "customConfigFileLocation": "自定义配置文件目录", + "variant": "变体" }, "parameters": { "images": "图像", @@ -453,78 +478,118 @@ "variations": "变种", "variationAmount": "变种数量", "seedWeights": "种子权重", - "faceRestoration": "脸部修复", - "restoreFaces": "修复脸部", + "faceRestoration": "面部修复", + "restoreFaces": "修复面部", "type": "种类", "strength": "强度", "upscaling": "放大", - "upscale": "放大", + "upscale": "放大 (Shift + U)", "upscaleImage": "放大图像", "scale": "等级", "otherOptions": "其他选项", "seamlessTiling": "无缝拼贴", - "hiresOptim": "高清优化", - "imageFit": "使生成图像长宽适配原图像", - "codeformerFidelity": "保真", + "hiresOptim": "高分辨率优化", + "imageFit": "使生成图像长宽适配初始图像", + "codeformerFidelity": "保真度", "scaleBeforeProcessing": "处理前缩放", "scaledWidth": "缩放宽度", "scaledHeight": "缩放长度", - "infillMethod": "填充法", + "infillMethod": "填充方法", "tileSize": "方格尺寸", "boundingBoxHeader": "选择区域", "seamCorrectionHeader": "接缝修正", "infillScalingHeader": "内填充和缩放", - "img2imgStrength": "图像到图像强度", + "img2imgStrength": "图生图强度", "toggleLoopback": "切换环回", "sendTo": "发送到", - "sendToImg2Img": "发送到图像到图像", + "sendToImg2Img": "发送到图生图", "sendToUnifiedCanvas": "发送到统一画布", "copyImageToLink": "复制图像链接", "downloadImage": "下载图像", - "openInViewer": "在视图中打开", - "closeViewer": "关闭视图", + "openInViewer": "在查看器中打开", + "closeViewer": "关闭查看器", "usePrompt": "使用提示", "useSeed": "使用种子", "useAll": "使用所有参数", - "useInitImg": "使用原图像", + "useInitImg": "使用初始图像", "info": "信息", - "initialImage": "原图像", - "showOptionsPanel": "显示选项浮窗", + "initialImage": "初始图像", + "showOptionsPanel": "显示侧栏浮窗 (O 或 T)", "seamlessYAxis": "Y轴", "seamlessXAxis": "X轴", "boundingBoxWidth": "边界框宽度", "boundingBoxHeight": "边界框高度", - "denoisingStrength": "降噪强度", + "denoisingStrength": "去噪强度", "vSymmetryStep": "纵向对称步数", "cancel": { "immediate": "立即取消", "isScheduled": "取消中", - "schedule": "当前步骤后取消", - "setType": "设置取消类型" + "schedule": "当前迭代后取消", + "setType": "设定取消类型", + "cancel": "取消" }, "copyImage": "复制图片", "showPreview": "显示预览", "symmetry": "对称性", 
"positivePromptPlaceholder": "正向提示词", "negativePromptPlaceholder": "负向提示词", - "scheduler": "计划表", + "scheduler": "调度器", "general": "通用", "hiresStrength": "高分辨强度", - "hidePreview": "影藏预览", + "hidePreview": "隐藏预览", "hSymmetryStep": "横向对称步数", "imageToImage": "图生图", "noiseSettings": "噪音", "controlNetControlMode": "控制模式", - "maskAdjustmentsHeader": "调整遮罩", - "maskBlur": "模糊遮罩", - "maskBlurMethod": "遮罩模糊方式", - "aspectRatio": "比率", + "maskAdjustmentsHeader": "遮罩调整", + "maskBlur": "模糊", + "maskBlurMethod": "模糊方式", + "aspectRatio": "纵横比", "seamLowThreshold": "降低", - "seamHighThreshold": "提升" + "seamHighThreshold": "提升", + "invoke": { + "noNodesInGraph": "节点图中无节点", + "noModelSelected": "无已选中的模型", + "invoke": "调用", + "systemBusy": "系统繁忙", + "noInitialImageSelected": "无选中的初始图像", + "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} 缺失输入", + "unableToInvoke": "无法调用", + "systemDisconnected": "系统已断开连接", + "missingNodeTemplate": "缺失节点模板", + "missingFieldTemplate": "缺失模板", + "addingImagesTo": "添加图像到", + "noPrompts": "没有已生成的提示词", + "readyToInvoke": "准备调用", + "noControlImageForControlAdapter": "Control Adapter {{number}} 没有控制图像", + "noModelForControlAdapter": "Control Adapter {{number}} 没有选择模型。", + "incompatibleBaseModelForControlAdapter": "Control Adapter {{number}} 与主模型不匹配。" + }, + "patchmatchDownScaleSize": "缩小", + "coherenceSteps": "步数", + "clipSkip": "CLIP 跳过层", + "compositingSettingsHeader": "合成设置", + "useCpuNoise": "使用 CPU 噪声", + "coherenceStrength": "强度", + "enableNoiseSettings": "启用噪声设置", + "coherenceMode": "模式", + "cpuNoise": "CPU 噪声", + "gpuNoise": "GPU 噪声", + "clipSkipWithLayerCount": "CLIP 跳过 {{layerCount}} 层", + "coherencePassHeader": "一致性层", + "manualSeed": "手动设定种子", + "imageActions": "图像操作", + "randomSeed": "随机种子", + "iterations": "迭代数", + "isAllowedToUpscale": { + "useX2Model": "图像太大,无法使用 x4 模型,使用 x2 模型作为替代", + "tooLarge": "图像太大无法进行放大,请选择更小的图像" + }, + "iterationsWithCount_other": "{{count}} 次迭代生成" }, "settings": { "models": "模型", - "displayInProgress": "显示进行中的图像", + "displayInProgress": "显示处理中的图像", "saveSteps": "每n步保存图像", "confirmOnDelete": "删除时确认", "displayHelpIcons": "显示帮助按钮", @@ -532,9 +597,9 @@ "resetWebUI": "重置网页界面", "resetWebUIDesc1": "重置网页只会重置浏览器中缓存的图像和设置,不会删除任何图像。", "resetWebUIDesc2": "如果图像没有显示在图库中,或者其他东西不工作,请在GitHub上提交问题之前尝试重置。", - "resetComplete": "网页界面已重置。刷新页面以重新加载。", - "showProgressInViewer": "在视口中展示过程图片", - "antialiasProgressImages": "对过程图片抗锯齿", + "resetComplete": "网页界面已重置。", + "showProgressInViewer": "在查看器中展示过程图片", + "antialiasProgressImages": "对过程图像应用抗锯齿", "generation": "生成", "ui": "用户界面", "useSlidersForAll": "对所有参数使用滑动条设置", @@ -546,21 +611,24 @@ "enableNodesEditor": "启用节点编辑器", "favoriteSchedulersPlaceholder": "没有偏好的采样算法", "showAdvancedOptions": "显示进阶选项", - "favoriteSchedulers": "采样算法偏好" + "favoriteSchedulers": "采样算法偏好", + "autoChangeDimensions": "更改时将宽/高更新为模型默认值", + "beta": "", + "experimental": "实验性" }, "toast": { "tempFoldersEmptied": "临时文件夹已清空", "uploadFailed": "上传失败", "uploadFailedUnableToLoadDesc": "无法加载文件", - "downloadImageStarted": "图像下载已开始", + "downloadImageStarted": "图像已开始下载", "imageCopied": "图像已复制", "imageLinkCopied": "图像链接已复制", "imageNotLoaded": "没有加载图像", "imageNotLoadedDesc": "找不到图片", "imageSavedToGallery": "图像已保存到图库", "canvasMerged": "画布已合并", - "sentToImageToImage": "已送往图像到图像", - "sentToUnifiedCanvas": "已送往统一画布", + "sentToImageToImage": "已发送到图生图", + "sentToUnifiedCanvas": "已发送到统一画布", "parametersSet": "参数已设定", "parametersNotSet": "参数未设定", "parametersNotSetDesc": "此图像不存在元数据。", @@ -569,11 +637,11 @@ "seedSet": "种子已设定", "seedNotSet": "种子未设定", "seedNotSetDesc": 
"无法找到该图像的种子。", - "promptSet": "提示已设定", + "promptSet": "提示词已设定", "promptNotSet": "提示未设定", "promptNotSetDesc": "无法找到该图像的提示。", "upscalingFailed": "放大失败", - "faceRestoreFailed": "脸部修复失败", + "faceRestoreFailed": "面部修复失败", "metadataLoadFailed": "加载元数据失败", "initialImageSet": "初始图像已设定", "initialImageNotSet": "初始图像未设定", @@ -582,29 +650,63 @@ "uploadFailedInvalidUploadDesc": "必须是单张的 PNG 或 JPEG 图片", "disconnected": "服务器断开", "connected": "服务器连接", - "parameterSet": "参数已设置", - "parameterNotSet": "参数未设置", + "parameterSet": "参数已设定", + "parameterNotSet": "参数未设定", "serverError": "服务器错误", "canceled": "处理取消", - "nodesLoaded": "节点图已加载", - "nodesSaved": "节点图已保存", + "nodesLoaded": "节点已加载", + "nodesSaved": "节点已保存", "problemCopyingImage": "无法复制图像", - "nodesCorruptedGraph": "无法加载. 节点图似乎已损坏.", - "nodesBrokenConnections": "无法加载. 部分链接已断开.", - "nodesUnrecognizedTypes": "无法加载. 节点图有无法识别的节点类型", + "nodesCorruptedGraph": "无法加载。节点图似乎已损坏。", + "nodesBrokenConnections": "无法加载。部分连接已断开。", + "nodesUnrecognizedTypes": "无法加载。节点图有无法识别的节点类型", "nodesNotValidJSON": "无效的 JSON", "nodesNotValidGraph": "无效的 InvokeAi 节点图", - "nodesCleared": "节点图已清空", - "nodesLoadedFailed": "节点图加载失败" + "nodesCleared": "节点已清空", + "nodesLoadedFailed": "节点图加载失败", + "modelAddedSimple": "已添加模型", + "modelAdded": "已添加模型: {{modelName}}", + "baseModelChangedCleared_other": "", + "imageSavingFailed": "图像保存失败", + "canvasSentControlnetAssets": "画布已发送到 ControlNet & 素材", + "problemCopyingCanvasDesc": "无法导出基础层", + "loadedWithWarnings": "已加载带有警告的工作流", + "setInitialImage": "设为初始图像", + "canvasCopiedClipboard": "画布已复制到剪贴板", + "setControlImage": "设为控制图像", + "setNodeField": "设为节点字段", + "problemSavingMask": "保存遮罩时出现问题", + "problemSavingCanvasDesc": "无法导出基础层", + "maskSavedAssets": "遮罩已保存到素材", + "modelAddFailed": "模型添加失败", + "problemDownloadingCanvas": "下载画布时出现问题", + "problemMergingCanvas": "合并画布时出现问题", + "setCanvasInitialImage": "设为画布初始图像", + "imageUploaded": "图像已上传", + "addedToBoard": "已添加到面板", + "workflowLoaded": "工作流已加载", + "problemImportingMaskDesc": "无法导出遮罩", + "problemCopyingCanvas": "复制画布时出现问题", + "problemSavingCanvas": "保存画布时出现问题", + "canvasDownloaded": "画布已下载", + "setIPAdapterImage": "设为 IP Adapter 图像", + "problemMergingCanvasDesc": "无法导出基础层", + "problemDownloadingCanvasDesc": "无法导出基础层", + "problemSavingMaskDesc": "无法导出遮罩", + "imageSaved": "图像已保存", + "maskSentControlnetAssets": "遮罩已发送到 ControlNet & 素材", + "canvasSavedGallery": "画布已保存到图库", + "imageUploadFailed": "图像上传失败", + "problemImportingMask": "导入遮罩时出现问题" }, "unifiedCanvas": { "layer": "图层", "base": "基础层", - "mask": "遮罩层层", - "maskingOptions": "遮罩层选项", - "enableMask": "启用遮罩层", - "preserveMaskedArea": "保留遮罩层区域", - "clearMask": "清除遮罩层", + "mask": "遮罩", + "maskingOptions": "遮罩选项", + "enableMask": "启用遮罩", + "preserveMaskedArea": "保留遮罩区域", + "clearMask": "清除遮罩", "brush": "刷子", "eraser": "橡皮擦", "fillBoundingBox": "填充选择区域", @@ -655,7 +757,9 @@ "betaDarkenOutside": "暗化外部区域", "betaLimitToBox": "限制在框内", "betaPreserveMasked": "保留遮罩层", - "antialiasing": "抗锯齿" + "antialiasing": "抗锯齿", + "showResultsOn": "显示结果 (开)", + "showResultsOff": "显示结果 (关)" }, "accessibility": { "modelSelect": "模型选择", @@ -665,39 +769,41 @@ "useThisParameter": "使用此参数", "uploadImage": "上传图片", "previousImage": "上一张图片", - "copyMetadataJson": "复制JSON元数据", - "exitViewer": "退出视口", + "copyMetadataJson": "复制 JSON 元数据", + "exitViewer": "退出查看器", "zoomIn": "放大", "zoomOut": "缩小", "rotateCounterClockwise": "逆时针旋转", "rotateClockwise": "顺时针旋转", "flipHorizontally": "水平翻转", "flipVertically": "垂直翻转", - "showOptionsPanel": "显示选项面板", - "toggleLogViewer": "切换日志浏览器", - 
"modifyConfig": "修改设置", + "showOptionsPanel": "显示侧栏浮窗", + "toggleLogViewer": "切换日志查看器", + "modifyConfig": "修改配置", "toggleAutoscroll": "切换自动缩放", - "menu": "菜单" + "menu": "菜单", + "showGalleryPanel": "显示图库浮窗", + "loadMore": "加载更多" }, "ui": { "showProgressImages": "显示处理中的图片", "hideProgressImages": "隐藏处理中的图片", - "swapSizes": "XY尺寸互换", - "lockRatio": "锁定比率" + "swapSizes": "XY 尺寸互换", + "lockRatio": "锁定纵横比" }, "tooltip": { "feature": { - "prompt": "这是提示词区域。提示词包括生成对象和风格术语。您也可以在提示中添加权重(Token重要性),但命令行命令和参数不起作用。", - "imageToImage": "图生图模式加载任何图像作为初始图像,然后与提示一起用于生成新图像。值越高,结果图像的变化就越大。可能的值为0.0到1.0,建议的范围是0.25到0.75", - "upscale": "使用 ESRGAN可以在图片生成后立即放大图片。", - "variations": "尝试将变化值设置在0.1到1.0之间,以更改给定种子的结果。种子的有趣变化在0.1到0.3之间。", + "prompt": "这是提示词区域。提示词包括生成对象和风格术语。您也可以在提示中添加权重(Token 的重要性),但命令行命令和参数不起作用。", + "imageToImage": "图生图模式加载任何图像作为初始图像,然后与提示一起用于生成新图像。值越高,结果图像的变化就越大。可能的值为 0.0 到 1.0,建议的范围是 0.25 到 0.75", + "upscale": "使用 ESRGAN 可以在图片生成后立即放大图片。", + "variations": "尝试将变化值设置在 0.1 到 1.0 之间,以更改给定种子的结果。种子的变化在 0.1 到 0.3 之间会很有趣。", "boundingBox": "边界框的高和宽的设定对文生图和图生图模式是一样的,只有边界框中的区域会被处理。", - "other": "这些选项将为Invoke启用替代处理模式。 \"无缝平铺\"将在输出中创建重复图案。 \"高分辨率\"是通过img2img进行两步生成:当您想要更大、更连贯且不带伪影的图像时,请使用此设置。这将比通常的txt2img需要更长的时间。", - "faceCorrection": "使用GFPGAN或Codeformer进行人脸校正:该算法会检测图像中的人脸并纠正任何缺陷。较高的值将更改图像,并产生更有吸引力的人脸。在保留较高保真度的情况下使用Codeformer将导致更强的人脸校正,同时也会保留原始图像。", + "other": "这些选项将为 Invoke 启用替代处理模式。 \"无缝拼贴\" 将在输出中创建重复图案。\"高分辨率\" 是通过图生图进行两步生成:当您想要更大、更连贯且不带伪影的图像时,请使用此设置。这将比通常的文生图需要更长的时间。", + "faceCorrection": "使用 GFPGAN 或 Codeformer 进行人脸校正:该算法会检测图像中的人脸并纠正任何缺陷。较高的值将更改图像,并产生更有吸引力的人脸。在保留较高保真度的情况下使用 Codeformer 将导致更强的人脸校正,同时也会保留原始图像。", "gallery": "图片库展示输出文件夹中的图片,设置和文件一起储存,可以通过内容菜单访问。", - "seed": "种子值影响形成图像的初始噪声。您可以使用以前图像中已存在的种子。 “噪声阈值”用于减轻在高CFG值(尝试0-10范围)下的伪像,并使用Perlin在生成过程中添加Perlin噪声:这两者都可以为您的输出添加变化。", + "seed": "种子值影响形成图像的初始噪声。您可以使用以前图像中已存在的种子。 “噪声阈值”用于减轻在高 CFG 等级(尝试 0 - 10 范围)下的伪像,并使用 Perlin 在生成过程中添加 Perlin 噪声:这两者都可以为您的输出添加变化。", "seamCorrection": "控制在画布上生成的图像之间出现的可见接缝的处理方式。", - "infillAndScaling": "管理填充方法(用于画布的掩模或擦除区域)和缩放(对于较小的边界框大小非常有用)。" + "infillAndScaling": "管理填充方法(用于画布的遮罩或擦除区域)和缩放(对于较小的边界框大小非常有用)。" } }, "nodes": { @@ -715,6 +821,584 @@ "showLegendNodes": "显示字段类型图例", "hideLegendNodes": "隐藏字段类型图例", "showGraphNodes": "显示节点图信息", - "downloadWorkflow": "下载节点图 JSON" + "downloadWorkflow": "下载节点图 JSON", + "workflowDescription": "简述", + "versionUnknown": " 未知版本", + "noNodeSelected": "无选中的节点", + "addNode": "添加节点", + "unableToValidateWorkflow": "无法验证工作流", + "noOutputRecorded": "无已记录输出", + "updateApp": "升级 App", + "colorCodeEdgesHelp": "根据连接区域对边缘编码颜色", + "workflowContact": "联系", + "animatedEdges": "边缘动效", + "nodeTemplate": "节点模板", + "pickOne": "选择一个", + "unableToLoadWorkflow": "无法验证工作流", + "snapToGrid": "对齐网格", + "noFieldsLinearview": "线性视图中未添加任何字段", + "nodeSearch": "检索节点", + "version": "版本", + "validateConnections": "验证连接和节点图", + "inputMayOnlyHaveOneConnection": "输入仅能有一个连接", + "notes": "节点", + "nodeOutputs": "节点输出", + "currentImageDescription": "在节点编辑器中显示当前图像", + "validateConnectionsHelp": "防止建立无效连接和调用无效节点图", + "problemSettingTitle": "设定标题时出现问题", + "noConnectionInProgress": "没有正在进行的连接", + "workflowVersion": "版本", + "noConnectionData": "无连接数据", + "fieldTypesMustMatch": "类型必须匹配", + "workflow": "工作流", + "unkownInvocation": "未知调用类型", + "animatedEdgesHelp": "为选中边缘和其连接的选中节点的边缘添加动画", + "unknownTemplate": "未知模板", + "removeLinearView": "从线性视图中移除", + "workflowTags": "标签", + "fullyContainNodesHelp": "节点必须完全位于选择框中才能被选中", + "workflowValidation": "工作流验证错误", + "noMatchingNodes": "无相匹配的节点", + "executionStateInProgress": "处理中", + "noFieldType": "无字段类型", + 
"executionStateError": "错误", + "executionStateCompleted": "已完成", + "workflowAuthor": "作者", + "currentImage": "当前图像", + "workflowName": "名称", + "cannotConnectInputToInput": "无法将输入连接到输入", + "workflowNotes": "节点", + "cannotConnectOutputToOutput": "无法将输出连接到输出", + "connectionWouldCreateCycle": "连接将创建一个循环", + "cannotConnectToSelf": "无法连接自己", + "notesDescription": "添加有关您的工作流的节点", + "unknownField": "未知", + "colorCodeEdges": "边缘颜色编码", + "unknownNode": "未知节点", + "addNodeToolTip": "添加节点 (Shift+A, Space)", + "loadingNodes": "加载节点中...", + "snapToGridHelp": "移动时将节点与网格对齐", + "workflowSettings": "工作流编辑器设置", + "booleanPolymorphicDescription": "布尔集合。", + "scheduler": "调度器", + "inputField": "输入", + "controlFieldDescription": "节点间传递的控制信息。", + "skippingUnknownOutputType": "跳过未知类型的输出", + "latentsFieldDescription": "Latents 可以在节点间传递。", + "denoiseMaskFieldDescription": "去噪遮罩可以在节点间传递", + "missingTemplate": "缺失模板", + "outputSchemaNotFound": "未找到输出模式", + "latentsPolymorphicDescription": "Latents 可以在节点间传递。", + "colorFieldDescription": "一种 RGBA 颜色。", + "mainModelField": "模型", + "unhandledInputProperty": "未处理的输入属性", + "maybeIncompatible": "可能与已安装的不兼容", + "collectionDescription": "待办事项", + "skippingReservedFieldType": "跳过保留类型", + "booleanCollectionDescription": "布尔集合。", + "sDXLMainModelFieldDescription": "SDXL 模型。", + "boardField": "面板", + "problemReadingWorkflow": "从图像读取工作流时出现问题", + "sourceNode": "源节点", + "nodeOpacity": "节点不透明度", + "collectionItemDescription": "待办事项", + "integerDescription": "整数 (Integer) 是没有与小数点的数字。", + "outputField": "输出", + "skipped": "跳过", + "updateNode": "更新节点", + "sDXLRefinerModelFieldDescription": "待办事项", + "imagePolymorphicDescription": "一个图像合集。", + "doesNotExist": "不存在", + "unableToParseNode": "无法解析节点", + "controlCollection": "控制合集", + "collectionItem": "项目合集", + "controlCollectionDescription": "节点间传递的控制信息。", + "skippedReservedInput": "跳过保留的输入", + "outputFields": "输出", + "edge": "边缘", + "inputNode": "输入节点", + "enumDescription": "枚举 (Enums) 可能是多个选项的一个数值。", + "loRAModelFieldDescription": "待办事项", + "imageField": "图像", + "skippedReservedOutput": "跳过保留的输出", + "noWorkflow": "无工作流", + "colorCollectionDescription": "待办事项", + "colorPolymorphicDescription": "一个颜色合集。", + "sDXLMainModelField": "SDXL 模型", + "denoiseMaskField": "去噪遮罩", + "schedulerDescription": "待办事项", + "missingCanvaInitImage": "缺失画布初始图像", + "clipFieldDescription": "词元分析器和文本编码器的子模型。", + "noImageFoundState": "状态中未发现初始图像", + "nodeType": "节点类型", + "fullyContainNodes": "完全包含节点来进行选择", + "noOutputSchemaName": "在 ref 对象中找不到输出模式名称", + "vaeModelFieldDescription": "待办事项", + "skippingInputNoTemplate": "跳过无模板的输入", + "missingCanvaInitMaskImages": "缺失初始化画布和遮罩图像", + "problemReadingMetadata": "从图像读取元数据时出现问题", + "oNNXModelField": "ONNX 模型", + "node": "节点", + "skippingUnknownInputType": "跳过未知类型的输入", + "booleanDescription": "布尔值为真或为假。", + "collection": "合集", + "invalidOutputSchema": "无效的输出模式", + "boardFieldDescription": "图库面板", + "floatDescription": "浮点数是带小数点的数字。", + "unhandledOutputProperty": "未处理的输出属性", + "string": "字符串", + "inputFields": "输入", + "uNetFieldDescription": "UNet 子模型。", + "mismatchedVersion": "不匹配的版本", + "vaeFieldDescription": "Vae 子模型。", + "imageFieldDescription": "图像可以在节点间传递。", + "outputNode": "输出节点", + "mainModelFieldDescription": "待办事项", + "sDXLRefinerModelField": "Refiner 模型", + "unableToParseEdge": "无法解析边缘", + "latentsCollectionDescription": "Latents 可以在节点间传递。", + "oNNXModelFieldDescription": "ONNX 模型。", + "cannotDuplicateConnection": "无法创建重复的连接", + "ipAdapterModel": "IP-Adapter 模型", + "ipAdapterDescription": "图像提示词自适应 (IP-Adapter)。", + 
"ipAdapterModelDescription": "IP-Adapter 模型" + }, + "controlnet": { + "resize": "直接缩放", + "showAdvanced": "显示高级", + "contentShuffleDescription": "随机打乱图像内容", + "importImageFromCanvas": "从画布导入图像", + "lineartDescription": "将图像转换为线稿", + "importMaskFromCanvas": "从画布导入遮罩", + "hideAdvanced": "隐藏高级", + "ipAdapterModel": "Adapter 模型", + "resetControlImage": "重置控制图像", + "beginEndStepPercent": "开始 / 结束步数百分比", + "mlsdDescription": "简洁的分割线段(直线)检测器", + "duplicate": "复制", + "balanced": "平衡", + "prompt": "提示词", + "depthMidasDescription": "使用 Midas 生成深度图", + "openPoseDescription": "使用 Openpose 进行人体姿态估计", + "resizeMode": "缩放模式", + "weight": "权重", + "selectModel": "选择一个模型", + "crop": "裁剪", + "processor": "处理器", + "none": "无", + "incompatibleBaseModel": "不兼容的基础模型:", + "enableControlnet": "启用 ControlNet", + "detectResolution": "检测分辨率", + "pidiDescription": "像素差分 (PIDI) 图像处理", + "controlMode": "控制模式", + "fill": "填充", + "cannyDescription": "Canny 边缘检测", + "colorMapDescription": "从图像生成一张颜色图", + "imageResolution": "图像分辨率", + "autoConfigure": "自动配置处理器", + "normalBaeDescription": "法线 BAE 处理", + "noneDescription": "不应用任何处理", + "saveControlImage": "保存控制图像", + "toggleControlNet": "开关此 ControlNet", + "delete": "删除", + "colorMapTileSize": "分块大小", + "ipAdapterImageFallback": "无选中的 IP Adapter 图像", + "mediapipeFaceDescription": "使用 Mediapipe 检测面部", + "depthZoeDescription": "使用 Zoe 生成深度图", + "hedDescription": "整体嵌套边缘检测", + "setControlImageDimensions": "设定控制图像尺寸宽/高为", + "resetIPAdapterImage": "重置 IP Adapter 图像", + "handAndFace": "手部和面部", + "enableIPAdapter": "启用 IP Adapter", + "amult": "角度倍率 (a_mult)", + "bgth": "背景移除阈值 (bg_th)", + "lineartAnimeDescription": "动漫风格线稿处理", + "minConfidence": "最小置信度", + "lowThreshold": "弱判断阈值", + "highThreshold": "强判断阈值", + "addT2IAdapter": "添加 $t(common.t2iAdapter)", + "controlNetEnabledT2IDisabled": "$t(common.controlNet) 已启用, $t(common.t2iAdapter) 已禁用", + "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) 已启用, $t(common.controlNet) 已禁用", + "addControlNet": "添加 $t(common.controlNet)", + "controlNetT2IMutexDesc": "$t(common.controlNet) 和 $t(common.t2iAdapter) 目前不支持同时启用。", + "addIPAdapter": "添加 $t(common.ipAdapter)", + "safe": "保守模式", + "scribble": "草绘 (scribble)", + "maxFaces": "最大面部数" + }, + "queue": { + "status": "状态", + "cancelTooltip": "取消当前项目", + "queueEmpty": "队列为空", + "pauseSucceeded": "处理器已暂停", + "in_progress": "处理中", + "queueFront": "添加到队列前", + "completed": "已完成", + "queueBack": "添加到队列", + "cancelFailed": "取消项目时出现问题", + "pauseFailed": "暂停处理器时出现问题", + "clearFailed": "清除队列时出现问题", + "clearSucceeded": "队列已清除", + "pause": "暂停", + "cancelSucceeded": "项目已取消", + "queue": "队列", + "batch": "批处理", + "clearQueueAlertDialog": "清除队列时会立即取消所有处理中的项目并且会完全清除队列。", + "pending": "待定", + "completedIn": "完成于", + "resumeFailed": "恢复处理器时出现问题", + "clear": "清除", + "prune": "修剪", + "total": "总计", + "canceled": "已取消", + "pruneFailed": "修剪队列时出现问题", + "cancelBatchSucceeded": "批处理已取消", + "clearTooltip": "取消并清除所有项目", + "current": "当前", + "pauseTooltip": "暂停处理器", + "failed": "已失败", + "cancelItem": "取消项目", + "next": "下一个", + "cancelBatch": "取消批处理", + "cancel": "取消", + "resumeSucceeded": "处理器已恢复", + "resumeTooltip": "恢复处理器", + "resume": "恢复", + "cancelBatchFailed": "取消批处理时出现问题", + "clearQueueAlertDialog2": "您确定要清除队列吗?", + "item": "项目", + "pruneSucceeded": "从队列修剪 {{item_count}} 个已完成的项目", + "notReady": "无法排队", + "batchFailedToQueue": "批次加入队列失败", + "batchValues": "批次数", + "queueCountPrediction": "添加 {{predicted}} 到队列", + "batchQueued": "加入队列的批次", + "queuedCount": "{{pending}} 待处理", + "front": "前", + "pruneTooltip": "修剪 
{{item_count}} 个已完成的项目", + "batchQueuedDesc": "在队列的 {{direction}} 中添加了 {{item_count}} 个会话", + "graphQueued": "节点图已加入队列", + "back": "后", + "session": "会话", + "queueTotal": "总计 {{total}}", + "enqueueing": "队列中的批次", + "queueMaxExceeded": "超出最大值 {{max_queue_size}},将跳过 {{skip}}", + "graphFailedToQueue": "节点图加入队列失败" + }, + "sdxl": { + "refinerStart": "Refiner 开始作用时机", + "selectAModel": "选择一个模型", + "scheduler": "调度器", + "cfgScale": "CFG 等级", + "negStylePrompt": "负向样式提示词", + "noModelsAvailable": "无可用模型", + "negAestheticScore": "负向美学评分", + "useRefiner": "启用 Refiner", + "denoisingStrength": "去噪强度", + "refinermodel": "Refiner 模型", + "posAestheticScore": "正向美学评分", + "concatPromptStyle": "连接提示词 & 样式", + "loading": "加载中...", + "steps": "步数", + "posStylePrompt": "正向样式提示词" + }, + "metadata": { + "positivePrompt": "正向提示词", + "negativePrompt": "负向提示词", + "generationMode": "生成模式", + "Threshold": "噪声阈值", + "metadata": "元数据", + "strength": "图生图强度", + "seed": "种子", + "imageDetails": "图像详细信息", + "perlin": "Perlin 噪声", + "model": "模型", + "noImageDetails": "未找到图像详细信息", + "hiresFix": "高分辨率优化", + "cfgScale": "CFG 等级", + "initImage": "初始图像", + "height": "高度", + "variations": "(成对/第二)种子权重", + "noMetaData": "未找到元数据", + "width": "宽度", + "createdBy": "创建者是", + "workflow": "工作流", + "steps": "步数", + "scheduler": "调度器", + "seamless": "无缝", + "fit": "图生图适应" + }, + "models": { + "noMatchingModels": "无相匹配的模型", + "loading": "加载中", + "noMatchingLoRAs": "无相匹配的 LoRA", + "noLoRAsAvailable": "无可用 LoRA", + "noModelsAvailable": "无可用模型", + "selectModel": "选择一个模型", + "selectLoRA": "选择一个 LoRA" + }, + "boards": { + "autoAddBoard": "自动添加面板", + "topMessage": "该面板包含的图像正使用以下功能:", + "move": "移动", + "menuItemAutoAdd": "自动添加到该面板", + "myBoard": "我的面板", + "searchBoard": "检索面板...", + "noMatching": "没有相匹配的面板", + "selectBoard": "选择一个面板", + "cancel": "取消", + "addBoard": "添加面板", + "bottomMessage": "删除该面板并且将其对应的图像将重置当前使用该面板的所有功能。", + "uncategorized": "未分类", + "changeBoard": "更改面板", + "loading": "加载中...", + "clearSearch": "清除检索" + }, + "embedding": { + "noMatchingEmbedding": "不匹配的 Embedding", + "addEmbedding": "添加 Embedding", + "incompatibleModel": "不兼容的基础模型:" + }, + "dynamicPrompts": { + "seedBehaviour": { + "perPromptDesc": "每次生成图像使用不同的种子", + "perIterationLabel": "每次迭代的种子", + "perIterationDesc": "每次迭代使用不同的种子", + "perPromptLabel": "每张图像的种子", + "label": "种子行为" + }, + "enableDynamicPrompts": "启用动态提示词", + "combinatorial": "组合生成", + "maxPrompts": "最大提示词数", + "dynamicPrompts": "动态提示词", + "promptsWithCount_other": "{{count}} 个提示词" + }, + "popovers": { + "compositingMaskAdjustments": { + "heading": "遮罩调整", + "paragraphs": [ + "调整遮罩。" + ] + }, + "paramRatio": { + "heading": "纵横比", + "paragraphs": [ + "生成图像的尺寸纵横比。", + "图像尺寸(单位:像素)建议 SD 1.5 模型使用等效 512x512 的尺寸,SDXL 模型使用等效 1024x1024 的尺寸。" + ] + }, + "compositingCoherenceSteps": { + "heading": "步数", + "paragraphs": [ + "一致性层中使用的去噪步数。", + "与主参数中的步数相同。" + ] + }, + "compositingBlur": { + "heading": "模糊", + "paragraphs": [ + "遮罩模糊半径。" + ] + }, + "noiseUseCPU": { + "heading": "使用 CPU 噪声", + "paragraphs": [ + "选择由 CPU 或 GPU 生成噪声。", + "启用 CPU 噪声后,特定的种子将会在不同的设备上产生下相同的图像。", + "启用 CPU 噪声不会对性能造成影响。" + ] + }, + "paramVAEPrecision": { + "heading": "VAE 精度", + "paragraphs": [ + "VAE 编解码过程种使用的精度。FP16/半精度以微小的图像变化为代价提高效率。" + ] + }, + "compositingCoherenceMode": { + "heading": "模式", + "paragraphs": [ + "一致性层模式。" + ] + }, + "controlNetResizeMode": { + "heading": "缩放模式", + "paragraphs": [ + "ControlNet 输入图像适应输出图像大小的方法。" + ] + }, + "clipSkip": { + "paragraphs": [ + "选择要跳过 CLIP 模型多少层。", + "部分模型跳过特定数值的层时效果会更好。", + "较高的数值通常会导致图像细节更少。" + ], 
+ "heading": "CLIP 跳过层" + }, + "paramModel": { + "heading": "模型", + "paragraphs": [ + "用于去噪过程的模型。", + "不同的模型一般会通过接受训练来专门产生特定的美学内容和结果。" + ] + }, + "paramIterations": { + "heading": "迭代数", + "paragraphs": [ + "生成图像的数量。", + "若启用动态提示词,每种提示词都会生成这么多次。" + ] + }, + "compositingCoherencePass": { + "heading": "一致性层", + "paragraphs": [ + "第二轮去噪有助于合成内补/外扩图像。" + ] + }, + "compositingStrength": { + "heading": "强度", + "paragraphs": [ + "一致性层使用的去噪强度。", + "去噪强度与图生图的参数相同。" + ] + }, + "paramNegativeConditioning": { + "paragraphs": [ + "生成过程会避免生成负向提示词中的概念。使用此选项来使输出排除部分质量或对象。", + "支持 Compel 语法 和 embeddings。" + ], + "heading": "负向提示词" + }, + "compositingBlurMethod": { + "heading": "模糊方式", + "paragraphs": [ + "应用于遮罩区域的模糊方法。" + ] + }, + "paramScheduler": { + "heading": "调度器", + "paragraphs": [ + "调度器定义如何在图像迭代过程中添加噪声,或者定义如何根据一个模型的输出来更新采样。" + ] + }, + "controlNetWeight": { + "heading": "权重", + "paragraphs": [ + "ControlNet 对生成图像的影响强度。" + ] + }, + "paramCFGScale": { + "heading": "CFG 等级", + "paragraphs": [ + "控制提示词对生成过程的影响程度。" + ] + }, + "paramSteps": { + "heading": "步数", + "paragraphs": [ + "每次生成迭代执行的步数。", + "通常情况下步数越多结果越好,但需要更多生成时间。" + ] + }, + "paramPositiveConditioning": { + "heading": "正向提示词", + "paragraphs": [ + "引导生成过程。您可以使用任何单词或短语。", + "Compel 语法、动态提示词语法和 embeddings。" + ] + }, + "lora": { + "heading": "LoRA 权重", + "paragraphs": [ + "更高的 LoRA 权重会对最终图像产生更大的影响。" + ] + }, + "infillMethod": { + "heading": "填充方法", + "paragraphs": [ + "填充选定区域的方式。" + ] + }, + "controlNetBeginEnd": { + "heading": "开始 / 结束步数百分比", + "paragraphs": [ + "去噪过程中在哪部分步数应用 ControlNet。", + "在组合处理开始阶段应用 ControlNet,且在引导细节生成的结束阶段应用 ControlNet。" + ] + }, + "scaleBeforeProcessing": { + "heading": "处理前缩放", + "paragraphs": [ + "生成图像前将所选区域缩放为最适合模型的大小。" + ] + }, + "paramDenoisingStrength": { + "heading": "去噪强度", + "paragraphs": [ + "为输入图像添加的噪声量。", + "输入 0 会导致结果图像和输入完全相同,输入 1 则会生成全新的图像。" + ] + }, + "paramSeed": { + "heading": "种子", + "paragraphs": [ + "控制用于生成的起始噪声。", + "禁用 “随机种子” 来以相同设置生成相同的结果。" + ] + }, + "controlNetControlMode": { + "heading": "控制模式", + "paragraphs": [ + "给提示词或 ControlNet 增加更大的权重。" + ] + }, + "dynamicPrompts": { + "paragraphs": [ + "动态提示词可将单个提示词解析为多个。", + "基本语法示例:\"a {red|green|blue} ball\"。这会产生三种提示词:\"a red ball\", \"a green ball\" 和 \"a blue ball\"。", + "可以在单个提示词中多次使用该语法,但务必请使用最大提示词设置来控制生成的提示词数量。" + ], + "heading": "动态提示词" + }, + "paramVAE": { + "paragraphs": [ + "用于将 AI 输出转换成最终图像的模型。" + ] + }, + "dynamicPromptsSeedBehaviour": { + "paragraphs": [ + "控制生成提示词时种子的使用方式。", + "每次迭代过程都会使用一个唯一的种子。使用本选项来探索单个种子的提示词变化。", + "例如,如果你有 5 种提示词,则生成的每个图像都会使用相同种子。", + "为每张图像使用独立的唯一种子。这可以提供更多变化。" + ], + "heading": "种子行为" + }, + "dynamicPromptsMaxPrompts": { + "heading": "最大提示词数量", + "paragraphs": [ + "限制动态提示词可生成的提示词数量。" + ] + }, + "controlNet": { + "paragraphs": [ + "ControlNet 为生成过程提供引导,为生成具有受控构图、结构、样式的图像提供帮助,具体的功能由所选的模型决定。" + ] + } + }, + "invocationCache": { + "disable": "禁用", + "misses": "缓存未中", + "enableFailed": "启用调用缓存时出现问题", + "invocationCache": "调用缓存", + "clearSucceeded": "调用缓存已清除", + "enableSucceeded": "调用缓存已启用", + "clearFailed": "清除调用缓存时出现问题", + "hits": "缓存命中", + "disableSucceeded": "调用缓存已禁用", + "disableFailed": "禁用调用缓存时出现问题", + "enable": "启用", + "clear": "清除", + "maxCacheSize": "最大缓存大小", + "cacheSize": "缓存大小" } } From e543db5a5d1a85d24ad4faea5347ee784314d66b Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:45:10 +0000 Subject: [PATCH 040/202] translationBot(ui): update translation files Updated by "Remove blank strings" hook in Weblate. 
translationBot(ui): update translation files Updated by "Remove blank strings" hook in Weblate. translationBot(ui): update translation files Updated by "Remove blank strings" hook in Weblate. translationBot(ui): update translation files Updated by "Remove blank strings" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 3 +-- invokeai/frontend/web/public/locales/zh_CN.json | 4 ---- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index baa70c26be..c0f309320d 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -87,8 +87,7 @@ "learnMore": "Per saperne di più", "ipAdapter": "Adattatore IP", "t2iAdapter": "Adattatore T2I", - "controlAdapter": "Adattatore di Controllo", - "controlNet": "" + "controlAdapter": "Adattatore di Controllo" }, "gallery": { "generations": "Generazioni", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 6924a86d89..a598f555ac 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -388,7 +388,6 @@ "modelThree": "模型 3", "v2_768": "v2 (768px)", "mergedModelName": "合并的模型名称", - "alpha": "", "allModels": "全部模型", "convertToDiffusers": "转换为 Diffusers", "formMessageDiffusersModelLocation": "Diffusers 模型路径", @@ -452,7 +451,6 @@ "modelsMergeFailed": "模型融合失败", "baseModel": "基底模型", "convertingModelBegin": "模型转换中. 请稍候.", - "vae": "", "noModels": "未找到模型", "predictionType": "预测类型(适用于 Stable Diffusion 2.x 模型和部分 Stable Diffusion 1.x 模型)", "quickAdd": "快速添加", @@ -613,7 +611,6 @@ "showAdvancedOptions": "显示进阶选项", "favoriteSchedulers": "采样算法偏好", "autoChangeDimensions": "更改时将宽/高更新为模型默认值", - "beta": "", "experimental": "实验性" }, "toast": { @@ -666,7 +663,6 @@ "nodesLoadedFailed": "节点图加载失败", "modelAddedSimple": "已添加模型", "modelAdded": "已添加模型: {{modelName}}", - "baseModelChangedCleared_other": "", "imageSavingFailed": "图像保存失败", "canvasSentControlnetAssets": "画布已发送到 ControlNet & 素材", "problemCopyingCanvasDesc": "无法导出基础层", From a49b8febed13c4c8290d60e775024a29bf733d3a Mon Sep 17 00:00:00 2001 From: Surisen Date: Thu, 12 Oct 2023 12:45:12 +0000 Subject: [PATCH 041/202] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 98.0% (1186 of 1210 strings) translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 98.0% (1179 of 1203 strings) translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 97.9% (1175 of 1199 strings) Co-authored-by: Surisen Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- .../frontend/web/public/locales/zh_CN.json | 125 ++++++++++++++---- 1 file changed, 99 insertions(+), 26 deletions(-) diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index a598f555ac..d91bf0b4bb 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -84,7 +84,11 @@ "lightMode": "浅色模式", "learnMore": "了解更多", "darkMode": "深色模式", - "advanced": "高级" + "advanced": "高级", + "t2iAdapter": "T2I Adapter", + "ipAdapter": "IP Adapter", + "controlAdapter": "Control Adapter", + "controlNet": "ControlNet" }, "gallery": { 
"generations": "生成的图像", @@ -112,7 +116,10 @@ "currentlyInUse": "该图像目前在以下功能中使用:", "copy": "复制", "download": "下载", - "setCurrentImage": "设为当前图像" + "setCurrentImage": "设为当前图像", + "preparingDownload": "准备下载", + "preparingDownloadFailed": "准备下载时出现问题", + "downloadSelection": "下载所选内容" }, "hotkeys": { "keyboardShortcuts": "键盘快捷键", @@ -460,7 +467,12 @@ "closeAdvanced": "关闭高级", "modelType": "模型类别", "customConfigFileLocation": "自定义配置文件目录", - "variant": "变体" + "variant": "变体", + "onnxModels": "Onnx", + "vae": "VAE", + "oliveModels": "Olive", + "loraModels": "LoRA", + "alpha": "Alpha" }, "parameters": { "images": "图像", @@ -513,8 +525,8 @@ "info": "信息", "initialImage": "初始图像", "showOptionsPanel": "显示侧栏浮窗 (O 或 T)", - "seamlessYAxis": "Y轴", - "seamlessXAxis": "X轴", + "seamlessYAxis": "Y 轴", + "seamlessXAxis": "X 轴", "boundingBoxWidth": "边界框宽度", "boundingBoxHeight": "边界框高度", "denoisingStrength": "去噪强度", @@ -559,9 +571,9 @@ "addingImagesTo": "添加图像到", "noPrompts": "没有已生成的提示词", "readyToInvoke": "准备调用", - "noControlImageForControlAdapter": "Control Adapter {{number}} 没有控制图像", - "noModelForControlAdapter": "Control Adapter {{number}} 没有选择模型。", - "incompatibleBaseModelForControlAdapter": "Control Adapter {{number}} 与主模型不匹配。" + "noControlImageForControlAdapter": "有 #{{number}} 个 Control Adapter 缺失控制图像", + "noModelForControlAdapter": "有 #{{number}} 个 Control Adapter 没有选择模型。", + "incompatibleBaseModelForControlAdapter": "有 #{{number}} 个 Control Adapter 模型与主模型不匹配。" }, "patchmatchDownScaleSize": "缩小", "coherenceSteps": "步数", @@ -611,7 +623,15 @@ "showAdvancedOptions": "显示进阶选项", "favoriteSchedulers": "采样算法偏好", "autoChangeDimensions": "更改时将宽/高更新为模型默认值", - "experimental": "实验性" + "experimental": "实验性", + "beta": "Beta", + "clearIntermediates": "清除中间产物", + "clearIntermediatesDesc3": "您图库中的图像不会被删除。", + "clearIntermediatesDesc2": "中间产物图像是生成过程中产生的副产品,与图库中的结果图像不同。清除中间产物可释放磁盘空间。", + "intermediatesCleared_other": "已清除 {{number}} 个中间产物", + "clearIntermediatesDesc1": "清除中间产物会重置您的画布和 ControlNet 状态。", + "intermediatesClearedFailed": "清除中间产物时出现问题", + "noIntermediates": "没有可清除的中间产物" }, "toast": { "tempFoldersEmptied": "临时文件夹已清空", @@ -635,8 +655,8 @@ "seedNotSet": "种子未设定", "seedNotSetDesc": "无法找到该图像的种子。", "promptSet": "提示词已设定", - "promptNotSet": "提示未设定", - "promptNotSetDesc": "无法找到该图像的提示。", + "promptNotSet": "提示词未设定", + "promptNotSetDesc": "无法找到该图像的提示词。", "upscalingFailed": "放大失败", "faceRestoreFailed": "面部修复失败", "metadataLoadFailed": "加载元数据失败", @@ -693,7 +713,8 @@ "maskSentControlnetAssets": "遮罩已发送到 ControlNet & 素材", "canvasSavedGallery": "画布已保存到图库", "imageUploadFailed": "图像上传失败", - "problemImportingMask": "导入遮罩时出现问题" + "problemImportingMask": "导入遮罩时出现问题", + "baseModelChangedCleared_other": "基础模型已更改, 已清除或禁用 {{number}} 个不兼容的子模型" }, "unifiedCanvas": { "layer": "图层", @@ -789,8 +810,8 @@ }, "tooltip": { "feature": { - "prompt": "这是提示词区域。提示词包括生成对象和风格术语。您也可以在提示中添加权重(Token 的重要性),但命令行命令和参数不起作用。", - "imageToImage": "图生图模式加载任何图像作为初始图像,然后与提示一起用于生成新图像。值越高,结果图像的变化就越大。可能的值为 0.0 到 1.0,建议的范围是 0.25 到 0.75", + "prompt": "这是提示词区域。提示词包括生成对象和风格术语。您也可以在提示词中添加权重(Token 的重要性),但命令行命令和参数不起作用。", + "imageToImage": "图生图模式加载任何图像作为初始图像,然后与提示词一起用于生成新图像。值越高,结果图像的变化就越大。可能的值为 0.0 到 1.0,建议的范围是 0.25 到 0.75", "upscale": "使用 ESRGAN 可以在图片生成后立即放大图片。", "variations": "尝试将变化值设置在 0.1 到 1.0 之间,以更改给定种子的结果。种子的变化在 0.1 到 0.3 之间会很有趣。", "boundingBox": "边界框的高和宽的设定对文生图和图生图模式是一样的,只有边界框中的区域会被处理。", @@ -875,7 +896,7 @@ "loadingNodes": "加载节点中...", "snapToGridHelp": "移动时将节点与网格对齐", "workflowSettings": "工作流编辑器设置", - "booleanPolymorphicDescription": "布尔集合。", + "booleanPolymorphicDescription": 
"一个布尔值合集。", "scheduler": "调度器", "inputField": "输入", "controlFieldDescription": "节点间传递的控制信息。", @@ -891,14 +912,14 @@ "maybeIncompatible": "可能与已安装的不兼容", "collectionDescription": "待办事项", "skippingReservedFieldType": "跳过保留类型", - "booleanCollectionDescription": "布尔集合。", + "booleanCollectionDescription": "一个布尔值合集。", "sDXLMainModelFieldDescription": "SDXL 模型。", "boardField": "面板", "problemReadingWorkflow": "从图像读取工作流时出现问题", "sourceNode": "源节点", "nodeOpacity": "节点不透明度", "collectionItemDescription": "待办事项", - "integerDescription": "整数 (Integer) 是没有与小数点的数字。", + "integerDescription": "整数是没有与小数点的数字。", "outputField": "输出", "skipped": "跳过", "updateNode": "更新节点", @@ -957,7 +978,32 @@ "cannotDuplicateConnection": "无法创建重复的连接", "ipAdapterModel": "IP-Adapter 模型", "ipAdapterDescription": "图像提示词自适应 (IP-Adapter)。", - "ipAdapterModelDescription": "IP-Adapter 模型" + "ipAdapterModelDescription": "IP-Adapter 模型", + "floatCollectionDescription": "一个浮点数合集。", + "enum": "Enum (枚举)", + "integerPolymorphicDescription": "一个整数值合集。", + "float": "浮点", + "integer": "整数", + "colorField": "颜色", + "stringCollectionDescription": "一个字符串合集。", + "stringCollection": "字符串合集", + "uNetField": "UNet", + "integerCollection": "整数合集", + "vaeModelField": "VAE", + "integerCollectionDescription": "一个整数值合集。", + "clipField": "Clip", + "stringDescription": "字符串是指文本。", + "colorCollection": "一个颜色合集。", + "boolean": "布尔值", + "stringPolymorphicDescription": "一个字符串合集。", + "controlField": "控制信息", + "floatPolymorphicDescription": "一个浮点数合集。", + "vaeField": "Vae", + "floatCollection": "浮点合集", + "booleanCollection": "布尔值合集", + "imageCollectionDescription": "一个图像合集。", + "loRAModelField": "LoRA", + "imageCollection": "图像合集" }, "controlnet": { "resize": "直接缩放", @@ -973,7 +1019,7 @@ "mlsdDescription": "简洁的分割线段(直线)检测器", "duplicate": "复制", "balanced": "平衡", - "prompt": "提示词", + "prompt": "Prompt (提示词控制)", "depthMidasDescription": "使用 Midas 生成深度图", "openPoseDescription": "使用 Openpose 进行人体姿态估计", "resizeMode": "缩放模式", @@ -998,7 +1044,7 @@ "toggleControlNet": "开关此 ControlNet", "delete": "删除", "colorMapTileSize": "分块大小", - "ipAdapterImageFallback": "无选中的 IP Adapter 图像", + "ipAdapterImageFallback": "无已选择的 IP Adapter 图像", "mediapipeFaceDescription": "使用 Mediapipe 检测面部", "depthZoeDescription": "使用 Zoe 生成深度图", "hedDescription": "整体嵌套边缘检测", @@ -1020,7 +1066,30 @@ "addIPAdapter": "添加 $t(common.ipAdapter)", "safe": "保守模式", "scribble": "草绘 (scribble)", - "maxFaces": "最大面部数" + "maxFaces": "最大面部数", + "pidi": "PIDI", + "normalBae": "Normal BAE", + "hed": "HED", + "contentShuffle": "Content Shuffle", + "f": "F", + "h": "H", + "controlnet": "$t(controlnet.controlAdapter) #{{number}} ($t(common.controlNet))", + "control": "Control (普通控制)", + "coarse": "Coarse", + "depthMidas": "Depth (Midas)", + "w": "W", + "ip_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.ipAdapter))", + "mediapipeFace": "Mediapipe Face", + "mlsd": "M-LSD", + "lineart": "Lineart", + "t2i_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.t2iAdapter))", + "megaControl": "Mega Control (超级控制)", + "depthZoe": "Depth (Zoe)", + "colorMap": "Color", + "openPose": "Openpose", + "controlAdapter": "Control Adapter", + "lineartAnime": "Lineart Anime", + "canny": "Canny" }, "queue": { "status": "状态", @@ -1096,7 +1165,8 @@ "concatPromptStyle": "连接提示词 & 样式", "loading": "加载中...", "steps": "步数", - "posStylePrompt": "正向样式提示词" + "posStylePrompt": "正向样式提示词", + "refiner": "Refiner" }, "metadata": { "positivePrompt": "正向提示词", @@ -1148,7 +1218,8 @@ "uncategorized": "未分类", "changeBoard": "更改面板", "loading": 
"加载中...", - "clearSearch": "清除检索" + "clearSearch": "清除检索", + "downloadBoard": "下载面板" }, "embedding": { "noMatchingEmbedding": "不匹配的 Embedding", @@ -1273,7 +1344,7 @@ "paramScheduler": { "heading": "调度器", "paragraphs": [ - "调度器定义如何在图像迭代过程中添加噪声,或者定义如何根据一个模型的输出来更新采样。" + "调度器 (采样器) 定义如何在图像迭代过程中添加噪声,或者定义如何根据一个模型的输出来更新采样。" ] }, "controlNetWeight": { @@ -1358,7 +1429,8 @@ "paramVAE": { "paragraphs": [ "用于将 AI 输出转换成最终图像的模型。" - ] + ], + "heading": "VAE" }, "dynamicPromptsSeedBehaviour": { "paragraphs": [ @@ -1378,7 +1450,8 @@ "controlNet": { "paragraphs": [ "ControlNet 为生成过程提供引导,为生成具有受控构图、结构、样式的图像提供帮助,具体的功能由所选的模型决定。" - ] + ], + "heading": "ControlNet" } }, "invocationCache": { From 5b2ed4ffb4e4541ed07f4699a09eb78c87f2a45e Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 12 Oct 2023 12:45:13 +0000 Subject: [PATCH 042/202] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 3 +-- invokeai/frontend/web/public/locales/zh_CN.json | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index c0f309320d..3cf61229dd 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -632,8 +632,7 @@ "intermediatesCleared_many": "Cancellate {{number}} immagini intermedie", "intermediatesCleared_other": "", "clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato di Tela Unificata e ControlNet.", - "intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie", - "noIntermediates": "Nessuna immagine intermedia da cancellare" + "intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index d91bf0b4bb..7299584b54 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -630,8 +630,7 @@ "clearIntermediatesDesc2": "中间产物图像是生成过程中产生的副产品,与图库中的结果图像不同。清除中间产物可释放磁盘空间。", "intermediatesCleared_other": "已清除 {{number}} 个中间产物", "clearIntermediatesDesc1": "清除中间产物会重置您的画布和 ControlNet 状态。", - "intermediatesClearedFailed": "清除中间产物时出现问题", - "noIntermediates": "没有可清除的中间产物" + "intermediatesClearedFailed": "清除中间产物时出现问题" }, "toast": { "tempFoldersEmptied": "临时文件夹已清空", From 52274087f3ce7253137feb6ebd9de9b603eff6bd Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 12 Oct 2023 21:23:29 -0400 Subject: [PATCH 043/202] close #4536 --- invokeai/app/api/routers/models.py | 1 + invokeai/backend/install/model_install_backend.py | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py index ebc40f5ce5..cb0832735b 100644 --- a/invokeai/app/api/routers/models.py +++ b/invokeai/app/api/routers/models.py @@ -152,6 +152,7 @@ async def import_model( ) -> ImportModelResponse: """Add a model using its local path, repo_id, or remote URL. 
Model characteristics will be probed and configured automatically""" + location = location.strip() items_to_import = {location} prediction_types = {x.value: x for x in SchedulerPredictionType} logger = ApiDependencies.invoker.services.logger diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 1481300c77..bd26a7aa07 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -236,9 +236,16 @@ class ModelInstall(object): if not models_installed: models_installed = dict() + model_path_id_or_url = str(model_path_id_or_url).strip() + # A little hack to allow nested routines to retrieve info on the requested ID self.current_id = model_path_id_or_url path = Path(model_path_id_or_url) + + # fix relative paths + if path.exists() and not path.is_absolute(): + path = path.absolute() # make relative to current WD + # checkpoint file, or similar if path.is_file(): models_installed.update({str(path): self._install_path(path)}) From 21d5969942c2ed28cef2c9e47d1fb1d64fb49564 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 12 Oct 2023 22:35:02 -0400 Subject: [PATCH 044/202] strip leading and trailing quotes as well as whitespace --- invokeai/app/api/routers/models.py | 2 +- invokeai/backend/install/model_install_backend.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py index cb0832735b..a7b1f81252 100644 --- a/invokeai/app/api/routers/models.py +++ b/invokeai/app/api/routers/models.py @@ -152,7 +152,7 @@ async def import_model( ) -> ImportModelResponse: """Add a model using its local path, repo_id, or remote URL. Model characteristics will be probed and configured automatically""" - location = location.strip() + location = location.strip("\"' ") items_to_import = {location} prediction_types = {x.value: x for x in SchedulerPredictionType} logger = ApiDependencies.invoker.services.logger diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index bd26a7aa07..9224f5c8b2 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -236,7 +236,7 @@ class ModelInstall(object): if not models_installed: models_installed = dict() - model_path_id_or_url = str(model_path_id_or_url).strip() + model_path_id_or_url = str(model_path_id_or_url).strip("\"' ") # A little hack to allow nested routines to retrieve info on the requested ID self.current_id = model_path_id_or_url From 15cabc4968ce094f504148e64249c5e6198af282 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 12 Oct 2023 22:23:08 -0400 Subject: [PATCH 045/202] Possibly closes #4815 --- invokeai/backend/model_management/model_manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 7bb188cb4e..bdc9a6c6bb 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -986,6 +986,8 @@ class ModelManager(object): for model_path in models_dir.iterdir(): if model_path not in loaded_files: # TODO: check + if model_path.name.startswith("."): + continue model_name = model_path.name if model_path.is_dir() else model_path.stem model_key = self.create_key(model_name, cur_base_model, cur_model_type) From fcba4382b249bf4527863de804d86719be91a17c Mon 
Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 13 Oct 2023 12:49:24 -0400 Subject: [PATCH 046/202] upload to pypi whenever a branch starting with "release/" is released --- .github/workflows/pypi-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pypi-release.yml b/.github/workflows/pypi-release.yml index 9e58fb3ae0..5b7d2cd2fa 100644 --- a/.github/workflows/pypi-release.yml +++ b/.github/workflows/pypi-release.yml @@ -28,7 +28,7 @@ jobs: run: twine check dist/* - name: check PyPI versions - if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/v2.3' || github.ref == 'refs/heads/v3.3.0post1' + if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/') run: | pip install --upgrade requests python -c "\ From 8464450a53227eccb29fa83c03ac35e15b2715bd Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 13 Oct 2023 14:44:42 -0400 Subject: [PATCH 047/202] Add support for multi-image IP-Adapter. --- invokeai/app/invocations/ip_adapter.py | 4 ++-- invokeai/app/invocations/latent.py | 9 ++++++-- .../backend/ip_adapter/attention_processor.py | 21 ++++++++++++++++--- .../diffusion/conditioning_data.py | 4 ++-- .../diffusion/shared_invokeai_diffusion.py | 11 +++++++--- 5 files changed, 37 insertions(+), 12 deletions(-) diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py index 3e3a3d9b1f..ca35a17060 100644 --- a/invokeai/app/invocations/ip_adapter.py +++ b/invokeai/app/invocations/ip_adapter.py @@ -32,7 +32,7 @@ class CLIPVisionModelField(BaseModel): class IPAdapterField(BaseModel): - image: ImageField = Field(description="The IP-Adapter image prompt.") + image: Union[ImageField, List[ImageField]] = Field(description="The IP-Adapter image prompt(s).") ip_adapter_model: IPAdapterModelField = Field(description="The IP-Adapter model to use.") image_encoder_model: CLIPVisionModelField = Field(description="The name of the CLIP image encoder model.") weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet") @@ -56,7 +56,7 @@ class IPAdapterInvocation(BaseInvocation): """Collects IP-Adapter info to pass to other nodes.""" # Inputs - image: ImageField = InputField(description="The IP-Adapter image prompt.") + image: Union[ImageField, List[ImageField]] = InputField(description="The IP-Adapter image prompt(s).") ip_adapter_model: IPAdapterModelField = InputField( description="The IP-Adapter model.", title="IP-Adapter Model", input=Input.Direct, ui_order=-1 ) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 7ca8cbbe6c..2e69e4dac5 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -445,14 +445,19 @@ class DenoiseLatentsInvocation(BaseInvocation): context=context, ) - input_image = context.services.images.get_pil_image(single_ip_adapter.image.image_name) + # `single_ip_adapter.image` could be a list or a single ImageField. Normalize to a list here. + single_ipa_images = single_ip_adapter.image + if not isinstance(single_ipa_images, list): + single_ipa_images = [single_ipa_images] + + single_ipa_images = [context.services.images.get_pil_image(image.image_name) for image in single_ipa_images] # TODO(ryand): With some effort, the step of running the CLIP Vision encoder could be done before any other # models are needed in memory. This would help to reduce peak memory utilization in low-memory environments. 
with image_encoder_model_info as image_encoder_model: # Get image embeddings from CLIP and ImageProjModel. image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds( - input_image, image_encoder_model + single_ipa_images, image_encoder_model ) conditioning_data.ip_adapter_conditioning.append( IPAdapterConditioningInfo(image_prompt_embeds, uncond_image_prompt_embeds) diff --git a/invokeai/backend/ip_adapter/attention_processor.py b/invokeai/backend/ip_adapter/attention_processor.py index 2873c52322..96ab5f876a 100644 --- a/invokeai/backend/ip_adapter/attention_processor.py +++ b/invokeai/backend/ip_adapter/attention_processor.py @@ -67,6 +67,12 @@ class IPAttnProcessor2_0(torch.nn.Module): temb=None, ip_adapter_image_prompt_embeds=None, ): + """Apply IP-Adapter attention. + + Args: + ip_adapter_image_prompt_embeds (torch.Tensor): The image prompt embeddings. + Shape: (batch_size, num_ip_images, seq_len, ip_embedding_len). + """ residual = hidden_states if attn.spatial_norm is not None: @@ -127,26 +133,35 @@ class IPAttnProcessor2_0(torch.nn.Module): for ipa_embed, ipa_weights, scale in zip(ip_adapter_image_prompt_embeds, self._weights, self._scales): # The batch dimensions should match. assert ipa_embed.shape[0] == encoder_hidden_states.shape[0] - # The channel dimensions should match. - assert ipa_embed.shape[2] == encoder_hidden_states.shape[2] + # The token_len dimensions should match. + assert ipa_embed.shape[-1] == encoder_hidden_states.shape[-1] ip_hidden_states = ipa_embed + # ip_hidden_state.shape = (batch_size, num_ip_images, ip_seq_len, ip_image_embedding) + ip_key = ipa_weights.to_k_ip(ip_hidden_states) ip_value = ipa_weights.to_v_ip(ip_hidden_states) + # ip_key.shape, ip_value.shape: (batch_size, num_ip_images, ip_seq_len, head_dim * num_heads) + ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - # The output of sdpa has shape: (batch, num_heads, seq_len, head_dim) + # ip_key.shape, ip_value.shape: (batch_size, num_heads, num_ip_images * ip_seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 ip_hidden_states = F.scaled_dot_product_attention( query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False ) + # ip_hidden_states.shape: (batch_size, num_heads, query_seq_len, head_dim) + ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) ip_hidden_states = ip_hidden_states.to(query.dtype) + # ip_hidden_states.shape: (batch_size, query_seq_len, num_heads * head_dim) + hidden_states = hidden_states + scale * ip_hidden_states # linear proj diff --git a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py index 7c3f835a44..6a63c225fc 100644 --- a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py +++ b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py @@ -55,11 +55,11 @@ class PostprocessingSettings: class IPAdapterConditioningInfo: cond_image_prompt_embeds: torch.Tensor """IP-Adapter image encoder conditioning embeddings. - Shape: (batch_size, num_tokens, encoding_dim). + Shape: (num_images, num_tokens, encoding_dim). """ uncond_image_prompt_embeds: torch.Tensor """IP-Adapter image encoding embeddings to use for unconditional generation. - Shape: (batch_size, num_tokens, encoding_dim). + Shape: (num_images, num_tokens, encoding_dim). 
""" diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index c12c86ed92..943fe7b307 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -345,9 +345,12 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs = None if conditioning_data.ip_adapter_conditioning is not None: + # Note that we 'stack' to produce tensors of shape (batch_size, num_ip_images, seq_len, token_len). cross_attention_kwargs = { "ip_adapter_image_prompt_embeds": [ - torch.cat([ipa_conditioning.uncond_image_prompt_embeds, ipa_conditioning.cond_image_prompt_embeds]) + torch.stack( + [ipa_conditioning.uncond_image_prompt_embeds, ipa_conditioning.cond_image_prompt_embeds] + ) for ipa_conditioning in conditioning_data.ip_adapter_conditioning ] } @@ -415,9 +418,10 @@ class InvokeAIDiffuserComponent: # Run unconditional UNet denoising. cross_attention_kwargs = None if conditioning_data.ip_adapter_conditioning is not None: + # Note that we 'unsqueeze' to produce tensors of shape (batch_size=1, num_ip_images, seq_len, token_len). cross_attention_kwargs = { "ip_adapter_image_prompt_embeds": [ - ipa_conditioning.uncond_image_prompt_embeds + torch.unsqueeze(ipa_conditioning.uncond_image_prompt_embeds, dim=0) for ipa_conditioning in conditioning_data.ip_adapter_conditioning ] } @@ -444,9 +448,10 @@ class InvokeAIDiffuserComponent: # Run conditional UNet denoising. cross_attention_kwargs = None if conditioning_data.ip_adapter_conditioning is not None: + # Note that we 'unsqueeze' to produce tensors of shape (batch_size=1, num_ip_images, seq_len, token_len). cross_attention_kwargs = { "ip_adapter_image_prompt_embeds": [ - ipa_conditioning.cond_image_prompt_embeds + torch.unsqueeze(ipa_conditioning.cond_image_prompt_embeds, dim=0) for ipa_conditioning in conditioning_data.ip_adapter_conditioning ] } From 49279bbe7416415308dcee7e0b58132f6dc9589e Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Sat, 14 Oct 2023 13:00:52 -0400 Subject: [PATCH 048/202] Update IP-Adapter unit test for multi-image. --- tests/backend/ip_adapter/test_ip_adapter.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/backend/ip_adapter/test_ip_adapter.py b/tests/backend/ip_adapter/test_ip_adapter.py index 7f634ee1fe..6712196778 100644 --- a/tests/backend/ip_adapter/test_ip_adapter.py +++ b/tests/backend/ip_adapter/test_ip_adapter.py @@ -65,7 +65,10 @@ def test_ip_adapter_unet_patch(model_params, model_installer, torch_device): ip_adapter.to(torch_device, dtype=torch.float32) unet.to(torch_device, dtype=torch.float32) - cross_attention_kwargs = {"ip_adapter_image_prompt_embeds": [torch.randn((1, 4, 768)).to(torch_device)]} + # ip_embeds shape: (batch_size, num_ip_images, seq_len, ip_image_embedding_len) + ip_embeds = torch.randn((1, 3, 4, 768)).to(torch_device) + + cross_attention_kwargs = {"ip_adapter_image_prompt_embeds": [ip_embeds]} ip_adapter_unet_patcher = UNetPatcher([ip_adapter]) with ip_adapter_unet_patcher.apply_ip_adapter_attention(unet): output = unet(**dummy_unet_input, cross_attention_kwargs=cross_attention_kwargs).sample From 35ebc9e18d7735986998219b3377de5558b8bfe5 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Sat, 14 Oct 2023 13:28:50 -0400 Subject: [PATCH 049/202] Bump invocation versions for the multi-image IP feature. 
--- invokeai/app/invocations/ip_adapter.py | 2 +- invokeai/app/invocations/latent.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py index ca35a17060..15502ad52b 100644 --- a/invokeai/app/invocations/ip_adapter.py +++ b/invokeai/app/invocations/ip_adapter.py @@ -51,7 +51,7 @@ class IPAdapterOutput(BaseInvocationOutput): ip_adapter: IPAdapterField = OutputField(description=FieldDescriptions.ip_adapter, title="IP-Adapter") -@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.0.0") +@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.0") class IPAdapterInvocation(BaseInvocation): """Collects IP-Adapter info to pass to other nodes.""" diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 2e69e4dac5..3b6fd2498f 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -198,7 +198,7 @@ def get_scheduler( title="Denoise Latents", tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"], category="latents", - version="1.3.0", + version="1.4.0", ) class DenoiseLatentsInvocation(BaseInvocation): """Denoises noisy latents to decodable images""" From 48626c40fdbff62b963eb0a4fd742d4484ad5bce Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 14 Oct 2023 20:10:10 +1100 Subject: [PATCH 050/202] fix(backend): handle systems with `glibc` < 2.33 `mallinfo2` is not available on `glibc` < 2.33. On these systems, we successfully load the library but get an `AttributeError` on attempting to access `mallinfo2`. I'm not sure if the old `mallinfo` will work, and not sure how to install it safely to test, so for now we just handle the `AttributeError`. This means the enhanced memory snapshot logic will be skipped for these systems, which isn't a big deal. --- invokeai/backend/model_management/memory_snapshot.py | 6 ++++-- tests/backend/model_management/test_libc_util.py | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/invokeai/backend/model_management/memory_snapshot.py b/invokeai/backend/model_management/memory_snapshot.py index 4f43affcf7..01f1328114 100644 --- a/invokeai/backend/model_management/memory_snapshot.py +++ b/invokeai/backend/model_management/memory_snapshot.py @@ -55,8 +55,10 @@ class MemorySnapshot: try: malloc_info = LibcUtil().mallinfo2() - except OSError: - # This is expected in environments that do not have the 'libc.so.6' shared library. + except (OSError, AttributeError): + # OSError: This is expected in environments that do not have the 'libc.so.6' shared library. + # AttributeError: This is expected in environments that have `libc.so.6` but do not have the `mallinfo2` (e.g. glibc < 2.33) + # TODO: Does `mallinfo` work? malloc_info = None return cls(process_ram, vram, malloc_info) diff --git a/tests/backend/model_management/test_libc_util.py b/tests/backend/model_management/test_libc_util.py index a517db4c90..e13a2fd3a2 100644 --- a/tests/backend/model_management/test_libc_util.py +++ b/tests/backend/model_management/test_libc_util.py @@ -11,7 +11,10 @@ def test_libc_util_mallinfo2(): # TODO: Set the expected result preemptively based on the system properties. 
pytest.xfail("libc shared library is not available on this system.") - info = libc.mallinfo2() + try: + info = libc.mallinfo2() + except AttributeError: + pytest.xfail("`mallinfo2` is not available on this system, likely due to glibc < 2.33.") assert info.arena > 0 From b4cee46936d9efc0e1b4578e995f9673a02359f1 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Sat, 14 Oct 2023 22:03:51 +0200 Subject: [PATCH 051/202] translationBot(ui): update translation (Italian) Currently translated at 91.4% (1112 of 1216 strings) translationBot(ui): update translation (Italian) Currently translated at 90.4% (1100 of 1216 strings) translationBot(ui): update translation (Italian) Currently translated at 90.4% (1100 of 1216 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 94 +++++++++++++++++--- 1 file changed, 84 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index eb6a96b783..3cca8b508f 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -471,7 +471,8 @@ "useCustomConfig": "Utilizza configurazione personalizzata", "closeAdvanced": "Chiudi Avanzate", "modelType": "Tipo di modello", - "customConfigFileLocation": "Posizione del file di configurazione personalizzato" + "customConfigFileLocation": "Posizione del file di configurazione personalizzato", + "vaePrecision": "Precisione VAE" }, "parameters": { "images": "Immagini", @@ -897,7 +898,63 @@ "notesDescription": "Aggiunge note sul tuo flusso di lavoro", "unknownField": "Campo sconosciuto", "unknownNode": "Nodo sconosciuto", - "vaeFieldDescription": "Sotto modello VAE." 
+ "vaeFieldDescription": "Sotto modello VAE.", + "booleanPolymorphicDescription": "Una raccolta di booleani.", + "missingTemplate": "Modello mancante", + "outputSchemaNotFound": "Schema di output non trovato", + "colorFieldDescription": "Un colore RGBA.", + "maybeIncompatible": "Potrebbe essere incompatibile con quello installato", + "noNodeSelected": "Nessun nodo selezionato", + "colorPolymorphic": "Colore polimorfico", + "booleanCollectionDescription": "Una raccolta di booleani.", + "colorField": "Colore", + "nodeTemplate": "Modello di nodo", + "nodeOpacity": "Opacità del nodo", + "pickOne": "Sceglierne uno", + "outputField": "Campo di output", + "nodeSearch": "Cerca nodi", + "nodeOutputs": "Uscite del nodo", + "collectionItem": "Oggetto della raccolta", + "noConnectionInProgress": "Nessuna connessione in corso", + "noConnectionData": "Nessun dato di connessione", + "outputFields": "Campi di output", + "cannotDuplicateConnection": "Impossibile creare connessioni duplicate", + "booleanPolymorphic": "Polimorfico booleano", + "colorPolymorphicDescription": "Una collezione di colori polimorfici.", + "missingCanvaInitImage": "Immagine iniziale della tela mancante", + "clipFieldDescription": "Sottomodelli di tokenizzatore e codificatore di testo.", + "noImageFoundState": "Nessuna immagine iniziale trovata nello stato", + "clipField": "CLIP", + "noMatchingNodes": "Nessun nodo corrispondente", + "noFieldType": "Nessun tipo di campo", + "colorCollection": "Una collezione di colori.", + "noOutputSchemaName": "Nessun nome dello schema di output trovato nell'oggetto di riferimento", + "boolean": "Booleani", + "missingCanvaInitMaskImages": "Immagini di inizializzazione e maschera della tela mancanti", + "oNNXModelField": "Modello ONNX", + "node": "Nodo", + "booleanDescription": "I booleani sono veri o falsi.", + "collection": "Raccolta", + "cannotConnectInputToInput": "Impossibile collegare Input a Input", + "cannotConnectOutputToOutput": "Impossibile collegare Output ad Output", + "booleanCollection": "Raccolta booleana", + "cannotConnectToSelf": "Impossibile connettersi a se stesso", + "mismatchedVersion": "Ha una versione non corrispondente", + "outputNode": "Nodo di Output", + "loadingNodes": "Caricamento nodi...", + "oNNXModelFieldDescription": "Campo del modello ONNX.", + "denoiseMaskFieldDescription": "La maschera di riduzione del rumore può essere passata tra i nodi", + "floatCollectionDescription": "Una raccolta di numeri virgola mobile.", + "enum": "Enumeratore", + "float": "In virgola mobile", + "doesNotExist": "non esiste", + "currentImageDescription": "Visualizza l'immagine corrente nell'editor dei nodi", + "fieldTypesMustMatch": "I tipi di campo devono corrispondere", + "edge": "Bordo", + "enumDescription": "Gli enumeratori sono valori che possono essere una delle diverse opzioni.", + "denoiseMaskField": "Maschera riduzione rumore", + "currentImage": "Immagine corrente", + "floatCollection": "Raccolta in virgola mobile" }, "boards": { "autoAddBoard": "Aggiungi automaticamente bacheca", @@ -983,8 +1040,13 @@ "addControlNet": "Aggiungi $t(common.controlNet)", "controlNetT2IMutexDesc": "$t(common.controlNet) e $t(common.t2iAdapter) contemporaneamente non sono attualmente supportati.", "addIPAdapter": "Aggiungi $t(common.ipAdapter)", - "controlAdapter": "Adattatore di Controllo", - "megaControl": "Mega ControlNet" + "controlAdapter_one": "Adattatore di Controllo", + "controlAdapter_many": "Adattatori di Controllo", + "controlAdapter_other": "", + "megaControl": "Mega ControlNet", + 
"minConfidence": "Confidenza minima", + "scribble": "Scribble", + "controlnet": "" }, "queue": { "queueFront": "Aggiungi all'inizio della coda", @@ -1100,7 +1162,9 @@ }, "compositingMaskAdjustments": { "heading": "Regolazioni della maschera", - "paragraphs": ["Regola la maschera."] + "paragraphs": [ + "Regola la maschera." + ] }, "compositingCoherenceSteps": { "heading": "Passi", @@ -1111,11 +1175,15 @@ }, "compositingBlur": { "heading": "Sfocatura", - "paragraphs": ["Il raggio di sfocatura della maschera."] + "paragraphs": [ + "Il raggio di sfocatura della maschera." + ] }, "compositingCoherenceMode": { "heading": "Modalità", - "paragraphs": ["La modalità del Passaggio di Coerenza."] + "paragraphs": [ + "La modalità del Passaggio di Coerenza." + ] }, "clipSkip": { "paragraphs": [ @@ -1255,7 +1323,9 @@ ] }, "infillMethod": { - "paragraphs": ["Metodo per riempire l'area selezionata."], + "paragraphs": [ + "Metodo per riempire l'area selezionata." + ], "heading": "Metodo di riempimento" }, "controlNetWeight": { @@ -1271,7 +1341,9 @@ ] }, "controlNetControlMode": { - "paragraphs": ["Attribuisce più peso al prompt o a ControlNet."], + "paragraphs": [ + "Attribuisce più peso al prompt o a ControlNet." + ], "heading": "Modalità di controllo" }, "paramSteps": { @@ -1335,6 +1407,8 @@ "createdBy": "Creato da", "workflow": "Flusso di lavoro", "steps": "Passi", - "scheduler": "Campionatore" + "scheduler": "Campionatore", + "recallParameters": "Richiama i parametri", + "noRecallParameters": "Nessun parametro da richiamare trovato" } } From 8bbd938be91e668d6d8ce23c151133a6937867be Mon Sep 17 00:00:00 2001 From: Dennis Date: Sat, 14 Oct 2023 22:03:51 +0200 Subject: [PATCH 052/202] translationBot(ui): update translation (Dutch) Currently translated at 100.0% (1216 of 1216 strings) Co-authored-by: Dennis Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/nl/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/nl.json | 802 ++++++++++++++++++- 1 file changed, 788 insertions(+), 14 deletions(-) diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index f682886dae..5c08f65d21 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -79,7 +79,18 @@ "modelManager": "Modelbeheer", "darkMode": "Donkere modus", "lightMode": "Lichte modus", - "communityLabel": "Gemeenschap" + "communityLabel": "Gemeenschap", + "t2iAdapter": "T2I-adapter", + "on": "Aan", + "nodeEditor": "Knooppunteditor", + "ipAdapter": "IP-adapter", + "controlAdapter": "Control-adapter", + "auto": "Autom.", + "controlNet": "ControlNet", + "statusProcessing": "Bezig met verwerken", + "imageFailedToLoad": "Kan afbeelding niet laden", + "learnMore": "Meer informatie", + "advanced": "Uitgebreid" }, "gallery": { "generations": "Gegenereerde afbeeldingen", @@ -100,7 +111,17 @@ "deleteImagePermanent": "Gewiste afbeeldingen kunnen niet worden hersteld.", "assets": "Eigen onderdelen", "images": "Afbeeldingen", - "autoAssignBoardOnClick": "Ken automatisch bord toe bij klikken" + "autoAssignBoardOnClick": "Ken automatisch bord toe bij klikken", + "featuresWillReset": "Als je deze afbeelding verwijdert, dan worden deze functies onmiddellijk teruggezet.", + "loading": "Bezig met laden", + "unableToLoad": "Kan galerij niet laden", + "preparingDownload": "Bezig met voorbereiden van download", + "preparingDownloadFailed": "Fout bij voorbereiden van download", + "downloadSelection": "Download selectie", + "currentlyInUse": 
"Deze afbeelding is momenteel in gebruik door de volgende functies:", + "copy": "Kopieer", + "download": "Download", + "setCurrentImage": "Stel in als huidige afbeelding" }, "hotkeys": { "keyboardShortcuts": "Sneltoetsen", @@ -332,7 +353,7 @@ "config": "Configuratie", "configValidationMsg": "Pad naar het configuratiebestand van je model.", "modelLocation": "Locatie model", - "modelLocationValidationMsg": "Pad naar waar je model zich bevindt.", + "modelLocationValidationMsg": "Geef het pad naar een lokale map waar je Diffusers-model wordt bewaard", "vaeLocation": "Locatie VAE", "vaeLocationValidationMsg": "Pad naar waar je VAE zich bevindt.", "width": "Breedte", @@ -444,7 +465,17 @@ "syncModelsDesc": "Als je modellen niet meer synchroon zijn met de backend, kan je ze met deze optie verversen. Dit wordt typisch gebruikt in het geval je het models.yaml bestand met de hand bewerkt of als je modellen aan de InvokeAI root map toevoegt nadat de applicatie gestart werd.", "loraModels": "LoRA's", "onnxModels": "Onnx", - "oliveModels": "Olives" + "oliveModels": "Olives", + "noModels": "Geen modellen gevonden", + "predictionType": "Soort voorspelling (voor Stable Diffusion 2.x-modellen en incidentele Stable Diffusion 1.x-modellen)", + "quickAdd": "Voeg snel toe", + "simpleModelDesc": "Geef een pad naar een lokaal Diffusers-model, lokale-checkpoint- / safetensors-model, een HuggingFace-repo-ID of een url naar een checkpoint- / Diffusers-model.", + "advanced": "Uitgebreid", + "useCustomConfig": "Gebruik eigen configuratie", + "closeAdvanced": "Sluit uitgebreid", + "modelType": "Soort model", + "customConfigFileLocation": "Locatie eigen configuratiebestand", + "vaePrecision": "Nauwkeurigheid VAE" }, "parameters": { "images": "Afbeeldingen", @@ -465,7 +496,7 @@ "type": "Soort", "strength": "Sterkte", "upscaling": "Opschalen", - "upscale": "Schaal op", + "upscale": "Vergroot (Shift + U)", "upscaleImage": "Schaal afbeelding op", "scale": "Schaal", "otherOptions": "Andere opties", @@ -496,7 +527,7 @@ "useInitImg": "Gebruik initiële afbeelding", "info": "Info", "initialImage": "Initiële afbeelding", - "showOptionsPanel": "Toon deelscherm Opties", + "showOptionsPanel": "Toon deelscherm Opties (O of T)", "symmetry": "Symmetrie", "hSymmetryStep": "Stap horiz. symmetrie", "vSymmetryStep": "Stap vert. 
symmetrie", @@ -504,7 +535,8 @@ "immediate": "Annuleer direct", "isScheduled": "Annuleren", "setType": "Stel annuleervorm in", - "schedule": "Annuleer na huidige iteratie" + "schedule": "Annuleer na huidige iteratie", + "cancel": "Annuleer" }, "general": "Algemeen", "copyImage": "Kopieer afbeelding", @@ -520,7 +552,7 @@ "boundingBoxWidth": "Tekenvak breedte", "boundingBoxHeight": "Tekenvak hoogte", "clipSkip": "Overslaan CLIP", - "aspectRatio": "Verhouding", + "aspectRatio": "Beeldverhouding", "negativePromptPlaceholder": "Negatieve prompt", "controlNetControlMode": "Aansturingsmodus", "positivePromptPlaceholder": "Positieve prompt", @@ -532,7 +564,46 @@ "coherenceSteps": "Stappen", "coherenceStrength": "Sterkte", "seamHighThreshold": "Hoog", - "seamLowThreshold": "Laag" + "seamLowThreshold": "Laag", + "invoke": { + "noNodesInGraph": "Geen knooppunten in graaf", + "noModelSelected": "Geen model ingesteld", + "invoke": "Start", + "noPrompts": "Geen prompts gegenereerd", + "systemBusy": "Systeem is bezig", + "noInitialImageSelected": "Geen initiële afbeelding gekozen", + "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} invoer ontbreekt", + "noControlImageForControlAdapter": "Controle-adapter #{{number}} heeft geen controle-afbeelding", + "noModelForControlAdapter": "Control-adapter #{{number}} heeft geen model ingesteld staan.", + "unableToInvoke": "Kan niet starten", + "incompatibleBaseModelForControlAdapter": "Model van controle-adapter #{{number}} is ongeldig in combinatie met het hoofdmodel.", + "systemDisconnected": "Systeem is niet verbonden", + "missingNodeTemplate": "Knooppuntsjabloon ontbreekt", + "readyToInvoke": "Klaar om te starten", + "missingFieldTemplate": "Veldsjabloon ontbreekt", + "addingImagesTo": "Bezig met toevoegen van afbeeldingen aan" + }, + "seamlessX&Y": "Naadloos X en Y", + "isAllowedToUpscale": { + "useX2Model": "Afbeelding is te groot om te vergroten met het x4-model. Gebruik hiervoor het x2-model", + "tooLarge": "Afbeelding is te groot om te vergoten. Kies een kleinere afbeelding" + }, + "aspectRatioFree": "Vrij", + "cpuNoise": "CPU-ruis", + "patchmatchDownScaleSize": "Verklein", + "gpuNoise": "GPU-ruis", + "seamlessX": "Naadloos X", + "useCpuNoise": "Gebruik CPU-ruis", + "clipSkipWithLayerCount": "Overslaan CLIP {{layerCount}}", + "seamlessY": "Naadloos Y", + "manualSeed": "Handmatige seedwaarde", + "imageActions": "Afbeeldingshandeling", + "randomSeed": "Willekeurige seedwaarde", + "iterations": "Iteraties", + "iterationsWithCount_one": "{{count}} iteratie", + "iterationsWithCount_other": "{{count}} iteraties", + "enableNoiseSettings": "Schakel ruisinstellingen in", + "coherenceMode": "Modus" }, "settings": { "models": "Modellen", @@ -561,7 +632,16 @@ "experimental": "Experimenteel", "alternateCanvasLayout": "Omwisselen Canvas Layout", "enableNodesEditor": "Knopen Editor Inschakelen", - "autoChangeDimensions": "Werk bij wijziging afmetingen bij naar modelstandaard" + "autoChangeDimensions": "Werk bij wijziging afmetingen bij naar modelstandaard", + "clearIntermediates": "Wis tussentijdse afbeeldingen", + "clearIntermediatesDesc3": "Je galerijafbeeldingen zullen niet worden verwijderd.", + "clearIntermediatesWithCount_one": "Wis {{count}} tussentijdse afbeelding", + "clearIntermediatesWithCount_other": "Wis {{count}} tussentijdse afbeeldingen", + "clearIntermediatesDesc2": "Tussentijdse afbeeldingen zijn nevenproducten bij een generatie, die afwijken van de uitvoerafbeeldingen in de galerij. 
Het wissen van tussentijdse afbeeldingen zal schijfruimte vrijmaken.", + "intermediatesCleared_one": "{{count}} tussentijdse afbeelding gewist", + "intermediatesCleared_other": "{{count}} tussentijdse afbeeldingen gewist", + "clearIntermediatesDesc1": "Het wissen van tussentijdse onderdelen zet de staat van je canvas en ControlNet terug.", + "intermediatesClearedFailed": "Fout bij wissen van tussentijdse afbeeldingen" }, "toast": { "tempFoldersEmptied": "Tijdelijke map geleegd", @@ -610,7 +690,42 @@ "nodesCorruptedGraph": "Kan niet laden. Graph lijkt corrupt.", "nodesUnrecognizedTypes": "Laden mislukt. Graph heeft onherkenbare types", "nodesBrokenConnections": "Laden mislukt. Sommige verbindingen zijn verbroken.", - "nodesNotValidGraph": "Geen geldige knooppunten graph" + "nodesNotValidGraph": "Geen geldige knooppunten graph", + "baseModelChangedCleared_one": "Basismodel is gewijzigd: {{count}} niet-compatibel submodel weggehaald of uitgeschakeld", + "baseModelChangedCleared_other": "Basismodel is gewijzigd: {{count}} niet-compatibele submodellen weggehaald of uitgeschakeld", + "imageSavingFailed": "Fout bij bewaren afbeelding", + "canvasSentControlnetAssets": "Canvas gestuurd naar ControlNet en Assets", + "problemCopyingCanvasDesc": "Kan basislaag niet exporteren", + "loadedWithWarnings": "Werkstroom geladen met waarschuwingen", + "setInitialImage": "Ingesteld als initiële afbeelding", + "canvasCopiedClipboard": "Canvas gekopieerd naar klembord", + "setControlImage": "Ingesteld als controle-afbeelding", + "setNodeField": "Ingesteld als knooppuntveld", + "problemSavingMask": "Fout bij bewaren masker", + "problemSavingCanvasDesc": "Kan basislaag niet exporteren", + "maskSavedAssets": "Masker bewaard in Assets", + "modelAddFailed": "Fout bij toevoegen model", + "problemDownloadingCanvas": "Fout bij downloaden van canvas", + "problemMergingCanvas": "Fout bij samenvoegen canvas", + "setCanvasInitialImage": "Ingesteld als initiële canvasafbeelding", + "imageUploaded": "Afbeelding geüpload", + "addedToBoard": "Toegevoegd aan bord", + "workflowLoaded": "Werkstroom geladen", + "modelAddedSimple": "Model toegevoegd", + "problemImportingMaskDesc": "Kan masker niet exporteren", + "problemCopyingCanvas": "Fout bij kopiëren canvas", + "problemSavingCanvas": "Fout bij bewaren canvas", + "canvasDownloaded": "Canvas gedownload", + "setIPAdapterImage": "Ingesteld als IP-adapterafbeelding", + "problemMergingCanvasDesc": "Kan basislaag niet exporteren", + "problemDownloadingCanvasDesc": "Kan basislaag niet exporteren", + "problemSavingMaskDesc": "Kan masker niet exporteren", + "imageSaved": "Afbeelding bewaard", + "maskSentControlnetAssets": "Masker gestuurd naar ControlNet en Assets", + "canvasSavedGallery": "Canvas bewaard in galerij", + "imageUploadFailed": "Fout bij uploaden afbeelding", + "modelAdded": "Model toegevoegd: {{modelName}}", + "problemImportingMask": "Fout bij importeren masker" }, "tooltip": { "feature": { @@ -685,7 +800,9 @@ "betaDarkenOutside": "Verduister buiten tekenvak", "betaLimitToBox": "Beperk tot tekenvak", "betaPreserveMasked": "Behoud masker", - "antialiasing": "Anti-aliasing" + "antialiasing": "Anti-aliasing", + "showResultsOn": "Toon resultaten (aan)", + "showResultsOff": "Toon resultaten (uit)" }, "accessibility": { "exitViewer": "Stop viewer", @@ -707,7 +824,9 @@ "toggleAutoscroll": "Autom. 
scrollen aan/uit", "toggleLogViewer": "Logboekviewer aan/uit", "showOptionsPanel": "Toon zijscherm", - "menu": "Menu" + "menu": "Menu", + "showGalleryPanel": "Toon deelscherm Galerij", + "loadMore": "Laad meer" }, "ui": { "showProgressImages": "Toon voortgangsafbeeldingen", @@ -730,6 +849,661 @@ "resetWorkflow": "Herstel werkstroom", "resetWorkflowDesc": "Weet je zeker dat je deze werkstroom wilt herstellen?", "resetWorkflowDesc2": "Herstel van een werkstroom haalt alle knooppunten, randen en werkstroomdetails weg.", - "downloadWorkflow": "Download JSON van werkstroom" + "downloadWorkflow": "Download JSON van werkstroom", + "booleanPolymorphicDescription": "Een verzameling Booleanse waarden.", + "scheduler": "Planner", + "inputField": "Invoerveld", + "controlFieldDescription": "Controlegegevens doorgegeven tussen knooppunten.", + "skippingUnknownOutputType": "Overslaan van onbekend soort uitvoerveld", + "latentsFieldDescription": "Latents kunnen worden doorgegeven tussen knooppunten.", + "denoiseMaskFieldDescription": "Ontruisingsmasker kan worden doorgegeven tussen knooppunten", + "floatCollectionDescription": "Een verzameling zwevende-kommagetallen.", + "missingTemplate": "Ontbrekende sjabloon", + "outputSchemaNotFound": "Uitvoerschema niet gevonden", + "ipAdapterPolymorphicDescription": "Een verzameling IP-adapters.", + "workflowDescription": "Korte beschrijving", + "latentsPolymorphicDescription": "Latents kunnen worden doorgegeven tussen knooppunten.", + "colorFieldDescription": "Een RGBA-kleur.", + "mainModelField": "Model", + "unhandledInputProperty": "Onverwerkt invoerkenmerk", + "versionUnknown": " Versie onbekend", + "ipAdapterCollection": "Verzameling IP-adapters", + "conditioningCollection": "Verzameling conditionering", + "maybeIncompatible": "Is mogelijk niet compatibel met geïnstalleerde knooppunten", + "ipAdapterPolymorphic": "Polymorfisme IP-adapter", + "noNodeSelected": "Geen knooppunt gekozen", + "addNode": "Voeg knooppunt toe", + "unableToValidateWorkflow": "Kan werkstroom niet valideren", + "enum": "Enumeratie", + "integerPolymorphicDescription": "Een verzameling gehele getallen.", + "noOutputRecorded": "Geen uitvoer opgenomen", + "updateApp": "Werk app bij", + "conditioningCollectionDescription": "Conditionering kan worden doorgegeven tussen knooppunten.", + "colorPolymorphic": "Polymorfisme kleur", + "colorCodeEdgesHelp": "Kleurgecodeerde randen op basis van hun verbonden velden", + "collectionDescription": "Beschrijving", + "float": "Zwevende-kommagetal", + "workflowContact": "Contactpersoon", + "skippingReservedFieldType": "Overslaan van gereserveerd veldsoort", + "animatedEdges": "Geanimeerde randen", + "booleanCollectionDescription": "Een verzameling van Booleanse waarden.", + "sDXLMainModelFieldDescription": "SDXL-modelveld.", + "conditioningPolymorphic": "Polymorfisme conditionering", + "integer": "Geheel getal", + "colorField": "Kleur", + "boardField": "Bord", + "nodeTemplate": "Sjabloon knooppunt", + "latentsCollection": "Verzameling latents", + "problemReadingWorkflow": "Fout bij lezen van werkstroom uit afbeelding", + "sourceNode": "Bronknooppunt", + "nodeOpacity": "Dekking knooppunt", + "pickOne": "Kies er een", + "collectionItemDescription": "Beschrijving", + "integerDescription": "Gehele getallen zijn getallen zonder een decimaalteken.", + "outputField": "Uitvoerveld", + "unableToLoadWorkflow": "Kan werkstroom niet valideren", + "snapToGrid": "Lijn uit op raster", + "stringPolymorphic": "Polymorfisme tekenreeks", + "conditioningPolymorphicDescription": 
"Conditionering kan worden doorgegeven tussen knooppunten.", + "noFieldsLinearview": "Geen velden toegevoegd aan lineaire weergave", + "skipped": "Overgeslagen", + "imagePolymorphic": "Polymorfisme afbeelding", + "nodeSearch": "Zoek naar knooppunten", + "updateNode": "Werk knooppunt bij", + "sDXLRefinerModelFieldDescription": "Beschrijving", + "imagePolymorphicDescription": "Een verzameling afbeeldingen.", + "floatPolymorphic": "Polymorfisme zwevende-kommagetal", + "version": "Versie", + "doesNotExist": "bestaat niet", + "ipAdapterCollectionDescription": "Een verzameling van IP-adapters.", + "stringCollectionDescription": "Een verzameling tekenreeksen.", + "unableToParseNode": "Kan knooppunt niet inlezen", + "controlCollection": "Controle-verzameling", + "validateConnections": "Valideer verbindingen en graaf", + "stringCollection": "Verzameling tekenreeksen", + "inputMayOnlyHaveOneConnection": "Invoer mag slechts een enkele verbinding hebben", + "notes": "Opmerkingen", + "uNetField": "UNet", + "nodeOutputs": "Uitvoer knooppunt", + "currentImageDescription": "Toont de huidige afbeelding in de knooppunteditor", + "validateConnectionsHelp": "Voorkom dat er ongeldige verbindingen worden gelegd en dat er ongeldige grafen worden aangeroepen", + "problemSettingTitle": "Fout bij instellen titel", + "ipAdapter": "IP-adapter", + "integerCollection": "Verzameling gehele getallen", + "collectionItem": "Verzamelingsonderdeel", + "noConnectionInProgress": "Geen verbinding bezig te maken", + "vaeModelField": "VAE", + "controlCollectionDescription": "Controlegegevens doorgegeven tussen knooppunten.", + "skippedReservedInput": "Overgeslagen gereserveerd invoerveld", + "workflowVersion": "Versie", + "noConnectionData": "Geen verbindingsgegevens", + "outputFields": "Uitvoervelden", + "fieldTypesMustMatch": "Veldsoorten moeten overeenkomen", + "workflow": "Werkstroom", + "edge": "Rand", + "inputNode": "Invoerknooppunt", + "enumDescription": "Enumeraties zijn waarden die uit een aantal opties moeten worden gekozen.", + "unkownInvocation": "Onbekende aanroepsoort", + "loRAModelFieldDescription": "Beschrijving", + "imageField": "Afbeelding", + "skippedReservedOutput": "Overgeslagen gereserveerd uitvoerveld", + "animatedEdgesHelp": "Animeer gekozen randen en randen verbonden met de gekozen knooppunten", + "cannotDuplicateConnection": "Kan geen dubbele verbindingen maken", + "booleanPolymorphic": "Polymorfisme Booleaanse waarden", + "unknownTemplate": "Onbekend sjabloon", + "noWorkflow": "Geen werkstroom", + "removeLinearView": "Verwijder uit lineaire weergave", + "colorCollectionDescription": "Beschrijving", + "integerCollectionDescription": "Een verzameling gehele getallen.", + "colorPolymorphicDescription": "Een verzameling kleuren.", + "sDXLMainModelField": "SDXL-model", + "workflowTags": "Labels", + "denoiseMaskField": "Ontruisingsmasker", + "schedulerDescription": "Beschrijving", + "missingCanvaInitImage": "Ontbrekende initialisatie-afbeelding voor canvas", + "conditioningFieldDescription": "Conditionering kan worden doorgegeven tussen knooppunten.", + "clipFieldDescription": "Submodellen voor tokenizer en text_encoder.", + "fullyContainNodesHelp": "Knooppunten moeten zich volledig binnen het keuzevak bevinden om te worden gekozen", + "noImageFoundState": "Geen initiële afbeelding gevonden in de staat", + "workflowValidation": "Validatiefout werkstroom", + "clipField": "Clip", + "stringDescription": "Tekenreeksen zijn tekst.", + "nodeType": "Soort knooppunt", + "noMatchingNodes": "Geen overeenkomende 
knooppunten", + "fullyContainNodes": "Omvat knooppunten volledig om ze te kiezen", + "integerPolymorphic": "Polymorfisme geheel getal", + "executionStateInProgress": "Bezig", + "noFieldType": "Geen soort veld", + "colorCollection": "Een verzameling kleuren.", + "executionStateError": "Fout", + "noOutputSchemaName": "Geen naam voor uitvoerschema gevonden in referentieobject", + "ipAdapterModel": "Model IP-adapter", + "latentsPolymorphic": "Polymorfisme latents", + "vaeModelFieldDescription": "Beschrijving", + "skippingInputNoTemplate": "Overslaan van invoerveld zonder sjabloon", + "ipAdapterDescription": "Een Afbeeldingsprompt-adapter (IP-adapter).", + "boolean": "Booleaanse waarden", + "missingCanvaInitMaskImages": "Ontbrekende initialisatie- en maskerafbeeldingen voor canvas", + "problemReadingMetadata": "Fout bij lezen van metagegevens uit afbeelding", + "stringPolymorphicDescription": "Een verzameling tekenreeksen.", + "oNNXModelField": "ONNX-model", + "executionStateCompleted": "Voltooid", + "node": "Knooppunt", + "skippingUnknownInputType": "Overslaan van onbekend soort invoerveld", + "workflowAuthor": "Auteur", + "currentImage": "Huidige afbeelding", + "controlField": "Controle", + "workflowName": "Naam", + "booleanDescription": "Booleanse waarden zijn waar en onwaar.", + "collection": "Verzameling", + "ipAdapterModelDescription": "Modelveld IP-adapter", + "cannotConnectInputToInput": "Kan invoer niet aan invoer verbinden", + "invalidOutputSchema": "Ongeldig uitvoerschema", + "boardFieldDescription": "Een galerijbord", + "floatDescription": "Zwevende-kommagetallen zijn getallen met een decimaalteken.", + "floatPolymorphicDescription": "Een verzameling zwevende-kommagetallen.", + "vaeField": "Vae", + "conditioningField": "Conditionering", + "unhandledOutputProperty": "Onverwerkt uitvoerkenmerk", + "workflowNotes": "Opmerkingen", + "string": "Tekenreeks", + "floatCollection": "Verzameling zwevende-kommagetallen", + "latentsField": "Latents", + "cannotConnectOutputToOutput": "Kan uitvoer niet aan uitvoer verbinden", + "booleanCollection": "Verzameling Booleaanse waarden", + "connectionWouldCreateCycle": "Verbinding zou cyclisch worden", + "cannotConnectToSelf": "Kan niet aan zichzelf verbinden", + "notesDescription": "Voeg opmerkingen toe aan je werkstroom", + "unknownField": "Onbekend veld", + "inputFields": "Invoervelden", + "colorCodeEdges": "Kleurgecodeerde randen", + "uNetFieldDescription": "UNet-submodel.", + "unknownNode": "Onbekend knooppunt", + "imageCollectionDescription": "Een verzameling afbeeldingen.", + "mismatchedVersion": "Heeft niet-overeenkomende versie", + "vaeFieldDescription": "Vae-submodel.", + "imageFieldDescription": "Afbeeldingen kunnen worden doorgegeven tussen knooppunten.", + "outputNode": "Uitvoerknooppunt", + "addNodeToolTip": "Voeg knooppunt toe (Shift+A, spatie)", + "loadingNodes": "Bezig met laden van knooppunten...", + "snapToGridHelp": "Lijn knooppunten uit op raster bij verplaatsing", + "workflowSettings": "Instellingen werkstroomeditor", + "mainModelFieldDescription": "Beschrijving", + "sDXLRefinerModelField": "Verfijningsmodel", + "loRAModelField": "LoRA", + "unableToParseEdge": "Kan rand niet inlezen", + "latentsCollectionDescription": "Latents kunnen worden doorgegeven tussen knooppunten.", + "oNNXModelFieldDescription": "ONNX-modelveld.", + "imageCollection": "Afbeeldingsverzameling" + }, + "controlnet": { + "amult": "a_mult", + "resize": "Schaal", + "showAdvanced": "Toon uitgebreid", + "contentShuffleDescription": "Verschuift het materiaal in de 
afbeelding", + "bgth": "bg_th", + "addT2IAdapter": "Voeg $t(common.t2iAdapter) toe", + "pidi": "PIDI", + "importImageFromCanvas": "Importeer afbeelding uit canvas", + "lineartDescription": "Zet afbeelding om naar lineart", + "normalBae": "Normale BAE", + "importMaskFromCanvas": "Importeer masker uit canvas", + "hed": "HED", + "hideAdvanced": "Verberg uitgebreid", + "contentShuffle": "Verschuif materiaal", + "controlNetEnabledT2IDisabled": "$t(common.controlNet) ingeschakeld, $t(common.t2iAdapter)s uitgeschakeld", + "ipAdapterModel": "Adaptermodel", + "resetControlImage": "Zet controle-afbeelding terug", + "beginEndStepPercent": "Percentage begin-/eindstap", + "mlsdDescription": "Minimalistische herkenning lijnsegmenten", + "duplicate": "Maak kopie", + "balanced": "Gebalanceerd", + "f": "F", + "h": "H", + "prompt": "Prompt", + "depthMidasDescription": "Generatie van diepteblad via Midas", + "controlnet": "$t(controlnet.controlAdapter) #{{number}} ($t(common.controlNet))", + "openPoseDescription": "Menselijke pose-benadering via Openpose", + "control": "Controle", + "resizeMode": "Modus schaling", + "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) ingeschakeld, $t(common.controlNet)s uitgeschakeld", + "coarse": "Grof", + "weight": "Gewicht", + "selectModel": "Kies een model", + "crop": "Snij bij", + "depthMidas": "Diepte (Midas)", + "w": "B", + "processor": "Verwerker", + "addControlNet": "Voeg $t(common.controlNet) toe", + "none": "Geen", + "incompatibleBaseModel": "Niet-compatibel basismodel:", + "enableControlnet": "Schakel ControlNet in", + "detectResolution": "Herken resolutie", + "controlNetT2IMutexDesc": "Gelijktijdig gebruik van $t(common.controlNet) en $t(common.t2iAdapter) wordt op dit moment niet ondersteund.", + "ip_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.ipAdapter))", + "pidiDescription": "PIDI-afbeeldingsverwerking", + "mediapipeFace": "Mediapipe - Gezicht", + "mlsd": "M-LSD", + "controlMode": "Controlemodus", + "fill": "Vul", + "cannyDescription": "Herkenning Canny-rand", + "addIPAdapter": "Voeg $t(common.ipAdapter) toe", + "lineart": "Lineart", + "colorMapDescription": "Genereert een kleurenblad van de afbeelding", + "lineartAnimeDescription": "Lineartverwerking in anime-stijl", + "t2i_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.t2iAdapter))", + "minConfidence": "Min. 
vertrouwensniveau", + "imageResolution": "Resolutie afbeelding", + "megaControl": "Zeer veel controle", + "depthZoe": "Diepte (Zoe)", + "colorMap": "Kleur", + "lowThreshold": "Lage drempelwaarde", + "autoConfigure": "Configureer verwerker automatisch", + "highThreshold": "Hoge drempelwaarde", + "normalBaeDescription": "Normale BAE-verwerking", + "noneDescription": "Geen verwerking toegepast", + "saveControlImage": "Bewaar controle-afbeelding", + "openPose": "Openpose", + "toggleControlNet": "Zet deze ControlNet aan/uit", + "delete": "Verwijder", + "controlAdapter_one": "Control-adapter", + "controlAdapter_other": "Control-adapters", + "safe": "Veilig", + "colorMapTileSize": "Grootte tegel", + "lineartAnime": "Lineart-anime", + "ipAdapterImageFallback": "Geen IP-adapterafbeelding gekozen", + "mediapipeFaceDescription": "Gezichtsherkenning met Mediapipe", + "canny": "Canny", + "depthZoeDescription": "Generatie van diepteblad via Zoe", + "hedDescription": "Herkenning van holistisch-geneste randen", + "setControlImageDimensions": "Stel afmetingen controle-afbeelding in op B/H", + "scribble": "Krabbel", + "resetIPAdapterImage": "Zet IP-adapterafbeelding terug", + "handAndFace": "Hand en gezicht", + "enableIPAdapter": "Schakel IP-adapter in", + "maxFaces": "Max. gezichten" + }, + "dynamicPrompts": { + "seedBehaviour": { + "perPromptDesc": "Gebruik een verschillende seedwaarde per afbeelding", + "perIterationLabel": "Seedwaarde per iteratie", + "perIterationDesc": "Gebruik een verschillende seedwaarde per iteratie", + "perPromptLabel": "Seedwaarde per afbeelding", + "label": "Gedrag seedwaarde" + }, + "enableDynamicPrompts": "Schakel dynamische prompts in", + "combinatorial": "Combinatorische generatie", + "maxPrompts": "Max. prompts", + "promptsWithCount_one": "{{count}} prompt", + "promptsWithCount_other": "{{count}} prompts", + "dynamicPrompts": "Dynamische prompts" + }, + "popovers": { + "noiseUseCPU": { + "paragraphs": [ + "Bestuurt of ruis wordt gegenereerd op de CPU of de GPU.", + "Met CPU-ruis ingeschakeld zal een bepaalde seedwaarde dezelfde afbeelding opleveren op welke machine dan ook.", + "Er is geen prestatieverschil bij het inschakelen van CPU-ruis." + ], + "heading": "Gebruik CPU-ruis" + }, + "paramScheduler": { + "paragraphs": [ + "De planner bepaalt hoe per keer ruis wordt toegevoegd aan een afbeelding of hoe een monster wordt bijgewerkt op basis van de uitvoer van een model." + ], + "heading": "Planner" + }, + "scaleBeforeProcessing": { + "paragraphs": [ + "Schaalt het gekozen gebied naar de grootte die het meest geschikt is voor het model, vooraf aan het proces van het afbeeldingen genereren." + ], + "heading": "Schaal vooraf aan verwerking" + }, + "compositingMaskAdjustments": { + "heading": "Aanpassingen masker", + "paragraphs": [ + "Pas het masker aan." + ] + }, + "paramRatio": { + "heading": "Beeldverhouding", + "paragraphs": [ + "De beeldverhouding van de afmetingen van de afbeelding die wordt gegenereerd.", + "Een afbeeldingsgrootte (in aantal pixels) equivalent aan 512x512 wordt aanbevolen voor SD1.5-modellen. Een grootte-equivalent van 1024x1024 wordt aanbevolen voor SDXL-modellen." + ] + }, + "compositingCoherenceSteps": { + "heading": "Stappen", + "paragraphs": [ + "Het aantal te gebruiken ontruisingsstappen in de coherentiefase.", + "Gelijk aan de hoofdparameter Stappen." + ] + }, + "dynamicPrompts": { + "paragraphs": [ + "Dynamische prompts vormt een enkele prompt om in vele.", + "De basissyntax is \"a {red|green|blue} ball\". 
Dit zal de volgende drie prompts geven: \"a red ball\", \"a green ball\" en \"a blue ball\".", + "Gebruik de syntax zo vaak als je wilt in een enkele prompt, maar zorg ervoor dat het aantal gegenereerde prompts in lijn ligt met de instelling Max. prompts." + ], + "heading": "Dynamische prompts" + }, + "paramVAE": { + "paragraphs": [ + "Het model gebruikt voor het vertalen van AI-uitvoer naar de uiteindelijke afbeelding." + ], + "heading": "VAE" + }, + "compositingBlur": { + "heading": "Vervaging", + "paragraphs": [ + "De vervagingsstraal van het masker." + ] + }, + "paramIterations": { + "paragraphs": [ + "Het aantal te genereren afbeeldingen.", + "Als dynamische prompts is ingeschakeld, dan zal elke prompt dit aantal keer gegenereerd worden." + ], + "heading": "Iteraties" + }, + "paramVAEPrecision": { + "heading": "Nauwkeurigheid VAE", + "paragraphs": [ + "De nauwkeurigheid gebruikt tijdens de VAE-codering en -decodering. FP16/halve nauwkeurig is efficiënter, ten koste van kleine afbeeldingsvariaties." + ] + }, + "compositingCoherenceMode": { + "heading": "Modus", + "paragraphs": [ + "De modus van de coherentiefase." + ] + }, + "paramSeed": { + "paragraphs": [ + "Bestuurt de startruis die gebruikt wordt bij het genereren.", + "Schakel \"Willekeurige seedwaarde\" uit om identieke resultaten te krijgen met dezelfde generatie-instellingen." + ], + "heading": "Seedwaarde" + }, + "controlNetResizeMode": { + "heading": "Schaalmodus", + "paragraphs": [ + "Hoe de ControlNet-afbeelding zal worden geschaald aan de uitvoergrootte van de afbeelding." + ] + }, + "controlNetBeginEnd": { + "paragraphs": [ + "Op welke stappen van het ontruisingsproces ControlNet worden toegepast.", + "ControlNets die worden toegepast aan het begin begeleiden het compositieproces. ControlNets die worden toegepast aan het eind zorgen voor details." + ], + "heading": "Percentage begin- / eindstap" + }, + "dynamicPromptsSeedBehaviour": { + "paragraphs": [ + "Bestuurt hoe de seedwaarde wordt gebruikt bij het genereren van prompts.", + "Per iteratie zal een unieke seedwaarde worden gebruikt voor elke iteratie. Gebruik dit om de promptvariaties binnen een enkele seedwaarde te verkennen.", + "Bijvoorbeeld: als je vijf prompts heb, dan zal voor elke afbeelding dezelfde seedwaarde gebruikt worden.", + "De optie Per afbeelding zal een unieke seedwaarde voor elke afbeelding gebruiken. Dit biedt meer variatie." + ], + "heading": "Gedrag seedwaarde" + }, + "clipSkip": { + "paragraphs": [ + "Kies hoeveel CLIP-modellagen je wilt overslaan.", + "Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen.", + "Een hogere waarde geeft meestal een minder gedetailleerde afbeelding." + ], + "heading": "Overslaan CLIP" + }, + "paramModel": { + "heading": "Model", + "paragraphs": [ + "Model gebruikt voor de ontruisingsstappen.", + "Verschillende modellen zijn meestal getraind zich te specialiseren in het maken van bepaalde esthetische resultaten en materiaal." + ] + }, + "compositingCoherencePass": { + "heading": "Coherentiefase", + "paragraphs": [ + "Een tweede ronde ontruising helpt bij het samenstellen van de erin- of eruitgetekende afbeelding." + ] + }, + "paramDenoisingStrength": { + "paragraphs": [ + "Hoeveel ruis wordt toegevoegd aan de invoerafbeelding.", + "0 geeft een identieke afbeelding, waarbij 1 een volledig nieuwe afbeelding geeft." 
+ ], + "heading": "Ontruisingssterkte" + }, + "compositingStrength": { + "heading": "Sterkte", + "paragraphs": [ + "Ontruisingssterkte voor de coherentiefase.", + "Gelijk aan de parameter Ontruisingssterkte Afbeelding naar afbeelding." + ] + }, + "paramNegativeConditioning": { + "paragraphs": [ + "Het generatieproces voorkomt de gegeven begrippen in de negatieve prompt. Gebruik dit om bepaalde zaken of voorwerpen uit te sluiten van de uitvoerafbeelding.", + "Ondersteunt Compel-syntax en -embeddingen." + ], + "heading": "Negatieve prompt" + }, + "compositingBlurMethod": { + "heading": "Vervagingsmethode", + "paragraphs": [ + "De methode van de vervaging die wordt toegepast op het gemaskeerd gebied." + ] + }, + "dynamicPromptsMaxPrompts": { + "heading": "Max. prompts", + "paragraphs": [ + "Beperkt het aantal prompts die kunnen worden gegenereerd door dynamische prompts." + ] + }, + "infillMethod": { + "paragraphs": [ + "Methode om een gekozen gebied in te vullen." + ], + "heading": "Invulmethode" + }, + "controlNetWeight": { + "heading": "Gewicht", + "paragraphs": [ + "Hoe sterk ControlNet effect heeft op de gegeneerde afbeelding." + ] + }, + "controlNet": { + "heading": "ControlNet", + "paragraphs": [ + "ControlNets biedt begeleiding aan het generatieproces, waarbij hulp wordt geboden bij het maken van afbeelding met aangestuurde compositie, structuur of stijl, afhankelijk van het gekozen model." + ] + }, + "paramCFGScale": { + "heading": "CFG-schaal", + "paragraphs": [ + "Bestuurt hoeveel je prompt invloed heeft op het generatieproces." + ] + }, + "controlNetControlMode": { + "paragraphs": [ + "Geeft meer gewicht aan ofwel de prompt danwel ControlNet." + ], + "heading": "Controlemodus" + }, + "paramSteps": { + "heading": "Stappen", + "paragraphs": [ + "Het aantal uit te voeren stappen tijdens elke generatie.", + "Hogere stappenaantallen geven meestal betere afbeeldingen ten koste van een grotere benodigde generatietijd." + ] + }, + "paramPositiveConditioning": { + "heading": "Positieve prompt", + "paragraphs": [ + "Begeleidt het generartieproces. Gebruik een woord of frase naar keuze.", + "Syntaxes en embeddings voor Compel en dynamische prompts." + ] + }, + "lora": { + "heading": "Gewicht LoRA", + "paragraphs": [ + "Een hogere LoRA-gewicht zal leiden tot een groter effect op de uiteindelijke afbeelding." 
+ ] + } + }, + "metadata": { + "seamless": "Naadloos", + "positivePrompt": "Positieve prompt", + "negativePrompt": "Negatieve prompt", + "generationMode": "Generatiemodus", + "Threshold": "Drempelwaarde ruis", + "metadata": "Metagegevens", + "strength": "Sterkte Afbeelding naar afbeelding", + "seed": "Seedwaarde", + "imageDetails": "Afbeeldingsdetails", + "perlin": "Perlin-ruis", + "model": "Model", + "noImageDetails": "Geen afbeeldingsdetails gevonden", + "hiresFix": "Optimalisatie voor hoge resolutie", + "cfgScale": "CFG-schaal", + "fit": "Schaal aanpassen in Afbeelding naar afbeelding", + "initImage": "Initiële afbeelding", + "recallParameters": "Opnieuw aan te roepen parameters", + "height": "Hoogte", + "variations": "Paren seedwaarde-gewicht", + "noMetaData": "Geen metagegevens gevonden", + "width": "Breedte", + "createdBy": "Gemaakt door", + "workflow": "Werkstroom", + "steps": "Stappen", + "scheduler": "Planner", + "noRecallParameters": "Geen opnieuw uit te voeren parameters gevonden" + }, + "queue": { + "status": "Status", + "pruneSucceeded": "{{item_count}} voltooide onderdelen uit wachtrij gesnoeid", + "cancelTooltip": "Annuleer huidig onderdeel", + "queueEmpty": "Wachtrij leeg", + "pauseSucceeded": "Verwerker onderbroken", + "in_progress": "Bezig", + "queueFront": "Voeg toe aan voorkant van wachtrij", + "notReady": "Kan niet in wachtrij plaatsen", + "batchFailedToQueue": "Fout bij reeks in wachtrij plaatsen", + "completed": "Voltooid", + "queueBack": "Voeg toe aan wachtrij", + "batchValues": "Reekswaarden", + "cancelFailed": "Fout bij annuleren onderdeel", + "queueCountPrediction": "Voeg {{predicted}} toe aan wachtrij", + "batchQueued": "Reeks in wachtrij geplaatst", + "pauseFailed": "Fout bij onderbreken verwerker", + "clearFailed": "Fout bij wissen van wachtrij", + "queuedCount": "{{pending}} wachtend", + "front": "begin", + "clearSucceeded": "Wachtrij gewist", + "pause": "Onderbreek", + "pruneTooltip": "Snoei {{item_count}} voltooide onderdelen", + "cancelSucceeded": "Onderdeel geannuleerd", + "batchQueuedDesc_one": "Voeg {{count}} sessie toe aan het {{direction}} van de wachtrij", + "batchQueuedDesc_other": "Voeg {{count}} sessies toe aan het {{direction}} van de wachtrij", + "graphQueued": "Graaf in wachtrij geplaatst", + "queue": "Wachtrij", + "batch": "Reeks", + "clearQueueAlertDialog": "Als je de wachtrij onmiddellijk wist, dan worden alle onderdelen die bezig zijn geannuleerd en wordt de gehele wachtrij gewist.", + "pending": "Wachtend", + "completedIn": "Voltooid na", + "resumeFailed": "Fout bij hervatten verwerker", + "clear": "Wis", + "prune": "Snoei", + "total": "Totaal", + "canceled": "Geannuleerd", + "pruneFailed": "Fout bij snoeien van wachtrij", + "cancelBatchSucceeded": "Reeks geannuleerd", + "clearTooltip": "Annuleer en wis alle onderdelen", + "current": "Huidig", + "pauseTooltip": "Onderbreek verwerker", + "failed": "Mislukt", + "cancelItem": "Annuleer onderdeel", + "next": "Volgende", + "cancelBatch": "Annuleer reeks", + "back": "eind", + "cancel": "Annuleer", + "session": "Sessie", + "queueTotal": "Totaal {{total}}", + "resumeSucceeded": "Verwerker hervat", + "enqueueing": "Toevoegen van reeks aan wachtrij", + "resumeTooltip": "Hervat verwerker", + "queueMaxExceeded": "Max. 
aantal van {{max_queue_size}} overschreden, {{skip}} worden overgeslagen", + "resume": "Hervat", + "cancelBatchFailed": "Fout bij annuleren van reeks", + "clearQueueAlertDialog2": "Weet je zeker dat je de wachtrij wilt wissen?", + "item": "Onderdeel", + "graphFailedToQueue": "Fout bij toevoegen graaf aan wachtrij" + }, + "sdxl": { + "refinerStart": "Startwaarde verfijner", + "selectAModel": "Kies een model", + "scheduler": "Planner", + "cfgScale": "CFG-schaal", + "negStylePrompt": "Negatieve-stijlprompt", + "noModelsAvailable": "Geen modellen beschikbaar", + "refiner": "Verfijner", + "negAestheticScore": "Negatieve aantrekkelijkheidsscore", + "useRefiner": "Gebruik verfijner", + "denoisingStrength": "Sterkte ontruising", + "refinermodel": "Verfijnermodel", + "posAestheticScore": "Positieve aantrekkelijkheidsscore", + "concatPromptStyle": "Plak prompt- en stijltekst aan elkaar", + "loading": "Bezig met laden...", + "steps": "Stappen", + "posStylePrompt": "Positieve-stijlprompt" + }, + "models": { + "noMatchingModels": "Geen overeenkomend modellen", + "loading": "bezig met laden", + "noMatchingLoRAs": "Geen overeenkomende LoRA's", + "noLoRAsAvailable": "Geen LoRA's beschikbaar", + "noModelsAvailable": "Geen modellen beschikbaar", + "selectModel": "Kies een model", + "selectLoRA": "Kies een LoRA" + }, + "boards": { + "autoAddBoard": "Voeg automatisch bord toe", + "topMessage": "Dit bord bevat afbeeldingen die in gebruik zijn door de volgende functies:", + "move": "Verplaats", + "menuItemAutoAdd": "Voeg dit automatisch toe aan bord", + "myBoard": "Mijn bord", + "searchBoard": "Zoek borden...", + "noMatching": "Geen overeenkomende borden", + "selectBoard": "Kies een bord", + "cancel": "Annuleer", + "addBoard": "Voeg bord toe", + "bottomMessage": "Als je dit bord en alle afbeeldingen erop verwijdert, dan worden alle functies teruggezet die ervan gebruik maken.", + "uncategorized": "Zonder categorie", + "downloadBoard": "Download bord", + "changeBoard": "Wijzig bord", + "loading": "Bezig met laden...", + "clearSearch": "Maak zoekopdracht leeg" + }, + "invocationCache": { + "disable": "Schakel uit", + "misses": "Mislukt cacheverzoek", + "enableFailed": "Fout bij inschakelen aanroepcache", + "invocationCache": "Aanroepcache", + "clearSucceeded": "Aanroepcache gewist", + "enableSucceeded": "Aanroepcache ingeschakeld", + "clearFailed": "Fout bij wissen aanroepcache", + "hits": "Gelukt cacheverzoek", + "disableSucceeded": "Aanroepcache uitgeschakeld", + "disableFailed": "Fout bij uitschakelen aanroepcache", + "enable": "Schakel in", + "clear": "Wis", + "maxCacheSize": "Max. 
grootte cache", + "cacheSize": "Grootte cache" + }, + "embedding": { + "noMatchingEmbedding": "Geen overeenkomende embeddings", + "addEmbedding": "Voeg embedding toe", + "incompatibleModel": "Niet-compatibel basismodel:" } } From 2ab7c5f783ffa32175014d7aed2784890dc420d1 Mon Sep 17 00:00:00 2001 From: Surisen Date: Sat, 14 Oct 2023 22:03:51 +0200 Subject: [PATCH 053/202] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 100.0% (1216 of 1216 strings) Co-authored-by: Surisen Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- .../frontend/web/public/locales/zh_CN.json | 106 ++++++++++++++---- 1 file changed, 82 insertions(+), 24 deletions(-) diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 220cb87515..5a2d829f95 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -88,7 +88,9 @@ "t2iAdapter": "T2I Adapter", "ipAdapter": "IP Adapter", "controlAdapter": "Control Adapter", - "controlNet": "ControlNet" + "controlNet": "ControlNet", + "on": "开", + "auto": "自动" }, "gallery": { "generations": "生成的图像", @@ -472,7 +474,8 @@ "vae": "VAE", "oliveModels": "Olive", "loraModels": "LoRA", - "alpha": "Alpha" + "alpha": "Alpha", + "vaePrecision": "VAE 精度" }, "parameters": { "images": "图像", @@ -595,7 +598,11 @@ "useX2Model": "图像太大,无法使用 x4 模型,使用 x2 模型作为替代", "tooLarge": "图像太大无法进行放大,请选择更小的图像" }, - "iterationsWithCount_other": "{{count}} 次迭代生成" + "iterationsWithCount_other": "{{count}} 次迭代生成", + "seamlessX&Y": "无缝 X & Y", + "aspectRatioFree": "自由", + "seamlessX": "无缝 X", + "seamlessY": "无缝 Y" }, "settings": { "models": "模型", @@ -628,9 +635,10 @@ "clearIntermediates": "清除中间产物", "clearIntermediatesDesc3": "您图库中的图像不会被删除。", "clearIntermediatesDesc2": "中间产物图像是生成过程中产生的副产品,与图库中的结果图像不同。清除中间产物可释放磁盘空间。", - "intermediatesCleared_other": "已清除 {{number}} 个中间产物", + "intermediatesCleared_other": "已清除 {{count}} 个中间产物", "clearIntermediatesDesc1": "清除中间产物会重置您的画布和 ControlNet 状态。", - "intermediatesClearedFailed": "清除中间产物时出现问题" + "intermediatesClearedFailed": "清除中间产物时出现问题", + "clearIntermediatesWithCount_other": "清除 {{count}} 个中间产物" }, "toast": { "tempFoldersEmptied": "临时文件夹已清空", @@ -713,7 +721,7 @@ "canvasSavedGallery": "画布已保存到图库", "imageUploadFailed": "图像上传失败", "problemImportingMask": "导入遮罩时出现问题", - "baseModelChangedCleared_other": "基础模型已更改, 已清除或禁用 {{number}} 个不兼容的子模型" + "baseModelChangedCleared_other": "基础模型已更改, 已清除或禁用 {{count}} 个不兼容的子模型" }, "unifiedCanvas": { "layer": "图层", @@ -1002,7 +1010,27 @@ "booleanCollection": "布尔值合集", "imageCollectionDescription": "一个图像合集。", "loRAModelField": "LoRA", - "imageCollection": "图像合集" + "imageCollection": "图像合集", + "ipAdapterPolymorphicDescription": "一个 IP-Adapters Collection 合集。", + "ipAdapterCollection": "IP-Adapters 合集", + "conditioningCollection": "条件合集", + "ipAdapterPolymorphic": "IP-Adapters 多态", + "conditioningCollectionDescription": "条件可以在节点间传递。", + "colorPolymorphic": "颜色多态", + "conditioningPolymorphic": "条件多态", + "latentsCollection": "Latents 合集", + "stringPolymorphic": "字符多态", + "conditioningPolymorphicDescription": "条件可以在节点间传递。", + "imagePolymorphic": "图像多态", + "floatPolymorphic": "浮点多态", + "ipAdapterCollectionDescription": "一个 IP-Adapters Collection 合集。", + "ipAdapter": "IP-Adapter", + "booleanPolymorphic": "布尔多态", + "conditioningFieldDescription": "条件可以在节点间传递。", + "integerPolymorphic": "整数多态", + "latentsPolymorphic": "Latents 多态", + "conditioningField": "条件", + 
"latentsField": "Latents" }, "controlnet": { "resize": "直接缩放", @@ -1086,7 +1114,7 @@ "depthZoe": "Depth (Zoe)", "colorMap": "Color", "openPose": "Openpose", - "controlAdapter": "Control Adapter", + "controlAdapter_other": "Control Adapters", "lineartAnime": "Lineart Anime", "canny": "Canny" }, @@ -1140,7 +1168,7 @@ "queuedCount": "{{pending}} 待处理", "front": "前", "pruneTooltip": "修剪 {{item_count}} 个已完成的项目", - "batchQueuedDesc": "在队列的 {{direction}} 中添加了 {{item_count}} 个会话", + "batchQueuedDesc_other": "在队列的 {{direction}} 中添加了 {{count}} 个会话", "graphQueued": "节点图已加入队列", "back": "后", "session": "会话", @@ -1191,7 +1219,9 @@ "steps": "步数", "scheduler": "调度器", "seamless": "无缝", - "fit": "图生图适应" + "fit": "图生图匹配", + "recallParameters": "召回参数", + "noRecallParameters": "未找到要召回的参数" }, "models": { "noMatchingModels": "无相匹配的模型", @@ -1242,7 +1272,9 @@ "popovers": { "compositingMaskAdjustments": { "heading": "遮罩调整", - "paragraphs": ["调整遮罩。"] + "paragraphs": [ + "调整遮罩。" + ] }, "paramRatio": { "heading": "纵横比", @@ -1260,7 +1292,9 @@ }, "compositingBlur": { "heading": "模糊", - "paragraphs": ["遮罩模糊半径。"] + "paragraphs": [ + "遮罩模糊半径。" + ] }, "noiseUseCPU": { "heading": "使用 CPU 噪声", @@ -1278,11 +1312,15 @@ }, "compositingCoherenceMode": { "heading": "模式", - "paragraphs": ["一致性层模式。"] + "paragraphs": [ + "一致性层模式。" + ] }, "controlNetResizeMode": { "heading": "缩放模式", - "paragraphs": ["ControlNet 输入图像适应输出图像大小的方法。"] + "paragraphs": [ + "ControlNet 输入图像适应输出图像大小的方法。" + ] }, "clipSkip": { "paragraphs": [ @@ -1308,7 +1346,9 @@ }, "compositingCoherencePass": { "heading": "一致性层", - "paragraphs": ["第二轮去噪有助于合成内补/外扩图像。"] + "paragraphs": [ + "第二轮去噪有助于合成内补/外扩图像。" + ] }, "compositingStrength": { "heading": "强度", @@ -1326,7 +1366,9 @@ }, "compositingBlurMethod": { "heading": "模糊方式", - "paragraphs": ["应用于遮罩区域的模糊方法。"] + "paragraphs": [ + "应用于遮罩区域的模糊方法。" + ] }, "paramScheduler": { "heading": "调度器", @@ -1336,11 +1378,15 @@ }, "controlNetWeight": { "heading": "权重", - "paragraphs": ["ControlNet 对生成图像的影响强度。"] + "paragraphs": [ + "ControlNet 对生成图像的影响强度。" + ] }, "paramCFGScale": { "heading": "CFG 等级", - "paragraphs": ["控制提示词对生成过程的影响程度。"] + "paragraphs": [ + "控制提示词对生成过程的影响程度。" + ] }, "paramSteps": { "heading": "步数", @@ -1358,11 +1404,15 @@ }, "lora": { "heading": "LoRA 权重", - "paragraphs": ["更高的 LoRA 权重会对最终图像产生更大的影响。"] + "paragraphs": [ + "更高的 LoRA 权重会对最终图像产生更大的影响。" + ] }, "infillMethod": { "heading": "填充方法", - "paragraphs": ["填充选定区域的方式。"] + "paragraphs": [ + "填充选定区域的方式。" + ] }, "controlNetBeginEnd": { "heading": "开始 / 结束步数百分比", @@ -1373,7 +1423,9 @@ }, "scaleBeforeProcessing": { "heading": "处理前缩放", - "paragraphs": ["生成图像前将所选区域缩放为最适合模型的大小。"] + "paragraphs": [ + "生成图像前将所选区域缩放为最适合模型的大小。" + ] }, "paramDenoisingStrength": { "heading": "去噪强度", @@ -1391,7 +1443,9 @@ }, "controlNetControlMode": { "heading": "控制模式", - "paragraphs": ["给提示词或 ControlNet 增加更大的权重。"] + "paragraphs": [ + "给提示词或 ControlNet 增加更大的权重。" + ] }, "dynamicPrompts": { "paragraphs": [ @@ -1402,7 +1456,9 @@ "heading": "动态提示词" }, "paramVAE": { - "paragraphs": ["用于将 AI 输出转换成最终图像的模型。"], + "paragraphs": [ + "用于将 AI 输出转换成最终图像的模型。" + ], "heading": "VAE" }, "dynamicPromptsSeedBehaviour": { @@ -1416,7 +1472,9 @@ }, "dynamicPromptsMaxPrompts": { "heading": "最大提示词数量", - "paragraphs": ["限制动态提示词可生成的提示词数量。"] + "paragraphs": [ + "限制动态提示词可生成的提示词数量。" + ] }, "controlNet": { "paragraphs": [ From e21f3af5abeb644fe6a3ec6defa46d85595a610a Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Sat, 14 Oct 2023 22:03:51 +0200 Subject: [PATCH 054/202] translationBot(ui): update translation files Updated by "Remove 
blank strings" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 3cca8b508f..3123d8fcf8 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -1045,8 +1045,7 @@ "controlAdapter_other": "", "megaControl": "Mega ControlNet", "minConfidence": "Confidenza minima", - "scribble": "Scribble", - "controlnet": "" + "scribble": "Scribble" }, "queue": { "queueFront": "Aggiungi all'inizio della coda", From e9879b9e1f743b3af9c67c73c60d5a61faf0400e Mon Sep 17 00:00:00 2001 From: mickr777 <115216705+mickr777@users.noreply.github.com> Date: Sun, 15 Oct 2023 09:01:20 +1100 Subject: [PATCH 055/202] Clean up communityNodes.md (#4870) * Clean up communityNodes.md * Update communityNodes.md --- docs/nodes/communityNodes.md | 381 ++++++++++++++++++----------------- 1 file changed, 197 insertions(+), 184 deletions(-) diff --git a/docs/nodes/communityNodes.md b/docs/nodes/communityNodes.md index 47ed407695..d5a5d5654f 100644 --- a/docs/nodes/communityNodes.md +++ b/docs/nodes/communityNodes.md @@ -8,28 +8,42 @@ To download a node, simply download the `.py` node file from the link and add it To use a community workflow, download the the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor. --------------------------------- +- Community Nodes + + [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj) + + [Film Grain](#film-grain) + + [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes) + + [GPT2RandomPromptMaker](#gpt2randompromptmaker) + + [Grid to Gif](#grid-to-gif) + + [Halftone](#halftone) + + [Ideal Size](#ideal-size) + + [Image and Mask Composition Pack](#image-and-mask-composition-pack) + + [Image to Character Art Image Nodes](#image-to-character-art-image-nodes) + + [Image Picker](#image-picker) + + [Load Video Frame](#load-video-frame) + + [Make 3D](#make-3d) + + [Oobabooga](#oobabooga) + + [Prompt Tools](#prompt-tools) + + [Retroize](#retroize) + + [Size Stepper Nodes](#size-stepper-nodes) + + [Text font to Image](#text-font-to-image) + + [Thresholding](#thresholding) + + [XY Image to Grid and Images to Grids nodes](#xy-image-to-grid-and-images-to-grids-nodes) +- [Example Node Template](#example-node-template) +- [Disclaimer](#disclaimer) +- [Help](#help) + -------------------------------- -### Make 3D +### Depth Map from Wavefront OBJ -**Description:** Create compelling 3D stereo images from 2D originals. +**Description:** Render depth maps from Wavefront .obj files (triangulated) using this simple 3D renderer utilizing numpy and matplotlib to compute and color the scene. There are simple parameters to change the FOV, camera position, and model orientation. -**Node Link:** [https://gitlab.com/srcrr/shift3d/-/raw/main/make3d.py](https://gitlab.com/srcrr/shift3d) +To be imported, an .obj must use triangulated meshes, so make sure to enable that option if exporting from a 3D modeling program. This renderer makes each triangle a solid color based on its average depth, so it will cause anomalies if your .obj has large triangles. In Blender, the Remesh modifier can be helpful to subdivide a mesh into small pieces that work well given these limitations. 
-**Example Node Graph:** https://gitlab.com/srcrr/shift3d/-/raw/main/example-workflow.json?ref_type=heads&inline=false +**Node Link:** https://github.com/dwringer/depth-from-obj-node -**Output Examples** - -![Painting of a cozy delapidated house](https://gitlab.com/srcrr/shift3d/-/raw/main/example-1.png){: style="height:512px;width:512px"} -![Photo of cute puppies](https://gitlab.com/srcrr/shift3d/-/raw/main/example-2.png){: style="height:512px;width:512px"} - --------------------------------- -### Ideal Size - -**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of. - -**Node Link:** https://github.com/JPPhoto/ideal-size-node +**Example Usage:** +
-------------------------------- ### Film Grain @@ -39,68 +53,19 @@ To use a community workflow, download the the `.json` node graph file and load i **Node Link:** https://github.com/JPPhoto/film-grain-node -------------------------------- -### Image Picker +### Generative Grammar-Based Prompt Nodes -**Description:** This InvokeAI node takes in a collection of images and randomly chooses one. This can be useful when you have a number of poses to choose from for a ControlNet node, or a number of input images for another purpose. +**Description:** This set of 3 nodes generates prompts from simple user-defined grammar rules (loaded from custom files - examples provided below). The prompts are made by recursively expanding a special template string, replacing nonterminal "parts-of-speech" until no nonterminal terms remain in the string. -**Node Link:** https://github.com/JPPhoto/image-picker-node +This includes 3 Nodes: +- *Lookup Table from File* - loads a YAML file "prompt" section (or of a whole folder of YAML's) into a JSON-ified dictionary (Lookups output) +- *Lookups Entry from Prompt* - places a single entry in a new Lookups output under the specified heading +- *Prompt from Lookup Table* - uses a Collection of Lookups as grammar rules from which to randomly generate prompts. --------------------------------- -### Thresholding +**Node Link:** https://github.com/dwringer/generative-grammar-prompt-nodes -**Description:** This node generates masks for highlights, midtones, and shadows given an input image. You can optionally specify a blur for the lookup table used in making those masks from the source image. - -**Node Link:** https://github.com/JPPhoto/thresholding-node - -**Examples** - -Input: - -![image](https://github.com/invoke-ai/InvokeAI/assets/34005131/c88ada13-fb3d-484c-a4fe-947b44712632){: style="height:512px;width:512px"} - -Highlights/Midtones/Shadows: - - - - - -Highlights/Midtones/Shadows (with LUT blur enabled): - - - - - --------------------------------- -### Halftone - -**Description**: Halftone converts the source image to grayscale and then performs halftoning. CMYK Halftone converts the image to CMYK and applies a per-channel halftoning to make the source image look like a magazine or newspaper. For both nodes, you can specify angles and halftone dot spacing. - -**Node Link:** https://github.com/JPPhoto/halftone-node - -**Example** - -Input: - -![image](https://github.com/invoke-ai/InvokeAI/assets/34005131/fd5efb9f-4355-4409-a1c2-c1ca99e0cab4){: style="height:512px;width:512px"} - -Halftone Output: - -![image](https://github.com/invoke-ai/InvokeAI/assets/34005131/7e606f29-e68f-4d46-b3d5-97f799a4ec2f){: style="height:512px;width:512px"} - -CMYK Halftone Output: - -![image](https://github.com/invoke-ai/InvokeAI/assets/34005131/c59c578f-db8e-4d66-8c66-2851752d75ea){: style="height:512px;width:512px"} - --------------------------------- -### Retroize - -**Description:** Retroize is a collection of nodes for InvokeAI to "Retroize" images. Any image can be given a fresh coat of retro paint with these nodes, either from your gallery or from within the graph itself. It includes nodes to pixelize, quantize, palettize, and ditherize images; as well as to retrieve palettes from existing images. - -**Node Link:** https://github.com/Ar7ific1al/invokeai-retroizeinode/ - -**Retroize Output Examples** - -![image](https://github.com/Ar7ific1al/InvokeAI_nodes_retroize/assets/2306586/de8b4fa6-324c-4c2d-b36c-297600c73974) +**Example Usage:** +
-------------------------------- ### GPT2RandomPromptMaker @@ -113,76 +78,49 @@ CMYK Halftone Output: Generated Prompt: An enchanted weapon will be usable by any character regardless of their alignment. -![9acf5aef-7254-40dd-95b3-8eac431dfab0 (1)](https://github.com/mickr777/InvokeAI/assets/115216705/8496ba09-bcdd-4ff7-8076-ff213b6a1e4c) + -------------------------------- -### Load Video Frame +### Grid to Gif -**Description:** This is a video frame image provider + indexer/video creation nodes for hooking up to iterators and ranges and ControlNets and such for invokeAI node experimentation. Think animation + ControlNet outputs. +**Description:** One node that turns a grid image into an image collection, one node that turns an image collection into a gif. -**Node Link:** https://github.com/helix4u/load_video_frame +**Node Link:** https://github.com/mildmisery/invokeai-GridToGifNode/blob/main/GridToGif.py -**Example Node Graph:** https://github.com/helix4u/load_video_frame/blob/main/Example_Workflow.json +**Example Node Graph:** https://github.com/mildmisery/invokeai-GridToGifNode/blob/main/Grid%20to%20Gif%20Example%20Workflow.json -**Output Example:** +**Output Examples** -![Example animation](https://github.com/helix4u/load_video_frame/blob/main/testmp4_embed_converted.gif) -[Full mp4 of Example Output test.mp4](https://github.com/helix4u/load_video_frame/blob/main/test.mp4) + + -------------------------------- +### Halftone -### Oobabooga +**Description**: Halftone converts the source image to grayscale and then performs halftoning. CMYK Halftone converts the image to CMYK and applies a per-channel halftoning to make the source image look like a magazine or newspaper. For both nodes, you can specify angles and halftone dot spacing. -**Description:** asks a local LLM running in Oobabooga's Text-Generation-Webui to write a prompt based on the user input. +**Node Link:** https://github.com/JPPhoto/halftone-node -**Link:** https://github.com/sammyf/oobabooga-node +**Example** +Input: -**Example:** + -"describe a new mystical creature in its natural environment" +Halftone Output: -*can return* + -"The mystical creature I am describing to you is called the "Glimmerwing". It is a majestic, iridescent being that inhabits the depths of the most enchanted forests and glimmering lakes. Its body is covered in shimmering scales that reflect every color of the rainbow, and it has delicate, translucent wings that sparkle like diamonds in the sunlight. The Glimmerwing's home is a crystal-clear lake, surrounded by towering trees with leaves that shimmer like jewels. In this serene environment, the Glimmerwing spends its days swimming gracefully through the water, chasing schools of glittering fish and playing with the gentle ripples of the lake's surface. -As the sun sets, the Glimmerwing perches on a branch of one of the trees, spreading its wings to catch the last rays of light. The creature's scales glow softly, casting a rainbow of colors across the forest floor. The Glimmerwing sings a haunting melody, its voice echoing through the stillness of the night air. Its song is said to have the power to heal the sick and bring peace to troubled souls. Those who are lucky enough to hear the Glimmerwing's song are forever changed by its beauty and grace." 
+CMYK Halftone Output: -![glimmerwing_small](https://github.com/sammyf/oobabooga-node/assets/42468608/cecdd820-93dd-4c35-abbf-607e001fb2ed) - -**Requirement** - -a Text-Generation-Webui instance (might work remotely too, but I never tried it) and obviously InvokeAI 3.x - -**Note** - -This node works best with SDXL models, especially as the style can be described independantly of the LLM's output. + -------------------------------- -### Depth Map from Wavefront OBJ +### Ideal Size -**Description:** Render depth maps from Wavefront .obj files (triangulated) using this simple 3D renderer utilizing numpy and matplotlib to compute and color the scene. There are simple parameters to change the FOV, camera position, and model orientation. +**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of. -To be imported, an .obj must use triangulated meshes, so make sure to enable that option if exporting from a 3D modeling program. This renderer makes each triangle a solid color based on its average depth, so it will cause anomalies if your .obj has large triangles. In Blender, the Remesh modifier can be helpful to subdivide a mesh into small pieces that work well given these limitations. - -**Node Link:** https://github.com/dwringer/depth-from-obj-node - -**Example Usage:** -![depth from obj usage graph](https://raw.githubusercontent.com/dwringer/depth-from-obj-node/main/depth_from_obj_usage.jpg) - --------------------------------- -### Generative Grammar-Based Prompt Nodes - -**Description:** This set of 3 nodes generates prompts from simple user-defined grammar rules (loaded from custom files - examples provided below). The prompts are made by recursively expanding a special template string, replacing nonterminal "parts-of-speech" until no more nonterminal terms remain in the string. - -This includes 3 Nodes: -- *Lookup Table from File* - loads a YAML file "prompt" section (or of a whole folder of YAML's) into a JSON-ified dictionary (Lookups output) -- *Lookups Entry from Prompt* - places a single entry in a new Lookups output under the specified heading -- *Prompt from Lookup Table* - uses a Collection of Lookups as grammar rules from which to randomly generate prompts. - -**Node Link:** https://github.com/dwringer/generative-grammar-prompt-nodes - -**Example Usage:** -![lookups usage example graph](https://raw.githubusercontent.com/dwringer/generative-grammar-prompt-nodes/main/lookuptables_usage.jpg) +**Node Link:** https://github.com/JPPhoto/ideal-size-node -------------------------------- ### Image and Mask Composition Pack @@ -208,45 +146,88 @@ This includes 15 Nodes: - *Text Mask (simple 2D)* - create and position a white on black (or black on white) line of text using any font locally available to Invoke. **Node Link:** https://github.com/dwringer/composition-nodes - -**Nodes and Output Examples:** -![composition nodes usage graph](https://raw.githubusercontent.com/dwringer/composition-nodes/main/composition_pack_overview.jpg) + +
-------------------------------- -### Size Stepper Nodes +### Image to Character Art Image Nodes -**Description:** This is a set of nodes for calculating the necessary size increments for doing upscaling workflows. Use the *Final Size & Orientation* node to enter your full size dimensions and orientation (portrait/landscape/random), then plug that and your initial generation dimensions into the *Ideal Size Stepper* and get 1, 2, or 3 intermediate pairs of dimensions for upscaling. Note this does not output the initial size or full size dimensions: the 1, 2, or 3 outputs of this node are only the intermediate sizes. +**Description:** Group of nodes to convert an input image into ascii/unicode art Image -A third node is included, *Random Switch (Integers)*, which is just a generic version of Final Size with no orientation selection. - -**Node Link:** https://github.com/dwringer/size-stepper-nodes - -**Example Usage:** -![size stepper usage graph](https://raw.githubusercontent.com/dwringer/size-stepper-nodes/main/size_nodes_usage.jpg) - --------------------------------- - -### Text font to Image - -**Description:** text font to text image node for InvokeAI, download a font to use (or if in font cache uses it from there), the text is always resized to the image size, but can control that with padding, optional 2nd line - -**Node Link:** https://github.com/mickr777/textfontimage +**Node Link:** https://github.com/mickr777/imagetoasciiimage **Output Examples** -![a3609d48-d9b7-41f0-b280-063d857986fb](https://github.com/mickr777/InvokeAI/assets/115216705/c21b0af3-d9c6-4c16-9152-846a23effd36) - -Results after using the depth controlnet - -![9133eabb-bcda-4326-831e-1b641228b178](https://github.com/mickr777/InvokeAI/assets/115216705/915f1a53-968e-43eb-aa61-07cd8f1a733a) -![4f9a3fa8-9be9-4236-8a3e-fcec66decd2a](https://github.com/mickr777/InvokeAI/assets/115216705/821ef89e-8a60-44f5-b94e-471a9d8690cc) -![babd69c4-9d60-4a55-a834-5e8397f62610](https://github.com/mickr777/InvokeAI/assets/115216705/2befcb6d-49f4-4bfd-b5fc-1fee19274f89) +
+ + -------------------------------- +### Image Picker + +**Description:** This InvokeAI node takes in a collection of images and randomly chooses one. This can be useful when you have a number of poses to choose from for a ControlNet node, or a number of input images for another purpose. + +**Node Link:** https://github.com/JPPhoto/image-picker-node + +-------------------------------- +### Load Video Frame + +**Description:** This is a video frame image provider + indexer/video creation nodes for hooking up to iterators and ranges and ControlNets and such for invokeAI node experimentation. Think animation + ControlNet outputs. + +**Node Link:** https://github.com/helix4u/load_video_frame + +**Example Node Graph:** https://github.com/helix4u/load_video_frame/blob/main/Example_Workflow.json + +**Output Example:** + + +[Full mp4 of Example Output test.mp4](https://github.com/helix4u/load_video_frame/blob/main/test.mp4) + +-------------------------------- +### Make 3D + +**Description:** Create compelling 3D stereo images from 2D originals. + +**Node Link:** [https://gitlab.com/srcrr/shift3d/-/raw/main/make3d.py](https://gitlab.com/srcrr/shift3d) + +**Example Node Graph:** https://gitlab.com/srcrr/shift3d/-/raw/main/example-workflow.json?ref_type=heads&inline=false + +**Output Examples** + + + + +-------------------------------- +### Oobabooga + +**Description:** asks a local LLM running in Oobabooga's Text-Generation-Webui to write a prompt based on the user input. + +**Link:** https://github.com/sammyf/oobabooga-node + +**Example:** + +"describe a new mystical creature in its natural environment" + +*can return* + +"The mystical creature I am describing to you is called the "Glimmerwing". It is a majestic, iridescent being that inhabits the depths of the most enchanted forests and glimmering lakes. Its body is covered in shimmering scales that reflect every color of the rainbow, and it has delicate, translucent wings that sparkle like diamonds in the sunlight. The Glimmerwing's home is a crystal-clear lake, surrounded by towering trees with leaves that shimmer like jewels. In this serene environment, the Glimmerwing spends its days swimming gracefully through the water, chasing schools of glittering fish and playing with the gentle ripples of the lake's surface. +As the sun sets, the Glimmerwing perches on a branch of one of the trees, spreading its wings to catch the last rays of light. The creature's scales glow softly, casting a rainbow of colors across the forest floor. The Glimmerwing sings a haunting melody, its voice echoing through the stillness of the night air. Its song is said to have the power to heal the sick and bring peace to troubled souls. Those who are lucky enough to hear the Glimmerwing's song are forever changed by its beauty and grace." + + + +**Requirement** + +a Text-Generation-Webui instance (might work remotely too, but I never tried it) and obviously InvokeAI 3.x + +**Note** + +This node works best with SDXL models, especially as the style can be described independently of the LLM's output. + +-------------------------------- ### Prompt Tools -**Description:** A set of InvokeAI nodes that add general prompt manipulation tools. These where written to accompany the PromptsFromFile node and other prompt generation nodes. +**Description:** A set of InvokeAI nodes that add general prompt manipulation tools. These were written to accompany the PromptsFromFile node and other prompt generation nodes. 1. PromptJoin - Joins to prompts into one. 2. 
PromptReplace - performs a search and replace on a prompt. With the option of using regex. @@ -263,51 +244,83 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai **Node Link:** https://github.com/skunkworxdark/Prompt-tools-nodes -------------------------------- +### Retroize +**Description:** Retroize is a collection of nodes for InvokeAI to "Retroize" images. Any image can be given a fresh coat of retro paint with these nodes, either from your gallery or from within the graph itself. It includes nodes to pixelize, quantize, palettize, and ditherize images; as well as to retrieve palettes from existing images. + +**Node Link:** https://github.com/Ar7ific1al/invokeai-retroizeinode/ + +**Retroize Output Examples** + + + +-------------------------------- +### Size Stepper Nodes + +**Description:** This is a set of nodes for calculating the necessary size increments for doing upscaling workflows. Use the *Final Size & Orientation* node to enter your full size dimensions and orientation (portrait/landscape/random), then plug that and your initial generation dimensions into the *Ideal Size Stepper* and get 1, 2, or 3 intermediate pairs of dimensions for upscaling. Note this does not output the initial size or full size dimensions: the 1, 2, or 3 outputs of this node are only the intermediate sizes. + +A third node is included, *Random Switch (Integers)*, which is just a generic version of Final Size with no orientation selection. + +**Node Link:** https://github.com/dwringer/size-stepper-nodes + +**Example Usage:** +
+ +-------------------------------- +### Text font to Image + +**Description:** text font to text image node for InvokeAI, download a font to use (or if in font cache uses it from there), the text is always resized to the image size, but can control that with padding, optional 2nd line + +**Node Link:** https://github.com/mickr777/textfontimage + +**Output Examples** + + + +Results after using the depth controlnet + + + + + +-------------------------------- +### Thresholding + +**Description:** This node generates masks for highlights, midtones, and shadows given an input image. You can optionally specify a blur for the lookup table used in making those masks from the source image. + +**Node Link:** https://github.com/JPPhoto/thresholding-node + +**Examples** + +Input: + + + +Highlights/Midtones/Shadows: + + + + + +Highlights/Midtones/Shadows (with LUT blur enabled): + + + + + +-------------------------------- ### XY Image to Grid and Images to Grids nodes **Description:** Image to grid nodes and supporting tools. -1. "Images To Grids" node - Takes a collection of images and creates a grid(s) of images. If there are more images than the size of a single grid then mutilple grids will be created until it runs out of images. -2. "XYImage To Grid" node - Converts a collection of XYImages into a labeled Grid of images. The XYImages collection has to be built using the supporoting nodes. See example node setups for more details. - +1. "Images To Grids" node - Takes a collection of images and creates a grid(s) of images. If there are more images than the size of a single grid then multiple grids will be created until it runs out of images. +2. "XYImage To Grid" node - Converts a collection of XYImages into a labeled Grid of images. The XYImages collection has to be built using the supporting nodes. See example node setups for more details. See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/edit/main/README.md **Node Link:** https://github.com/skunkworxdark/XYGrid_nodes -------------------------------- - -### Image to Character Art Image Node's - -**Description:** Group of nodes to convert an input image into ascii/unicode art Image - -**Node Link:** https://github.com/mickr777/imagetoasciiimage - -**Output Examples** - - -
- - - --------------------------------- - -### Grid to Gif - -**Description:** One node that turns a grid image into an image colletion, one node that turns an image collection into a gif - -**Node Link:** https://github.com/mildmisery/invokeai-GridToGifNode/blob/main/GridToGif.py - -**Example Node Graph:** https://github.com/mildmisery/invokeai-GridToGifNode/blob/main/Grid%20to%20Gif%20Example%20Workflow.json - -**Output Examples** - - - - --------------------------------- - ### Example Node Template **Description:** This node allows you to do super cool things with InvokeAI. @@ -318,7 +331,7 @@ See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/edit/main/READ **Output Examples** -![Example Image](https://invoke-ai.github.io/InvokeAI/assets/invoke_ai_banner.png){: style="height:115px;width:240px"} +
## Disclaimer

From 78b8cfede3e1d7bb21c46734c3a647ceef095202 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Sun, 15 Oct 2023 10:44:16 +1100
Subject: [PATCH 056/202] fix(nodes,ui): optional metadata

- Make all metadata items optional. This will reduce errors related to
  metadata not being provided when we update the backend but old queue
  items still exist
- Fix a bug in t2i adapter metadata handling where it checked for ip
  adapter metadata instead of t2i adapter metadata
- Fix some metadata fields that were not using `InputField`
---
 invokeai/app/invocations/metadata.py | 74 +++++++++------
 .../addT2IAdapterToLinearGraph.ts | 2 +-
 .../frontend/web/src/services/api/schema.d.ts | 94 +++++++++----------
 3 files changed, 91 insertions(+), 79 deletions(-)

diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py
index cecc01096a..449f332387 100644
--- a/invokeai/app/invocations/metadata.py
+++ b/invokeai/app/invocations/metadata.py
@@ -44,28 +44,31 @@ class CoreMetadata(BaseModelExcludeNull):
     """Core generation metadata for an image generated in InvokeAI."""
 
     app_version: str = Field(default=__version__, description="The version of InvokeAI used to generate this image")
-    generation_mode: str = Field(
+    generation_mode: Optional[str] = Field(
+        default=None,
         description="The generation mode that output this image",
     )
     created_by: Optional[str] = Field(description="The name of the creator of the image")
-    positive_prompt: str = Field(description="The positive prompt parameter")
-    negative_prompt: str = Field(description="The negative prompt parameter")
-    width: int = Field(description="The width parameter")
-    height: int = Field(description="The height parameter")
-    seed: int = Field(description="The seed used for noise generation")
-    rand_device: str = Field(description="The device used for random number generation")
-    cfg_scale: float = Field(description="The classifier-free guidance scale parameter")
-    steps: int = Field(description="The number of steps used for inference")
-    scheduler: str = Field(description="The scheduler used for inference")
+    positive_prompt: Optional[str] = Field(default=None, description="The positive prompt parameter")
+    negative_prompt: Optional[str] = Field(default=None, description="The negative prompt parameter")
+    width: Optional[int] = Field(default=None, description="The width parameter")
+    height: Optional[int] = Field(default=None, description="The height parameter")
+    seed: Optional[int] = Field(default=None, description="The seed used for noise generation")
+    rand_device: Optional[str] = Field(default=None, description="The device used for random number generation")
+    cfg_scale: Optional[float] = Field(default=None, description="The classifier-free guidance scale parameter")
+    steps: Optional[int] = Field(default=None, description="The number of steps used for inference")
+    scheduler: Optional[str] = Field(default=None, description="The scheduler used for inference")
     clip_skip: Optional[int] = Field(
         default=None,
         description="The number of skipped CLIP layers",
     )
-    model: MainModelField = Field(description="The main model used for inference")
-    controlnets: list[ControlField] = Field(description="The ControlNets used for inference")
-    ipAdapters: list[IPAdapterMetadataField] = Field(description="The IP Adapters used for inference")
-    t2iAdapters: list[T2IAdapterField] = Field(description="The IP Adapters used for inference")
-    loras: list[LoRAMetadataField] = 
Field(description="The LoRAs used for inference") + model: Optional[MainModelField] = Field(default=None, description="The main model used for inference") + controlnets: Optional[list[ControlField]] = Field(default=None, description="The ControlNets used for inference") + ipAdapters: Optional[list[IPAdapterMetadataField]] = Field( + default=None, description="The IP Adapters used for inference" + ) + t2iAdapters: Optional[list[T2IAdapterField]] = Field(default=None, description="The IP Adapters used for inference") + loras: Optional[list[LoRAMetadataField]] = Field(default=None, description="The LoRAs used for inference") vae: Optional[VAEModelField] = Field( default=None, description="The VAE used for decoding, if the main model's default was not used", @@ -122,27 +125,34 @@ class MetadataAccumulatorOutput(BaseInvocationOutput): class MetadataAccumulatorInvocation(BaseInvocation): """Outputs a Core Metadata Object""" - generation_mode: str = InputField( + generation_mode: Optional[str] = InputField( + default=None, description="The generation mode that output this image", ) - positive_prompt: str = InputField(description="The positive prompt parameter") - negative_prompt: str = InputField(description="The negative prompt parameter") - width: int = InputField(description="The width parameter") - height: int = InputField(description="The height parameter") - seed: int = InputField(description="The seed used for noise generation") - rand_device: str = InputField(description="The device used for random number generation") - cfg_scale: float = InputField(description="The classifier-free guidance scale parameter") - steps: int = InputField(description="The number of steps used for inference") - scheduler: str = InputField(description="The scheduler used for inference") - clip_skip: Optional[int] = Field( + positive_prompt: Optional[str] = InputField(default=None, description="The positive prompt parameter") + negative_prompt: Optional[str] = InputField(default=None, description="The negative prompt parameter") + width: Optional[int] = InputField(default=None, description="The width parameter") + height: Optional[int] = InputField(default=None, description="The height parameter") + seed: Optional[int] = InputField(default=None, description="The seed used for noise generation") + rand_device: Optional[str] = InputField(default=None, description="The device used for random number generation") + cfg_scale: Optional[float] = InputField(default=None, description="The classifier-free guidance scale parameter") + steps: Optional[int] = InputField(default=None, description="The number of steps used for inference") + scheduler: Optional[str] = InputField(default=None, description="The scheduler used for inference") + clip_skip: Optional[int] = InputField( default=None, description="The number of skipped CLIP layers", ) - model: MainModelField = InputField(description="The main model used for inference") - controlnets: list[ControlField] = InputField(description="The ControlNets used for inference") - ipAdapters: list[IPAdapterMetadataField] = InputField(description="The IP Adapters used for inference") - t2iAdapters: list[T2IAdapterField] = Field(description="The IP Adapters used for inference") - loras: list[LoRAMetadataField] = InputField(description="The LoRAs used for inference") + model: Optional[MainModelField] = InputField(default=None, description="The main model used for inference") + controlnets: Optional[list[ControlField]] = InputField( + default=None, description="The ControlNets used for 
inference" + ) + ipAdapters: Optional[list[IPAdapterMetadataField]] = InputField( + default=None, description="The IP Adapters used for inference" + ) + t2iAdapters: Optional[list[T2IAdapterField]] = InputField( + default=None, description="The IP Adapters used for inference" + ) + loras: Optional[list[LoRAMetadataField]] = InputField(default=None, description="The LoRAs used for inference") strength: Optional[float] = InputField( default=None, description="The strength used for latents-to-latents", @@ -158,9 +168,11 @@ class MetadataAccumulatorInvocation(BaseInvocation): # High resolution fix metadata. hrf_width: Optional[int] = InputField( + default=None, description="The high resolution fix height and width multipler.", ) hrf_height: Optional[int] = InputField( + default=None, description="The high resolution fix height and width multipler.", ) hrf_strength: Optional[float] = InputField( diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts index f07edcb220..16dc5bbc71 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts @@ -86,7 +86,7 @@ export const addT2IAdaptersToLinearGraph = ( graph.nodes[t2iAdapterNode.id] = t2iAdapterNode as T2IAdapterInvocation; - if (metadataAccumulator?.ipAdapters) { + if (metadataAccumulator?.t2iAdapters) { // metadata accumulator only needs a control field - not the whole node // extract what we need and add to the accumulator const t2iAdapterField = omit(t2iAdapterNode, [ diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index d71d01e6af..e476217e6c 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -2058,14 +2058,14 @@ export type components = { /** * App Version * @description The version of InvokeAI used to generate this image - * @default 3.2.0 + * @default 3.3.0 */ app_version?: string; /** * Generation Mode * @description The generation mode that output this image */ - generation_mode: string; + generation_mode?: string; /** * Created By * @description The name of the creator of the image @@ -2075,47 +2075,47 @@ export type components = { * Positive Prompt * @description The positive prompt parameter */ - positive_prompt: string; + positive_prompt?: string; /** * Negative Prompt * @description The negative prompt parameter */ - negative_prompt: string; + negative_prompt?: string; /** * Width * @description The width parameter */ - width: number; + width?: number; /** * Height * @description The height parameter */ - height: number; + height?: number; /** * Seed * @description The seed used for noise generation */ - seed: number; + seed?: number; /** * Rand Device * @description The device used for random number generation */ - rand_device: string; + rand_device?: string; /** * Cfg Scale * @description The classifier-free guidance scale parameter */ - cfg_scale: number; + cfg_scale?: number; /** * Steps * @description The number of steps used for inference */ - steps: number; + steps?: number; /** * Scheduler * @description The scheduler used for inference */ - scheduler: string; + scheduler?: string; /** * Clip Skip * @description The number of skipped CLIP layers @@ -2125,27 +2125,27 @@ export type components = { * Model * 
@description The main model used for inference */ - model: components["schemas"]["MainModelField"]; + model?: components["schemas"]["MainModelField"]; /** * Controlnets * @description The ControlNets used for inference */ - controlnets: components["schemas"]["ControlField"][]; + controlnets?: components["schemas"]["ControlField"][]; /** * Ipadapters * @description The IP Adapters used for inference */ - ipAdapters: components["schemas"]["IPAdapterMetadataField"][]; + ipAdapters?: components["schemas"]["IPAdapterMetadataField"][]; /** * T2Iadapters * @description The IP Adapters used for inference */ - t2iAdapters: components["schemas"]["T2IAdapterField"][]; + t2iAdapters?: components["schemas"]["T2IAdapterField"][]; /** * Loras * @description The LoRAs used for inference */ - loras: components["schemas"]["LoRAMetadataField"][]; + loras?: components["schemas"]["LoRAMetadataField"][]; /** * Vae * @description The VAE used for decoding, if the main model's default was not used @@ -2258,7 +2258,7 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default false + * @default true */ fp32?: boolean; /** @@ -3293,7 +3293,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | 
components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | 
components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; + [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | 
components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; }; /** * Edges @@ -3336,7 +3336,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | 
components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["String2Output"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"]; + [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["String2Output"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"]; }; /** * Errors @@ -4735,7 +4735,7 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default false + * @default true */ fp32?: boolean; /** @@ -5453,7 +5453,7 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default false + * @default true */ fp32?: boolean; /** @@ -6169,7 +6169,7 @@ export type components = { * T2Iadapters * @description The IP Adapters used for inference */ - t2iAdapters: components["schemas"]["T2IAdapterField"][]; + t2iAdapters?: components["schemas"]["T2IAdapterField"][]; /** * Loras * @description The LoRAs used for inference @@ 
-6191,17 +6191,17 @@ export type components = { */ vae?: components["schemas"]["VAEModelField"]; /** - * High Resolution Fix Width - * @description The high resolution fix height and width multiplier. + * Hrf Width + * @description The high resolution fix height and width multipler. */ hrf_width?: number; /** - * High Resolution Fix Height - * @description The high resolution fix height and width multiplier. + * Hrf Height + * @description The high resolution fix height and width multipler. */ hrf_height?: number; /** - * High Resolution Strength + * Hrf Strength * @description The high resolution fix img2img strength used in the upscale pass. */ hrf_strength?: number; @@ -9744,18 +9744,6 @@ export type components = { /** Ui Order */ ui_order?: number; }; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; /** * IPAdapterModelFormat * @description An enumeration. @@ -9768,12 +9756,6 @@ export type components = { * @enum {string} */ T2IAdapterModelFormat: "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusion2ModelFormat * @description An enumeration. @@ -9786,12 +9768,30 @@ export type components = { * @enum {string} */ CLIPVisionModelFormat: "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionOnnxModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionOnnxModelFormat: "olive" | "onnx"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9918,7 +9918,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | 
components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | 
components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | 
components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; }; }; responses: { @@ -9960,7 +9960,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | 
components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | 
components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | 
components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; }; }; responses: { From c3d6ff5b11a57bd5d1a89affbafdb8a5a184c695 Mon Sep 17 00:00:00 2001 From: Marta Nahorniuk Date: Thu, 12 Oct 2023 13:17:04 +0300 Subject: [PATCH 057/202] fixed bug #4857 --- .../SettingsModal/SettingsClearIntermediates.tsx | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx 
index 423ce41bcd..62d4c95a45 100644 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx +++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx @@ -8,6 +8,7 @@ import { useClearIntermediatesMutation, useGetIntermediatesCountQuery, } from '../../../../services/api/endpoints/images'; +import { useGetQueueStatusQuery } from 'services/api/endpoints/queue'; import { resetCanvas } from '../../../canvas/store/canvasSlice'; import { addToast } from '../../store/systemSlice'; import StyledFlex from './StyledFlex'; @@ -22,7 +23,14 @@ export default function SettingsClearIntermediates() { const [clearIntermediates, { isLoading: isLoadingClearIntermediates }] = useClearIntermediatesMutation(); + const { data: queueStatus } = useGetQueueStatusQuery(); + const hasPendingItems = queueStatus && (queueStatus.in_progress > 0 || queueStatus.pending > 0); + const handleClickClearIntermediates = useCallback(() => { + if (hasPendingItems) { + return; + } + clearIntermediates() .unwrap() .then((clearedCount) => { From 99e6bb48ba3cac24485573dcdb548dd4c4e0bc38 Mon Sep 17 00:00:00 2001 From: Marta Nahorniuk Date: Thu, 12 Oct 2023 15:34:06 +0300 Subject: [PATCH 058/202] fixed problems --- .../components/SettingsModal/SettingsClearIntermediates.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx index 62d4c95a45..51cabe9f46 100644 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx +++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx @@ -24,7 +24,7 @@ export default function SettingsClearIntermediates() { useClearIntermediatesMutation(); const { data: queueStatus } = useGetQueueStatusQuery(); - const hasPendingItems = queueStatus && (queueStatus.in_progress > 0 || queueStatus.pending > 0); + const hasPendingItems = queueStatus && (queueStatus.queue.in_progress > 0 || queueStatus.queue.pending > 0); const handleClickClearIntermediates = useCallback(() => { if (hasPendingItems) { @@ -51,7 +51,7 @@ export default function SettingsClearIntermediates() { }) ); }); - }, [t, clearIntermediates, dispatch]); + }, [t, clearIntermediates, dispatch, hasPendingItems]); useEffect(() => { // update the count on mount From 779c902402b808f4d22bd682ade7edba4ca11f6f Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 15 Oct 2023 17:18:36 +1100 Subject: [PATCH 059/202] chore(ui): lint --- .../components/SettingsModal/SettingsClearIntermediates.tsx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx index 51cabe9f46..c2a67cba26 100644 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx +++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx @@ -24,7 +24,9 @@ export default function SettingsClearIntermediates() { useClearIntermediatesMutation(); const { data: queueStatus } = useGetQueueStatusQuery(); - const hasPendingItems = queueStatus && 
(queueStatus.queue.in_progress > 0 || queueStatus.queue.pending > 0); + const hasPendingItems = + queueStatus && + (queueStatus.queue.in_progress > 0 || queueStatus.queue.pending > 0); const handleClickClearIntermediates = useCallback(() => { if (hasPendingItems) { From 95cca9493c4beab58b39a10b0c42bc0efda19e3b Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 15 Oct 2023 17:21:35 +1100 Subject: [PATCH 060/202] feat(ui): disable clear intermediates button when queue has items --- .../components/SettingsModal/SettingsClearIntermediates.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx index c2a67cba26..648a4b5162 100644 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx +++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx @@ -67,7 +67,7 @@ export default function SettingsClearIntermediates() { colorScheme="warning" onClick={handleClickClearIntermediates} isLoading={isLoadingClearIntermediates} - isDisabled={!intermediatesCount} + isDisabled={!intermediatesCount || hasPendingItems} > {t('settings.clearIntermediatesWithCount', { count: intermediatesCount ?? 0, From 1c099e0abb9221483bdac0a567394d1b493cbece Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 15 Oct 2023 17:27:25 +1100 Subject: [PATCH 061/202] feat(ui): add tooltip to clear intermediates button when disabled --- invokeai/frontend/web/public/locales/en.json | 1 + .../components/SettingsModal/SettingsClearIntermediates.tsx | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index dd96193d5d..dc6c54f025 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -1115,6 +1115,7 @@ "showProgressInViewer": "Show Progress Images in Viewer", "ui": "User Interface", "useSlidersForAll": "Use Sliders For All Options", + "clearIntermediatesDisabled": "Queue must be empty to clear intermediates", "clearIntermediatesDesc1": "Clearing intermediates will reset your Canvas and ControlNet state.", "clearIntermediatesDesc2": "Intermediate images are byproducts of generation, different from the result images in the gallery. 
Clearing intermediates will free disk space.", "clearIntermediatesDesc3": "Your gallery images will not be deleted.", diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx index 648a4b5162..fbe5692431 100644 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx +++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx @@ -3,12 +3,12 @@ import { useAppDispatch } from 'app/store/storeHooks'; import { controlAdaptersReset } from 'features/controlAdapters/store/controlAdaptersSlice'; import { useCallback, useEffect } from 'react'; import { useTranslation } from 'react-i18next'; +import { useGetQueueStatusQuery } from 'services/api/endpoints/queue'; import IAIButton from '../../../../common/components/IAIButton'; import { useClearIntermediatesMutation, useGetIntermediatesCountQuery, } from '../../../../services/api/endpoints/images'; -import { useGetQueueStatusQuery } from 'services/api/endpoints/queue'; import { resetCanvas } from '../../../canvas/store/canvasSlice'; import { addToast } from '../../store/systemSlice'; import StyledFlex from './StyledFlex'; @@ -64,6 +64,9 @@ export default function SettingsClearIntermediates() { {t('settings.clearIntermediates')} Date: Sun, 15 Oct 2023 18:16:10 +1100 Subject: [PATCH 062/202] fix(ui): fix control adapter translation string --- invokeai/frontend/web/public/locales/en.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index dc6c54f025..818f9f4fa9 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -137,9 +137,9 @@ "controlnet": { "controlAdapter_one": "Control Adapter", "controlAdapter_other": "Control Adapters", - "controlnet": "$t(controlnet.controlAdapter) #{{number}} ($t(common.controlNet))", - "ip_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.ipAdapter))", - "t2i_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.t2iAdapter))", + "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))", + "ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))", + "t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))", "addControlNet": "Add $t(common.controlNet)", "addIPAdapter": "Add $t(common.ipAdapter)", "addT2IAdapter": "Add $t(common.t2iAdapter)", From 2060ee22f23f238de59f44afb7c637a5de61546c Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 15 Oct 2023 18:28:05 +1100 Subject: [PATCH 063/202] fix(ui): reset canvas batchIds on clear/batch cancel Closes #4889 --- .../web/src/features/canvas/store/canvasSlice.ts | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts index 77fae4e0a1..a5520322be 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts @@ -30,6 +30,7 @@ import { isCanvasMaskLine, } from './canvasTypes'; import { appSocketQueueItemStatusChanged } from 'services/events/actions'; +import { queueApi } from 
'services/api/endpoints/queue'; export const initialLayerState: CanvasLayerState = { objects: [], @@ -812,6 +813,20 @@ export const canvasSlice = createSlice({ ); } }); + builder.addMatcher( + queueApi.endpoints.clearQueue.matchFulfilled, + (state) => { + state.batchIds = []; + } + ); + builder.addMatcher( + queueApi.endpoints.cancelByBatchIds.matchFulfilled, + (state, action) => { + state.batchIds = state.batchIds.filter( + (id) => !action.meta.arg.originalArgs.batch_ids.includes(id) + ); + } + ); }, }); From bcf0d8a590529febe8336510312e703bf3728e6a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 15 Oct 2023 18:34:25 +1100 Subject: [PATCH 064/202] fix(ui): use `_other` for control adapter collapse --- .../controlAdapters/components/ControlAdaptersCollapse.tsx | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdaptersCollapse.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdaptersCollapse.tsx index d1c3f6f0b9..2cad007a03 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdaptersCollapse.tsx +++ b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdaptersCollapse.tsx @@ -90,9 +90,7 @@ const ControlAdaptersCollapse = () => { return ( From 3f9708f166098eeb070ebde842217bf88bcceda0 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Sun, 15 Oct 2023 09:40:37 +0200 Subject: [PATCH 065/202] translationBot(ui): update translation (Italian) Currently translated at 91.9% (1119 of 1217 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 24 ++++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 3123d8fcf8..d3c3c7b9ff 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -87,7 +87,8 @@ "learnMore": "Per saperne di più", "ipAdapter": "Adattatore IP", "t2iAdapter": "Adattatore T2I", - "controlAdapter": "Adattatore di Controllo" + "controlAdapter": "Adattatore di Controllo", + "controlNet": "ControlNet" }, "gallery": { "generations": "Generazioni", @@ -574,9 +575,9 @@ "systemBusy": "Sistema occupato", "unableToInvoke": "Impossibile invocare", "systemDisconnected": "Sistema disconnesso", - "noControlImageForControlAdapter": "L'adattatore di controllo {{number}} non ha un'immagine di controllo", - "noModelForControlAdapter": "Nessun modello selezionato per l'adattatore di controllo {{number}}.", - "incompatibleBaseModelForControlAdapter": "Il modello dell'adattatore di controllo {{number}} non è compatibile con il modello principale.", + "noControlImageForControlAdapter": "L'adattatore di controllo #{{number}} non ha un'immagine di controllo", + "noModelForControlAdapter": "Nessun modello selezionato per l'adattatore di controllo #{{number}}.", + "incompatibleBaseModelForControlAdapter": "Il modello dell'adattatore di controllo #{{number}} non è compatibile con il modello principale.", "missingNodeTemplate": "Modello di nodo mancante" }, "enableNoiseSettings": "Abilita le impostazioni del rumore", @@ -629,8 +630,8 @@ "clearIntermediates": "Cancella le immagini intermedie", "clearIntermediatesDesc3": "Le immagini della galleria non verranno eliminate.", 
"clearIntermediatesDesc2": "Le immagini intermedie sono sottoprodotti della generazione, diversi dalle immagini risultanti nella galleria. La cancellazione degli intermedi libererà spazio su disco.", - "intermediatesCleared_one": "Cancellata 1 immagine intermedia", - "intermediatesCleared_many": "Cancellate {{number}} immagini intermedie", + "intermediatesCleared_one": "Cancellata {{count}} immagine intermedia", + "intermediatesCleared_many": "Cancellate {{count}} immagini intermedie", "intermediatesCleared_other": "", "clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato di Tela Unificata e ControlNet.", "intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie" @@ -683,8 +684,8 @@ "nodesUnrecognizedTypes": "Impossibile caricare. Il grafico ha tipi di dati non riconosciuti", "nodesNotValidJSON": "JSON non valido", "nodesBrokenConnections": "Impossibile caricare. Alcune connessioni sono interrotte.", - "baseModelChangedCleared_one": "Il modello base è stato modificato, cancellato o disabilitato {{number}} sotto-modello incompatibile", - "baseModelChangedCleared_many": "", + "baseModelChangedCleared_one": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modello incompatibile", + "baseModelChangedCleared_many": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modelli incompatibili", "baseModelChangedCleared_other": "", "imageSavingFailed": "Salvataggio dell'immagine non riuscito", "canvasSentControlnetAssets": "Tela inviata a ControlNet & Risorse", @@ -1072,7 +1073,9 @@ "pause": "Sospendi", "pruneTooltip": "Rimuovi {{item_count}} elementi completati", "cancelSucceeded": "Elemento annullato", - "batchQueuedDesc": "Aggiunte {{item_count}} sessioni a {{direction}} della coda", + "batchQueuedDesc_one": "Aggiunta {{count}} sessione a {{direction}} della coda", + "batchQueuedDesc_many": "Aggiunte {{count}} sessioni a {{direction}} della coda", + "batchQueuedDesc_other": "", "graphQueued": "Grafico in coda", "batch": "Lotto", "clearQueueAlertDialog": "Lo svuotamento della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda.", @@ -1361,7 +1364,8 @@ "controlNet": { "paragraphs": [ "ControlNet fornisce una guida al processo di generazione, aiutando a creare immagini con composizione, struttura o stile controllati, a seconda del modello selezionato." 
- ] + ], + "heading": "ControlNet" } }, "sdxl": { From 6994783c171e1b5898a88d133a2c89ed9685e9a3 Mon Sep 17 00:00:00 2001 From: psychedelicious Date: Sun, 15 Oct 2023 09:40:37 +0200 Subject: [PATCH 066/202] translationBot(ui): update translation (Italian) Currently translated at 91.9% (1119 of 1217 strings) Co-authored-by: psychedelicious Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index d3c3c7b9ff..32994e0169 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -1043,7 +1043,7 @@ "addIPAdapter": "Aggiungi $t(common.ipAdapter)", "controlAdapter_one": "Adattatore di Controllo", "controlAdapter_many": "Adattatori di Controllo", - "controlAdapter_other": "", + "controlAdapter_other": "Adattatori di Controllo", "megaControl": "Mega ControlNet", "minConfidence": "Confidenza minima", "scribble": "Scribble" From ffb01f13452250c6760836057450782c18aaed0b Mon Sep 17 00:00:00 2001 From: Jonathan <34005131+JPPhoto@users.noreply.github.com> Date: Fri, 13 Oct 2023 15:07:08 -0500 Subject: [PATCH 067/202] Update facetools.py Facetools nodes were cutting off faces that extended beyond chunk boundaries in some cases. All faces found are considered and are coalesced rather than pruned, meaning that you should not see half a face any more. --- invokeai/app/invocations/facetools.py | 108 +++++++++++++++++--------- 1 file changed, 70 insertions(+), 38 deletions(-) diff --git a/invokeai/app/invocations/facetools.py b/invokeai/app/invocations/facetools.py index a433fac792..31ab77bd1a 100644 --- a/invokeai/app/invocations/facetools.py +++ b/invokeai/app/invocations/facetools.py @@ -46,6 +46,8 @@ class FaceResultData(TypedDict): y_center: float mesh_width: int mesh_height: int + chunk_x_offset: int + chunk_y_offset: int class FaceResultDataWithId(FaceResultData): @@ -78,6 +80,48 @@ FONT_SIZE = 32 FONT_STROKE_WIDTH = 4 +def coalesce_faces(face1: FaceResultData, face2: FaceResultData) -> FaceResultData: + face1_x_offset = face1["chunk_x_offset"] - min(face1["chunk_x_offset"], face2["chunk_x_offset"]) + face2_x_offset = face2["chunk_x_offset"] - min(face1["chunk_x_offset"], face2["chunk_x_offset"]) + face1_y_offset = face1["chunk_y_offset"] - min(face1["chunk_y_offset"], face2["chunk_y_offset"]) + face2_y_offset = face2["chunk_y_offset"] - min(face1["chunk_y_offset"], face2["chunk_y_offset"]) + + new_im_width = ( + max(face1["image"].width, face2["image"].width) + + max(face1["chunk_x_offset"], face2["chunk_x_offset"]) + - min(face1["chunk_x_offset"], face2["chunk_x_offset"]) + ) + new_im_height = ( + max(face1["image"].height, face2["image"].height) + + max(face1["chunk_y_offset"], face2["chunk_y_offset"]) + - min(face1["chunk_y_offset"], face2["chunk_y_offset"]) + ) + pil_image = Image.new(mode=face1["image"].mode, size=(new_im_width, new_im_height)) + pil_image.paste(face1["image"], (face1_x_offset, face1_y_offset)) + pil_image.paste(face2["image"], (face2_x_offset, face2_y_offset)) + + # Mask images are always from the origin + new_mask_im_width = max(face1["mask"].width, face2["mask"].width) + new_mask_im_height = max(face1["mask"].height, face2["mask"].height) + mask_pil = create_white_image(new_mask_im_width, new_mask_im_height) + black_image = create_black_image(face1["mask"].width, 
face1["mask"].height) + mask_pil.paste(black_image, (0, 0), ImageOps.invert(face1["mask"])) + black_image = create_black_image(face2["mask"].width, face2["mask"].height) + mask_pil.paste(black_image, (0, 0), ImageOps.invert(face2["mask"])) + + new_face = FaceResultData( + image=pil_image, + mask=mask_pil, + x_center=max(face1["x_center"], face2["x_center"]), + y_center=max(face1["y_center"], face2["y_center"]), + mesh_width=max(face1["mesh_width"], face2["mesh_width"]), + mesh_height=max(face1["mesh_height"], face2["mesh_height"]), + chunk_x_offset=max(face1["chunk_x_offset"], face2["chunk_x_offset"]), + chunk_y_offset=max(face2["chunk_y_offset"], face2["chunk_y_offset"]), + ) + return new_face + + def prepare_faces_list( face_result_list: list[FaceResultData], ) -> list[FaceResultDataWithId]: @@ -91,7 +135,7 @@ def prepare_faces_list( should_add = True candidate_x_center = candidate["x_center"] candidate_y_center = candidate["y_center"] - for face in deduped_faces: + for idx, face in enumerate(deduped_faces): face_center_x = face["x_center"] face_center_y = face["y_center"] face_radius_w = face["mesh_width"] / 2 @@ -105,6 +149,7 @@ def prepare_faces_list( ) if p < 1: # Inside of the already-added face's radius + deduped_faces[idx] = coalesce_faces(face, candidate) should_add = False break @@ -138,7 +183,6 @@ def generate_face_box_mask( chunk_x_offset: int = 0, chunk_y_offset: int = 0, draw_mesh: bool = True, - check_bounds: bool = True, ) -> list[FaceResultData]: result = [] mask_pil = None @@ -211,33 +255,20 @@ def generate_face_box_mask( mask_pil = create_white_image(w + chunk_x_offset, h + chunk_y_offset) mask_pil.paste(init_mask_pil, (chunk_x_offset, chunk_y_offset)) - left_side = x_center - mesh_width - right_side = x_center + mesh_width - top_side = y_center - mesh_height - bottom_side = y_center + mesh_height - im_width, im_height = pil_image.size - over_w = im_width * 0.1 - over_h = im_height * 0.1 - if not check_bounds or ( - (left_side >= -over_w) - and (right_side < im_width + over_w) - and (top_side >= -over_h) - and (bottom_side < im_height + over_h) - ): - x_center = float(x_center) - y_center = float(y_center) - face = FaceResultData( - image=pil_image, - mask=mask_pil or create_white_image(*pil_image.size), - x_center=x_center + chunk_x_offset, - y_center=y_center + chunk_y_offset, - mesh_width=mesh_width, - mesh_height=mesh_height, - ) + x_center = float(x_center) + y_center = float(y_center) + face = FaceResultData( + image=pil_image, + mask=mask_pil or create_white_image(*pil_image.size), + x_center=x_center + chunk_x_offset, + y_center=y_center + chunk_y_offset, + mesh_width=mesh_width, + mesh_height=mesh_height, + chunk_x_offset=chunk_x_offset, + chunk_y_offset=chunk_y_offset, + ) - result.append(face) - else: - context.services.logger.info("FaceTools --> Face out of bounds, ignoring.") + result.append(face) return result @@ -346,7 +377,6 @@ def get_faces_list( chunk_x_offset=0, chunk_y_offset=0, draw_mesh=draw_mesh, - check_bounds=False, ) if should_chunk or len(result) == 0: context.services.logger.info("FaceTools --> Chunking image (chunk toggled on, or no face found in full image).") @@ -360,24 +390,26 @@ def get_faces_list( if width > height: # Landscape - slice the image horizontally fx = 0.0 - steps = int(width * 2 / height) + steps = int(width * 2 / height) + 1 + increment = (width - height) / (steps - 1) while fx <= (width - height): x = int(fx) - image_chunks.append(image.crop((x, 0, x + height - 1, height - 1))) + image_chunks.append(image.crop((x, 0, x + 
height, height))) x_offsets.append(x) y_offsets.append(0) - fx += (width - height) / steps + fx += increment context.services.logger.info(f"FaceTools --> Chunk starting at x = {x}") elif height > width: # Portrait - slice the image vertically fy = 0.0 - steps = int(height * 2 / width) + steps = int(height * 2 / width) + 1 + increment = (height - width) / (steps - 1) while fy <= (height - width): y = int(fy) - image_chunks.append(image.crop((0, y, width - 1, y + width - 1))) + image_chunks.append(image.crop((0, y, width, y + width))) x_offsets.append(0) y_offsets.append(y) - fy += (height - width) / steps + fy += increment context.services.logger.info(f"FaceTools --> Chunk starting at y = {y}") for idx in range(len(image_chunks)): @@ -404,7 +436,7 @@ def get_faces_list( return all_faces -@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.0.1") +@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.0.2") class FaceOffInvocation(BaseInvocation): """Bound, extract, and mask a face from an image using MediaPipe detection""" @@ -498,7 +530,7 @@ class FaceOffInvocation(BaseInvocation): return output -@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.0.1") +@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.0.2") class FaceMaskInvocation(BaseInvocation): """Face mask creation using mediapipe face detection""" @@ -616,7 +648,7 @@ class FaceMaskInvocation(BaseInvocation): @invocation( - "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.0.1" + "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.0.2" ) class FaceIdentifierInvocation(BaseInvocation): """Outputs an image with detected face IDs printed on each face. 
For use with other FaceTools.""" From 3e389d3f6061deec6a9f68be38755495dc8cf470 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 15 Oct 2023 19:30:39 +1100 Subject: [PATCH 068/202] chore(ui): update deps --- invokeai/frontend/web/package.json | 96 +- .../listeners/canvasMerged.ts | 2 +- invokeai/frontend/web/yarn.lock | 1330 +++++++++-------- 3 files changed, 752 insertions(+), 676 deletions(-) diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index e5f1513c89..59e672544a 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -54,42 +54,42 @@ ] }, "dependencies": { - "@chakra-ui/anatomy": "^2.2.0", - "@chakra-ui/icons": "^2.1.0", - "@chakra-ui/react": "^2.8.0", + "@chakra-ui/anatomy": "^2.2.1", + "@chakra-ui/icons": "^2.1.1", + "@chakra-ui/react": "^2.8.1", "@chakra-ui/styled-system": "^2.9.1", - "@chakra-ui/theme-tools": "^2.1.0", + "@chakra-ui/theme-tools": "^2.1.1", "@dagrejs/graphlib": "^2.1.13", "@dnd-kit/core": "^6.0.8", "@dnd-kit/modifiers": "^6.0.1", "@dnd-kit/utilities": "^3.2.1", "@emotion/react": "^11.11.1", "@emotion/styled": "^11.11.0", - "@floating-ui/react-dom": "^2.0.1", - "@fontsource-variable/inter": "^5.0.8", - "@fontsource/inter": "^5.0.8", + "@floating-ui/react-dom": "^2.0.2", + "@fontsource-variable/inter": "^5.0.13", + "@fontsource/inter": "^5.0.13", "@mantine/core": "^6.0.19", "@mantine/form": "^6.0.19", "@mantine/hooks": "^6.0.19", "@nanostores/react": "^0.7.1", - "@reduxjs/toolkit": "^1.9.5", - "@roarr/browser-log-writer": "^1.1.5", + "@reduxjs/toolkit": "^1.9.7", + "@roarr/browser-log-writer": "^1.3.0", "@stevebel/png": "^1.5.1", "compare-versions": "^6.1.0", "dateformat": "^5.0.3", - "formik": "^2.4.3", - "framer-motion": "^10.16.1", + "formik": "^2.4.5", + "framer-motion": "^10.16.4", "fuse.js": "^6.6.2", - "i18next": "^23.4.4", + "i18next": "^23.5.1", "i18next-browser-languagedetector": "^7.0.2", - "i18next-http-backend": "^2.2.1", - "konva": "^9.2.0", + "i18next-http-backend": "^2.2.2", + "konva": "^9.2.2", "lodash-es": "^4.17.21", "nanostores": "^0.9.2", "new-github-issue-url": "^1.0.0", - "openapi-fetch": "^0.7.4", - "overlayscrollbars": "^2.2.0", - "overlayscrollbars-react": "^0.5.0", + "openapi-fetch": "^0.7.10", + "overlayscrollbars": "^2.3.2", + "overlayscrollbars-react": "^0.5.2", "patch-package": "^8.0.0", "query-string": "^8.1.0", "react": "^18.2.0", @@ -98,25 +98,25 @@ "react-dropzone": "^14.2.3", "react-error-boundary": "^4.0.11", "react-hotkeys-hook": "4.4.1", - "react-i18next": "^13.1.2", - "react-icons": "^4.10.1", + "react-i18next": "^13.3.0", + "react-icons": "^4.11.0", "react-konva": "^18.2.10", - "react-redux": "^8.1.2", + "react-redux": "^8.1.3", "react-resizable-panels": "^0.0.55", "react-use": "^17.4.0", - "react-virtuoso": "^4.5.0", - "react-zoom-pan-pinch": "^3.0.8", - "reactflow": "^11.8.3", + "react-virtuoso": "^4.6.1", + "react-zoom-pan-pinch": "^3.2.0", + "reactflow": "^11.9.3", "redux-dynamic-middlewares": "^2.2.0", - "redux-remember": "^4.0.1", + "redux-remember": "^4.0.4", "roarr": "^7.15.1", - "serialize-error": "^11.0.1", + "serialize-error": "^11.0.2", "socket.io-client": "^4.7.2", - "type-fest": "^4.2.0", + "type-fest": "^4.4.0", "use-debounce": "^9.0.4", "use-image": "^1.1.1", - "uuid": "^9.0.0", - "zod": "^3.22.2", + "uuid": "^9.0.1", + "zod": "^3.22.4", "zod-validation-error": "^1.5.0" }, "peerDependencies": { @@ -129,40 +129,40 @@ "devDependencies": { "@chakra-ui/cli": "^2.4.1", 
"@types/dateformat": "^5.0.0", - "@types/lodash-es": "^4.14.194", - "@types/node": "^20.5.1", - "@types/react": "^18.2.20", - "@types/react-dom": "^18.2.6", - "@types/react-redux": "^7.1.25", - "@types/react-transition-group": "^4.4.6", - "@types/uuid": "^9.0.2", - "@typescript-eslint/eslint-plugin": "^6.4.1", - "@typescript-eslint/parser": "^6.4.1", - "@vitejs/plugin-react-swc": "^3.3.2", - "axios": "^1.4.0", + "@types/lodash-es": "^4.17.9", + "@types/node": "^20.8.6", + "@types/react": "^18.2.28", + "@types/react-dom": "^18.2.13", + "@types/react-redux": "^7.1.27", + "@types/react-transition-group": "^4.4.7", + "@types/uuid": "^9.0.5", + "@typescript-eslint/eslint-plugin": "^6.7.5", + "@typescript-eslint/parser": "^6.7.5", + "@vitejs/plugin-react-swc": "^3.4.0", + "axios": "^1.5.1", "babel-plugin-transform-imports": "^2.0.0", - "concurrently": "^8.2.0", - "eslint": "^8.47.0", + "concurrently": "^8.2.1", + "eslint": "^8.51.0", "eslint-config-prettier": "^9.0.0", - "eslint-plugin-prettier": "^5.0.0", + "eslint-plugin-prettier": "^5.0.1", "eslint-plugin-react": "^7.33.2", "eslint-plugin-react-hooks": "^4.6.0", "form-data": "^4.0.0", "husky": "^8.0.3", - "lint-staged": "^14.0.1", + "lint-staged": "^15.0.1", "madge": "^6.1.0", "openapi-types": "^12.1.3", - "openapi-typescript": "^6.5.2", + "openapi-typescript": "^6.7.0", "postinstall-postinstall": "^2.1.0", - "prettier": "^3.0.2", + "prettier": "^3.0.3", "rollup-plugin-visualizer": "^5.9.2", "ts-toolbelt": "^9.6.0", "typescript": "^5.2.2", - "vite": "^4.4.9", + "vite": "^4.4.11", "vite-plugin-css-injected-by-js": "^3.3.0", - "vite-plugin-dts": "^3.5.2", + "vite-plugin-dts": "^3.6.0", "vite-plugin-eslint": "^1.8.1", - "vite-tsconfig-paths": "^4.2.0", + "vite-tsconfig-paths": "^4.2.1", "yarn": "^1.22.19" } } diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMerged.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMerged.ts index 62f7b60036..35c1affb97 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMerged.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMerged.ts @@ -44,7 +44,7 @@ export const addCanvasMergedListener = () => { } const baseLayerRect = canvasBaseLayer.getClientRect({ - relativeTo: canvasBaseLayer.getParent(), + relativeTo: canvasBaseLayer.getParent() ?? 
undefined, }); const imageDTO = await dispatch( diff --git a/invokeai/frontend/web/yarn.lock b/invokeai/frontend/web/yarn.lock index 2a531175d4..cc257115da 100644 --- a/invokeai/frontend/web/yarn.lock +++ b/invokeai/frontend/web/yarn.lock @@ -50,13 +50,20 @@ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.10.tgz#e37634f9a12a1716136c44624ef54283cabd3f55" integrity sha512-lNbdGsQb9ekfsnjFGhEiF4hfFqGgfOP3H3d27re3n+CGhNuTSUEQdfWk556sTLNTloczcdM5TYF2LhzmDQKyvQ== -"@babel/runtime@^7.0.0", "@babel/runtime@^7.1.2", "@babel/runtime@^7.10.2", "@babel/runtime@^7.12.1", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.18.3", "@babel/runtime@^7.19.4", "@babel/runtime@^7.21.0", "@babel/runtime@^7.22.5", "@babel/runtime@^7.9.2": +"@babel/runtime@^7.0.0", "@babel/runtime@^7.1.2", "@babel/runtime@^7.12.1", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.18.3", "@babel/runtime@^7.19.4", "@babel/runtime@^7.21.0", "@babel/runtime@^7.22.5", "@babel/runtime@^7.9.2": version "7.22.6" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.22.6.tgz#57d64b9ae3cff1d67eb067ae117dac087f5bd438" integrity sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ== dependencies: regenerator-runtime "^0.13.11" +"@babel/runtime@^7.10.2", "@babel/runtime@^7.13.10": + version "7.23.2" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.2.tgz#062b0ac103261d68a966c4c7baf2ae3e62ec3885" + integrity sha512-mM8eg4yl5D6i3lu2QKPuPH4FArvJ8KhTofbE7jwMUv9KX5mBvwPAqnV3MlyBNqdp9RyRKP6Yck8TrfYrPvX3bg== + dependencies: + regenerator-runtime "^0.14.0" + "@babel/types@^7.22.5", "@babel/types@^7.4": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.5.tgz#cd93eeaab025880a3a47ec881f4b096a5b786fbe" @@ -66,33 +73,33 @@ "@babel/helper-validator-identifier" "^7.22.5" to-fast-properties "^2.0.0" -"@chakra-ui/accordion@2.3.0": - version "2.3.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/accordion/-/accordion-2.3.0.tgz#2c85fd2d2734b176f019f8db9f4e075007b4e1fb" - integrity sha512-A4TkRw3Jnt+Fam6dSSJ62rskdrvjF3JGctYcfXlojfFIpHPuIw4pDwfZgNAxlaxWkcj0e7JJKlQ88dnZW+QfFg== +"@chakra-ui/accordion@2.3.1": + version "2.3.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/accordion/-/accordion-2.3.1.tgz#a326509e286a5c4e8478de9bc2b4b05017039e6b" + integrity sha512-FSXRm8iClFyU+gVaXisOSEw0/4Q+qZbFRiuhIAkVU6Boj0FxAMrlo9a8AV5TuF77rgaHytCdHk0Ng+cyUijrag== dependencies: "@chakra-ui/descendant" "3.1.0" - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/react-context" "2.1.0" "@chakra-ui/react-use-controllable-state" "2.1.0" "@chakra-ui/react-use-merge-refs" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" "@chakra-ui/transition" "2.1.0" -"@chakra-ui/alert@2.2.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/alert/-/alert-2.2.0.tgz#b59eadca4f083674dfcd43651b6c47b953c2c984" - integrity sha512-De+BT88iYOu3Con7MxQeICb1SwgAdVdgpHIYjTh3qvGlNXAQjs81rhG0fONXvwW1FIYletvr9DY2Tlg8xJe7tQ== +"@chakra-ui/alert@2.2.1": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/alert/-/alert-2.2.1.tgz#69f4fae19e4f8204ae1db906784139d416063d04" + integrity sha512-GduIqqWCkvID8hxRlKw29Jp3w93r/E9S30J2F8By3ODon9Bhk1o/KVolcPiSiQvRwKNBJCd/rBTpPpLkB+s7pw== dependencies: - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/react-context" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" "@chakra-ui/spinner" "2.1.0" 
-"@chakra-ui/anatomy@2.2.0", "@chakra-ui/anatomy@^2.2.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/anatomy/-/anatomy-2.2.0.tgz#788229829f853dcd03314cd7ddd4f19f056ec24e" - integrity sha512-cD8Ms5C8+dFda0LrORMdxiFhAZwOIY1BSlCadz6/mHUIgNdQy13AHPrXiq6qWdMslqVHq10k5zH7xMPLt6kjFg== +"@chakra-ui/anatomy@2.2.1", "@chakra-ui/anatomy@^2.2.1": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/anatomy/-/anatomy-2.2.1.tgz#f7ef088dcb8be4f1d075f37101830199fb93f763" + integrity sha512-bbmyWTGwQo+aHYDMtLIj7k7hcWvwE7GFVDViLFArrrPhfUTDdQTNqhiDp1N7eh2HLyjNhc2MKXV8s2KTQqkmTg== "@chakra-ui/avatar@2.3.0": version "2.3.0" @@ -137,12 +144,12 @@ dependencies: "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/checkbox@2.3.0": - version "2.3.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/checkbox/-/checkbox-2.3.0.tgz#6a103555100008fcde0b25a4f3f87267dd6ea073" - integrity sha512-fX7M5sQK27aFWoj7vqnPkf1Q3AHmML/5dIRYfm7HEIsZXYH2C1CkM6+dijeSWIk6a0mp0r3el6SNDUti2ehH8g== +"@chakra-ui/checkbox@2.3.1": + version "2.3.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/checkbox/-/checkbox-2.3.1.tgz#bde33a655a8f033656378e3e95ae0dc4c8e73864" + integrity sha512-e6qL9ntVI/Ui6g0+iljUV2chX86YMsXafldpTHBNYDEoNLjGo1lqLFzq3y6zs3iuB3DHI0X7eAG3REmMVs0A0w== dependencies: - "@chakra-ui/form-control" "2.1.0" + "@chakra-ui/form-control" "2.1.1" "@chakra-ui/react-context" "2.1.0" "@chakra-ui/react-types" "2.0.7" "@chakra-ui/react-use-callback-ref" "2.1.0" @@ -151,8 +158,8 @@ "@chakra-ui/react-use-safe-layout-effect" "2.1.0" "@chakra-ui/react-use-update-effect" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" - "@chakra-ui/visually-hidden" "2.1.0" - "@zag-js/focus-visible" "0.10.5" + "@chakra-ui/visually-hidden" "2.2.0" + "@zag-js/focus-visible" "0.16.0" "@chakra-ui/cli@^2.4.1": version "2.4.1" @@ -175,12 +182,12 @@ "@chakra-ui/react-use-merge-refs" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/close-button@2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/close-button/-/close-button-2.1.0.tgz#5af435a62919793dc713be321eaff1f61749b6d5" - integrity sha512-KfJcz6UAaR2dDWSIv6UrCGkZQS54Fjl+DEEVOUTJ7gf4KOP4FQZCkv8hqsAB9FeCtnwU43adq2oaw3aZH/Uzew== +"@chakra-ui/close-button@2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/close-button/-/close-button-2.1.1.tgz#995b245c56eb41465a71d8667840c238618a7b66" + integrity sha512-gnpENKOanKexswSVpVz7ojZEALl2x5qjLYNqSQGbxz+aP9sOXPfUS56ebyBrre7T7exuWGiFeRwnM0oVeGPaiw== dependencies: - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/color-mode@2.2.0": version "2.2.0" @@ -203,10 +210,10 @@ "@chakra-ui/react-use-callback-ref" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/css-reset@2.2.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/css-reset/-/css-reset-2.2.0.tgz#7bd8da563941709cd68e2d1cf1e71279bec038ea" - integrity sha512-nn7hjquIrPwCzwI4d/Y4wzM5A5xAeswREOfT8gT0Yd+U+Qnw3pPT8NPLbNJ3DvuOfJaCV6/N5ld/6RRTgYF/sQ== +"@chakra-ui/css-reset@2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@chakra-ui/css-reset/-/css-reset-2.3.0.tgz#83e3160a9c2a12431cad0ee27ebfbf3aedc5c9c7" + integrity sha512-cQwwBy5O0jzvl0K7PLTLgp8ijqLPKyuEMiDXwYzl95seD3AoeuoCLyzZcJtVqaUZ573PiBdAbY/IlZcwDOItWg== "@chakra-ui/descendant@3.1.0": version "3.1.0" @@ -249,40 +256,40 @@ "@chakra-ui/dom-utils" "2.1.0" react-focus-lock "^2.9.4" -"@chakra-ui/form-control@2.1.0": - version "2.1.0" - resolved 
"https://registry.yarnpkg.com/@chakra-ui/form-control/-/form-control-2.1.0.tgz#a48ad454428c03daaaf71671925becd02a2c3f66" - integrity sha512-3QmWG9v6Rx+JOwJP3Wt89+AWZxK0F1NkVAgXP3WVfE9VDXOKFRV/faLT0GEe2V+l7WZHF5PLdEBvKG8Cgw2mkA== +"@chakra-ui/form-control@2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/form-control/-/form-control-2.1.1.tgz#05b06a52432642ddc7ed795bfe127108d160927d" + integrity sha512-LJPDzA1ITc3lhd/iDiINqGeca5bJD09PZAjePGEmmZyLPZZi8nPh/iii0RMxvKyJArsTBwXymCh+dEqK9aDzGQ== dependencies: - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/react-context" "2.1.0" "@chakra-ui/react-types" "2.0.7" "@chakra-ui/react-use-merge-refs" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/hooks@2.2.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/hooks/-/hooks-2.2.0.tgz#f779bf85542dacd607abe7e67f4571cf8a1102fa" - integrity sha512-GZE64mcr20w+3KbCUPqQJHHmiFnX5Rcp8jS3YntGA4D5X2qU85jka7QkjfBwv/iduZ5Ei0YpCMYGCpi91dhD1Q== +"@chakra-ui/hooks@2.2.1": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/hooks/-/hooks-2.2.1.tgz#b86ce5eeaaab877ddcb11a50842d1227306ace28" + integrity sha512-RQbTnzl6b1tBjbDPf9zGRo9rf/pQMholsOudTxjy4i9GfTfz6kgp5ValGjQm2z7ng6Z31N1cnjZ1AlSzQ//ZfQ== dependencies: "@chakra-ui/react-utils" "2.0.12" "@chakra-ui/utils" "2.0.15" - compute-scroll-into-view "1.0.20" + compute-scroll-into-view "3.0.3" copy-to-clipboard "3.3.3" -"@chakra-ui/icon@3.1.0": - version "3.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/icon/-/icon-3.1.0.tgz#48312c071b3a0ed20ce807c8bd24d5f3e9cfdb7f" - integrity sha512-t6v0lGCXRbwUJycN8A/nDTuLktMP+LRjKbYJnd2oL6Pm2vOl99XwEQ5cAEyEa4XoseYNEgXiLR+2TfvgfNFvcw== +"@chakra-ui/icon@3.2.0": + version "3.2.0" + resolved "https://registry.yarnpkg.com/@chakra-ui/icon/-/icon-3.2.0.tgz#92b9454aa0d561b4994bcd6a1b3bb1fdd5c67bef" + integrity sha512-xxjGLvlX2Ys4H0iHrI16t74rG9EBcpFvJ3Y3B7KMQTrnW34Kf7Da/UC8J67Gtx85mTHW020ml85SVPKORWNNKQ== dependencies: "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/icons@^2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/icons/-/icons-2.1.0.tgz#52677939e02f9d6b902bd2c2931e5f18d3c8b523" - integrity sha512-pGFxFfQ/P5VnSRnTzK8zGAJxoxkxpHo/Br9ohRZdOpuhnIHSW7va0P53UoycEO5/vNJ/7BN0oDY0k9qurChcew== +"@chakra-ui/icons@^2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/icons/-/icons-2.1.1.tgz#58ff0f9e703f2f4f89debd600ce4e438f43f9c9a" + integrity sha512-3p30hdo4LlRZTT5CwoAJq3G9fHI0wDc0pBaMHj4SUn0yomO+RcDRlzhdXqdr5cVnzax44sqXJVnf3oQG0eI+4g== dependencies: - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/image@2.1.0": version "2.1.0" @@ -292,24 +299,24 @@ "@chakra-ui/react-use-safe-layout-effect" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/input@2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/input/-/input-2.1.0.tgz#45a2e2ccdd65dc9f6dee11b6e3043438864bf806" - integrity sha512-HItI2vq6vupCuixdzof4sIanGdLlszhDtlR5be5z8Nrda1RkXVqI+9CTJPbNsx2nIKEfwPt01pnT9mozoOSMMw== +"@chakra-ui/input@2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/input/-/input-2.1.1.tgz#c9666bd1efd7763458bec713fb87cc3f365ec15d" + integrity sha512-RQYzQ/qcak3eCuCfvSqc1kEFx0sCcnIeiSi7i0r70CeBnAUK/CP1/4Uz849FpKz81K4z2SikC9MkHPQd8ZpOwg== dependencies: - "@chakra-ui/form-control" "2.1.0" + "@chakra-ui/form-control" "2.1.1" "@chakra-ui/object-utils" "2.1.0" "@chakra-ui/react-children-utils" "2.0.6" "@chakra-ui/react-context" "2.1.0" 
"@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/layout@2.3.0": - version "2.3.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/layout/-/layout-2.3.0.tgz#c53219235db737202006b8b0881b82fedcf3b225" - integrity sha512-tp1/Bn+cHn0Q4HWKY62HtOwzhpH1GUA3i5fvs23HEhOEryTps05hyuQVeJ71fLqSs6f1QEIdm+9It+5WCj64vQ== +"@chakra-ui/layout@2.3.1": + version "2.3.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/layout/-/layout-2.3.1.tgz#0601c5eb91555d24a7015a7c9d4e01fed2698557" + integrity sha512-nXuZ6WRbq0WdgnRgLw+QuxWAHuhDtVX8ElWqcTK+cSMFg/52eVP47czYBE5F35YhnoW2XBwfNoNgZ7+e8Z01Rg== dependencies: "@chakra-ui/breakpoint-utils" "2.0.8" - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/object-utils" "2.1.0" "@chakra-ui/react-children-utils" "2.0.6" "@chakra-ui/react-context" "2.1.0" @@ -334,10 +341,10 @@ "@chakra-ui/react-env" "3.1.0" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/menu@2.2.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/menu/-/menu-2.2.0.tgz#ba74b538a3fa3dc46313368ed2878f92b479e6d7" - integrity sha512-l7HQjriW4JGeCyxDdguAzekwwB+kHGDLxACi0DJNp37sil51SRaN1S1OrneISbOHVpHuQB+KVNgU0rqhoglVew== +"@chakra-ui/menu@2.2.1": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/menu/-/menu-2.2.1.tgz#7d9810d435f6b40fa72ed867a33b88a1ef75073f" + integrity sha512-lJS7XEObzJxsOwWQh7yfG4H8FzFPRP5hVPN/CL+JzytEINCSBvsCDHrYPQGp7jzpCi8vnTqQQGQe0f8dwnXd2g== dependencies: "@chakra-ui/clickable" "2.1.0" "@chakra-ui/descendant" "3.1.0" @@ -355,12 +362,12 @@ "@chakra-ui/shared-utils" "2.0.5" "@chakra-ui/transition" "2.1.0" -"@chakra-ui/modal@2.3.0": - version "2.3.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/modal/-/modal-2.3.0.tgz#f7c35bb457c3c4be391c9366f892a5a779af810a" - integrity sha512-S1sITrIeLSf21LJ0Vz8xZhj5fWEud5z5Dl2dmvOEv1ezypgOrCCBdOEnnqCkoEKZDbKvzZWZXWR5791ikLP6+g== +"@chakra-ui/modal@2.3.1": + version "2.3.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/modal/-/modal-2.3.1.tgz#524dc32b6b4f545b54ae531dbf6c74e1052ee794" + integrity sha512-TQv1ZaiJMZN+rR9DK0snx/OPwmtaGH1HbZtlYt4W4s6CzyK541fxLRTjIXfEzIGpvNW+b6VFuFjbcR78p4DEoQ== dependencies: - "@chakra-ui/close-button" "2.1.0" + "@chakra-ui/close-button" "2.1.1" "@chakra-ui/focus-lock" "2.1.0" "@chakra-ui/portal" "2.1.0" "@chakra-ui/react-context" "2.1.0" @@ -368,17 +375,17 @@ "@chakra-ui/react-use-merge-refs" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" "@chakra-ui/transition" "2.1.0" - aria-hidden "^1.2.2" - react-remove-scroll "^2.5.5" + aria-hidden "^1.2.3" + react-remove-scroll "^2.5.6" -"@chakra-ui/number-input@2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/number-input/-/number-input-2.1.0.tgz#7812a6602edbed26829a8f7defe8c4f9f175421a" - integrity sha512-/gEAzQHhrMA+1rzyCMaN8OkKtUPuER6iA+nloYEYBoT7dH/EoNlRtBkiIQhDp+E4VpgZJ0SK3OVrm9/eBbtHHg== +"@chakra-ui/number-input@2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/number-input/-/number-input-2.1.1.tgz#5308a30e972cd45a017f613996d7d5c1f32bd89f" + integrity sha512-B4xwUPyr0NmjGN/dBhOmCD2xjX6OY1pr9GmGH3GQRozMsLAClD3TibwiZetwlyCp02qQqiFwEcZmUxaX88794Q== dependencies: "@chakra-ui/counter" "2.1.0" - "@chakra-ui/form-control" "2.1.0" - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/form-control" "2.1.1" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/react-context" "2.1.0" "@chakra-ui/react-types" "2.0.7" "@chakra-ui/react-use-callback-ref" "2.1.0" @@ -411,12 +418,12 @@ "@chakra-ui/react-use-merge-refs" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/popover@2.2.0": - version 
"2.2.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/popover/-/popover-2.2.0.tgz#470c4814447010a1b7db6839fcc3e8983cbabb60" - integrity sha512-cTqXdgkU0vgK82AR1nWcC2MJYhEL/y6uTeprvO2+j4o2D0yPrzVMuIZZRl0abrQwiravQyVGEMgA5y0ZLYwbiQ== +"@chakra-ui/popover@2.2.1": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/popover/-/popover-2.2.1.tgz#89cfd29817abcd204da570073c0f2b4d8072c3a3" + integrity sha512-K+2ai2dD0ljvJnlrzesCDT9mNzLifE3noGKZ3QwLqd/K34Ym1W/0aL1ERSynrcG78NKoXS54SdEzkhCZ4Gn/Zg== dependencies: - "@chakra-ui/close-button" "2.1.0" + "@chakra-ui/close-button" "2.1.1" "@chakra-ui/lazy-utils" "2.0.5" "@chakra-ui/popper" "3.1.0" "@chakra-ui/react-context" "2.1.0" @@ -452,28 +459,28 @@ dependencies: "@chakra-ui/react-context" "2.1.0" -"@chakra-ui/provider@2.4.0": - version "2.4.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/provider/-/provider-2.4.0.tgz#1649e607b7dddd8dd160e624946769a29f67d9a3" - integrity sha512-KJ/TNczpY+EStQXa2Y5PZ+senlBHrY7P+RpBgJLBZLGkQUCS3APw5KvCwgpA0COb2M4AZXCjw+rm+Ko7ontlgA== +"@chakra-ui/provider@2.4.1": + version "2.4.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/provider/-/provider-2.4.1.tgz#0c6c1bab2b50fdf9dfbcbb363df8982988c54d65" + integrity sha512-u4g02V9tJ9vVYfkLz5jBn/bKlAyjLdg4Sh3f7uckmYVAZpOL/uUlrStyADrynu3tZhI+BE8XdmXC4zs/SYD7ow== dependencies: - "@chakra-ui/css-reset" "2.2.0" + "@chakra-ui/css-reset" "2.3.0" "@chakra-ui/portal" "2.1.0" "@chakra-ui/react-env" "3.1.0" - "@chakra-ui/system" "2.6.0" + "@chakra-ui/system" "2.6.1" "@chakra-ui/utils" "2.0.15" -"@chakra-ui/radio@2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/radio/-/radio-2.1.0.tgz#68b2cb4193570369568052cb1254163224a1479c" - integrity sha512-WiRlSCqKWgy4m9106w4g77kcLYqBxqGhFRO1pTTJp99rxpM6jNadOeK+moEjqj64N9mSz3njEecMJftKKcOYdg== +"@chakra-ui/radio@2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/radio/-/radio-2.1.1.tgz#399983ce8a1bbc81e7cddfbaf091f54a1645fb7e" + integrity sha512-5JXDVvMWsF/Cprh6BKfcTLbLtRcgD6Wl2zwbNU30nmKIE8+WUfqD7JQETV08oWEzhi3Ea4e5EHvyll2sGx8H3w== dependencies: - "@chakra-ui/form-control" "2.1.0" + "@chakra-ui/form-control" "2.1.1" "@chakra-ui/react-context" "2.1.0" "@chakra-ui/react-types" "2.0.7" "@chakra-ui/react-use-merge-refs" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" - "@zag-js/focus-visible" "0.10.5" + "@zag-js/focus-visible" "0.16.0" "@chakra-ui/react-children-utils@2.0.6": version "2.0.6" @@ -617,71 +624,71 @@ dependencies: "@chakra-ui/utils" "2.0.15" -"@chakra-ui/react@^2.8.0": - version "2.8.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/react/-/react-2.8.0.tgz#cc76a2448f9bc85f1645a1afb90d5756a5313ac3" - integrity sha512-tV82DaqE4fMbLIWq58BYh4Ol3gAlNEn+qYOzx8bPrZudboEDnboq8aVfSBwWOY++MLWz2Nn7CkT69YRm91e5sg== +"@chakra-ui/react@^2.8.1": + version "2.8.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/react/-/react-2.8.1.tgz#fd80632b0ef34434443d8999d03d297f130aabcf" + integrity sha512-UL9Rtj4DovP3+oVbI06gsdfyJJb+wmS2RYnGNXjW9tsjCyXxjlBw9TAUj0jyOfWe0+zd/4juL8+J+QCwmdhptg== dependencies: - "@chakra-ui/accordion" "2.3.0" - "@chakra-ui/alert" "2.2.0" + "@chakra-ui/accordion" "2.3.1" + "@chakra-ui/alert" "2.2.1" "@chakra-ui/avatar" "2.3.0" "@chakra-ui/breadcrumb" "2.2.0" "@chakra-ui/button" "2.1.0" "@chakra-ui/card" "2.2.0" - "@chakra-ui/checkbox" "2.3.0" - "@chakra-ui/close-button" "2.1.0" + "@chakra-ui/checkbox" "2.3.1" + "@chakra-ui/close-button" "2.1.1" "@chakra-ui/control-box" "2.1.0" "@chakra-ui/counter" "2.1.0" - "@chakra-ui/css-reset" "2.2.0" + 
"@chakra-ui/css-reset" "2.3.0" "@chakra-ui/editable" "3.1.0" "@chakra-ui/focus-lock" "2.1.0" - "@chakra-ui/form-control" "2.1.0" - "@chakra-ui/hooks" "2.2.0" - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/form-control" "2.1.1" + "@chakra-ui/hooks" "2.2.1" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/image" "2.1.0" - "@chakra-ui/input" "2.1.0" - "@chakra-ui/layout" "2.3.0" + "@chakra-ui/input" "2.1.1" + "@chakra-ui/layout" "2.3.1" "@chakra-ui/live-region" "2.1.0" "@chakra-ui/media-query" "3.3.0" - "@chakra-ui/menu" "2.2.0" - "@chakra-ui/modal" "2.3.0" - "@chakra-ui/number-input" "2.1.0" + "@chakra-ui/menu" "2.2.1" + "@chakra-ui/modal" "2.3.1" + "@chakra-ui/number-input" "2.1.1" "@chakra-ui/pin-input" "2.1.0" - "@chakra-ui/popover" "2.2.0" + "@chakra-ui/popover" "2.2.1" "@chakra-ui/popper" "3.1.0" "@chakra-ui/portal" "2.1.0" "@chakra-ui/progress" "2.2.0" - "@chakra-ui/provider" "2.4.0" - "@chakra-ui/radio" "2.1.0" + "@chakra-ui/provider" "2.4.1" + "@chakra-ui/radio" "2.1.1" "@chakra-ui/react-env" "3.1.0" - "@chakra-ui/select" "2.1.0" + "@chakra-ui/select" "2.1.1" "@chakra-ui/skeleton" "2.1.0" "@chakra-ui/skip-nav" "2.1.0" "@chakra-ui/slider" "2.1.0" "@chakra-ui/spinner" "2.1.0" - "@chakra-ui/stat" "2.1.0" - "@chakra-ui/stepper" "2.3.0" + "@chakra-ui/stat" "2.1.1" + "@chakra-ui/stepper" "2.3.1" "@chakra-ui/styled-system" "2.9.1" - "@chakra-ui/switch" "2.1.0" - "@chakra-ui/system" "2.6.0" + "@chakra-ui/switch" "2.1.1" + "@chakra-ui/system" "2.6.1" "@chakra-ui/table" "2.1.0" - "@chakra-ui/tabs" "2.2.0" - "@chakra-ui/tag" "3.1.0" - "@chakra-ui/textarea" "2.1.0" - "@chakra-ui/theme" "3.2.0" - "@chakra-ui/theme-utils" "2.0.19" - "@chakra-ui/toast" "7.0.0" + "@chakra-ui/tabs" "3.0.0" + "@chakra-ui/tag" "3.1.1" + "@chakra-ui/textarea" "2.1.1" + "@chakra-ui/theme" "3.3.0" + "@chakra-ui/theme-utils" "2.0.20" + "@chakra-ui/toast" "7.0.1" "@chakra-ui/tooltip" "2.3.0" "@chakra-ui/transition" "2.1.0" "@chakra-ui/utils" "2.0.15" - "@chakra-ui/visually-hidden" "2.1.0" + "@chakra-ui/visually-hidden" "2.2.0" -"@chakra-ui/select@2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/select/-/select-2.1.0.tgz#4c9a6e881281e77ed35ba7a2e343f235b7e0b2fd" - integrity sha512-6GEjCJNOm1pS9E7XRvodoVOuSFl82Jio3MGWgmcQrLznjJAhIZVMq85vCQqzGpjjfbHys/UctfdJY75Ctas/Jg== +"@chakra-ui/select@2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/select/-/select-2.1.1.tgz#0792eeebdb82b1710c4527e7e8e2e07c686c714d" + integrity sha512-CERDATncv5w05Zo5/LrFtf1yKp1deyMUyDGv6eZvQG/etyukH4TstsuIHt/0GfNXrCF3CJLZ8lINzpv5wayVjQ== dependencies: - "@chakra-ui/form-control" "2.1.0" + "@chakra-ui/form-control" "2.1.1" "@chakra-ui/shared-utils" "2.0.5" "@chakra-ui/shared-utils@2.0.5": @@ -726,21 +733,21 @@ dependencies: "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/stat@2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/stat/-/stat-2.1.0.tgz#6643b507358e9cacf008387b3f12b75899497369" - integrity sha512-sqx0/AdFFZ80dsiM5owmhtQyYl+zON1r+IY0m70I/ABRVy+I3br06xdUhoaxh3tcP7c0O/BQgb+VCfXa9Y34CA== +"@chakra-ui/stat@2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/stat/-/stat-2.1.1.tgz#a204ba915795345996a16c79794d84826d7dcc2d" + integrity sha512-LDn0d/LXQNbAn2KaR3F1zivsZCewY4Jsy1qShmfBMKwn6rI8yVlbvu6SiA3OpHS0FhxbsZxQI6HefEoIgtqY6Q== dependencies: - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/react-context" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/stepper@2.3.0": - version "2.3.0" - resolved 
"https://registry.yarnpkg.com/@chakra-ui/stepper/-/stepper-2.3.0.tgz#5714df429936839145a62b5c197cff26b872b660" - integrity sha512-q80QX/NLrjJQIlBP1N+Q8GVJb7/HiOpMoK1PlP4denB/KxkU2K8GEjss8U2vklR1XsWJy1fwfj03+66Q78Uk/Q== +"@chakra-ui/stepper@2.3.1": + version "2.3.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/stepper/-/stepper-2.3.1.tgz#a0a0b73e147f202ab4e51cae55dad45489cc89fd" + integrity sha512-ky77lZbW60zYkSXhYz7kbItUpAQfEdycT0Q4bkHLxfqbuiGMf8OmgZOQkOB9uM4v0zPwy2HXhe0vq4Dd0xa55Q== dependencies: - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/react-context" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" @@ -753,26 +760,26 @@ csstype "^3.0.11" lodash.mergewith "4.6.2" -"@chakra-ui/switch@2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/switch/-/switch-2.1.0.tgz#750474ef7f0a9854062e692cbbe9f3ed0cfbc4d8" - integrity sha512-uWHOaIDQdGh+mszxeppj5aYVepbkSK445KZlJJkfr9Bnr6sythTwM63HSufnVDiTEE4uRqegv9jEjZK2JKA+9A== +"@chakra-ui/switch@2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/switch/-/switch-2.1.1.tgz#8049963e6421cdd5eaaac1d20d9febae8d731b62" + integrity sha512-cOHIhW5AlLZSFENxFEBYTBniqiduOowa1WdzslP1Fd0usBFaD5iAgOY1Fvr7xKhE8nmzzeMCkPB3XBvUSWnawQ== dependencies: - "@chakra-ui/checkbox" "2.3.0" + "@chakra-ui/checkbox" "2.3.1" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/system@2.6.0": - version "2.6.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/system/-/system-2.6.0.tgz#29f65bb0887ee0816bbb7b4b098ca5aa4918c409" - integrity sha512-MgAFRz9V1pW0dplwWsB99hx49LCC+LsrkMala7KXcP0OvWdrkjw+iu+voBksO3626+glzgIwlZW113Eja+7JEQ== +"@chakra-ui/system@2.6.1": + version "2.6.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/system/-/system-2.6.1.tgz#22ee50ddc9e1f56b974a0dd42d86108391a2f372" + integrity sha512-P5Q/XRWy3f1pXJ7IxDkV+Z6AT7GJeR2JlBnQl109xewVQcBLWWMIp702fFMFw8KZ2ALB/aYKtWm5EmQMddC/tg== dependencies: "@chakra-ui/color-mode" "2.2.0" "@chakra-ui/object-utils" "2.1.0" "@chakra-ui/react-utils" "2.0.12" "@chakra-ui/styled-system" "2.9.1" - "@chakra-ui/theme-utils" "2.0.19" + "@chakra-ui/theme-utils" "2.0.20" "@chakra-ui/utils" "2.0.15" - react-fast-compare "3.2.1" + react-fast-compare "3.2.2" "@chakra-ui/table@2.1.0": version "2.1.0" @@ -782,10 +789,10 @@ "@chakra-ui/react-context" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/tabs@2.2.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/tabs/-/tabs-2.2.0.tgz#72b6bf8bff3d3da3effb115991bf24e2157e29d2" - integrity sha512-ulN7McHZ322qlbJXg8S+IwdN8Axh8q0HzYBOHzSdcnVphEytfv9TsfJhN0Hx5yjkpekAzG5fewn33ZdIpIpKyQ== +"@chakra-ui/tabs@3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@chakra-ui/tabs/-/tabs-3.0.0.tgz#854c06880af26158d7c72881c4b5e0453f6c485d" + integrity sha512-6Mlclp8L9lqXmsGWF5q5gmemZXOiOYuh0SGT/7PgJVNPz3LXREXlXg2an4MBUD8W5oTkduCX+3KTMCwRrVrDYw== dependencies: "@chakra-ui/clickable" "2.1.0" "@chakra-ui/descendant" "3.1.0" @@ -797,64 +804,64 @@ "@chakra-ui/react-use-safe-layout-effect" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/tag@3.1.0": - version "3.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/tag/-/tag-3.1.0.tgz#b2c06254e1d5aaaf77ff41e5d2e6548b404cabd7" - integrity sha512-Mn2u828z5HvqEBEG+tUJWe3al5tzN87bK2U0QfThx3+zqWbBCWBSCVfnWRtkNh80m+5a1TekexDAPZqu5G8zdw== +"@chakra-ui/tag@3.1.1": + version "3.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/tag/-/tag-3.1.1.tgz#d05284b6549a84d3a08e57eec57df3ad0eebd882" + integrity 
sha512-Bdel79Dv86Hnge2PKOU+t8H28nm/7Y3cKd4Kfk9k3lOpUh4+nkSGe58dhRzht59lEqa4N9waCgQiBdkydjvBXQ== dependencies: - "@chakra-ui/icon" "3.1.0" + "@chakra-ui/icon" "3.2.0" "@chakra-ui/react-context" "2.1.0" -"@chakra-ui/textarea@2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/textarea/-/textarea-2.1.0.tgz#d0f157c9a09aea87c372409eead4292bd26999a0" - integrity sha512-4F7X/lPRsY+sPxYrWGrhh1pBtdnFvVllIOapzAwnjYwsflm+vf6c+9ZgoDWobXsNezJ9fcqN0FTPwaBnDvDQRQ== +"@chakra-ui/textarea@2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/textarea/-/textarea-2.1.1.tgz#3e33404ff8470140e877840a5702a406996a3834" + integrity sha512-28bpwgmXg3BzSpg8i1Ao9h7pHaE1j2mBBFJpWaqPgMhS0IHm0BQsqqyWU6PsxxJDvrC4HN6MTzrIL4C1RA1I0A== dependencies: - "@chakra-ui/form-control" "2.1.0" + "@chakra-ui/form-control" "2.1.1" "@chakra-ui/shared-utils" "2.0.5" -"@chakra-ui/theme-tools@2.1.0", "@chakra-ui/theme-tools@^2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/theme-tools/-/theme-tools-2.1.0.tgz#ad34f2fdda61305ac96f69ad9ce38ad12f8bfdbf" - integrity sha512-TKv4trAY8q8+DWdZrpSabTd3SZtZrnzFDwUdzhbWBhFEDEVR3fAkRTPpnPDtf1X9w1YErWn3QAcMACVFz4+vkw== +"@chakra-ui/theme-tools@2.1.1", "@chakra-ui/theme-tools@^2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@chakra-ui/theme-tools/-/theme-tools-2.1.1.tgz#c7f3072ab533d7abc6a3831666be3c172f992554" + integrity sha512-n14L5L3ej3Zy+Xm/kDKO1G6/DkmieT7Li1C7NzMRcUj5C9YybQpyo7IZZ0BBUh3u+OVnKVhNC3d4P2NYDGRXmA== dependencies: - "@chakra-ui/anatomy" "2.2.0" + "@chakra-ui/anatomy" "2.2.1" "@chakra-ui/shared-utils" "2.0.5" - color2k "^2.0.0" + color2k "^2.0.2" -"@chakra-ui/theme-utils@2.0.19": - version "2.0.19" - resolved "https://registry.yarnpkg.com/@chakra-ui/theme-utils/-/theme-utils-2.0.19.tgz#47e6af43f8ef22403686b779ca1a869ab1b7a5ec" - integrity sha512-UQ+KvozTN86+0oA80rdQd1a++4rm4ulo+DEabkgwNpkK3yaWsucOxkDQpi2sMIMvw5X0oaWvNBZJuVyK7HdOXg== +"@chakra-ui/theme-utils@2.0.20": + version "2.0.20" + resolved "https://registry.yarnpkg.com/@chakra-ui/theme-utils/-/theme-utils-2.0.20.tgz#fdc4947ac4b95c16ff5885707c9a931c43b80cf6" + integrity sha512-IkAzSmwBlRIZ3dN2InDz0tf9SldbckVkgwylCobSFmYP8lnMjykL8Lex1BBo9U8UQjZxEDVZ+Qw6SeayKRntOQ== dependencies: "@chakra-ui/shared-utils" "2.0.5" "@chakra-ui/styled-system" "2.9.1" - "@chakra-ui/theme" "3.2.0" + "@chakra-ui/theme" "3.3.0" lodash.mergewith "4.6.2" -"@chakra-ui/theme@3.2.0": - version "3.2.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/theme/-/theme-3.2.0.tgz#b8232d770e542895cf82535942a74ad803bb7133" - integrity sha512-q9mppdkhmaBnvOT8REr/lVNNBX/prwm50EzObJ+r+ErVhNQDc55gCFmtr+It3xlcCqmOteG6XUdwRCJz8qzOqg== +"@chakra-ui/theme@3.3.0": + version "3.3.0" + resolved "https://registry.yarnpkg.com/@chakra-ui/theme/-/theme-3.3.0.tgz#7fe364322e75c7bdfa45b96dd3db6dac7eb8f7ef" + integrity sha512-VHY2ax5Wqgfm83U/zYBk0GS0TGD8m41s/rxQgnEq8tU+ug1YZjvOZmtOq/VjfKP/bQraFhCt05zchcxXmDpEYg== dependencies: - "@chakra-ui/anatomy" "2.2.0" + "@chakra-ui/anatomy" "2.2.1" "@chakra-ui/shared-utils" "2.0.5" - "@chakra-ui/theme-tools" "2.1.0" + "@chakra-ui/theme-tools" "2.1.1" -"@chakra-ui/toast@7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/toast/-/toast-7.0.0.tgz#6c6f1b7b8dc458ed0827b2edc47eb7d4075c95dc" - integrity sha512-XQgSnn4DYRgfOBzBvh8GI/AZ7SfrO8wlVSmChfp92Nfmqm7tRDUT9x8ws/iNKAvMRHkhl7fmRjJ39ipeXYrMvA== +"@chakra-ui/toast@7.0.1": + version "7.0.1" + resolved 
"https://registry.yarnpkg.com/@chakra-ui/toast/-/toast-7.0.1.tgz#11113b9185409ed1dc7a062f0498673f0840a013" + integrity sha512-V5JUhw6RZxbGRTijvd5k4iEMLCfbzTLNWbZLZhRZk10YvFfAP5OYfRCm68zpE/t3orN/f+4ZLL3P+Wb4E7oSmw== dependencies: - "@chakra-ui/alert" "2.2.0" - "@chakra-ui/close-button" "2.1.0" + "@chakra-ui/alert" "2.2.1" + "@chakra-ui/close-button" "2.1.1" "@chakra-ui/portal" "2.1.0" "@chakra-ui/react-context" "2.1.0" "@chakra-ui/react-use-timeout" "2.1.0" "@chakra-ui/react-use-update-effect" "2.1.0" "@chakra-ui/shared-utils" "2.0.5" "@chakra-ui/styled-system" "2.9.1" - "@chakra-ui/theme" "3.2.0" + "@chakra-ui/theme" "3.3.0" "@chakra-ui/tooltip@2.3.0": version "2.3.0" @@ -887,10 +894,10 @@ framesync "6.1.2" lodash.mergewith "4.6.2" -"@chakra-ui/visually-hidden@2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@chakra-ui/visually-hidden/-/visually-hidden-2.1.0.tgz#9ec573964e5a07f8682574a060140d78e9f91c32" - integrity sha512-3OHKqTz78PX7V4qto+a5Y6VvH6TbU3Pg6Z0Z2KnDkOBP3Po8fiz0kk+/OSPzIwdcSsQKiocLi0c1pnnUPdMZPg== +"@chakra-ui/visually-hidden@2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@chakra-ui/visually-hidden/-/visually-hidden-2.2.0.tgz#9b0ecef8f01263ab808ba3bda7b36a0d91b4d5c1" + integrity sha512-KmKDg01SrQ7VbTD3+cPWf/UfpF5MSwm3v7MWi0n5t8HnnadT13MF0MJCDSXbBWnzLv1ZKJ6zlyAOeARWX+DpjQ== "@dagrejs/graphlib@^2.1.13": version "2.1.13" @@ -1302,27 +1309,30 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" -"@eslint/js@^8.47.0": - version "8.47.0" - resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.47.0.tgz#5478fdf443ff8158f9de171c704ae45308696c7d" - integrity sha512-P6omY1zv5MItm93kLM8s2vr1HICJH8v0dvddDhysbIuZ+vcjOHg5Zbkf1mTkcmi2JA9oBG2anOkRnW8WJTS8Og== +"@eslint/js@8.51.0": + version "8.51.0" + resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.51.0.tgz#6d419c240cfb2b66da37df230f7e7eef801c32fa" + integrity sha512-HxjQ8Qn+4SI3/AFv6sOrDB+g6PpUTDwSJiQqOrnneEk8L71161srI9gjzzZvYVbzHiVg/BvcH95+cK/zfIt4pg== "@fastify/deepmerge@^1.0.0": version "1.3.0" resolved "https://registry.yarnpkg.com/@fastify/deepmerge/-/deepmerge-1.3.0.tgz#8116858108f0c7d9fd460d05a7d637a13fe3239a" integrity sha512-J8TOSBq3SoZbDhM9+R/u77hP93gz/rajSA+K2kGyijPpORPWUXHUpTaleoj+92As0S9uPRP7Oi8IqMf0u+ro6A== -"@floating-ui/core@^1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.3.1.tgz#4d795b649cc3b1cbb760d191c80dcb4353c9a366" - integrity sha512-Bu+AMaXNjrpjh41znzHqaz3r2Nr8hHuHZT6V2LBKMhyMl0FgKA62PNYbqnfgmzOhoWZj70Zecisbo4H1rotP5g== - -"@floating-ui/dom@^1.2.1", "@floating-ui/dom@^1.3.0": - version "1.4.5" - resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.4.5.tgz#336dfb9870c98b471ff5802002982e489b8bd1c5" - integrity sha512-96KnRWkRnuBSSFbj0sFGwwOUd8EkiecINVl0O9wiZlZ64EkpyAOG3Xc2vKKNJmru0Z7RqWNymA+6b8OZqjgyyw== +"@floating-ui/core@^1.4.2": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.5.0.tgz#5c05c60d5ae2d05101c3021c1a2a350ddc027f8c" + integrity sha512-kK1h4m36DQ0UHGj5Ah4db7R0rHemTqqO0QLvUqi1/mUUp3LuAWbWxdxSIf/XsnH9VS6rRVPLJCncjRzUvyCLXg== dependencies: - "@floating-ui/core" "^1.3.1" + "@floating-ui/utils" "^0.1.3" + +"@floating-ui/dom@^1.2.1", "@floating-ui/dom@^1.5.1": + version "1.5.3" + resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.5.3.tgz#54e50efcb432c06c23cd33de2b575102005436fa" + integrity sha512-ClAbQnEqJAKCJOEbbLo5IUlZHkNszqhuxS4fHAVxRPXPya6Ysf2G8KypnYcOTpx6I8xcgF9bbHb6g/2KpbV8qA== + dependencies: + "@floating-ui/core" "^1.4.2" + "@floating-ui/utils" "^0.1.3" 
"@floating-ui/react-dom@^1.3.0": version "1.3.0" @@ -1331,12 +1341,12 @@ dependencies: "@floating-ui/dom" "^1.2.1" -"@floating-ui/react-dom@^2.0.1": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@floating-ui/react-dom/-/react-dom-2.0.1.tgz#7972a4fc488a8c746cded3cfe603b6057c308a91" - integrity sha512-rZtAmSht4Lry6gdhAJDrCp/6rKN7++JnL1/Anbr/DdeyYXQPxvg/ivrbYvJulbRf4vL8b212suwMM2lxbv+RQA== +"@floating-ui/react-dom@^2.0.2": + version "2.0.2" + resolved "https://registry.yarnpkg.com/@floating-ui/react-dom/-/react-dom-2.0.2.tgz#fab244d64db08e6bed7be4b5fcce65315ef44d20" + integrity sha512-5qhlDvjaLmAst/rKb3VdlCinwTF4EYMiVxuuc/HVUjs46W0zgtbMmAZ1UTsDrRTxRmUEzl92mOtWbeeXL26lSQ== dependencies: - "@floating-ui/dom" "^1.3.0" + "@floating-ui/dom" "^1.5.1" "@floating-ui/react@^0.19.1": version "0.19.2" @@ -1347,20 +1357,25 @@ aria-hidden "^1.1.3" tabbable "^6.0.1" -"@fontsource-variable/inter@^5.0.8": - version "5.0.8" - resolved "https://registry.yarnpkg.com/@fontsource-variable/inter/-/inter-5.0.8.tgz#bd6d61ece1019c59b0ac330a138d48a34dfa6d8c" - integrity sha512-WkYfFNccmEIeL2fNg0mYeLWqOoB7xD8MFxFRc4IwbSP2o8ZaBt36v5aW4by4MyrgGRMNk7uNi5LbvYKq6clPjw== +"@floating-ui/utils@^0.1.3": + version "0.1.6" + resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.1.6.tgz#22958c042e10b67463997bd6ea7115fe28cbcaf9" + integrity sha512-OfX7E2oUDYxtBvsuS4e/jSn4Q9Qb6DzgeYtsAdkPZ47znpoNsMgZw0+tVijiv3uGNR6dgNlty6r9rzIzHjtd/A== -"@fontsource/inter@^5.0.8": - version "5.0.8" - resolved "https://registry.yarnpkg.com/@fontsource/inter/-/inter-5.0.8.tgz#61b50cb0eb72b14ae1938d47c4a9a91546d2a50c" - integrity sha512-28knWH1BfOiRalfLs90U4sge5mpQ8ZH6FS0PTT+IZMKrZ7wNHDHRuKa1kQJg+uHcc6axBppnxll+HXM4c7zo/Q== +"@fontsource-variable/inter@^5.0.13": + version "5.0.13" + resolved "https://registry.yarnpkg.com/@fontsource-variable/inter/-/inter-5.0.13.tgz#ddffb8cdc888c00bc232e30698fb872b775ee115" + integrity sha512-mb2WyZ2rHeqIG8aqGJIvLBOmo4sg2x7SHlsE6PUhwxbOicVzO59EZwSGtzNO3FmchuDPFVAxzcXYcR5B6jE6Qw== -"@humanwhocodes/config-array@^0.11.10": - version "0.11.10" - resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.10.tgz#5a3ffe32cc9306365fb3fd572596cd602d5e12d2" - integrity sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ== +"@fontsource/inter@^5.0.13": + version "5.0.13" + resolved "https://registry.yarnpkg.com/@fontsource/inter/-/inter-5.0.13.tgz#b106fdc43b8eea8d6f5c0ce55941b1ae577c49e5" + integrity sha512-FVIBhP9X/x02blF2VQl2Pji/c3jUjkWEQ9bom4vIrGwO1MlHRDXhXx9iA1hhjpcCIfH3oX68ihIBdYcFnOXhsg== + +"@humanwhocodes/config-array@^0.11.11": + version "0.11.11" + resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.11.tgz#88a04c570dbbc7dd943e4712429c3df09bc32844" + integrity sha512-N2brEuAadi0CcdeMXUkhbZB84eskAc8MEX1By6qEchoVywSgXPIjou4rYsl0V3Hj0ZnuGycGCjdNgockbzeWNA== dependencies: "@humanwhocodes/object-schema" "^1.2.1" debug "^4.1.1" @@ -1377,63 +1392,63 @@ integrity sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA== "@mantine/core@^6.0.19": - version "6.0.19" - resolved "https://registry.yarnpkg.com/@mantine/core/-/core-6.0.19.tgz#612413f0e8eb117e6a39068a625c6ccf2ae2ccdd" - integrity sha512-SvMZCOgCc315SIg6hkuLM0ZnBaAac4VFDHZ0BM5LIE4MPJUpe4QOLsg/5RGxOa5s7JRCtu/dawH3/9frvfDrhw== + version "6.0.21" + resolved "https://registry.yarnpkg.com/@mantine/core/-/core-6.0.21.tgz#6e3a1b8d0f6869518a644d5f5e3d55a5db7e1e51" + integrity 
sha512-Kx4RrRfv0I+cOCIcsq/UA2aWcYLyXgW3aluAuW870OdXnbII6qg7RW28D+r9D76SHPxWFKwIKwmcucAG08Divg== dependencies: "@floating-ui/react" "^0.19.1" - "@mantine/styles" "6.0.19" - "@mantine/utils" "6.0.19" + "@mantine/styles" "6.0.21" + "@mantine/utils" "6.0.21" "@radix-ui/react-scroll-area" "1.0.2" react-remove-scroll "^2.5.5" react-textarea-autosize "8.3.4" "@mantine/form@^6.0.19": - version "6.0.19" - resolved "https://registry.yarnpkg.com/@mantine/form/-/form-6.0.19.tgz#3d97f08a45b1a8bc8840dbf77defd267abb20e39" - integrity sha512-5SFLZEzaBH7yKIDSDt1r9UiN4y7RkFvu+7J7CFPIQM+nTdXeGnugVFM8rASuZI7/FSYty/XoPY+Yymq3xDX+MQ== + version "6.0.21" + resolved "https://registry.yarnpkg.com/@mantine/form/-/form-6.0.21.tgz#0d717631aa90b9cce834a479f4c8d7e9c0e1969b" + integrity sha512-d4tlxyZic7MSDnaPx/WliCX1sRFDkUd2nxx4MxxO2T4OSek0YDqTlSBCxeoveu60P+vrQQN5rbbsVsaOJBe4SQ== dependencies: fast-deep-equal "^3.1.3" klona "^2.0.5" "@mantine/hooks@^6.0.19": - version "6.0.19" - resolved "https://registry.yarnpkg.com/@mantine/hooks/-/hooks-6.0.19.tgz#39f61434304f687d3ba7bf0040c5adf380c7c4b3" - integrity sha512-YkmuB6kmoenU1PVuE8tLBA+6RJIY9hIsGyIQG1yuPAy6SLWNFT8g2T9YvI/psqsUbVIYGaNEXg8zq42xbxnD8Q== + version "6.0.21" + resolved "https://registry.yarnpkg.com/@mantine/hooks/-/hooks-6.0.21.tgz#bc009d8380ad18455b90f3ddaf484de16a13da95" + integrity sha512-sYwt5wai25W6VnqHbS5eamey30/HD5dNXaZuaVEAJ2i2bBv8C0cCiczygMDpAFiSYdXoSMRr/SZ2CrrPTzeNew== -"@mantine/styles@6.0.19": - version "6.0.19" - resolved "https://registry.yarnpkg.com/@mantine/styles/-/styles-6.0.19.tgz#7d9a6f2c2a9b345dfd9d12f8fd66af3976d67ab2" - integrity sha512-0tg3Dvv/kxCc1mbQVFhZaIhlSbSbV1F/3xG0NRlP2DF23mw9088o5KaIXGKM6XkXU6OEt/f99nDCUHBk2ixtUg== +"@mantine/styles@6.0.21": + version "6.0.21" + resolved "https://registry.yarnpkg.com/@mantine/styles/-/styles-6.0.21.tgz#8ea097fc76cbb3ed55f5cfd719d2f910aff5031b" + integrity sha512-PVtL7XHUiD/B5/kZ/QvZOZZQQOj12QcRs3Q6nPoqaoPcOX5+S7bMZLMH0iLtcGq5OODYk0uxlvuJkOZGoPj8Mg== dependencies: clsx "1.1.1" csstype "3.0.9" -"@mantine/utils@6.0.19": - version "6.0.19" - resolved "https://registry.yarnpkg.com/@mantine/utils/-/utils-6.0.19.tgz#0197fccc5649259787d5468228139f8815909803" - integrity sha512-duvtnaW1gDR2gnvUqnWhl6DMW7sN0HEWqS8Z/BbwaMi75U+Xp17Q72R9JtiIrxQbzsq+KvH9L9B/pxMVwbLirg== +"@mantine/utils@6.0.21": + version "6.0.21" + resolved "https://registry.yarnpkg.com/@mantine/utils/-/utils-6.0.21.tgz#6185506e91cba3e308aaa8ea9ababc8e767995d6" + integrity sha512-33RVDRop5jiWFao3HKd3Yp7A9mEq4HAJxJPTuYm1NkdqX6aTKOQK7wT8v8itVodBp+sb4cJK6ZVdD1UurK/txQ== -"@microsoft/api-extractor-model@7.27.6": - version "7.27.6" - resolved "https://registry.yarnpkg.com/@microsoft/api-extractor-model/-/api-extractor-model-7.27.6.tgz#308e44cd595d2fb446c6357759ee0675ec37d26e" - integrity sha512-eiCnlayyum1f7fS2nA9pfIod5VCNR1G+Tq84V/ijDrKrOFVa598BLw145nCsGDMoFenV6ajNi2PR5WCwpAxW6Q== +"@microsoft/api-extractor-model@7.28.2": + version "7.28.2" + resolved "https://registry.yarnpkg.com/@microsoft/api-extractor-model/-/api-extractor-model-7.28.2.tgz#91c66dd820ccc70e0c163e06b392d8363f1b9269" + integrity sha512-vkojrM2fo3q4n4oPh4uUZdjJ2DxQ2+RnDQL/xhTWSRUNPF6P4QyrvY357HBxbnltKcYu+nNNolVqc6TIGQ73Ig== dependencies: "@microsoft/tsdoc" "0.14.2" "@microsoft/tsdoc-config" "~0.16.1" - "@rushstack/node-core-library" "3.59.7" + "@rushstack/node-core-library" "3.61.0" -"@microsoft/api-extractor@^7.36.3": - version "7.36.4" - resolved "https://registry.yarnpkg.com/@microsoft/api-extractor/-/api-extractor-7.36.4.tgz#3bb9fbbbeacaa48eea49150351905a2677a506d9" - 
integrity sha512-21UECq8C/8CpHT23yiqTBQ10egKUacIpxkPyYR7hdswo/M5yTWdBvbq+77YC9uPKQJOUfOD1FImBQ1DzpsdeQQ== +"@microsoft/api-extractor@^7.36.4": + version "7.38.0" + resolved "https://registry.yarnpkg.com/@microsoft/api-extractor/-/api-extractor-7.38.0.tgz#e72546d6766b3866578a462b040f71b17779e1c5" + integrity sha512-e1LhZYnfw+JEebuY2bzhw0imDCl1nwjSThTrQqBXl40hrVo6xm3j/1EpUr89QyzgjqmAwek2ZkIVZbrhaR+cqg== dependencies: - "@microsoft/api-extractor-model" "7.27.6" + "@microsoft/api-extractor-model" "7.28.2" "@microsoft/tsdoc" "0.14.2" "@microsoft/tsdoc-config" "~0.16.1" - "@rushstack/node-core-library" "3.59.7" - "@rushstack/rig-package" "0.4.1" - "@rushstack/ts-command-line" "4.15.2" + "@rushstack/node-core-library" "3.61.0" + "@rushstack/rig-package" "0.5.1" + "@rushstack/ts-command-line" "4.16.1" colors "~1.2.1" lodash "~4.17.15" resolve "~1.22.1" @@ -1589,28 +1604,28 @@ dependencies: "@babel/runtime" "^7.13.10" -"@reactflow/background@11.2.8": - version "11.2.8" - resolved "https://registry.yarnpkg.com/@reactflow/background/-/background-11.2.8.tgz#aa83f87b7d65442b52732f0a04d9da981f978265" - integrity sha512-5o41N2LygiNC2/Pk8Ak2rIJjXbKHfQ23/Y9LFsnAlufqwdzFqKA8txExpsMoPVHHlbAdA/xpQaMuoChGPqmyDw== +"@reactflow/background@11.3.3": + version "11.3.3" + resolved "https://registry.yarnpkg.com/@reactflow/background/-/background-11.3.3.tgz#c2662ca22acf49cce5084daf21aa4a3ca9f241f2" + integrity sha512-m3MR25ufbrDkZI2Yi7pHX5uewVpiaaVM5px35pk2v3qdG68adqHOgJjncUOpGiJpc3rDwt4mqmW1V7RjBqNv6Q== dependencies: - "@reactflow/core" "11.8.3" + "@reactflow/core" "11.9.3" classcat "^5.0.3" zustand "^4.4.1" -"@reactflow/controls@11.1.19": - version "11.1.19" - resolved "https://registry.yarnpkg.com/@reactflow/controls/-/controls-11.1.19.tgz#a8bc4b4eafc10d5d230db5286753e867bcf35e5b" - integrity sha512-Vo0LFfAYjiSRMLEII/aeBo+1MT2a0Yc7iLVnkuRTLzChC0EX+A2Fa+JlzeOEYKxXlN4qcDxckRNGR7092v1HOQ== +"@reactflow/controls@11.2.3": + version "11.2.3" + resolved "https://registry.yarnpkg.com/@reactflow/controls/-/controls-11.2.3.tgz#d965399772f912b504f1e96d00ddf950aa822153" + integrity sha512-UTsfHE+PhgWrCZN4GUOTRU/3l8dGSyR2KslmgqV7mVNsh6EuS2cxboRczjpcIc8lF0EH+7QxLGeXSH42GWCcOQ== dependencies: - "@reactflow/core" "11.8.3" + "@reactflow/core" "11.9.3" classcat "^5.0.3" zustand "^4.4.1" -"@reactflow/core@11.8.3": - version "11.8.3" - resolved "https://registry.yarnpkg.com/@reactflow/core/-/core-11.8.3.tgz#03ffeb06fbc141b8f786cb4ac8169f8a51a5f00e" - integrity sha512-y6DN8Wy4V4KQBGHFqlj9zWRjLJU6CgdnVwWaEA/PdDg/YUkFBMpZnXqTs60czinoA2rAcvsz50syLTPsj5e+Wg== +"@reactflow/core@11.9.3": + version "11.9.3" + resolved "https://registry.yarnpkg.com/@reactflow/core/-/core-11.9.3.tgz#5212092e5f1ad741e9bd36b4e1a785a7611fd9da" + integrity sha512-45o8X1sjF48wSWALHybbLoWF6yo9SARgJpMKm96J8ZL8mrNhqSjll77sLRJg6zQ+VKdDwotEN30jp5eY6i28tw== dependencies: "@types/d3" "^7.4.0" "@types/d3-drag" "^3.0.1" @@ -1622,12 +1637,12 @@ d3-zoom "^3.0.0" zustand "^4.4.1" -"@reactflow/minimap@11.6.3": - version "11.6.3" - resolved "https://registry.yarnpkg.com/@reactflow/minimap/-/minimap-11.6.3.tgz#1cfddd87e9afd23ad704167988c66bd683ffc5d2" - integrity sha512-PSA28dk09RnBHOA1zb45fjQXz3UozSJZmsIpgq49O3trfVFlSgRapxNdGsughWLs7/emg2M5jmi6Vc+ejcfjvQ== +"@reactflow/minimap@11.7.3": + version "11.7.3" + resolved "https://registry.yarnpkg.com/@reactflow/minimap/-/minimap-11.7.3.tgz#d6ed45d48080e2d8bbd0af255dec958aca523574" + integrity sha512-u620uYwjmA5tJ/4p+F/0kyjNojvV0axTMSw87d/CCDij96m+2/drwqMW+BE8XHEqjG0c1HyplrkXQ3WhGu6ZaA== dependencies: - "@reactflow/core" "11.8.3" + 
"@reactflow/core" "11.9.3" "@types/d3-selection" "^3.0.3" "@types/d3-zoom" "^3.0.1" classcat "^5.0.3" @@ -1635,40 +1650,40 @@ d3-zoom "^3.0.0" zustand "^4.4.1" -"@reactflow/node-resizer@2.1.5": - version "2.1.5" - resolved "https://registry.yarnpkg.com/@reactflow/node-resizer/-/node-resizer-2.1.5.tgz#f4033946ccc9cc8f47a94ed93f10a32befd546f1" - integrity sha512-z/hJlsptd2vTx13wKouqvN/Kln08qbkA+YTJLohc2aJ6rx3oGn9yX4E4IqNxhA7zNqYEdrnc1JTEA//ifh9z3w== +"@reactflow/node-resizer@2.2.3": + version "2.2.3" + resolved "https://registry.yarnpkg.com/@reactflow/node-resizer/-/node-resizer-2.2.3.tgz#94665174cbf0524a3733bac836f5428f851b50e3" + integrity sha512-x1TXN4YZhBI1LxNegVsE51emUg1rf4rBgvNL8Tzj0xsKkD/av4DOzRizQ3xAGgk0joPrsOTiGiP511m/PWjsew== dependencies: - "@reactflow/core" "11.8.3" + "@reactflow/core" "11.9.3" classcat "^5.0.4" d3-drag "^3.0.0" d3-selection "^3.0.0" zustand "^4.4.1" -"@reactflow/node-toolbar@1.2.7": - version "1.2.7" - resolved "https://registry.yarnpkg.com/@reactflow/node-toolbar/-/node-toolbar-1.2.7.tgz#cf6639945dc42b42416f293d6132e1187bca3424" - integrity sha512-vs+Wg1tjy3SuD7eoeTqEtscBfE9RY+APqC28urVvftkrtsN7KlnoQjqDG6aE45jWP4z+8bvFizRWjAhxysNLkg== +"@reactflow/node-toolbar@1.3.3": + version "1.3.3" + resolved "https://registry.yarnpkg.com/@reactflow/node-toolbar/-/node-toolbar-1.3.3.tgz#ccedd3522a43426438e69c204c281f2bb6609f40" + integrity sha512-juNFBLZgC+KOYpVaQFTkSQTDf4hYK7WAagiQQ4Dw0IUcLaMY3TA31OLP6X6gMG73YGKFmkgrDwi0ZDB0jpMqdA== dependencies: - "@reactflow/core" "11.8.3" + "@reactflow/core" "11.9.3" classcat "^5.0.3" zustand "^4.4.1" -"@reduxjs/toolkit@^1.9.5": - version "1.9.5" - resolved "https://registry.yarnpkg.com/@reduxjs/toolkit/-/toolkit-1.9.5.tgz#d3987849c24189ca483baa7aa59386c8e52077c4" - integrity sha512-Rt97jHmfTeaxL4swLRNPD/zV4OxTes4la07Xc4hetpUW/vc75t5m1ANyxG6ymnEQ2FsLQsoMlYB2vV1sO3m8tQ== +"@reduxjs/toolkit@^1.9.7": + version "1.9.7" + resolved "https://registry.yarnpkg.com/@reduxjs/toolkit/-/toolkit-1.9.7.tgz#7fc07c0b0ebec52043f8cb43510cf346405f78a6" + integrity sha512-t7v8ZPxhhKgOKtU+uyJT13lu4vL7az5aFi4IdoDs/eS548edn2M8Ik9h8fxgvMjGoAUVFSt6ZC1P5cWmQ014QQ== dependencies: immer "^9.0.21" redux "^4.2.1" redux-thunk "^2.4.2" reselect "^4.1.8" -"@roarr/browser-log-writer@^1.1.5": - version "1.1.5" - resolved "https://registry.yarnpkg.com/@roarr/browser-log-writer/-/browser-log-writer-1.1.5.tgz#755ff62ddaa297bb3488067408a7085db382352b" - integrity sha512-yLn//DRjh1/rUgZpZkwmT/5RqHYfkdOwGXWXnKBR3l/HE04DIhSVeYin3sc8aWHBa7s7WglQpYX/uw/WI6POpw== +"@roarr/browser-log-writer@^1.3.0": + version "1.3.0" + resolved "https://registry.yarnpkg.com/@roarr/browser-log-writer/-/browser-log-writer-1.3.0.tgz#64aeb65ac88050f0e8c133876e548bec05cfa4af" + integrity sha512-RTzjxrm0CpTSoESmsO6104VymAksDS/yJEkaZrL/OLfbM6q+J+jLRBLtJxhJHSY03pBWOEE3wRh+pVwfKtBPqg== dependencies: boolean "^3.1.4" globalthis "^1.0.2" @@ -1691,10 +1706,10 @@ estree-walker "^2.0.2" picomatch "^2.3.1" -"@rushstack/node-core-library@3.59.7": - version "3.59.7" - resolved "https://registry.yarnpkg.com/@rushstack/node-core-library/-/node-core-library-3.59.7.tgz#9dcd62b79263e8a5b68465d4bf9124ec86e14b6c" - integrity sha512-ln1Drq0h+Hwa1JVA65x5mlSgUrBa1uHL+V89FqVWQgXd1vVIMhrtqtWGQrhTnFHxru5ppX+FY39VWELF/FjQCw== +"@rushstack/node-core-library@3.61.0": + version "3.61.0" + resolved "https://registry.yarnpkg.com/@rushstack/node-core-library/-/node-core-library-3.61.0.tgz#7441a0d2ae5268b758a7a49588a78cd55af57e66" + integrity 
sha512-tdOjdErme+/YOu4gPed3sFS72GhtWCgNV9oDsHDnoLY5oDfwjKUc9Z+JOZZ37uAxcm/OCahDHfuu2ugqrfWAVQ== dependencies: colors "~1.2.1" fs-extra "~7.0.1" @@ -1704,18 +1719,18 @@ semver "~7.5.4" z-schema "~5.0.2" -"@rushstack/rig-package@0.4.1": - version "0.4.1" - resolved "https://registry.yarnpkg.com/@rushstack/rig-package/-/rig-package-0.4.1.tgz#ff11bf67dad46f9b4f09db91cf45739ab411ee9f" - integrity sha512-AGRwpqlXNSp9LhUSz4HKI9xCluqQDt/obsQFdv/NYIekF3pTTPzc+HbQsIsjVjYnJ3DcmxOREVMhvrMEjpiq6g== +"@rushstack/rig-package@0.5.1": + version "0.5.1" + resolved "https://registry.yarnpkg.com/@rushstack/rig-package/-/rig-package-0.5.1.tgz#6c9c283cc96b5bb1eae9875946d974ac5429bb21" + integrity sha512-pXRYSe29TjRw7rqxD4WS3HN/sRSbfr+tJs4a9uuaSIBAITbUggygdhuG0VrO0EO+QqH91GhYMN4S6KRtOEmGVA== dependencies: resolve "~1.22.1" strip-json-comments "~3.1.1" -"@rushstack/ts-command-line@4.15.2": - version "4.15.2" - resolved "https://registry.yarnpkg.com/@rushstack/ts-command-line/-/ts-command-line-4.15.2.tgz#7920e3fa2ab6af129d995ce4424c600da0bf8a93" - integrity sha512-5+C2uoJY8b+odcZD6coEe2XNC4ZjGB4vCMESbqW/8DHRWC/qIHfANdmN9F1wz/lAgxz72i7xRoVtPY2j7e4gpQ== +"@rushstack/ts-command-line@4.16.1": + version "4.16.1" + resolved "https://registry.yarnpkg.com/@rushstack/ts-command-line/-/ts-command-line-4.16.1.tgz#3537bbc323f77c8646646465c579b992d39feb16" + integrity sha512-+OCsD553GYVLEmz12yiFjMOzuPeCiZ3f8wTiFHL30ZVXexTyPmgjwXEhg2K2P0a2lVf+8YBy7WtPoflB2Fp8/A== dependencies: "@types/argparse" "1.0.38" argparse "~1.0.9" @@ -1734,71 +1749,84 @@ dependencies: pako "^2.1.0" -"@swc/core-darwin-arm64@1.3.70": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.70.tgz#056ac6899e22cb7f7be21388d4d938ca5123a72b" - integrity sha512-31+mcl0dgdRHvZRjhLOK9V6B+qJ7nxDZYINr9pBlqGWxknz37Vld5KK19Kpr79r0dXUZvaaelLjCnJk9dA2PcQ== +"@swc/core-darwin-arm64@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.93.tgz#aefd94625451988286bebccb1c072bae0a36bcdb" + integrity sha512-gEKgk7FVIgltnIfDO6GntyuQBBlAYg5imHpRgLxB1zSI27ijVVkksc6QwISzFZAhKYaBWIsFSVeL9AYSziAF7A== -"@swc/core-darwin-x64@1.3.70": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core-darwin-x64/-/core-darwin-x64-1.3.70.tgz#3945814de6fadbee5b46cb2a3422353acb420c5c" - integrity sha512-GMFJ65E18zQC80t0os+TZvI+8lbRuitncWVge/RXmXbVLPRcdykP4EJ87cqzcG5Ah0z18/E0T+ixD6jHRisrYQ== +"@swc/core-darwin-x64@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-darwin-x64/-/core-darwin-x64-1.3.93.tgz#18409c6effdf508ddf1ebccfa77d35aaa6cd72f0" + integrity sha512-ZQPxm/fXdDQtn3yrYSL/gFfA8OfZ5jTi33yFQq6vcg/Y8talpZ+MgdSlYM0FkLrZdMTYYTNFiuBQuuvkA+av+Q== -"@swc/core-linux-arm-gnueabihf@1.3.70": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.3.70.tgz#7960e54ede1af75a7ef99ee53febf37fea6269a8" - integrity sha512-wjhCwS8LCiAq2VedF1b4Bryyw68xZnfMED4pLRazAl8BaUlDFANfRBORNunxlfHQj4V3x39IaiLgCZRHMdzXBg== +"@swc/core-linux-arm-gnueabihf@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.3.93.tgz#23a97bc94a8b2f23fb6cc4bc9d8936899e5eeff5" + integrity sha512-OYFMMI2yV+aNe3wMgYhODxHdqUB/jrK0SEMHHS44GZpk8MuBXEF+Mcz4qjkY5Q1EH7KVQqXb/gVWwdgTHpjM2A== -"@swc/core-linux-arm64-gnu@1.3.70": - version "1.3.70" - resolved 
"https://registry.yarnpkg.com/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.3.70.tgz#df9654e5040bbeb1619739756a7f50100e38ace8" - integrity sha512-9D/Rx67cAOnMiexvCqARxvhj7coRajTp5HlJHuf+rfwMqI2hLhpO9/pBMQxBUAWxODO/ksQ/OF+GJRjmtWw/2A== +"@swc/core-linux-arm64-gnu@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.3.93.tgz#7a17406a7cf76a959a617626d5ee2634ae9afa26" + integrity sha512-BT4dT78odKnJMNiq5HdjBsv29CiIdcCcImAPxeFqAeFw1LL6gh9nzI8E96oWc+0lVT5lfhoesCk4Qm7J6bty8w== -"@swc/core-linux-arm64-musl@1.3.70": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.3.70.tgz#2c2aab5a136c7eb409ddc9cdc4f947a68fd74493" - integrity sha512-gkjxBio7XD+1GlQVVyPP/qeFkLu83VhRHXaUrkNYpr5UZG9zZurBERT9nkS6Y+ouYh+Q9xmw57aIyd2KvD2zqQ== +"@swc/core-linux-arm64-musl@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.3.93.tgz#a30be7780090afefd3b8706398418cbe1d23db49" + integrity sha512-yH5fWEl1bktouC0mhh0Chuxp7HEO4uCtS/ly1Vmf18gs6wZ8DOOkgAEVv2dNKIryy+Na++ljx4Ym7C8tSJTrLw== -"@swc/core-linux-x64-gnu@1.3.70": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.3.70.tgz#774351532b154ed36a5c6d14b647e7a8ab510028" - integrity sha512-/nCly+V4xfMVwfEUoLLAukxUSot/RcSzsf6GdsGTjFcrp5sZIntAjokYRytm3VT1c2TK321AfBorsi9R5w8Y7Q== +"@swc/core-linux-x64-gnu@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.3.93.tgz#41e903fd82e059952d16051b442cbe65ee5b8cb3" + integrity sha512-OFUdx64qvrGJhXKEyxosHxgoUVgba2ztYh7BnMiU5hP8lbI8G13W40J0SN3CmFQwPP30+3oEbW7LWzhKEaYjlg== -"@swc/core-linux-x64-musl@1.3.70": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.3.70.tgz#c0b1b4ad5f4ef187eaa093589a4933ecb6836546" - integrity sha512-HoOsPJbt361KGKaivAK0qIiYARkhzlxeAfvF5NlnKxkIMOZpQ46Lwj3tR0VWohKbrhS+cYKFlVuDi5XnDkx0XA== +"@swc/core-linux-x64-musl@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.3.93.tgz#0866807545c44eac9b3254b374310ad5e1c573f9" + integrity sha512-4B8lSRwEq1XYm6xhxHhvHmKAS7pUp1Q7E33NQ2TlmFhfKvCOh86qvThcjAOo57x8DRwmpvEVrqvpXtYagMN6Ig== -"@swc/core-win32-arm64-msvc@1.3.70": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.3.70.tgz#8640267ce3959db0e7e682103677a5e0500b5ea7" - integrity sha512-hm4IBK/IaRil+aj1cWU6f0GyAdHpw/Jr5nyFYLM2c/tt7w2t5hgb8NjzM2iM84lOClrig1fG6edj2vCF1dFzNQ== +"@swc/core-win32-arm64-msvc@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.3.93.tgz#c72411dea2fd4f62a832f71a6e15424d849e7610" + integrity sha512-BHShlxtkven8ZjjvZ5QR6sC5fZCJ9bMujEkiha6W4cBUTY7ce7qGFyHmQd+iPC85d9kD/0cCiX/Xez8u0BhO7w== -"@swc/core-win32-ia32-msvc@1.3.70": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.3.70.tgz#f95d5656622f5a963bc0125da9fda84cf40faa8d" - integrity sha512-5cgKUKIT/9Fp5fCA+zIjYCQ4dSvjFYOeWGZR3QiTXGkC4bGa1Ji9SEPyeIAX0iruUnKjYaZB9RvHK2tNn7RLrQ== +"@swc/core-win32-ia32-msvc@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.3.93.tgz#05c2b031b976af4ef81f5073ee114254678a5d5d" + integrity 
sha512-nEwNWnz4JzYAK6asVvb92yeylfxMYih7eMQOnT7ZVlZN5ba9WF29xJ6kcQKs9HRH6MvWhz9+wRgv3FcjlU6HYA== -"@swc/core-win32-x64-msvc@1.3.70": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.3.70.tgz#5b3acddb96fdf60df089b837061915cb4be94eaa" - integrity sha512-LE8lW46+TQBzVkn2mHBlk8DIElPIZ2dO5P8AbJiARNBAnlqQWu67l9gWM89UiZ2l33J2cI37pHzON3tKnT8f9g== +"@swc/core-win32-x64-msvc@1.3.93": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.3.93.tgz#f8748b3fd1879f13084b1b0814edf328c662935c" + integrity sha512-jibQ0zUr4kwJaQVwgmH+svS04bYTPnPw/ZkNInzxS+wFAtzINBYcU8s2PMWbDb2NGYiRSEeoSGyAvS9H+24JFA== -"@swc/core@^1.3.61": - version "1.3.70" - resolved "https://registry.yarnpkg.com/@swc/core/-/core-1.3.70.tgz#f5ddc6fe6add7a99f5b94d2214ad0d8527d11479" - integrity sha512-LWVWlEDLlOD25PvA2NEz41UzdwXnlDyBiZbe69s3zM0DfCPwZXLUm79uSqH9ItsOjTrXSL5/1+XUL6C/BZwChA== +"@swc/core@^1.3.85": + version "1.3.93" + resolved "https://registry.yarnpkg.com/@swc/core/-/core-1.3.93.tgz#be4282aa44deffb0e5081a2613bac00335600630" + integrity sha512-690GRr1wUGmGYZHk7fUduX/JUwViMF2o74mnZYIWEcJaCcd9MQfkhsxPBtjeg6tF+h266/Cf3RPYhsFBzzxXcA== + dependencies: + "@swc/counter" "^0.1.1" + "@swc/types" "^0.1.5" optionalDependencies: - "@swc/core-darwin-arm64" "1.3.70" - "@swc/core-darwin-x64" "1.3.70" - "@swc/core-linux-arm-gnueabihf" "1.3.70" - "@swc/core-linux-arm64-gnu" "1.3.70" - "@swc/core-linux-arm64-musl" "1.3.70" - "@swc/core-linux-x64-gnu" "1.3.70" - "@swc/core-linux-x64-musl" "1.3.70" - "@swc/core-win32-arm64-msvc" "1.3.70" - "@swc/core-win32-ia32-msvc" "1.3.70" - "@swc/core-win32-x64-msvc" "1.3.70" + "@swc/core-darwin-arm64" "1.3.93" + "@swc/core-darwin-x64" "1.3.93" + "@swc/core-linux-arm-gnueabihf" "1.3.93" + "@swc/core-linux-arm64-gnu" "1.3.93" + "@swc/core-linux-arm64-musl" "1.3.93" + "@swc/core-linux-x64-gnu" "1.3.93" + "@swc/core-linux-x64-musl" "1.3.93" + "@swc/core-win32-arm64-msvc" "1.3.93" + "@swc/core-win32-ia32-msvc" "1.3.93" + "@swc/core-win32-x64-msvc" "1.3.93" + +"@swc/counter@^0.1.1": + version "0.1.2" + resolved "https://registry.yarnpkg.com/@swc/counter/-/counter-0.1.2.tgz#bf06d0770e47c6f1102270b744e17b934586985e" + integrity sha512-9F4ys4C74eSTEUNndnER3VJ15oru2NumfQxS8geE+f3eB5xvfxpWyqE5XlVnxb/R14uoXi6SLbBwwiDSkv+XEw== + +"@swc/types@^0.1.5": + version "0.1.5" + resolved "https://registry.yarnpkg.com/@swc/types/-/types-0.1.5.tgz#043b731d4f56a79b4897a3de1af35e75d56bc63a" + integrity sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw== "@types/argparse@1.0.38": version "1.0.38" @@ -2061,10 +2089,10 @@ resolved "https://registry.yarnpkg.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee" integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ== -"@types/lodash-es@^4.14.194": - version "4.17.8" - resolved "https://registry.yarnpkg.com/@types/lodash-es/-/lodash-es-4.17.8.tgz#cfffd0969507830c22da18dbb20d2ca126fdaa8b" - integrity sha512-euY3XQcZmIzSy7YH5+Unb3b2X12Wtk54YWINBvvGQ5SmMvwb11JQskGsfkH/5HXK77Kr8GF0wkVDIxzAisWtog== +"@types/lodash-es@^4.17.9": + version "4.17.9" + resolved "https://registry.yarnpkg.com/@types/lodash-es/-/lodash-es-4.17.9.tgz#49dbe5112e23c54f2b387d860b7d03028ce170c2" + integrity sha512-ZTcmhiI3NNU7dEvWLZJkzG6ao49zOIjEgIE0RgV7wbPxU0f2xT3VSAHw2gmst8swH6V0YkLRGp4qPlX/6I90MQ== dependencies: "@types/lodash" "*" @@ -2080,10 +2108,12 @@ resolved 
"https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.195.tgz#bafc975b252eb6cea78882ce8a7b6bf22a6de632" integrity sha512-Hwx9EUgdwf2GLarOjQp5ZH8ZmblzcbTBC2wtQWNKARBSxM9ezRIAUpeDTgoQRAFB0+8CNWXVA9+MaSOzOF3nPg== -"@types/node@^20.5.1": - version "20.5.1" - resolved "https://registry.yarnpkg.com/@types/node/-/node-20.5.1.tgz#178d58ee7e4834152b0e8b4d30cbfab578b9bb30" - integrity sha512-4tT2UrL5LBqDwoed9wZ6N3umC4Yhz3W3FloMmiiG4JwmUJWpie0c7lcnUNd4gtMKuDEO4wRVS8B6Xa0uMRsMKg== +"@types/node@^20.8.6": + version "20.8.6" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.8.6.tgz#0dbd4ebcc82ad0128df05d0e6f57e05359ee47fa" + integrity sha512-eWO4K2Ji70QzKUqRy6oyJWUeB7+g2cRagT3T/nxYibYcT4y2BDL8lqolRXjTHmkZCdJfIPaY73KbJAZmcryxTQ== + dependencies: + undici-types "~5.25.1" "@types/parse-json@^4.0.0": version "4.0.0" @@ -2095,10 +2125,10 @@ resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.5.tgz#5f19d2b85a98e9558036f6a3cacc8819420f05cf" integrity sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w== -"@types/react-dom@^18.2.6": - version "18.2.7" - resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-18.2.7.tgz#67222a08c0a6ae0a0da33c3532348277c70abb63" - integrity sha512-GRaAEriuT4zp9N4p1i8BDBYmEyfo+xQ3yHjJU4eiK5NDa1RmUZG+unZABUTK4/Ox/M+GaHwb6Ow8rUITrtjszA== +"@types/react-dom@^18.2.13": + version "18.2.13" + resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-18.2.13.tgz#89cd7f9ec8b28c8b6f0392b9591671fb4a9e96b7" + integrity sha512-eJIUv7rPP+EC45uNYp/ThhSpE16k22VJUknt5OLoH9tbXoi8bMhwLf5xRuWMywamNbWzhrSmU7IBJfPup1+3fw== dependencies: "@types/react" "*" @@ -2109,20 +2139,20 @@ dependencies: "@types/react" "*" -"@types/react-redux@^7.1.25": - version "7.1.25" - resolved "https://registry.yarnpkg.com/@types/react-redux/-/react-redux-7.1.25.tgz#de841631205b24f9dfb4967dd4a7901e048f9a88" - integrity sha512-bAGh4e+w5D8dajd6InASVIyCo4pZLJ66oLb80F9OBLO1gKESbZcRCJpTT6uLXX+HAB57zw1WTdwJdAsewuTweg== +"@types/react-redux@^7.1.27": + version "7.1.27" + resolved "https://registry.yarnpkg.com/@types/react-redux/-/react-redux-7.1.27.tgz#1afb31f7354bf787e162c10ff3fa19bafa9e6b57" + integrity sha512-xj7d9z32p1K/eBmO+OEy+qfaWXtcPlN8f1Xk3Ne0p/ZRQ867RI5bQ/bpBtxbqU1AHNhKJSgGvld/P2myU2uYkg== dependencies: "@types/hoist-non-react-statics" "^3.3.0" "@types/react" "*" hoist-non-react-statics "^3.3.0" redux "^4.0.0" -"@types/react-transition-group@^4.4.6": - version "4.4.6" - resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.6.tgz#18187bcda5281f8e10dfc48f0943e2fdf4f75e2e" - integrity sha512-VnCdSxfcm08KjsJVQcfBmhEQAPnLB8G08hAxn39azX1qYBQ/5RVQuoHuKIcfKOdncuaUvEpFKFzEvbtIMsfVew== +"@types/react-transition-group@^4.4.7": + version "4.4.7" + resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.7.tgz#bf69f269d74aa78b99097673ca6dd6824a68ef1c" + integrity sha512-ICCyBl5mvyqYp8Qeq9B5G/fyBSRC0zx3XM3sCC6KkcMsNeAHqXBKkmat4GqdJET5jtYUpZXrxI5flve5qhi2Eg== dependencies: "@types/react" "*" @@ -2135,10 +2165,10 @@ "@types/scheduler" "*" csstype "^3.0.2" -"@types/react@^18.2.20": - version "18.2.20" - resolved "https://registry.yarnpkg.com/@types/react/-/react-18.2.20.tgz#1605557a83df5c8a2cc4eeb743b3dfc0eb6aaeb2" - integrity sha512-WKNtmsLWJM/3D5mG4U84cysVY31ivmyw85dE84fOCk5Hx78wezB/XEjVPWl2JTZ5FkEeaTJf+VgUAUn3PE7Isw== +"@types/react@^18.2.28": + version "18.2.28" + resolved 
"https://registry.yarnpkg.com/@types/react/-/react-18.2.28.tgz#86877465c0fcf751659a36c769ecedfcfacee332" + integrity sha512-ad4aa/RaaJS3hyGz0BGegdnSRXQBkd1CCYDCdNjBPg90UUpLgo+WlJqb9fMYUxtehmzF3PJaTWqRZjko6BRzBg== dependencies: "@types/prop-types" "*" "@types/scheduler" "*" @@ -2159,21 +2189,21 @@ resolved "https://registry.yarnpkg.com/@types/use-sync-external-store/-/use-sync-external-store-0.0.3.tgz#b6725d5f4af24ace33b36fafd295136e75509f43" integrity sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA== -"@types/uuid@^9.0.2": - version "9.0.2" - resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.2.tgz#ede1d1b1e451548d44919dc226253e32a6952c4b" - integrity sha512-kNnC1GFBLuhImSnV7w4njQkUiJi0ZXUycu1rUaouPqiKlXkh77JKgdRnTAp1x5eBwcIwbtI+3otwzuIDEuDoxQ== +"@types/uuid@^9.0.5": + version "9.0.5" + resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.5.tgz#25a71eb73eba95ac0e559ff3dd018fc08294acf6" + integrity sha512-xfHdwa1FMJ082prjSJpoEI57GZITiQz10r3vEJCHa2khEFQjKy91aWKz6+zybzssCvXUwE1LQWgWVwZ4nYUvHQ== -"@typescript-eslint/eslint-plugin@^6.4.1": - version "6.4.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.4.1.tgz#bc0c6f000134b53c304ad0bec4ee4753cd3e89d2" - integrity sha512-3F5PtBzUW0dYlq77Lcqo13fv+58KDwUib3BddilE8ajPJT+faGgxmI9Sw+I8ZS22BYwoir9ZhNXcLi+S+I2bkw== +"@typescript-eslint/eslint-plugin@^6.7.5": + version "6.7.5" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.5.tgz#f4024b9f63593d0c2b5bd6e4ca027e6f30934d4f" + integrity sha512-JhtAwTRhOUcP96D0Y6KYnwig/MRQbOoLGXTON2+LlyB/N35SP9j1boai2zzwXb7ypKELXMx3DVk9UTaEq1vHEw== dependencies: "@eslint-community/regexpp" "^4.5.1" - "@typescript-eslint/scope-manager" "6.4.1" - "@typescript-eslint/type-utils" "6.4.1" - "@typescript-eslint/utils" "6.4.1" - "@typescript-eslint/visitor-keys" "6.4.1" + "@typescript-eslint/scope-manager" "6.7.5" + "@typescript-eslint/type-utils" "6.7.5" + "@typescript-eslint/utils" "6.7.5" + "@typescript-eslint/visitor-keys" "6.7.5" debug "^4.3.4" graphemer "^1.4.0" ignore "^5.2.4" @@ -2181,32 +2211,32 @@ semver "^7.5.4" ts-api-utils "^1.0.1" -"@typescript-eslint/parser@^6.4.1": - version "6.4.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.4.1.tgz#85ad550bf4ac4aa227504f1becb828f8e46c44e3" - integrity sha512-610G6KHymg9V7EqOaNBMtD1GgpAmGROsmfHJPXNLCU9bfIuLrkdOygltK784F6Crboyd5tBFayPB7Sf0McrQwg== +"@typescript-eslint/parser@^6.7.5": + version "6.7.5" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.7.5.tgz#8d7ca3d1fbd9d5a58cc4d30b2aa797a760137886" + integrity sha512-bIZVSGx2UME/lmhLcjdVc7ePBwn7CLqKarUBL4me1C5feOd663liTGjMBGVcGr+BhnSLeP4SgwdvNnnkbIdkCw== dependencies: - "@typescript-eslint/scope-manager" "6.4.1" - "@typescript-eslint/types" "6.4.1" - "@typescript-eslint/typescript-estree" "6.4.1" - "@typescript-eslint/visitor-keys" "6.4.1" + "@typescript-eslint/scope-manager" "6.7.5" + "@typescript-eslint/types" "6.7.5" + "@typescript-eslint/typescript-estree" "6.7.5" + "@typescript-eslint/visitor-keys" "6.7.5" debug "^4.3.4" -"@typescript-eslint/scope-manager@6.4.1": - version "6.4.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.4.1.tgz#4b073a30be2dbe603e44e9ae0cff7e1d3ed19278" - integrity sha512-p/OavqOQfm4/Hdrr7kvacOSFjwQ2rrDVJRPxt/o0TOWdFnjJptnjnZ+sYDR7fi4OimvIuKp+2LCkc+rt9fIW+A== +"@typescript-eslint/scope-manager@6.7.5": + version "6.7.5" + resolved 
"https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.7.5.tgz#1cf33b991043886cd67f4f3600b8e122fc14e711" + integrity sha512-GAlk3eQIwWOJeb9F7MKQ6Jbah/vx1zETSDw8likab/eFcqkjSD7BI75SDAeC5N2L0MmConMoPvTsmkrg71+B1A== dependencies: - "@typescript-eslint/types" "6.4.1" - "@typescript-eslint/visitor-keys" "6.4.1" + "@typescript-eslint/types" "6.7.5" + "@typescript-eslint/visitor-keys" "6.7.5" -"@typescript-eslint/type-utils@6.4.1": - version "6.4.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.4.1.tgz#fa21cb13016c8d6f352fe9b2d6c9ab6edc2d1857" - integrity sha512-7ON8M8NXh73SGZ5XvIqWHjgX2f+vvaOarNliGhjrJnv1vdjG0LVIz+ToYfPirOoBi56jxAKLfsLm40+RvxVVXA== +"@typescript-eslint/type-utils@6.7.5": + version "6.7.5" + resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.7.5.tgz#0a65949ec16588d8956f6d967f7d9c84ddb2d72a" + integrity sha512-Gs0qos5wqxnQrvpYv+pf3XfcRXW6jiAn9zE/K+DlmYf6FcpxeNYN0AIETaPR7rHO4K2UY+D0CIbDP9Ut0U4m1g== dependencies: - "@typescript-eslint/typescript-estree" "6.4.1" - "@typescript-eslint/utils" "6.4.1" + "@typescript-eslint/typescript-estree" "6.7.5" + "@typescript-eslint/utils" "6.7.5" debug "^4.3.4" ts-api-utils "^1.0.1" @@ -2220,18 +2250,18 @@ resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-5.62.0.tgz#258607e60effa309f067608931c3df6fed41fd2f" integrity sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ== -"@typescript-eslint/types@6.4.1": - version "6.4.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.4.1.tgz#b2c61159f46dda210fed9f117f5d027f65bb5c3b" - integrity sha512-zAAopbNuYu++ijY1GV2ylCsQsi3B8QvfPHVqhGdDcbx/NK5lkqMnCGU53amAjccSpk+LfeONxwzUhDzArSfZJg== +"@typescript-eslint/types@6.7.5": + version "6.7.5" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.7.5.tgz#4571320fb9cf669de9a95d9849f922c3af809790" + integrity sha512-WboQBlOXtdj1tDFPyIthpKrUb+kZf2VroLZhxKa/VlwLlLyqv/PwUNgL30BlTVZV1Wu4Asu2mMYPqarSO4L5ZQ== -"@typescript-eslint/typescript-estree@6.4.1": - version "6.4.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.4.1.tgz#91ff88101c710adb0f70a317f2f65efa9441da45" - integrity sha512-xF6Y7SatVE/OyV93h1xGgfOkHr2iXuo8ip0gbfzaKeGGuKiAnzS+HtVhSPx8Www243bwlW8IF7X0/B62SzFftg== +"@typescript-eslint/typescript-estree@6.7.5": + version "6.7.5" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.5.tgz#4578de1a26e9f24950f029a4f00d1bfe41f15a39" + integrity sha512-NhJiJ4KdtwBIxrKl0BqG1Ur+uw7FiOnOThcYx9DpOGJ/Abc9z2xNzLeirCG02Ig3vkvrc2qFLmYSSsaITbKjlg== dependencies: - "@typescript-eslint/types" "6.4.1" - "@typescript-eslint/visitor-keys" "6.4.1" + "@typescript-eslint/types" "6.7.5" + "@typescript-eslint/visitor-keys" "6.7.5" debug "^4.3.4" globby "^11.1.0" is-glob "^4.0.3" @@ -2264,17 +2294,17 @@ semver "^7.3.7" tsutils "^3.21.0" -"@typescript-eslint/utils@6.4.1": - version "6.4.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.4.1.tgz#81bf62ff0c3119a26c19fab683582e29450717bc" - integrity sha512-F/6r2RieNeorU0zhqZNv89s9bDZSovv3bZQpUNOmmQK1L80/cV4KEu95YUJWi75u5PhboFoKUJBnZ4FQcoqhDw== +"@typescript-eslint/utils@6.7.5": + version "6.7.5" + resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.7.5.tgz#ab847b53d6b65e029314b8247c2336843dba81ab" + integrity 
sha512-pfRRrH20thJbzPPlPc4j0UNGvH1PjPlhlCMq4Yx7EGjV7lvEeGX0U6MJYe8+SyFutWgSHsdbJ3BXzZccYggezA== dependencies: "@eslint-community/eslint-utils" "^4.4.0" "@types/json-schema" "^7.0.12" "@types/semver" "^7.5.0" - "@typescript-eslint/scope-manager" "6.4.1" - "@typescript-eslint/types" "6.4.1" - "@typescript-eslint/typescript-estree" "6.4.1" + "@typescript-eslint/scope-manager" "6.7.5" + "@typescript-eslint/types" "6.7.5" + "@typescript-eslint/typescript-estree" "6.7.5" semver "^7.5.4" "@typescript-eslint/visitor-keys@4.33.0": @@ -2293,20 +2323,20 @@ "@typescript-eslint/types" "5.62.0" eslint-visitor-keys "^3.3.0" -"@typescript-eslint/visitor-keys@6.4.1": - version "6.4.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.4.1.tgz#e3ccf7b8d42e625946ac5094ed92a405fb4115e0" - integrity sha512-y/TyRJsbZPkJIZQXrHfdnxVnxyKegnpEvnRGNam7s3TRR2ykGefEWOhaef00/UUN3IZxizS7BTO3svd3lCOJRQ== +"@typescript-eslint/visitor-keys@6.7.5": + version "6.7.5" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.5.tgz#84c68d6ceb5b12d5246b918b84f2b79affd6c2f1" + integrity sha512-3MaWdDZtLlsexZzDSdQWsFQ9l9nL8B80Z4fImSpyllFC/KLqWQRdEcB+gGGO+N3Q2uL40EsG66wZLsohPxNXvg== dependencies: - "@typescript-eslint/types" "6.4.1" + "@typescript-eslint/types" "6.7.5" eslint-visitor-keys "^3.4.1" -"@vitejs/plugin-react-swc@^3.3.2": - version "3.3.2" - resolved "https://registry.yarnpkg.com/@vitejs/plugin-react-swc/-/plugin-react-swc-3.3.2.tgz#34a82c1728066f48a86dfecb2f15df60f89207fb" - integrity sha512-VJFWY5sfoZerQRvJrh518h3AcQt6f/yTuWn4/TRB+dqmYU0NX1qz7qM5Wfd+gOQqUzQW4gxKqKN3KpE/P3+zrA== +"@vitejs/plugin-react-swc@^3.4.0": + version "3.4.0" + resolved "https://registry.yarnpkg.com/@vitejs/plugin-react-swc/-/plugin-react-swc-3.4.0.tgz#53ca6a07423abadec92f967e188d5ba49b350830" + integrity sha512-m7UaA4Uvz82N/0EOVpZL4XsFIakRqrFKeSNxa1FBLSXGvWrWRBwmZb4qxk+ZIVAZcW3c3dn5YosomDgx62XWcQ== dependencies: - "@swc/core" "^1.3.61" + "@swc/core" "^1.3.85" "@volar/language-core@1.10.1", "@volar/language-core@~1.10.0": version "1.10.1" @@ -2391,22 +2421,22 @@ resolved "https://registry.yarnpkg.com/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz#e77a97fbd345b76d83245edcd17d393b1b41fb31" integrity sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ== -"@zag-js/dom-query@0.10.5": - version "0.10.5" - resolved "https://registry.yarnpkg.com/@zag-js/dom-query/-/dom-query-0.10.5.tgz#9fc02a51383989667694d8747925e6d20294af9e" - integrity sha512-zm6wA5+kqU48it6afNjaUhjVSixKZruTKB23z0V1xBqKbuiLOMMOZ5oK26cTPSXtZ5CPhDNZ2Qk4pliS5n9SVw== +"@zag-js/dom-query@0.16.0": + version "0.16.0" + resolved "https://registry.yarnpkg.com/@zag-js/dom-query/-/dom-query-0.16.0.tgz#bca46bcd78f78c900064478646d95f9781ed098e" + integrity sha512-Oqhd6+biWyKnhKwFFuZrrf6lxBz2tX2pRQe6grUnYwO6HJ8BcbqZomy2lpOdr+3itlaUqx+Ywj5E5ZZDr/LBfQ== "@zag-js/element-size@0.10.5": version "0.10.5" resolved "https://registry.yarnpkg.com/@zag-js/element-size/-/element-size-0.10.5.tgz#a24bad2eeb7e2c8709e32be5336e158e1a1a174f" integrity sha512-uQre5IidULANvVkNOBQ1tfgwTQcGl4hliPSe69Fct1VfYb2Fd0jdAcGzqQgPhfrXFpR62MxLPB7erxJ/ngtL8w== -"@zag-js/focus-visible@0.10.5": - version "0.10.5" - resolved "https://registry.yarnpkg.com/@zag-js/focus-visible/-/focus-visible-0.10.5.tgz#643e196ea768bea5ce54771102fae22f5e9ba3c7" - integrity sha512-EhDHKLutMtvLFCjBjyIY6h1JoJJNXG3KJz7Dj1sh4tj4LWAqo/TqLvgHyUTB29XMHwoslFHDJHKVWmLGMi+ULQ== +"@zag-js/focus-visible@0.16.0": + version "0.16.0" + resolved 
"https://registry.yarnpkg.com/@zag-js/focus-visible/-/focus-visible-0.16.0.tgz#c9e53e3dbab0f2649d04a489bb379f5800f4f069" + integrity sha512-a7U/HSopvQbrDU4GLerpqiMcHKEkQkNPeDZJWz38cw/6Upunh41GjHetq5TB84hxyCaDzJ6q2nEdNoBQfC0FKA== dependencies: - "@zag-js/dom-query" "0.10.5" + "@zag-js/dom-query" "0.16.0" acorn-jsx@^5.3.2: version "5.3.2" @@ -2516,7 +2546,7 @@ argparse@~1.0.9: dependencies: sprintf-js "~1.0.2" -aria-hidden@^1.1.3, aria-hidden@^1.2.2: +aria-hidden@^1.1.3, aria-hidden@^1.2.3: version "1.2.3" resolved "https://registry.yarnpkg.com/aria-hidden/-/aria-hidden-1.2.3.tgz#14aeb7fb692bbb72d69bebfa47279c1fd725e954" integrity sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ== @@ -2632,10 +2662,10 @@ available-typed-arrays@^1.0.5: resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz#92f95616501069d07d10edb2fc37d3e1c65123b7" integrity sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw== -axios@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/axios/-/axios-1.4.0.tgz#38a7bf1224cd308de271146038b551d725f0be1f" - integrity sha512-S4XCWMEmzvo64T9GfvQDOXgYRDJ/wsSZc7Jvdgx5u1sd0JwsuPLqb3SYmusag+edF6ziyMensPVqLTSc1PiSEA== +axios@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.5.1.tgz#11fbaa11fc35f431193a9564109c88c1f27b585f" + integrity sha512-Q28iYCWzNHjAm+yEAot5QaAMxhMghWLFVf7rRdwhUI+c2jix2DUXjAHXVi+s1ibs3mjPO/cCgbA++3BjD0vP/A== dependencies: follow-redirects "^1.15.0" form-data "^4.0.0" @@ -2918,7 +2948,7 @@ color-name@^1.1.4, color-name@~1.1.4: resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -color2k@^2.0.0: +color2k@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/color2k/-/color2k-2.0.2.tgz#ac2b4aea11c822a6bcb70c768b5a289f4fffcebb" integrity sha512-kJhwH5nAwb34tmyuqq/lgjEKzlFXn1U99NlnB6Ws4qVaERcRUYeYP1cBw6BJ4vxaWStAUEef4WMr7WjOCnBt8w== @@ -2940,10 +2970,10 @@ combined-stream@^1.0.8: dependencies: delayed-stream "~1.0.0" -commander@11.0.0: - version "11.0.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-11.0.0.tgz#43e19c25dbedc8256203538e8d7e9346877a6f67" - integrity sha512-9HMlXtt/BNoYr8ooyjjNRdIilOTkVJXB+GhxMTtOKwk0R4j4lS4NpjuqmRxroBfnfTSHQIHQB7wryHhXarNjmQ== +commander@11.1.0: + version "11.1.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-11.1.0.tgz#62fdce76006a68e5c1ab3314dc92e800eb83d906" + integrity sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ== commander@^10.0.0: version "10.0.1" @@ -2975,20 +3005,20 @@ compare-versions@^6.1.0: resolved "https://registry.yarnpkg.com/compare-versions/-/compare-versions-6.1.0.tgz#3f2131e3ae93577df111dba133e6db876ffe127a" integrity sha512-LNZQXhqUvqUTotpZ00qLSaify3b4VFD588aRr8MKFw4CMUr98ytzCW5wDH5qx/DEY5kCDXcbcRuCqL0szEf2tg== -compute-scroll-into-view@1.0.20: - version "1.0.20" - resolved "https://registry.yarnpkg.com/compute-scroll-into-view/-/compute-scroll-into-view-1.0.20.tgz#1768b5522d1172754f5d0c9b02de3af6be506a43" - integrity sha512-UCB0ioiyj8CRjtrvaceBLqqhZCVP+1B8+NWQhmdsm0VXOJtobBCf1dBQmebCCo34qZmUwZfIH2MZLqNHazrfjg== +compute-scroll-into-view@3.0.3: + version "3.0.3" + resolved 
"https://registry.yarnpkg.com/compute-scroll-into-view/-/compute-scroll-into-view-3.0.3.tgz#c418900a5c56e2b04b885b54995df164535962b1" + integrity sha512-nadqwNxghAGTamwIqQSG433W6OADZx2vCo3UXHNrzTRHK/htu+7+L0zhjEoaeaQVNAi3YgqWDv8+tzf0hRfR+A== concat-map@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== -concurrently@^8.2.0: - version "8.2.0" - resolved "https://registry.yarnpkg.com/concurrently/-/concurrently-8.2.0.tgz#cdc9f621a4d913366600355d68254df2c5e782f3" - integrity sha512-nnLMxO2LU492mTUj9qX/az/lESonSZu81UznYDoXtz1IQf996ixVqPAgHXwvHiHCAef/7S8HIK+fTFK7Ifk8YA== +concurrently@^8.2.1: + version "8.2.1" + resolved "https://registry.yarnpkg.com/concurrently/-/concurrently-8.2.1.tgz#bcab9cacc38c23c503839583151e0fa96fd5b584" + integrity sha512-nVraf3aXOpIcNud5pB9M82p1tynmZkrSGQ1p6X/VY8cJ+2LMVqAgXsJxYYefACSHbTYlm92O1xuhdGTjwoEvbQ== dependencies: chalk "^4.1.2" date-fns "^2.30.0" @@ -3642,10 +3672,10 @@ eslint-config-prettier@^9.0.0: resolved "https://registry.yarnpkg.com/eslint-config-prettier/-/eslint-config-prettier-9.0.0.tgz#eb25485946dd0c66cd216a46232dc05451518d1f" integrity sha512-IcJsTkJae2S35pRsRAwoCE+925rJJStOdkKnLVgtE+tEpqU0EVVM7OqrwxqgptKdX29NUwC82I5pXsGFIgSevw== -eslint-plugin-prettier@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-5.0.0.tgz#6887780ed95f7708340ec79acfdf60c35b9be57a" - integrity sha512-AgaZCVuYDXHUGxj/ZGu1u8H8CYgDY3iG6w5kUFw4AzMVXzB7VvbKgYR4nATIN+OvUrghMbiDLeimVjVY5ilq3w== +eslint-plugin-prettier@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-5.0.1.tgz#a3b399f04378f79f066379f544e42d6b73f11515" + integrity sha512-m3u5RnR56asrwV/lDC4GHorlW75DsFfmUcjfCYylTUs85dBRnB7VM6xG8eCMJdeDRnppzmxZVf1GEPJvl1JmNg== dependencies: prettier-linter-helpers "^1.0.0" synckit "^0.8.5" @@ -3700,16 +3730,16 @@ eslint-visitor-keys@^3.4.3: resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800" integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== -eslint@^8.47.0: - version "8.47.0" - resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.47.0.tgz#c95f9b935463fb4fad7005e626c7621052e90806" - integrity sha512-spUQWrdPt+pRVP1TTJLmfRNJJHHZryFmptzcafwSvHsceV81djHOdnEeDmkdotZyLNjDhrOasNK8nikkoG1O8Q== +eslint@^8.51.0: + version "8.51.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.51.0.tgz#4a82dae60d209ac89a5cff1604fea978ba4950f3" + integrity sha512-2WuxRZBrlwnXi+/vFSJyjMqrNjtJqiasMzehF0shoLaW7DzS3/9Yvrmq5JiT66+pNjiX4UBnLDiKHcWAr/OInA== dependencies: "@eslint-community/eslint-utils" "^4.2.0" "@eslint-community/regexpp" "^4.6.1" "@eslint/eslintrc" "^2.1.2" - "@eslint/js" "^8.47.0" - "@humanwhocodes/config-array" "^0.11.10" + "@eslint/js" "8.51.0" + "@humanwhocodes/config-array" "^0.11.11" "@humanwhocodes/module-importer" "^1.0.1" "@nodelib/fs.walk" "^1.2.8" ajv "^6.12.4" @@ -3791,19 +3821,19 @@ eventemitter3@^5.0.1: resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-5.0.1.tgz#53f5ffd0a492ac800721bb42c66b841de96423c4" integrity sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA== -execa@7.2.0, execa@^7.1.1: - version "7.2.0" - resolved 
"https://registry.yarnpkg.com/execa/-/execa-7.2.0.tgz#657e75ba984f42a70f38928cedc87d6f2d4fe4e9" - integrity sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA== +execa@8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/execa/-/execa-8.0.1.tgz#51f6a5943b580f963c3ca9c6321796db8cc39b8c" + integrity sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg== dependencies: cross-spawn "^7.0.3" - get-stream "^6.0.1" - human-signals "^4.3.0" + get-stream "^8.0.1" + human-signals "^5.0.0" is-stream "^3.0.0" merge-stream "^2.0.0" npm-run-path "^5.1.0" onetime "^6.0.0" - signal-exit "^3.0.7" + signal-exit "^4.1.0" strip-final-newline "^3.0.0" execa@^5.0.0: @@ -3821,6 +3851,21 @@ execa@^5.0.0: signal-exit "^3.0.3" strip-final-newline "^2.0.0" +execa@^7.1.1: + version "7.2.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-7.2.0.tgz#657e75ba984f42a70f38928cedc87d6f2d4fe4e9" + integrity sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA== + dependencies: + cross-spawn "^7.0.3" + get-stream "^6.0.1" + human-signals "^4.3.0" + is-stream "^3.0.0" + merge-stream "^2.0.0" + npm-run-path "^5.1.0" + onetime "^6.0.0" + signal-exit "^3.0.7" + strip-final-newline "^3.0.0" + fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: version "3.1.3" resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" @@ -4020,11 +4065,12 @@ form-data@^4.0.0: combined-stream "^1.0.8" mime-types "^2.1.12" -formik@^2.4.3: - version "2.4.3" - resolved "https://registry.yarnpkg.com/formik/-/formik-2.4.3.tgz#6020e85eb3e3e8415b3b19d6f4f65793ab754b24" - integrity sha512-2Dy79Szw3zlXmZiokUdKsn+n1ow4G8hRrC/n92cOWHNTWXCRpQXlyvz6HcjW7aSQZrldytvDOavYjhfmDnUq8Q== +formik@^2.4.5: + version "2.4.5" + resolved "https://registry.yarnpkg.com/formik/-/formik-2.4.5.tgz#f899b5b7a6f103a8fabb679823e8fafc7e0ee1b4" + integrity sha512-Gxlht0TD3vVdzMDHwkiNZqJ7Mvg77xQNfmBRrNtvzcHZs72TJppSTDKHpImCMJZwcWPBJ8jSQQ95GJzXFf1nAQ== dependencies: + "@types/hoist-non-react-statics" "^3.3.1" deepmerge "^2.1.1" hoist-non-react-statics "^3.3.0" lodash "^4.17.21" @@ -4033,10 +4079,10 @@ formik@^2.4.3: tiny-warning "^1.0.2" tslib "^2.0.0" -framer-motion@^10.16.1: - version "10.16.1" - resolved "https://registry.yarnpkg.com/framer-motion/-/framer-motion-10.16.1.tgz#0ff5de554bbb35ee6605357d80f92b27d0271a94" - integrity sha512-K6TXr5mZtitC/dxQCBdg7xzdN0d5IAIrlaqCPKtIQVdzVPGC0qBuJKXggHX1vjnP5gPOFwB1KbCCTWcnFc3kWg== +framer-motion@^10.16.4: + version "10.16.4" + resolved "https://registry.yarnpkg.com/framer-motion/-/framer-motion-10.16.4.tgz#30279ef5499b8d85db3a298ee25c83429933e9f8" + integrity sha512-p9V9nGomS3m6/CALXqv6nFGMuFOxbWsmaOrdmhyQimMIlLl3LC7h7l86wge/Js/8cRu5ktutS/zlzgR7eBOtFA== dependencies: tslib "^2.4.0" optionalDependencies: @@ -4149,6 +4195,11 @@ get-stream@^6.0.0, get-stream@^6.0.1: resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== +get-stream@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-8.0.1.tgz#def9dfd71742cd7754a7761ed43749a27d02eca2" + integrity sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA== + get-symbol-description@^1.0.0: version "1.0.0" resolved 
"https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz#7fdb81c900101fbd564dd5f1a30af5aadc1e58d6" @@ -4313,6 +4364,11 @@ human-signals@^4.3.0: resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-4.3.1.tgz#ab7f811e851fca97ffbd2c1fe9a958964de321b2" integrity sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ== +human-signals@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-5.0.0.tgz#42665a284f9ae0dade3ba41ebc37eb4b852f3a28" + integrity sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ== + husky@^8.0.3: version "8.0.3" resolved "https://registry.yarnpkg.com/husky/-/husky-8.0.3.tgz#4936d7212e46d1dea28fef29bb3a108872cd9184" @@ -4330,17 +4386,17 @@ i18next-browser-languagedetector@^7.0.2: dependencies: "@babel/runtime" "^7.19.4" -i18next-http-backend@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/i18next-http-backend/-/i18next-http-backend-2.2.1.tgz#cdb7efbafa46ce8f237d9db443f62514664a3bdf" - integrity sha512-ZXIdn/8NJIBJ0X4hzXfc3STYxKrCKh1fYjji9HPyIpEJfvTvy8/ZlTl8RuTizzCPj2ZcWrfaecyOMKs6bQ7u5A== +i18next-http-backend@^2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/i18next-http-backend/-/i18next-http-backend-2.2.2.tgz#3ee16dfe5fe33524ec8925d4f0bf1508ebbbfadf" + integrity sha512-mJu4ZqzDtBiU3O4GV9AbK5ekEqoDMdMnCl3pkdXmb5b8yoIH//u8FsmIe6C5qXb3teZu+j6VMi20tjUgzeABiw== dependencies: cross-fetch "3.1.6" -i18next@^23.4.4: - version "23.4.4" - resolved "https://registry.yarnpkg.com/i18next/-/i18next-23.4.4.tgz#ec8fb2b5f3c5d8e3bf3f8ab1b19e743be91300e0" - integrity sha512-+c9B0txp/x1m5zn+QlwHaCS9vyFtmIAEXbVSFzwCX7vupm5V7va8F9cJGNJZ46X9ZtoGzhIiRC7eTIIh93TxPA== +i18next@^23.5.1: + version "23.5.1" + resolved "https://registry.yarnpkg.com/i18next/-/i18next-23.5.1.tgz#7f7c35ffaa907618d9489f106d5006b09fbca3d3" + integrity sha512-JelYzcaCoFDaa+Ysbfz2JsGAKkrHiMG6S61+HLBUEIPaF40WMwW9hCPymlQGrP+wWawKxKPuSuD71WZscCsWHg== dependencies: "@babel/runtime" "^7.22.5" @@ -4837,10 +4893,10 @@ kolorist@^1.8.0: resolved "https://registry.yarnpkg.com/kolorist/-/kolorist-1.8.0.tgz#edddbbbc7894bc13302cdf740af6374d4a04743c" integrity sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ== -konva@^9.2.0: - version "9.2.0" - resolved "https://registry.yarnpkg.com/konva/-/konva-9.2.0.tgz#3739e539724b0e6b76d697a322efdaa01baa1508" - integrity sha512-+woI76Sk+VFVl9z7zPkuTnN2zFpEYg27YWz8BCdQXpt5IS3pdnSPAPQVPPMidcbDi9/G5b/IOIp35/KqMGiYPA== +konva@^9.2.2: + version "9.2.2" + resolved "https://registry.yarnpkg.com/konva/-/konva-9.2.2.tgz#972105ec79a89c60296d5e36d1f7cef9b84a42d4" + integrity sha512-Gyn5hQa/5+8pJvTn/IVyZWgum2otWXszuVCG/cevkAyKUFcmFv4tGbQhHFGtJPLQkGO+W6xfgRzyYIkNgKnPxA== levn@^0.4.1: version "0.4.1" @@ -4860,21 +4916,21 @@ lines-and-columns@^1.1.6: resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== -lint-staged@^14.0.1: - version "14.0.1" - resolved "https://registry.yarnpkg.com/lint-staged/-/lint-staged-14.0.1.tgz#57dfa3013a3d60762d9af5d9c83bdb51291a6232" - integrity sha512-Mw0cL6HXnHN1ag0mN/Dg4g6sr8uf8sn98w2Oc1ECtFto9tvRF7nkXGJRbx8gPlHyoR0pLyBr2lQHbWwmUHe1Sw== +lint-staged@^15.0.1: + version "15.0.1" + resolved 
"https://registry.yarnpkg.com/lint-staged/-/lint-staged-15.0.1.tgz#1db47c315c79bafe993aa33a0b50cbfeef50d906" + integrity sha512-2IU5OWmCaxch0X0+IBF4/v7sutpB+F3qoXbro43pYjQTOo5wumckjxoxn47pQBqqBsCWrD5HnI2uG/zJA7isew== dependencies: chalk "5.3.0" - commander "11.0.0" + commander "11.1.0" debug "4.3.4" - execa "7.2.0" + execa "8.0.1" lilconfig "2.1.0" - listr2 "6.6.1" + listr2 "7.0.1" micromatch "4.0.5" pidtree "0.6.0" string-argv "0.3.2" - yaml "2.3.1" + yaml "2.3.2" liqe@^3.6.0: version "3.6.1" @@ -4884,10 +4940,10 @@ liqe@^3.6.0: nearley "^2.20.1" ts-error "^1.0.6" -listr2@6.6.1: - version "6.6.1" - resolved "https://registry.yarnpkg.com/listr2/-/listr2-6.6.1.tgz#08b2329e7e8ba6298481464937099f4a2cd7f95d" - integrity sha512-+rAXGHh0fkEWdXBmX+L6mmfmXmXvDGEKzkjxO+8mP3+nI/r/CWznVBvsibXdxda9Zz0OW2e2ikphN3OwCT/jSg== +listr2@7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/listr2/-/listr2-7.0.1.tgz#18e7a655b189cd7a8a76575a26f4d494b6ffc2c2" + integrity sha512-nz+7hwgbDp8eWNoDgzdl4hA/xDSLrNRzPu1TLgOYs6l5Y+Ma6zVWWy9Oyt9TQFONwKoSPoka3H50D3vD5EuNwg== dependencies: cli-truncate "^3.1.0" colorette "^2.0.20" @@ -5299,27 +5355,27 @@ open@^9.1.0: is-inside-container "^1.0.0" is-wsl "^2.2.0" -openapi-fetch@^0.7.4: - version "0.7.4" - resolved "https://registry.yarnpkg.com/openapi-fetch/-/openapi-fetch-0.7.4.tgz#3676e0c44433bfa6701cd408853fec75599a9f0d" - integrity sha512-ACoSikOuFO3sMROtqritJAsGd694gRNXFnWpYAqi+tQzowLOkcQ6SbeAvS+T6qNS92y/OLiiYcNrb/Rh/MrEVw== +openapi-fetch@^0.7.10: + version "0.7.10" + resolved "https://registry.yarnpkg.com/openapi-fetch/-/openapi-fetch-0.7.10.tgz#624c928a646b53561ff2703f00d5617fa88a72df" + integrity sha512-lDZkHjSxBuSTPXkJuJ9kSpkLxY9jgsVHbKkhS7rukoKi5et5QUlWCEzO/E6PaSHTQkJDPOjXdBJeDOSj2e8QwQ== dependencies: - openapi-typescript-helpers "^0.0.1" + openapi-typescript-helpers "^0.0.4" openapi-types@^12.1.3: version "12.1.3" resolved "https://registry.yarnpkg.com/openapi-types/-/openapi-types-12.1.3.tgz#471995eb26c4b97b7bd356aacf7b91b73e777dd3" integrity sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw== -openapi-typescript-helpers@^0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/openapi-typescript-helpers/-/openapi-typescript-helpers-0.0.1.tgz#865c9b66f18db89e41cd0d770170719610f68d2d" - integrity sha512-WDmxej0eHSZtLgCuyPEn2NXRV7tcvUnBBNP/0c/U66mOlxs6Yn0/dHuWlkVKdHGNahSUwG57A1tyutHWRpWqFg== +openapi-typescript-helpers@^0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/openapi-typescript-helpers/-/openapi-typescript-helpers-0.0.4.tgz#ffe7c4868f094fcc8502dbdcddc6c32ce8011aee" + integrity sha512-Q0MTapapFAG993+dx8lNw33X6P/6EbFr31yNymJHq56fNc6dODyRm8tWyRnGxuC74lyl1iCRMV6nQCGQsfVNKg== -openapi-typescript@^6.5.2: - version "6.5.2" - resolved "https://registry.yarnpkg.com/openapi-typescript/-/openapi-typescript-6.5.2.tgz#622e54e6de25bd65c65a1f129be42193160e6e42" - integrity sha512-Zz41utYZ3BAyr32QhOATSnN9zcWMsJeA4Jdq7xQjfYOdZbQfI8Fvsvx41Doe9Wvoho1aj5cP9b5Z7kHtG6mYvg== +openapi-typescript@^6.7.0: + version "6.7.0" + resolved "https://registry.yarnpkg.com/openapi-typescript/-/openapi-typescript-6.7.0.tgz#6d1a4dfc0db60b61573a3ea3c52984a79c638c67" + integrity sha512-eoUfJwhnMEug7euZ1dATG7iRiDVsEROwdPkhLUDiaFjcClV4lzft9F0Ii0fYjULCPNIiWiFi0BqMpSxipuvAgQ== dependencies: ansi-colors "^4.1.3" fast-glob "^3.3.1" @@ -5360,15 +5416,15 @@ os-tmpdir@~1.0.2: resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" integrity 
sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g== -overlayscrollbars-react@^0.5.0: - version "0.5.1" - resolved "https://registry.yarnpkg.com/overlayscrollbars-react/-/overlayscrollbars-react-0.5.1.tgz#b5dde9803bae0115f0f214db0c60cdf98213eedb" - integrity sha512-0xw9J1CT/cQ+ELYy3hudG6nY1H5dgJ1DdVW3d8aZwqx6wyHNZV4nsBQXUxoHmPo3dmlJ5MvOLzpKWA4X6nL4QA== +overlayscrollbars-react@^0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/overlayscrollbars-react/-/overlayscrollbars-react-0.5.2.tgz#b8a6294279a1d0984392586b56a03df78d6c1e34" + integrity sha512-y3xIPY3to7hneF30MQ/tK9nI6UOcflgenAYktgyfi0GuwnsYEuAqgLfs2qDGiNhFA30Xcz3gaMxvqD7+hTvFRA== -overlayscrollbars@^2.2.0: - version "2.2.1" - resolved "https://registry.yarnpkg.com/overlayscrollbars/-/overlayscrollbars-2.2.1.tgz#2a6d43ebba5188a394a8b99611f39e22aad6e0c0" - integrity sha512-5oMxq4UCiEVLiOSvovbX8p+P2NtPosjHC0KkIcaobnYuxGwMyTOwBCtBdqO1tXrrA02VVrNzuIjGMLisO2mIwg== +overlayscrollbars@^2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/overlayscrollbars/-/overlayscrollbars-2.3.2.tgz#1f31daa0b808f5d4710a59c73eb579397745dd72" + integrity sha512-K3Sau7NpFruKfXBauvchAQshAW+un1qD8EYNcozrPAB2kbif8C2rqa+1EWvMMWPKl88wgf2rX2QDMLgAfR0hHA== p-limit@^3.0.2: version "3.1.0" @@ -5577,10 +5633,10 @@ prettier@^2.0.5, prettier@^2.8.8: resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.8.8.tgz#e8c5d7e98a4305ffe3de2e1fc4aca1a71c28b1da" integrity sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q== -prettier@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.0.2.tgz#78fcecd6d870551aa5547437cdae39d4701dca5b" - integrity sha512-o2YR9qtniXvwEZlOKbveKfDQVyqxbEIWn48Z8m3ZJjBjcCmUy3xZGIv+7AkaeuaTr6yPXJjwv07ZWlsWbEy1rQ== +prettier@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.0.3.tgz#432a51f7ba422d1469096c0fdc28e235db8f9643" + integrity sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg== pretty-ms@^7.0.1: version "7.0.1" @@ -5686,10 +5742,10 @@ react-error-boundary@^4.0.11: dependencies: "@babel/runtime" "^7.12.5" -react-fast-compare@3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.1.tgz#53933d9e14f364281d6cba24bfed7a4afb808b5f" - integrity sha512-xTYf9zFim2pEif/Fw16dBiXpe0hoy5PxcD8+OwBnTtNLfIm3g6WxhKNurY+6OmdH1u6Ta/W/Vl6vjbYP1MFnDg== +react-fast-compare@3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.2.tgz#929a97a532304ce9fee4bcae44234f1ce2c21d49" + integrity sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ== react-fast-compare@^2.0.1: version "2.0.4" @@ -5713,18 +5769,18 @@ react-hotkeys-hook@4.4.1: resolved "https://registry.yarnpkg.com/react-hotkeys-hook/-/react-hotkeys-hook-4.4.1.tgz#1f7a7a1c9c21d4fa3280bf340fcca8fd77d81994" integrity sha512-sClBMBioFEgFGYLTWWRKvhxcCx1DRznd+wkFHwQZspnRBkHTgruKIHptlK/U/2DPX8BhHoRGzpMVWUXMmdZlmw== -react-i18next@^13.1.2: - version "13.1.2" - resolved "https://registry.yarnpkg.com/react-i18next/-/react-i18next-13.1.2.tgz#dbb1b18c364295af2a9072333ee4e0b43cbc2da8" - integrity sha512-D/OJ/8ZQYscabsvbCAiOgvJq8W3feQF/VIV0to1w7V7UvrUE1IZ3hcalOckUYvKBd7BP3b8EPm+hop3J8sS+Mw== +react-i18next@^13.3.0: + version "13.3.0" + resolved 
"https://registry.yarnpkg.com/react-i18next/-/react-i18next-13.3.0.tgz#8e39c0101f654db7eb971f159bb55067a78925c3" + integrity sha512-FlR9xjYHSPIJfQspEmkN0yOlxgRyNuiJKJ8gCaZH08UJ7SZHG+VrptEPcpEMEchjNoCOZdKcvJ3PnmHEZhkeXg== dependencies: "@babel/runtime" "^7.22.5" html-parse-stringify "^3.0.1" -react-icons@^4.10.1: - version "4.10.1" - resolved "https://registry.yarnpkg.com/react-icons/-/react-icons-4.10.1.tgz#3f3b5eec1f63c1796f6a26174a1091ca6437a500" - integrity sha512-/ngzDP/77tlCfqthiiGNZeYFACw85fUjZtLbedmJ5DTlNDIwETxhwBzdOJ21zj4iJdvc0J3y7yOsX3PpxAJzrw== +react-icons@^4.11.0: + version "4.11.0" + resolved "https://registry.yarnpkg.com/react-icons/-/react-icons-4.11.0.tgz#4b0e31c9bfc919608095cc429c4f1846f4d66c65" + integrity sha512-V+4khzYcE5EBk/BvcuYRq6V/osf11ODUM2J8hg2FDSswRrGvqiYUYPRy4OdrWaQOBj4NcpJfmHZLNaD+VH0TyA== react-is@^16.13.1, react-is@^16.7.0: version "16.13.1" @@ -5754,10 +5810,10 @@ react-reconciler@~0.29.0: loose-envify "^1.1.0" scheduler "^0.23.0" -react-redux@^8.1.2: - version "8.1.2" - resolved "https://registry.yarnpkg.com/react-redux/-/react-redux-8.1.2.tgz#9076bbc6b60f746659ad6d51cb05de9c5e1e9188" - integrity sha512-xJKYI189VwfsFc4CJvHqHlDrzyFTY/3vZACbE+rr/zQ34Xx1wQfB4OTOSeOSNrF6BDVe8OOdxIrAnMGXA3ggfw== +react-redux@^8.1.3: + version "8.1.3" + resolved "https://registry.yarnpkg.com/react-redux/-/react-redux-8.1.3.tgz#4fdc0462d0acb59af29a13c27ffef6f49ab4df46" + integrity sha512-n0ZrutD7DaX/j9VscF+uTALI3oUPa/pO4Z3soOBIjuRn/FzVu6aehhysxZCLi6y7duMf52WNZGMl7CtuK5EnRw== dependencies: "@babel/runtime" "^7.12.1" "@types/hoist-non-react-statics" "^3.3.1" @@ -5774,10 +5830,10 @@ react-remove-scroll-bar@^2.3.4: react-style-singleton "^2.2.1" tslib "^2.0.0" -react-remove-scroll@^2.5.5: - version "2.5.6" - resolved "https://registry.yarnpkg.com/react-remove-scroll/-/react-remove-scroll-2.5.6.tgz#7510b8079e9c7eebe00e65a33daaa3aa29a10336" - integrity sha512-bO856ad1uDYLefgArk559IzUNeQ6SWH4QnrevIUjH+GczV56giDfl3h0Idptf2oIKxQmd1p9BN25jleKodTALg== +react-remove-scroll@^2.5.5, react-remove-scroll@^2.5.6: + version "2.5.7" + resolved "https://registry.yarnpkg.com/react-remove-scroll/-/react-remove-scroll-2.5.7.tgz#15a1fd038e8497f65a695bf26a4a57970cac1ccb" + integrity sha512-FnrTWO4L7/Bhhf3CYBNArEG/yROV0tKmTv7/3h9QCFvH6sndeFf1wPqOcbFVu5VAulS5dV1wGT3GZZ/1GawqiA== dependencies: react-remove-scroll-bar "^2.3.4" react-style-singleton "^2.2.1" @@ -5833,15 +5889,15 @@ react-use@^17.4.0: ts-easing "^0.2.0" tslib "^2.1.0" -react-virtuoso@^4.5.0: - version "4.5.0" - resolved "https://registry.yarnpkg.com/react-virtuoso/-/react-virtuoso-4.5.0.tgz#0bc043b4e3e928e7891aa541dd7e55d5b46db8c8" - integrity sha512-OMP6XrzJMMos1vbJZC16RxGW7utAxUMP7i5PNPi6epBNVH7nz+CF/DlmecNBep5wyjLud51dQ5epjb2A0w9W/Q== +react-virtuoso@^4.6.1: + version "4.6.1" + resolved "https://registry.yarnpkg.com/react-virtuoso/-/react-virtuoso-4.6.1.tgz#b08465be4222d9820a22fbe2fefec29e91d0b048" + integrity sha512-dQq0yOdRjdWIYaiTvUbudqmTodGSdWGt5lVYz3mM07TTEV91yv7eL5Fn3FPEJOA36whScxPWg0GiYBIOZYEIEA== -react-zoom-pan-pinch@^3.0.8: - version "3.1.0" - resolved "https://registry.yarnpkg.com/react-zoom-pan-pinch/-/react-zoom-pan-pinch-3.1.0.tgz#d87a66fd22a97f5dd56b54076411a9dce1f448cd" - integrity sha512-a3LlP8QPgTikvteCNkZ3X6wIWC0lrg1geP5WkUJyx2MXXAhHQek3r17N1nT/esOiWGuPIECnsd9AGoK8jOeGcg== +react-zoom-pan-pinch@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/react-zoom-pan-pinch/-/react-zoom-pan-pinch-3.2.0.tgz#6ce7d014a8dc4aa62ce83ca57f85e76cf2e934b8" + integrity 
sha512-7MS0wYWoXjr6PrmpgHOVpVyNQr9gj7LEr4xIvq6lBy62nuNwjdI1r+XxahQ0SDHhWrLuSF11e2PTL/YLengYyg== react@^18.2.0: version "18.2.0" @@ -5850,17 +5906,17 @@ react@^18.2.0: dependencies: loose-envify "^1.1.0" -reactflow@^11.8.3: - version "11.8.3" - resolved "https://registry.yarnpkg.com/reactflow/-/reactflow-11.8.3.tgz#ad5cdf22408298956c92ab652929ff92206af9dc" - integrity sha512-wuVxJOFqi1vhA4WAEJLK0JWx2TsTiWpxTXTRp/wvpqKInQgQcB49I2QNyNYsKJCQ6jjXektS7H+LXoaVK/pG4A== +reactflow@^11.9.3: + version "11.9.3" + resolved "https://registry.yarnpkg.com/reactflow/-/reactflow-11.9.3.tgz#4723774370ff87403c574bdec43c93e809165854" + integrity sha512-GiIo20Vgy1U4h1NlLyQChWYgsl2OQkEgKHjokyQsdmm1nidywTr0n94O6w97ixLljKzJynTMjDdWP0p8xkq6NQ== dependencies: - "@reactflow/background" "11.2.8" - "@reactflow/controls" "11.1.19" - "@reactflow/core" "11.8.3" - "@reactflow/minimap" "11.6.3" - "@reactflow/node-resizer" "2.1.5" - "@reactflow/node-toolbar" "1.2.7" + "@reactflow/background" "11.3.3" + "@reactflow/controls" "11.2.3" + "@reactflow/core" "11.9.3" + "@reactflow/minimap" "11.7.3" + "@reactflow/node-resizer" "2.2.3" + "@reactflow/node-toolbar" "1.3.3" readable-stream@^3.4.0: version "3.6.2" @@ -5883,10 +5939,10 @@ redux-dynamic-middlewares@^2.2.0: resolved "https://registry.yarnpkg.com/redux-dynamic-middlewares/-/redux-dynamic-middlewares-2.2.0.tgz#6835dd6d4f2fd975266376b45dcae0141320ae97" integrity sha512-GHESQC+Y0PV98ZBoaC6br6cDOsNiM1Cu4UleGMqMWCXX03jIr3BoozYVrRkLVVAl4sC216chakMnZOu6SwNdGA== -redux-remember@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/redux-remember/-/redux-remember-4.0.1.tgz#fae416d140a9dccdf84285b957e7062934a337fb" - integrity sha512-mP/EWdBVKg0bJfe3srzofp5sNSmWBLjKX+JzJC7J+DBjbLaxTCsLXVq1fnE4rcHXb9Sz/4u5qZ040I/ZhKzjLw== +redux-remember@^4.0.4: + version "4.0.4" + resolved "https://registry.yarnpkg.com/redux-remember/-/redux-remember-4.0.4.tgz#ca0b583088fdb1ff6d917c88ae80bb83001627de" + integrity sha512-a1T+UMYTa08Uq0YtCp0j5Z7v5yydbePPgfu4iAZ21Uk4ozcFfT/PoB9PwETFhHRxBW4Ij0yWPfPJw3mIE/CXlw== redux-thunk@^2.4.2: version "2.4.2" @@ -5917,6 +5973,11 @@ regenerator-runtime@^0.13.11: resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9" integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg== +regenerator-runtime@^0.14.0: + version "0.14.0" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz#5e19d68eb12d486f797e15a3c6a918f7cec5eb45" + integrity sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA== + regexp.prototype.flags@^1.4.3, regexp.prototype.flags@^1.5.0: version "1.5.0" resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.0.tgz#fe7ce25e7e4cca8db37b6634c8a2c7009199b9cb" @@ -6169,10 +6230,10 @@ semver@^7.3.5, semver@^7.3.7, semver@^7.3.8, semver@^7.5.3, semver@^7.5.4, semve dependencies: lru-cache "^6.0.0" -serialize-error@^11.0.1: - version "11.0.1" - resolved "https://registry.yarnpkg.com/serialize-error/-/serialize-error-11.0.1.tgz#7cfa2b54f7aca3e4cbfc0137259d94d93793f813" - integrity sha512-B5yw3/Lg+Daspbs0f+iO3Qim0+lALnaLS8aZUAy8Y0tO92tkOoMEuwtKo4jpZ5XO16CTwMi4tYN8cZQI3QF2Qw== +serialize-error@^11.0.2: + version "11.0.2" + resolved "https://registry.yarnpkg.com/serialize-error/-/serialize-error-11.0.2.tgz#8c1a44f0ab872ee2c3ca6736ca5c750003bc1d04" + integrity 
sha512-o43i0jLcA0LXA5Uu+gI1Vj+lF66KR9IAcy0ThbGq1bAMPN+k5IgSHsulfnqf/ddKAz6dWf+k8PD5hAr9oCSHEQ== dependencies: type-fest "^2.12.2" @@ -6212,6 +6273,11 @@ signal-exit@^3.0.2, signal-exit@^3.0.3, signal-exit@^3.0.7: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== +signal-exit@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04" + integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw== + slash@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/slash/-/slash-2.0.0.tgz#de552851a1759df3a8f206535442f5ec4ddeab44" @@ -6629,20 +6695,20 @@ tslib@^1.8.1: resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== -tslib@^2.0.0, tslib@^2.1.0, tslib@^2.4.0: - version "2.6.0" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.0.tgz#b295854684dbda164e181d259a22cd779dcd7bc3" - integrity sha512-7At1WUettjcSRHXCyYtTselblcHl9PJFFVKiCAy/bY97+BPZXSQ2wbq0P9s8tK2G7dFQfNnlJnPAiArVBVBsfA== +tslib@^2.0.0, tslib@^2.1.0, tslib@^2.5.0, tslib@^2.6.0: + version "2.6.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" + integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== tslib@^2.0.3: version "2.6.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.1.tgz#fd8c9a0ff42590b25703c0acb3de3d3f4ede0410" integrity sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig== -tslib@^2.5.0, tslib@^2.6.0: - version "2.6.2" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" - integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== +tslib@^2.4.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.0.tgz#b295854684dbda164e181d259a22cd779dcd7bc3" + integrity sha512-7At1WUettjcSRHXCyYtTselblcHl9PJFFVKiCAy/bY97+BPZXSQ2wbq0P9s8tK2G7dFQfNnlJnPAiArVBVBsfA== tsutils@^3.21.0: version "3.21.0" @@ -6673,10 +6739,10 @@ type-fest@^2.12.2: resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-2.19.0.tgz#88068015bb33036a598b952e55e9311a60fd3a9b" integrity sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA== -type-fest@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-4.2.0.tgz#e259430307710e77721ecf6f545840acad72195f" - integrity sha512-5zknd7Dss75pMSED270A1RQS3KloqRJA9XbXLe0eCxyw7xXFb3rd+9B0UQ/0E+LQT6lnrLviEolYORlRWamn4w== +type-fest@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-4.4.0.tgz#061cd10ff55664bb7174218cdf78c28c48f71c69" + integrity sha512-HT3RRs7sTfY22KuPQJkD/XjbTbxgP2Je5HPt6H6JEGvcjHd5Lqru75EbrP3tb4FYjNJ+DjLp+MNQTFQU0mhXNw== typed-array-buffer@^1.0.0: version "1.0.0" @@ -6747,6 +6813,11 @@ unbox-primitive@^1.0.2: has-symbols "^1.0.3" which-boxed-primitive "^1.0.2" +undici-types@~5.25.1: + version "5.25.3" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.25.3.tgz#e044115914c85f0bcbb229f346ab739f064998c3" + integrity 
sha512-Ga1jfYwRn7+cP9v8auvEXN1rX3sWqlayd4HP7OKk4mZWylEmu3KzXDUGrQUN6Ol7qo1gPvB2e5gX6udnyEPgdA== + undici@^5.23.0: version "5.23.0" resolved "https://registry.yarnpkg.com/undici/-/undici-5.23.0.tgz#e7bdb0ed42cebe7b7aca87ced53e6eaafb8f8ca0" @@ -6833,10 +6904,10 @@ util-deprecate@^1.0.1: resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== -uuid@^9.0.0: - version "9.0.0" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.0.tgz#592f550650024a38ceb0c562f2f6aa435761efb5" - integrity sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg== +uuid@^9.0.1: + version "9.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" + integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA== validator@^13.7.0: version "13.9.0" @@ -6848,12 +6919,12 @@ vite-plugin-css-injected-by-js@^3.3.0: resolved "https://registry.yarnpkg.com/vite-plugin-css-injected-by-js/-/vite-plugin-css-injected-by-js-3.3.0.tgz#c19480a9e42a95c5bced976a9dde1446f9bd91ff" integrity sha512-xG+jyHNCmUqi/TXp6q88wTJGeAOrNLSyUUTp4qEQ9QZLGcHWQQsCsSSKa59rPMQr8sOzfzmWDd8enGqfH/dBew== -vite-plugin-dts@^3.5.2: - version "3.5.2" - resolved "https://registry.yarnpkg.com/vite-plugin-dts/-/vite-plugin-dts-3.5.2.tgz#429612f727f1bf4eff1f22e29c04b52a75d398b8" - integrity sha512-iKc851+jdHEoN1ifbOEsoMs+/Zg26PE1EyO2Jc+4apOWRoaeK2zRJnaStgUuJaVaEcAjTqWzpNgCAMq7iO6DWA== +vite-plugin-dts@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/vite-plugin-dts/-/vite-plugin-dts-3.6.0.tgz#8d1052e93ae4efef1b0860e44411570a58c77517" + integrity sha512-doxhDRFJCZD2sGjIp4V800nm8Y19GvmwckjG5vYPuiqJ7OBjc9NlW1Vp9Gkyh2aXlUs1jTDRH/lxWfcsPLOQHg== dependencies: - "@microsoft/api-extractor" "^7.36.3" + "@microsoft/api-extractor" "^7.36.4" "@rollup/pluginutils" "^5.0.2" "@vue/language-core" "^1.8.8" debug "^4.3.4" @@ -6869,19 +6940,19 @@ vite-plugin-eslint@^1.8.1: "@types/eslint" "^8.4.5" rollup "^2.77.2" -vite-tsconfig-paths@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/vite-tsconfig-paths/-/vite-tsconfig-paths-4.2.0.tgz#bd2647d3eadafb65a10fc98a2ca565211f2eaf63" - integrity sha512-jGpus0eUy5qbbMVGiTxCL1iB9ZGN6Bd37VGLJU39kTDD6ZfULTTb1bcc5IeTWqWJKiWV5YihCaibeASPiGi8kw== +vite-tsconfig-paths@^4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/vite-tsconfig-paths/-/vite-tsconfig-paths-4.2.1.tgz#e53b89096b91d31a6d1e26f75999ea8c336a89ed" + integrity sha512-GNUI6ZgPqT3oervkvzU+qtys83+75N/OuDaQl7HmOqFTb0pjZsuARrRipsyJhJ3enqV8beI1xhGbToR4o78nSQ== dependencies: debug "^4.1.1" globrex "^0.1.2" tsconfck "^2.1.0" -vite@^4.4.9: - version "4.4.9" - resolved "https://registry.yarnpkg.com/vite/-/vite-4.4.9.tgz#1402423f1a2f8d66fd8d15e351127c7236d29d3d" - integrity sha512-2mbUn2LlUmNASWwSCNSJ/EG2HuSRTnVNaydp6vMCm5VIqJsjMfbIWtbH2kDuwUVW5mMUKKZvGPX/rqeqVvv1XA== +vite@^4.4.11: + version "4.4.11" + resolved "https://registry.yarnpkg.com/vite/-/vite-4.4.11.tgz#babdb055b08c69cfc4c468072a2e6c9ca62102b0" + integrity sha512-ksNZJlkcU9b0lBwAGZGGaZHCMqHsc8OpgtoYhsQ4/I2v5cnpmmmqe5pM4nv/4Hn6G/2GhTdj0DhZh2e+Er1q5A== dependencies: esbuild "^0.18.10" postcss "^8.4.27" @@ -7036,16 +7107,21 @@ yallist@^4.0.0: resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" integrity 
sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== -yaml@2.3.1, yaml@^2.2.2: - version "2.3.1" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.3.1.tgz#02fe0975d23cd441242aa7204e09fc28ac2ac33b" - integrity sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ== +yaml@2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.3.2.tgz#f522db4313c671a0ca963a75670f1c12ea909144" + integrity sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg== yaml@^1.10.0: version "1.10.2" resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== +yaml@^2.2.2: + version "2.3.1" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.3.1.tgz#02fe0975d23cd441242aa7204e09fc28ac2ac33b" + integrity sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ== + yargs-parser@^21.1.1: version "21.1.1" resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" @@ -7090,10 +7166,10 @@ zod-validation-error@^1.5.0: resolved "https://registry.yarnpkg.com/zod-validation-error/-/zod-validation-error-1.5.0.tgz#2b355007a1c3b7fb04fa476bfad4e7b3fd5491e3" integrity sha512-/7eFkAI4qV0tcxMBB/3+d2c1P6jzzZYdYSlBuAklzMuCrJu5bzJfHS0yVAS87dRHVlhftd6RFJDIvv03JgkSbw== -zod@^3.22.2: - version "3.22.2" - resolved "https://registry.yarnpkg.com/zod/-/zod-3.22.2.tgz#3add8c682b7077c05ac6f979fea6998b573e157b" - integrity sha512-wvWkphh5WQsJbVk1tbx1l1Ly4yg+XecD+Mq280uBGt9wa5BKSWf4Mhp6GmrkPixhMxmabYY7RbzlwVP32pbGCg== +zod@^3.22.4: + version "3.22.4" + resolved "https://registry.yarnpkg.com/zod/-/zod-3.22.4.tgz#f31c3a9386f61b1f228af56faa9255e845cf3fff" + integrity sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg== zustand@^4.4.1: version "4.4.1" From dc232438fb86f2b6acddf7400a69f22a4ff58cbe Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Sun, 15 Oct 2023 11:35:25 +0000 Subject: [PATCH 069/202] translationBot(ui): update translation (Italian) Currently translated at 97.5% (1187 of 1217 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 90 +++++++++++++++++--- 1 file changed, 80 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 32994e0169..c69879cfcf 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -88,7 +88,8 @@ "ipAdapter": "Adattatore IP", "t2iAdapter": "Adattatore T2I", "controlAdapter": "Adattatore di Controllo", - "controlNet": "ControlNet" + "controlNet": "ControlNet", + "auto": "Automatico" }, "gallery": { "generations": "Generazioni", @@ -578,7 +579,9 @@ "noControlImageForControlAdapter": "L'adattatore di controllo #{{number}} non ha un'immagine di controllo", "noModelForControlAdapter": "Nessun modello selezionato per l'adattatore di controllo #{{number}}.", "incompatibleBaseModelForControlAdapter": "Il modello dell'adattatore di controllo #{{number}} non è compatibile con il modello principale.", - "missingNodeTemplate": "Modello di nodo mancante" + "missingNodeTemplate": "Modello di nodo mancante", 
+ "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} ingresso mancante", + "missingFieldTemplate": "Modello di campo mancante" }, "enableNoiseSettings": "Abilita le impostazioni del rumore", "cpuNoise": "Rumore CPU", @@ -589,7 +592,7 @@ "iterations": "Iterazioni", "iterationsWithCount_one": "{{count}} Iterazione", "iterationsWithCount_many": "{{count}} Iterazioni", - "iterationsWithCount_other": "", + "iterationsWithCount_other": "{{count}} Iterazioni", "seamlessX&Y": "Senza cuciture X & Y", "isAllowedToUpscale": { "useX2Model": "L'immagine è troppo grande per l'ampliamento con il modello x4, utilizza il modello x2", @@ -597,7 +600,8 @@ }, "seamlessX": "Senza cuciture X", "seamlessY": "Senza cuciture Y", - "imageActions": "Azioni Immagine" + "imageActions": "Azioni Immagine", + "aspectRatioFree": "Libere" }, "settings": { "models": "Modelli", @@ -632,9 +636,13 @@ "clearIntermediatesDesc2": "Le immagini intermedie sono sottoprodotti della generazione, diversi dalle immagini risultanti nella galleria. La cancellazione degli intermedi libererà spazio su disco.", "intermediatesCleared_one": "Cancellata {{count}} immagine intermedia", "intermediatesCleared_many": "Cancellate {{count}} immagini intermedie", - "intermediatesCleared_other": "", + "intermediatesCleared_other": "Cancellate {{count}} immagini intermedie", "clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato di Tela Unificata e ControlNet.", - "intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie" + "intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie", + "clearIntermediatesWithCount_one": "Cancella {{count}} immagine intermedia", + "clearIntermediatesWithCount_many": "Cancella {{count}} immagini intermedie", + "clearIntermediatesWithCount_other": "Cancella {{count}} immagini intermedie", + "clearIntermediatesDisabled": "La coda deve essere vuota per cancellare le immagini intermedie" }, "toast": { "tempFoldersEmptied": "Cartella temporanea svuotata", @@ -686,7 +694,7 @@ "nodesBrokenConnections": "Impossibile caricare. 
Alcune connessioni sono interrotte.", "baseModelChangedCleared_one": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modello incompatibile", "baseModelChangedCleared_many": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modelli incompatibili", - "baseModelChangedCleared_other": "", + "baseModelChangedCleared_other": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modelli incompatibili", "imageSavingFailed": "Salvataggio dell'immagine non riuscito", "canvasSentControlnetAssets": "Tela inviata a ControlNet & Risorse", "problemCopyingCanvasDesc": "Impossibile copiare la tela", @@ -955,7 +963,69 @@ "enumDescription": "Gli enumeratori sono valori che possono essere una delle diverse opzioni.", "denoiseMaskField": "Maschera riduzione rumore", "currentImage": "Immagine corrente", - "floatCollection": "Raccolta in virgola mobile" + "floatCollection": "Raccolta in virgola mobile", + "inputField": "Campo di Input", + "controlFieldDescription": "Informazioni di controllo passate tra i nodi.", + "skippingUnknownOutputType": "Tipo di campo di output sconosciuto saltato", + "latentsFieldDescription": "Le immagini latenti possono essere passate tra i nodi.", + "ipAdapterPolymorphicDescription": "Una raccolta di adattatori IP.", + "latentsPolymorphicDescription": "Le immagini latenti possono essere passate tra i nodi.", + "ipAdapterCollection": "Raccolta Adattatori IP", + "conditioningCollection": "Raccolta condizionamenti", + "ipAdapterPolymorphic": "Adattatore IP Polimorfico", + "integerPolymorphicDescription": "Una raccolta di numeri interi.", + "conditioningCollectionDescription": "Il condizionamento può essere passato tra i nodi.", + "skippingReservedFieldType": "Tipo di campo riservato saltato", + "conditioningPolymorphic": "Condizionamento Polimorfico", + "integer": "Numero Intero", + "latentsCollection": "Raccolta Latenti", + "sourceNode": "Nodo di origine", + "integerDescription": "Gli interi sono numeri senza punto decimale.", + "stringPolymorphic": "Stringa polimorfica", + "conditioningPolymorphicDescription": "Il condizionamento può essere passato tra i nodi.", + "skipped": "Saltato", + "imagePolymorphic": "Immagine Polimorfica", + "imagePolymorphicDescription": "Una raccolta di immagini.", + "floatPolymorphic": "Numeri in virgola mobile Polimorfici", + "ipAdapterCollectionDescription": "Una raccolta di adattatori IP.", + "stringCollectionDescription": "Una raccolta di stringhe.", + "unableToParseNode": "Impossibile analizzare il nodo", + "controlCollection": "Raccolta di Controllo", + "stringCollection": "Raccolta di stringhe", + "inputMayOnlyHaveOneConnection": "L'ingresso può avere solo una connessione", + "ipAdapter": "Adattatore IP", + "integerCollection": "Raccolta di numeri interi", + "controlCollectionDescription": "Informazioni di controllo passate tra i nodi.", + "skippedReservedInput": "Campo di input riservato saltato", + "inputNode": "Nodo di Input", + "imageField": "Immagine", + "skippedReservedOutput": "Campo di output riservato saltato", + "integerCollectionDescription": "Una raccolta di numeri interi.", + "conditioningFieldDescription": "Il condizionamento può essere passato tra i nodi.", + "stringDescription": "Le stringhe sono testo.", + "integerPolymorphic": "Numero intero Polimorfico", + "ipAdapterModel": "Modello Adattatore IP", + "latentsPolymorphic": "Latenti polimorfici", + "skippingInputNoTemplate": "Campo di input senza modello saltato", + "ipAdapterDescription": "Un 
adattatore di prompt di immagini (Adattatore IP).", + "stringPolymorphicDescription": "Una raccolta di stringhe.", + "skippingUnknownInputType": "Tipo di campo di input sconosciuto saltato", + "controlField": "Controllo", + "ipAdapterModelDescription": "Campo Modello adattatore IP", + "invalidOutputSchema": "Schema di output non valido", + "floatDescription": "I numeri in virgola mobile sono numeri con un punto decimale.", + "floatPolymorphicDescription": "Una raccolta di numeri in virgola mobile.", + "conditioningField": "Condizionamento", + "string": "Stringa", + "latentsField": "Latenti", + "connectionWouldCreateCycle": "La connessione creerebbe un ciclo", + "inputFields": "Campi di Input", + "uNetFieldDescription": "Sub-modello UNet.", + "imageCollectionDescription": "Una raccolta di immagini.", + "imageFieldDescription": "Le immagini possono essere passate tra i nodi.", + "unableToParseEdge": "Impossibile analizzare il bordo", + "latentsCollectionDescription": "Le immagini latenti possono essere passate tra i nodi.", + "imageCollection": "Raccolta Immagini" }, "boards": { "autoAddBoard": "Aggiungi automaticamente bacheca", @@ -1075,7 +1145,7 @@ "cancelSucceeded": "Elemento annullato", "batchQueuedDesc_one": "Aggiunta {{count}} sessione a {{direction}} della coda", "batchQueuedDesc_many": "Aggiunte {{count}} sessioni a {{direction}} della coda", - "batchQueuedDesc_other": "", + "batchQueuedDesc_other": "Aggiunte {{count}} sessioni a {{direction}} della coda", "graphQueued": "Grafico in coda", "batch": "Lotto", "clearQueueAlertDialog": "Lo svuotamento della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda.", @@ -1152,7 +1222,7 @@ "maxPrompts": "Numero massimo di prompt", "promptsWithCount_one": "{{count}} Prompt", "promptsWithCount_many": "{{count}} Prompt", - "promptsWithCount_other": "", + "promptsWithCount_other": "{{count}} Prompt", "dynamicPrompts": "Prompt dinamici" }, "popovers": { From bedb35af8c8eda316ceca1029ec4cfa8fa6a8537 Mon Sep 17 00:00:00 2001 From: Surisen Date: Sun, 15 Oct 2023 11:35:26 +0000 Subject: [PATCH 070/202] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 100.0% (1217 of 1217 strings) Co-authored-by: Surisen Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/zh_CN.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 5a2d829f95..23940542a9 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -638,7 +638,8 @@ "intermediatesCleared_other": "已清除 {{count}} 个中间产物", "clearIntermediatesDesc1": "清除中间产物会重置您的画布和 ControlNet 状态。", "intermediatesClearedFailed": "清除中间产物时出现问题", - "clearIntermediatesWithCount_other": "清除 {{count}} 个中间产物" + "clearIntermediatesWithCount_other": "清除 {{count}} 个中间产物", + "clearIntermediatesDisabled": "队列为空才能清理中间产物" }, "toast": { "tempFoldersEmptied": "临时文件夹已清空", From 388d36b83978566baf942bfbaf8c45d351a4cd87 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 16 Oct 2023 11:16:41 +1100 Subject: [PATCH 071/202] fix(db): use RLock instead of Lock Fixes issues where a db-accessing service wants to call db-accessing methods with locks. 
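To make the failure mode concrete, here is a minimal, self-contained sketch (hypothetical class and method names, not the actual InvokeAI service code): with a plain `threading.Lock`, a method that holds the lock and then calls another method on the same object that also acquires it will deadlock, whereas `threading.RLock` lets the same thread re-acquire a lock it already holds, which is the pattern this change accommodates.

    import threading

    class Service:
        """Hypothetical sketch; stands in for a db-accessing service."""

        def __init__(self) -> None:
            # With threading.Lock() here, get_many() would deadlock when it
            # calls get(), because the same thread tries to acquire a lock it
            # already holds. RLock permits re-entrant acquisition.
            self._lock = threading.RLock()

        def get(self, item_id: int) -> int:
            with self._lock:
                return item_id * 2  # stand-in for a single SELECT

        def get_many(self, item_ids: list[int]) -> list[int]:
            with self._lock:
                # Re-enters the same lock from the same thread.
                return [self.get(i) for i in item_ids]

    if __name__ == "__main__":
        print(Service().get_many([1, 2, 3]))  # [2, 4, 6]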
--- .../board_image_records/board_image_records_sqlite.py | 2 +- invokeai/app/services/board_records/board_records_sqlite.py | 2 +- invokeai/app/services/image_records/image_records_sqlite.py | 2 +- invokeai/app/services/item_storage/item_storage_sqlite.py | 2 +- invokeai/app/services/session_queue/session_queue_sqlite.py | 2 +- invokeai/app/services/shared/sqlite.py | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/invokeai/app/services/board_image_records/board_image_records_sqlite.py b/invokeai/app/services/board_image_records/board_image_records_sqlite.py index df7505b797..9f4e4379bc 100644 --- a/invokeai/app/services/board_image_records/board_image_records_sqlite.py +++ b/invokeai/app/services/board_image_records/board_image_records_sqlite.py @@ -12,7 +12,7 @@ from .board_image_records_base import BoardImageRecordStorageBase class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase): _conn: sqlite3.Connection _cursor: sqlite3.Cursor - _lock: threading.Lock + _lock: threading.RLock def __init__(self, db: SqliteDatabase) -> None: super().__init__() diff --git a/invokeai/app/services/board_records/board_records_sqlite.py b/invokeai/app/services/board_records/board_records_sqlite.py index b2ddc931f5..9e3423ab19 100644 --- a/invokeai/app/services/board_records/board_records_sqlite.py +++ b/invokeai/app/services/board_records/board_records_sqlite.py @@ -20,7 +20,7 @@ from .board_records_common import ( class SqliteBoardRecordStorage(BoardRecordStorageBase): _conn: sqlite3.Connection _cursor: sqlite3.Cursor - _lock: threading.Lock + _lock: threading.RLock def __init__(self, db: SqliteDatabase) -> None: super().__init__() diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py index c0783fdf2f..864f4eff00 100644 --- a/invokeai/app/services/image_records/image_records_sqlite.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -24,7 +24,7 @@ from .image_records_common import ( class SqliteImageRecordStorage(ImageRecordStorageBase): _conn: sqlite3.Connection _cursor: sqlite3.Cursor - _lock: threading.Lock + _lock: threading.RLock def __init__(self, db: SqliteDatabase) -> None: super().__init__() diff --git a/invokeai/app/services/item_storage/item_storage_sqlite.py b/invokeai/app/services/item_storage/item_storage_sqlite.py index b810baf9fd..1d6008e90f 100644 --- a/invokeai/app/services/item_storage/item_storage_sqlite.py +++ b/invokeai/app/services/item_storage/item_storage_sqlite.py @@ -17,7 +17,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): _conn: sqlite3.Connection _cursor: sqlite3.Cursor _id_field: str - _lock: threading.Lock + _lock: threading.RLock def __init__(self, db: SqliteDatabase, table_name: str, id_field: str = "id"): super().__init__() diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py index 0e12382392..eb82667be5 100644 --- a/invokeai/app/services/session_queue/session_queue_sqlite.py +++ b/invokeai/app/services/session_queue/session_queue_sqlite.py @@ -37,7 +37,7 @@ class SqliteSessionQueue(SessionQueueBase): __invoker: Invoker __conn: sqlite3.Connection __cursor: sqlite3.Cursor - __lock: threading.Lock + __lock: threading.RLock def start(self, invoker: Invoker) -> None: self.__invoker = invoker diff --git a/invokeai/app/services/shared/sqlite.py b/invokeai/app/services/shared/sqlite.py index c41dbbe606..3c75c3d6a7 100644 --- 
a/invokeai/app/services/shared/sqlite.py +++ b/invokeai/app/services/shared/sqlite.py @@ -9,7 +9,7 @@ sqlite_memory = ":memory:" class SqliteDatabase: conn: sqlite3.Connection - lock: threading.Lock + lock: threading.RLock _logger: Logger _config: InvokeAIAppConfig @@ -27,7 +27,7 @@ class SqliteDatabase: self._logger.info(f"Using database at {location}") self.conn = sqlite3.connect(location, check_same_thread=False) - self.lock = threading.Lock() + self.lock = threading.RLock() self.conn.row_factory = sqlite3.Row if self._config.log_sql: From 9a1aea9cafe225ae5d7fb70d2e5c8e27446237e1 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:17:36 +1100 Subject: [PATCH 072/202] fix(api): fix socketio breaking change Fix for breaking change in `python-socketio` 5.10.0 in which `enter_room` and `leave_room` were made coroutines. --- invokeai/app/api/sockets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/app/api/sockets.py b/invokeai/app/api/sockets.py index f41c38786c..5725901de0 100644 --- a/invokeai/app/api/sockets.py +++ b/invokeai/app/api/sockets.py @@ -30,8 +30,8 @@ class SocketIO: async def _handle_sub_queue(self, sid, data, *args, **kwargs): if "queue_id" in data: - self.__sio.enter_room(sid, data["queue_id"]) + await self.__sio.enter_room(sid, data["queue_id"]) async def _handle_unsub_queue(self, sid, data, *args, **kwargs): if "queue_id" in data: - self.__sio.enter_room(sid, data["queue_id"]) + await self.__sio.enter_room(sid, data["queue_id"]) From 3079c75a60c01f5478f3230a8f74159d347fb1a0 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Mon, 16 Oct 2023 08:35:32 -0400 Subject: [PATCH 073/202] (minor) Make it more clear that shape annotations are just comments and not commented lines of code. 
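For context, the comments being reworded annotate the head-splitting reshape inside the IP-Adapter attention processor. A minimal standalone sketch of that pattern (PyTorch, with made-up sizes; not the actual processor code) shows what the shape annotations describe:

    import torch

    # Hypothetical sizes, chosen only to make the shape comments concrete.
    batch_size, num_ip_images, ip_seq_len, num_heads, head_dim = 2, 1, 4, 8, 64

    # Expected ip_key shape: (batch_size, num_ip_images, ip_seq_len, head_dim * num_heads)
    ip_key = torch.randn(batch_size, num_ip_images, ip_seq_len, head_dim * num_heads)

    # Collapse (num_ip_images, ip_seq_len), split out the heads, move heads forward.
    ip_key = ip_key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

    # Expected ip_key shape: (batch_size, num_heads, num_ip_images * ip_seq_len, head_dim)
    assert ip_key.shape == (batch_size, num_heads, num_ip_images * ip_seq_len, head_dim)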
--- invokeai/backend/ip_adapter/attention_processor.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/invokeai/backend/ip_adapter/attention_processor.py b/invokeai/backend/ip_adapter/attention_processor.py index 96ab5f876a..c06d7d113c 100644 --- a/invokeai/backend/ip_adapter/attention_processor.py +++ b/invokeai/backend/ip_adapter/attention_processor.py @@ -138,29 +138,29 @@ class IPAttnProcessor2_0(torch.nn.Module): ip_hidden_states = ipa_embed - # ip_hidden_state.shape = (batch_size, num_ip_images, ip_seq_len, ip_image_embedding) + # Expected ip_hidden_state shape: (batch_size, num_ip_images, ip_seq_len, ip_image_embedding) ip_key = ipa_weights.to_k_ip(ip_hidden_states) ip_value = ipa_weights.to_v_ip(ip_hidden_states) - # ip_key.shape, ip_value.shape: (batch_size, num_ip_images, ip_seq_len, head_dim * num_heads) + # Expected ip_key and ip_value shape: (batch_size, num_ip_images, ip_seq_len, head_dim * num_heads) ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - # ip_key.shape, ip_value.shape: (batch_size, num_heads, num_ip_images * ip_seq_len, head_dim) + # Expected ip_key and ip_value shape: (batch_size, num_heads, num_ip_images * ip_seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 ip_hidden_states = F.scaled_dot_product_attention( query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False ) - # ip_hidden_states.shape: (batch_size, num_heads, query_seq_len, head_dim) + # Expected ip_hidden_states shape: (batch_size, num_heads, query_seq_len, head_dim) ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) ip_hidden_states = ip_hidden_states.to(query.dtype) - # ip_hidden_states.shape: (batch_size, query_seq_len, num_heads * head_dim) + # Expected ip_hidden_states shape: (batch_size, query_seq_len, num_heads * head_dim) hidden_states = hidden_states + scale * ip_hidden_states From 19c5435332d219f0fa6b89fdb334e37cb585b1f8 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 16 Oct 2023 15:11:46 +1100 Subject: [PATCH 074/202] fix(ui): copy image via img onload to blob There's a bug in chrome that screws with headers on fetch requests and 307 responses. This causes images to fail to copy in the commercial environment. This change attempts to get around this by copying images in a different way (similar to how the canvas works). When the user requests a copy we: - create an `<img>` element - set `crossOrigin` if needed - add an onload handler: - create a canvas element - draw image onto it - export canvas to blob This is wrapped in a promise which resolves to the blob, which can then be copied to clipboard. --- A customized version of Konva's `useImage` hook is also included, which returns the image blob in addition to the `<img>` element. Unfortunately, this hook is not suitable for use across the app, because it does all the image fetching up front, regardless of whether we actually want to copy the image. In other words, we'd have to fetch the whole image file even if the user is just skipping through image metadata, in order to have the blob to copy. The callback approach means we only fetch the image when the user clicks copy. The hook is thus currently unused.
--- .../frontend/web/src/common/hooks/useImage.ts | 102 ++++++++++++++++++ .../web/src/common/hooks/useImageUrlToBlob.ts | 40 +++++++ .../system/util/copyBlobToClipboard.ts | 2 +- .../ui/hooks/useCopyImageToClipboard.ts | 20 ++-- 4 files changed, 153 insertions(+), 11 deletions(-) create mode 100644 invokeai/frontend/web/src/common/hooks/useImage.ts create mode 100644 invokeai/frontend/web/src/common/hooks/useImageUrlToBlob.ts diff --git a/invokeai/frontend/web/src/common/hooks/useImage.ts b/invokeai/frontend/web/src/common/hooks/useImage.ts new file mode 100644 index 0000000000..60c973ce59 --- /dev/null +++ b/invokeai/frontend/web/src/common/hooks/useImage.ts @@ -0,0 +1,102 @@ +import { useLayoutEffect, useRef, useState } from 'react'; + +// Adapted from https://github.com/konvajs/use-image + +type CrossOrigin = 'anonymous' | 'use-credentials'; +type ReferrerPolicy = + | 'no-referrer' + | 'no-referrer-when-downgrade' + | 'origin' + | 'origin-when-cross-origin' + | 'same-origin' + | 'strict-origin' + | 'strict-origin-when-cross-origin' + | 'unsafe-url'; +type ImageStatus = 'loaded' | 'loading' | 'failed'; + +export const useImage = ( + url: string, + crossOrigin?: CrossOrigin, + referrerpolicy?: ReferrerPolicy +): [undefined | HTMLImageElement, ImageStatus, Blob | null] => { + // lets use refs for image and status + // so we can update them during render + // to have instant update in status/image when new data comes in + const statusRef = useRef('loading'); + const imageRef = useRef(); + const blobRef = useRef(null); + + // we are not going to use token + // but we need to just to trigger state update + const [_, setStateToken] = useState(0); + + // keep track of old props to trigger changes + const oldUrl = useRef(); + const oldCrossOrigin = useRef(); + const oldReferrerPolicy = useRef(); + + if ( + oldUrl.current !== url || + oldCrossOrigin.current !== crossOrigin || + oldReferrerPolicy.current !== referrerpolicy + ) { + statusRef.current = 'loading'; + imageRef.current = undefined; + oldUrl.current = url; + oldCrossOrigin.current = crossOrigin; + oldReferrerPolicy.current = referrerpolicy; + } + + useLayoutEffect( + function () { + if (!url) { + return; + } + const img = document.createElement('img'); + + function onload() { + statusRef.current = 'loaded'; + imageRef.current = img; + const canvas = document.createElement('canvas'); + canvas.width = img.clientWidth; + canvas.height = img.clientHeight; + + const context = canvas.getContext('2d'); + if (context) { + context.drawImage(img, 0, 0); + canvas.toBlob(function (blob) { + blobRef.current = blob; + }, 'image/png'); + } + setStateToken(Math.random()); + } + + function onerror() { + statusRef.current = 'failed'; + imageRef.current = undefined; + setStateToken(Math.random()); + } + + img.addEventListener('load', onload); + img.addEventListener('error', onerror); + if (crossOrigin) { + img.crossOrigin = crossOrigin; + } + if (referrerpolicy) { + img.referrerPolicy = referrerpolicy; + } + img.src = url; + + return function cleanup() { + img.removeEventListener('load', onload); + img.removeEventListener('error', onerror); + }; + }, + [url, crossOrigin, referrerpolicy] + ); + + // return array because it is better to use in case of several useImage hooks + // const [background, backgroundStatus] = useImage(url1); + // const [patter] = useImage(url2); + return [imageRef.current, statusRef.current, blobRef.current]; +}; diff --git a/invokeai/frontend/web/src/common/hooks/useImageUrlToBlob.ts 
b/invokeai/frontend/web/src/common/hooks/useImageUrlToBlob.ts new file mode 100644 index 0000000000..77538a929d --- /dev/null +++ b/invokeai/frontend/web/src/common/hooks/useImageUrlToBlob.ts @@ -0,0 +1,40 @@ +import { useCallback } from 'react'; +import { $authToken } from 'services/api/client'; + +/** + * Converts an image URL to a Blob by creating an element, drawing it to canvas + * and then converting the canvas to a Blob. + * + * @returns A function that takes a URL and returns a Promise that resolves with a Blob + */ +export const useImageUrlToBlob = () => { + const imageUrlToBlob = useCallback( + async (url: string) => + new Promise((resolve) => { + const img = new Image(); + img.onload = () => { + const canvas = document.createElement('canvas'); + canvas.width = img.width; + canvas.height = img.height; + + const context = canvas.getContext('2d'); + if (!context) { + return; + } + context.drawImage(img, 0, 0); + resolve( + new Promise((resolve) => { + canvas.toBlob(function (blob) { + resolve(blob); + }, 'image/png'); + }) + ); + }; + img.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous'; + img.src = url; + }), + [] + ); + + return imageUrlToBlob; +}; diff --git a/invokeai/frontend/web/src/features/system/util/copyBlobToClipboard.ts b/invokeai/frontend/web/src/features/system/util/copyBlobToClipboard.ts index cf59f2a687..b5e896f3bf 100644 --- a/invokeai/frontend/web/src/features/system/util/copyBlobToClipboard.ts +++ b/invokeai/frontend/web/src/features/system/util/copyBlobToClipboard.ts @@ -2,7 +2,7 @@ * Copies a blob to the clipboard by calling navigator.clipboard.write(). */ export const copyBlobToClipboard = ( - blob: Promise, + blob: Promise | Blob, type = 'image/png' ) => { navigator.clipboard.write([ diff --git a/invokeai/frontend/web/src/features/ui/hooks/useCopyImageToClipboard.ts b/invokeai/frontend/web/src/features/ui/hooks/useCopyImageToClipboard.ts index 4b42a45e93..ef9db44a9d 100644 --- a/invokeai/frontend/web/src/features/ui/hooks/useCopyImageToClipboard.ts +++ b/invokeai/frontend/web/src/features/ui/hooks/useCopyImageToClipboard.ts @@ -1,11 +1,13 @@ import { useAppToaster } from 'app/components/Toaster'; +import { useImageUrlToBlob } from 'common/hooks/useImageUrlToBlob'; +import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard'; import { useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard'; export const useCopyImageToClipboard = () => { const toaster = useAppToaster(); const { t } = useTranslation(); + const imageUrlToBlob = useImageUrlToBlob(); const isClipboardAPIAvailable = useMemo(() => { return Boolean(navigator.clipboard) && Boolean(window.ClipboardItem); @@ -23,15 +25,13 @@ export const useCopyImageToClipboard = () => { }); } try { - const getImageBlob = async () => { - const response = await fetch(image_url); - if (!response.ok) { - throw new Error(`Problem retrieving image data`); - } - return await response.blob(); - }; + const blob = await imageUrlToBlob(image_url); - copyBlobToClipboard(getImageBlob()); + if (!blob) { + throw new Error('Unable to create Blob'); + } + + copyBlobToClipboard(blob); toaster({ title: t('toast.imageCopied'), @@ -49,7 +49,7 @@ export const useCopyImageToClipboard = () => { }); } }, - [isClipboardAPIAvailable, t, toaster] + [imageUrlToBlob, isClipboardAPIAvailable, t, toaster] ); return { isClipboardAPIAvailable, copyImageToClipboard }; From c238a7f18b47bbdad9bc0489ef250786bce94424 
Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 24 Sep 2023 18:11:07 +1000 Subject: [PATCH 075/202] feat(api): chore: pydantic & fastapi upgrade Upgrade pydantic and fastapi to latest. - pydantic~=2.4.2 - fastapi~=0.103.2 - fastapi-events~=0.9.1 **Big Changes** There are a number of logic changes needed to support pydantic v2. Most changes are very simple, like using the new methods to serialize and deserialize models, but there are a few more complex changes. **Invocations** The biggest change relates to invocation creation, instantiation and validation. Because pydantic v2 moves all validation logic into the rust pydantic-core, we may no longer directly stick our fingers into the validation pie. Previously, we (ab)used models and fields to allow invocation fields to be optional at instantiation, but required when `invoke()` is called. We directly manipulated the fields and invocation models when calling `invoke()`. With pydantic v2, this is much more involved. Changes to the python wrapper do not propagate down to the rust validation logic - you have to rebuild the model. This causes problems with concurrent access to the invocation classes and is not a free operation. This logic has been totally refactored and we do not need to change the model any more. The details are in `baseinvocation.py`, in the `InputField` function and `BaseInvocation.invoke_internal()` method. In the end, this implementation is cleaner. **Invocation Fields** In pydantic v2, you can no longer directly add or remove fields from a model. Previously, we did this to add the `type` field to invocations. **Invocation Decorators** With pydantic v2, we instead use the imperative `create_model()` API to create a new model with the additional field. This is done in `baseinvocation.py` in the `invocation()` wrapper. A similar technique is used for `invocation_output()`. **Minor Changes** There are a number of minor changes around the pydantic v2 models API. **Protected `model_` Namespace** All models' pydantic-provided methods and attributes are prefixed with `model_` and this is considered a protected namespace. This causes some conflict, because "model" means something to us, and we have a ton of pydantic models with attributes starting with "model_". Fortunately, there are no direct conflicts. However, in any pydantic model where we define an attribute or method that starts with "model_", we must set the protected namespaces to an empty tuple. ```py class IPAdapterModelField(BaseModel): model_name: str = Field(description="Name of the IP-Adapter model") base_model: BaseModelType = Field(description="Base model") model_config = ConfigDict(protected_namespaces=()) ``` **Model Serialization** Pydantic models no longer have `Model.dict()` or `Model.json()`. Instead, we use `Model.model_dump()` or `Model.model_dump_json()`. **Model Deserialization** Pydantic models no longer have `Model.parse_obj()` or `Model.parse_raw()`, and there are no `parse_raw_as()` or `parse_obj_as()` functions. Instead, you need to create a `TypeAdapter` object to parse python objects or JSON into a model. ```py adapter_graph = TypeAdapter(Graph) deserialized_graph_from_json = adapter_graph.validate_json(graph_json) deserialized_graph_from_dict = adapter_graph.validate_python(graph_dict) ``` **Field Customisation** Pydantic `Field`s no longer accept arbitrary args. Now, you must put all additional arbitrary args in a `json_schema_extra` arg on the field.
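
As a quick illustration of the new serialization and field-customisation APIs described above, here's a minimal pydantic v2 sketch. The `ExampleField` model and its `ui_hidden` metadata are hypothetical and not taken from this diff:

```py
from pydantic import BaseModel, Field, TypeAdapter


class ExampleField(BaseModel):
    # Arbitrary metadata no longer goes in extra Field kwargs - it lives in json_schema_extra.
    value: int = Field(default=0, description="An example value", json_schema_extra={"ui_hidden": False})


example = ExampleField(value=42)

# Serialization: dict()/json() are replaced by model_dump()/model_dump_json().
as_dict = example.model_dump()
as_json = example.model_dump_json()

# Deserialization: parse_obj()/parse_raw() are replaced by a TypeAdapter.
adapter = TypeAdapter(ExampleField)
assert adapter.validate_python(as_dict) == example
assert adapter.validate_json(as_json) == example
```

The same `json_schema_extra` mechanism is what carries the UI metadata (`ui_type`, `ui_order`, and friends) through `InputField`/`OutputField` in the `baseinvocation.py` changes further down in this diff.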
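
To make the refactor described under **Invocations** above more concrete, here is a deliberately simplified, hypothetical sketch of the pattern: a field that may be fed by a connection is made optional for instantiation, its original requiredness is stashed in `json_schema_extra`, and that requirement is re-checked just before `invoke()` runs. The real logic in `baseinvocation.py` is more involved; `ExampleInvocation` below is not part of this diff:

```py
from typing import Optional

from pydantic import BaseModel, Field


class ExampleInvocation(BaseModel):
    # Required by invoke(), but optional at instantiation - the value may arrive
    # later via a graph connection rather than at node creation time.
    image_name: Optional[str] = Field(default=None, json_schema_extra={"orig_required": True})

    def invoke(self) -> str:
        return f"processed {self.image_name}"

    def invoke_internal(self) -> str:
        # Re-apply the original "required" constraint just before invocation.
        for name, field in self.model_fields.items():
            extra = field.json_schema_extra
            if isinstance(extra, dict) and extra.get("orig_required") and getattr(self, name) is None:
                raise ValueError(f"Missing required field '{name}'")
        return self.invoke()


node = ExampleInvocation()  # fine - the field is not needed until invocation
node.image_name = "example.png"  # e.g. populated from an upstream node's output
print(node.invoke_internal())  # "processed example.png"
```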
**Schema Customisation** FastAPI and pydantic schema generation now follows the OpenAPI version 3.1 spec. This necessitates two changes: - Our schema customization logic has been revised - Schema parsing to build node templates has been revised The specifics aren't important, but this does present additional surface area for bugs. **Performance Improvements** Pydantic v2 is a full rewrite with a rust backend. This offers a substantial performance improvement (pydantic claims 5x to 50x depending on the task). We'll notice this the most during serialization and deserialization of sessions/graphs, which happens very, very often - a couple of times per node. I haven't done any benchmarks, but anecdotally, graph execution is much faster. Also, very large graphs - like those with massive iterators - are much, much faster. --- invokeai/app/api/routers/images.py | 2 +- invokeai/app/api/routers/models.py | 109 +- invokeai/app/api/routers/utilities.py | 3 +- invokeai/app/api_app.py | 53 +- invokeai/app/cli/commands.py | 11 +- invokeai/app/invocations/baseinvocation.py | 553 +-- invokeai/app/invocations/collections.py | 8 +- invokeai/app/invocations/compel.py | 70 +- .../controlnet_image_processors.py | 24 +- invokeai/app/invocations/facetools.py | 4 +- invokeai/app/invocations/image.py | 154 +- invokeai/app/invocations/ip_adapter.py | 6 +- invokeai/app/invocations/latent.py | 149 +- invokeai/app/invocations/math.py | 15 +- invokeai/app/invocations/metadata.py | 2 +- invokeai/app/invocations/model.py | 78 +- invokeai/app/invocations/noise.py | 14 +- invokeai/app/invocations/onnx.py | 30 +- invokeai/app/invocations/param_easing.py | 31 +- invokeai/app/invocations/prompt.py | 31 +- invokeai/app/invocations/t2i_adapter.py | 4 +- invokeai/app/invocations/upscale.py | 3 + .../board_records/board_records_common.py | 12 +- invokeai/app/services/boards/boards_common.py | 2 +- invokeai/app/services/config/config_base.py | 47 +- .../app/services/config/config_default.py | 138 +- invokeai/app/services/events/events_base.py | 8 +- .../services/image_files/image_files_base.py | 3 +- .../image_records/image_records_base.py | 14 +- .../image_records/image_records_common.py | 8 +- .../image_records/image_records_sqlite.py | 20 +- invokeai/app/services/images/images_base.py | 2 +- invokeai/app/services/images/images_common.py | 6 +- .../app/services/images/images_default.py | 6 +- .../invocation_cache_memory.py | 9 +- .../invocation_processor_default.py | 10 +- .../invocation_stats_default.py | 2 +- .../item_storage/item_storage_sqlite.py | 17 +- .../model_manager/model_manager_base.py | 2 +- .../model_manager/model_manager_default.py | 2 +- .../session_queue/session_queue_common.py | 74 +- .../session_queue/session_queue_sqlite.py | 16 +- .../app/services/shared/default_graphs.py | 6 +- invokeai/app/services/shared/graph.py | 70 +- invokeai/app/services/shared/pagination.py | 7 +- invokeai/app/util/controlnet_utils.py | 2 +- invokeai/app/util/misc.py | 6 + invokeai/app/util/model_exclude_null.py | 4 +- invokeai/assets/__init__.py | 0 invokeai/backend/image_util/txt2mask.py | 14 +- invokeai/backend/image_util/util.py | 2 +- .../backend/install/invokeai_configure.py | 6 +- .../backend/model_management/model_manager.py | 16 +- .../model_management/models/__init__.py | 16 +- .../backend/model_management/models/base.py | 13 +- .../model_management/models/ip_adapter.py | 6 +- invokeai/backend/model_management/seamless.py | 8 +- .../diffusion/cross_attention_map_saving.py | 12 +- .../listeners/imageDeleted.ts | 4 +-
.../components/IAIMantineMultiSelect.tsx | 2 +- .../components/IAIMantineSearchableSelect.tsx | 2 +- .../common/components/IAIMantineSelect.tsx | 2 +- .../store/dynamicPromptsSlice.ts | 5 +- .../web/src/features/nodes/types/types.ts | 64 +- .../nodes/util/fieldTemplateBuilders.ts | 232 +- .../src/features/nodes/util/parseSchema.ts | 13 +- .../queue/components/common/QueueItemCard.tsx | 2 +- .../subpanels/MergeModelsPanel.tsx | 4 +- .../web/src/services/api/endpoints/images.ts | 4 +- .../frontend/web/src/services/api/schema.d.ts | 3486 ++++++----------- pyproject.toml | 110 +- tests/nodes/test_node_graph.py | 21 +- tests/nodes/test_session_queue.py | 10 +- tests/nodes/test_sqlite.py | 3 +- 74 files changed, 2788 insertions(+), 3116 deletions(-) create mode 100644 invokeai/assets/__init__.py diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 7b61887eb8..43a72943ee 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -42,7 +42,7 @@ async def upload_image( crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"), ) -> ImageDTO: """Uploads an image""" - if not file.content_type.startswith("image"): + if not file.content_type or not file.content_type.startswith("image"): raise HTTPException(status_code=415, detail="Not an image") contents = await file.read() diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py index a7b1f81252..018f3af02b 100644 --- a/invokeai/app/api/routers/models.py +++ b/invokeai/app/api/routers/models.py @@ -2,11 +2,11 @@ import pathlib -from typing import List, Literal, Optional, Union +from typing import Annotated, List, Literal, Optional, Union from fastapi import Body, Path, Query, Response from fastapi.routing import APIRouter -from pydantic import BaseModel, parse_obj_as +from pydantic import BaseModel, ConfigDict, Field, TypeAdapter from starlette.exceptions import HTTPException from invokeai.backend import BaseModelType, ModelType @@ -23,8 +23,14 @@ from ..dependencies import ApiDependencies models_router = APIRouter(prefix="/v1/models", tags=["models"]) UpdateModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] +update_models_response_adapter = TypeAdapter(UpdateModelResponse) + ImportModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] +import_models_response_adapter = TypeAdapter(ImportModelResponse) + ConvertModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] +convert_models_response_adapter = TypeAdapter(ConvertModelResponse) + MergeModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] ImportModelAttributes = Union[tuple(OPENAPI_MODEL_CONFIGS)] @@ -32,6 +38,11 @@ ImportModelAttributes = Union[tuple(OPENAPI_MODEL_CONFIGS)] class ModelsList(BaseModel): models: list[Union[tuple(OPENAPI_MODEL_CONFIGS)]] + model_config = ConfigDict(use_enum_values=True) + + +models_list_adapter = TypeAdapter(ModelsList) + @models_router.get( "/", @@ -49,7 +60,7 @@ async def list_models( models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type)) else: models_raw = ApiDependencies.invoker.services.model_manager.list_models(None, model_type) - models = parse_obj_as(ModelsList, {"models": models_raw}) + models = models_list_adapter.validate_python({"models": models_raw}) return models @@ -105,11 +116,14 @@ async def update_model( info.path = new_info.get("path") # replace empty string values with None/null to avoid phenomenon of vae: '' - info_dict = info.dict() + info_dict = info.model_dump() info_dict 
= {x: info_dict[x] if info_dict[x] else None for x in info_dict.keys()} ApiDependencies.invoker.services.model_manager.update_model( - model_name=model_name, base_model=base_model, model_type=model_type, model_attributes=info_dict + model_name=model_name, + base_model=base_model, + model_type=model_type, + model_attributes=info_dict, ) model_raw = ApiDependencies.invoker.services.model_manager.list_model( @@ -117,7 +131,7 @@ async def update_model( base_model=base_model, model_type=model_type, ) - model_response = parse_obj_as(UpdateModelResponse, model_raw) + model_response = update_models_response_adapter.validate_python(model_raw) except ModelNotFoundException as e: raise HTTPException(status_code=404, detail=str(e)) except ValueError as e: @@ -159,7 +173,8 @@ async def import_model( try: installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import( - items_to_import=items_to_import, prediction_type_helper=lambda x: prediction_types.get(prediction_type) + items_to_import=items_to_import, + prediction_type_helper=lambda x: prediction_types.get(prediction_type), ) info = installed_models.get(location) @@ -171,7 +186,7 @@ async def import_model( model_raw = ApiDependencies.invoker.services.model_manager.list_model( model_name=info.name, base_model=info.base_model, model_type=info.model_type ) - return parse_obj_as(ImportModelResponse, model_raw) + return import_models_response_adapter.validate_python(model_raw) except ModelNotFoundException as e: logger.error(str(e)) @@ -205,13 +220,18 @@ async def add_model( try: ApiDependencies.invoker.services.model_manager.add_model( - info.model_name, info.base_model, info.model_type, model_attributes=info.dict() + info.model_name, + info.base_model, + info.model_type, + model_attributes=info.model_dump(), ) logger.info(f"Successfully added {info.model_name}") model_raw = ApiDependencies.invoker.services.model_manager.list_model( - model_name=info.model_name, base_model=info.base_model, model_type=info.model_type + model_name=info.model_name, + base_model=info.base_model, + model_type=info.model_type, ) - return parse_obj_as(ImportModelResponse, model_raw) + return import_models_response_adapter.validate_python(model_raw) except ModelNotFoundException as e: logger.error(str(e)) raise HTTPException(status_code=404, detail=str(e)) @@ -223,7 +243,10 @@ async def add_model( @models_router.delete( "/{base_model}/{model_type}/{model_name}", operation_id="del_model", - responses={204: {"description": "Model deleted successfully"}, 404: {"description": "Model not found"}}, + responses={ + 204: {"description": "Model deleted successfully"}, + 404: {"description": "Model not found"}, + }, status_code=204, response_model=None, ) @@ -279,7 +302,7 @@ async def convert_model( model_raw = ApiDependencies.invoker.services.model_manager.list_model( model_name, base_model=base_model, model_type=model_type ) - response = parse_obj_as(ConvertModelResponse, model_raw) + response = convert_models_response_adapter.validate_python(model_raw) except ModelNotFoundException as e: raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found: {str(e)}") except ValueError as e: @@ -302,7 +325,8 @@ async def search_for_models( ) -> List[pathlib.Path]: if not search_path.is_dir(): raise HTTPException( - status_code=404, detail=f"The search path '{search_path}' does not exist or is not directory" + status_code=404, + detail=f"The search path '{search_path}' does not exist or is not directory", ) return 
ApiDependencies.invoker.services.model_manager.search_for_models(search_path) @@ -337,6 +361,26 @@ async def sync_to_config() -> bool: return True +# There's some weird pydantic-fastapi behaviour that requires this to be a separate class +# TODO: After a few updates, see if it works inside the route operation handler? +class MergeModelsBody(BaseModel): + model_names: List[str] = Field(description="model name", min_length=2, max_length=3) + merged_model_name: Optional[str] = Field(description="Name of destination model") + alpha: Optional[float] = Field(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5) + interp: Optional[MergeInterpolationMethod] = Field(description="Interpolation method") + force: Optional[bool] = Field( + description="Force merging of models created with different versions of diffusers", + default=False, + ) + + merge_dest_directory: Optional[str] = Field( + description="Save the merged model to the designated directory (with 'merged_model_name' appended)", + default=None, + ) + + model_config = ConfigDict(protected_namespaces=()) + + @models_router.put( "/merge/{base_model}", operation_id="merge_models", @@ -349,31 +393,23 @@ async def sync_to_config() -> bool: response_model=MergeModelResponse, ) async def merge_models( + body: Annotated[MergeModelsBody, Body(description="Model configuration", embed=True)], base_model: BaseModelType = Path(description="Base model"), - model_names: List[str] = Body(description="model name", min_items=2, max_items=3), - merged_model_name: Optional[str] = Body(description="Name of destination model"), - alpha: Optional[float] = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5), - interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method"), - force: Optional[bool] = Body( - description="Force merging of models created with different versions of diffusers", default=False - ), - merge_dest_directory: Optional[str] = Body( - description="Save the merged model to the designated directory (with 'merged_model_name' appended)", - default=None, - ), ) -> MergeModelResponse: """Convert a checkpoint model into a diffusers model""" logger = ApiDependencies.invoker.services.logger try: - logger.info(f"Merging models: {model_names} into {merge_dest_directory or ''}/{merged_model_name}") - dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None + logger.info( + f"Merging models: {body.model_names} into {body.merge_dest_directory or ''}/{body.merged_model_name}" + ) + dest = pathlib.Path(body.merge_dest_directory) if body.merge_dest_directory else None result = ApiDependencies.invoker.services.model_manager.merge_models( - model_names, - base_model, - merged_model_name=merged_model_name or "+".join(model_names), - alpha=alpha, - interp=interp, - force=force, + model_names=body.model_names, + base_model=base_model, + merged_model_name=body.merged_model_name or "+".join(body.model_names), + alpha=body.alpha, + interp=body.interp, + force=body.force, merge_dest_directory=dest, ) model_raw = ApiDependencies.invoker.services.model_manager.list_model( @@ -381,9 +417,12 @@ async def merge_models( base_model=base_model, model_type=ModelType.Main, ) - response = parse_obj_as(ConvertModelResponse, model_raw) + response = convert_models_response_adapter.validate_python(model_raw) except ModelNotFoundException: - raise HTTPException(status_code=404, detail=f"One or more of the models '{model_names}' not found") + raise HTTPException( + status_code=404, + 
detail=f"One or more of the models '{body.model_names}' not found", + ) except ValueError as e: raise HTTPException(status_code=400, detail=str(e)) return response diff --git a/invokeai/app/api/routers/utilities.py b/invokeai/app/api/routers/utilities.py index e664cb9070..476d10e2c0 100644 --- a/invokeai/app/api/routers/utilities.py +++ b/invokeai/app/api/routers/utilities.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Union from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator from fastapi import Body @@ -27,6 +27,7 @@ async def parse_dynamicprompts( combinatorial: bool = Body(default=True, description="Whether to use the combinatorial generator"), ) -> DynamicPromptsResponse: """Creates a batch process""" + generator: Union[RandomPromptGenerator, CombinatorialPromptGenerator] try: error: Optional[str] = None if combinatorial: diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index fdbd64b30d..5bbd8150c1 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -22,7 +22,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from fastapi.staticfiles import StaticFiles from fastapi_events.handlers.local import local_handler from fastapi_events.middleware import EventHandlerASGIMiddleware - from pydantic.schema import schema + from pydantic.json_schema import models_json_schema # noinspection PyUnresolvedReferences import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import) @@ -31,7 +31,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from ..backend.util.logging import InvokeAILogger from .api.dependencies import ApiDependencies - from .api.routers import app_info, board_images, boards, images, models, session_queue, sessions, utilities + from .api.routers import app_info, board_images, boards, images, models, session_queue, utilities from .api.sockets import SocketIO from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField @@ -51,7 +51,7 @@ mimetypes.add_type("text/css", ".css") # Create the app # TODO: create this all in a method so configuration/etc. can be passed in? 
-app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None) +app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None, separate_input_output_schemas=False) # Add event handler event_handler_id: int = id(app) @@ -63,18 +63,18 @@ app.add_middleware( socket_io = SocketIO(app) +app.add_middleware( + CORSMiddleware, + allow_origins=app_config.allow_origins, + allow_credentials=app_config.allow_credentials, + allow_methods=app_config.allow_methods, + allow_headers=app_config.allow_headers, +) + # Add startup event to load dependencies @app.on_event("startup") async def startup_event(): - app.add_middleware( - CORSMiddleware, - allow_origins=app_config.allow_origins, - allow_credentials=app_config.allow_credentials, - allow_methods=app_config.allow_methods, - allow_headers=app_config.allow_headers, - ) - ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger) @@ -85,12 +85,7 @@ async def shutdown_event(): # Include all routers -# TODO: REMOVE -# app.include_router( -# invocation.invocation_router, -# prefix = '/api') - -app.include_router(sessions.session_router, prefix="/api") +# app.include_router(sessions.session_router, prefix="/api") app.include_router(utilities.utilities_router, prefix="/api") @@ -117,6 +112,7 @@ def custom_openapi(): description="An API for invoking AI image operations", version="1.0.0", routes=app.routes, + separate_input_output_schemas=False, # https://fastapi.tiangolo.com/how-to/separate-openapi-schemas/ ) # Add all outputs @@ -127,29 +123,32 @@ def custom_openapi(): output_type = signature(invoker.invoke).return_annotation output_types.add(output_type) - output_schemas = schema(output_types, ref_prefix="#/components/schemas/") - for schema_key, output_schema in output_schemas["definitions"].items(): - output_schema["class"] = "output" - openapi_schema["components"]["schemas"][schema_key] = output_schema - + output_schemas = models_json_schema( + models=[(o, "serialization") for o in output_types], ref_template="#/components/schemas/{model}" + ) + for schema_key, output_schema in output_schemas[1]["$defs"].items(): # TODO: note that we assume the schema_key here is the TYPE.__name__ # This could break in some cases, figure out a better way to do it output_type_titles[schema_key] = output_schema["title"] # Add Node Editor UI helper schemas - ui_config_schemas = schema([UIConfigBase, _InputField, _OutputField], ref_prefix="#/components/schemas/") - for schema_key, ui_config_schema in ui_config_schemas["definitions"].items(): + ui_config_schemas = models_json_schema( + [(UIConfigBase, "serialization"), (_InputField, "serialization"), (_OutputField, "serialization")], + ref_template="#/components/schemas/{model}", + ) + for schema_key, ui_config_schema in ui_config_schemas[1]["$defs"].items(): openapi_schema["components"]["schemas"][schema_key] = ui_config_schema # Add a reference to the output type to additionalProperties of the invoker schema for invoker in all_invocations: invoker_name = invoker.__name__ - output_type = signature(invoker.invoke).return_annotation + output_type = signature(obj=invoker.invoke).return_annotation output_type_title = output_type_titles[output_type.__name__] - invoker_schema = openapi_schema["components"]["schemas"][invoker_name] + invoker_schema = openapi_schema["components"]["schemas"][f"{invoker_name}"] outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"} invoker_schema["output"] = outputs_ref invoker_schema["class"] = "invocation" + 
openapi_schema["components"]["schemas"][f"{output_type_title}"]["class"] = "output" from invokeai.backend.model_management.models import get_model_config_enums @@ -172,7 +171,7 @@ def custom_openapi(): return app.openapi_schema -app.openapi = custom_openapi +app.openapi = custom_openapi # type: ignore [method-assign] # this is a valid assignment # Override API doc favicons app.mount("/static", StaticFiles(directory=Path(web_dir.__path__[0], "static/dream_web")), name="static") diff --git a/invokeai/app/cli/commands.py b/invokeai/app/cli/commands.py index b000abcf6a..c21c6315ed 100644 --- a/invokeai/app/cli/commands.py +++ b/invokeai/app/cli/commands.py @@ -24,8 +24,8 @@ def add_field_argument(command_parser, name: str, field, default_override=None): if field.default_factory is None else field.default_factory() ) - if get_origin(field.type_) == Literal: - allowed_values = get_args(field.type_) + if get_origin(field.annotation) == Literal: + allowed_values = get_args(field.annotation) allowed_types = set() for val in allowed_values: allowed_types.add(type(val)) @@ -38,15 +38,15 @@ def add_field_argument(command_parser, name: str, field, default_override=None): type=field_type, default=default, choices=allowed_values, - help=field.field_info.description, + help=field.description, ) else: command_parser.add_argument( f"--{name}", dest=name, - type=field.type_, + type=field.annotation, default=default, - help=field.field_info.description, + help=field.description, ) @@ -142,7 +142,6 @@ class BaseCommand(ABC, BaseModel): """A CLI command""" # All commands must include a type name like this: - # type: Literal['your_command_name'] = 'your_command_name' @classmethod def get_all_subclasses(cls): diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index d82b94d0e9..8bd4a89f45 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -7,28 +7,16 @@ import re from abc import ABC, abstractmethod from enum import Enum from inspect import signature -from typing import ( - TYPE_CHECKING, - AbstractSet, - Any, - Callable, - ClassVar, - Literal, - Mapping, - Optional, - Type, - TypeVar, - Union, - get_args, - get_type_hints, -) +from types import UnionType +from typing import TYPE_CHECKING, Any, Callable, ClassVar, Iterable, Literal, Optional, Type, TypeVar, Union import semver -from pydantic import BaseModel, Field, validator -from pydantic.fields import ModelField, Undefined -from pydantic.typing import NoArgAnyCallable +from pydantic import BaseModel, ConfigDict, Field, create_model, field_validator +from pydantic.fields import _Unset +from pydantic_core import PydanticUndefined from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.app.util.misc import uuid_string if TYPE_CHECKING: from ..services.invocation_services import InvocationServices @@ -211,6 +199,11 @@ class _InputField(BaseModel): ui_choice_labels: Optional[dict[str, str]] item_default: Optional[Any] + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + ) + class _OutputField(BaseModel): """ @@ -224,34 +217,36 @@ class _OutputField(BaseModel): ui_type: Optional[UIType] ui_order: Optional[int] + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + ) + + +def get_type(klass: BaseModel) -> str: + """Helper function to get an invocation or invocation output's type. 
This is the default value of the `type` field.""" + return klass.model_fields["type"].default + def InputField( - *args: Any, - default: Any = Undefined, - default_factory: Optional[NoArgAnyCallable] = None, - alias: Optional[str] = None, - title: Optional[str] = None, - description: Optional[str] = None, - exclude: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - include: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - const: Optional[bool] = None, - gt: Optional[float] = None, - ge: Optional[float] = None, - lt: Optional[float] = None, - le: Optional[float] = None, - multiple_of: Optional[float] = None, - allow_inf_nan: Optional[bool] = None, - max_digits: Optional[int] = None, - decimal_places: Optional[int] = None, - min_items: Optional[int] = None, - max_items: Optional[int] = None, - unique_items: Optional[bool] = None, - min_length: Optional[int] = None, - max_length: Optional[int] = None, - allow_mutation: bool = True, - regex: Optional[str] = None, - discriminator: Optional[str] = None, - repr: bool = True, + # copied from pydantic's Field + default: Any = _Unset, + default_factory: Callable[[], Any] | None = _Unset, + title: str | None = _Unset, + description: str | None = _Unset, + pattern: str | None = _Unset, + strict: bool | None = _Unset, + gt: float | None = _Unset, + ge: float | None = _Unset, + lt: float | None = _Unset, + le: float | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + # custom input: Input = Input.Any, ui_type: Optional[UIType] = None, ui_component: Optional[UIComponent] = None, @@ -259,7 +254,6 @@ def InputField( ui_order: Optional[int] = None, ui_choice_labels: Optional[dict[str, str]] = None, item_default: Optional[Any] = None, - **kwargs: Any, ) -> Any: """ Creates an input field for an invocation. @@ -289,18 +283,26 @@ def InputField( : param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \ : param bool item_default: [None] Specifies the default item value, if this is a collection input. \ - Ignored for non-collection fields.. + Ignored for non-collection fields. """ - return Field( - *args, + + json_schema_extra_: dict[str, Any] = dict( + input=input, + ui_type=ui_type, + ui_component=ui_component, + ui_hidden=ui_hidden, + ui_order=ui_order, + item_default=item_default, + ui_choice_labels=ui_choice_labels, + ) + + field_args = dict( default=default, default_factory=default_factory, - alias=alias, title=title, description=description, - exclude=exclude, - include=include, - const=const, + pattern=pattern, + strict=strict, gt=gt, ge=ge, lt=lt, @@ -309,57 +311,92 @@ def InputField( allow_inf_nan=allow_inf_nan, max_digits=max_digits, decimal_places=decimal_places, - min_items=min_items, - max_items=max_items, - unique_items=unique_items, min_length=min_length, max_length=max_length, - allow_mutation=allow_mutation, - regex=regex, - discriminator=discriminator, - repr=repr, - input=input, - ui_type=ui_type, - ui_component=ui_component, - ui_hidden=ui_hidden, - ui_order=ui_order, - item_default=item_default, - ui_choice_labels=ui_choice_labels, - **kwargs, + ) + + """ + Invocation definitions have their fields typed correctly for their `invoke()` functions. 
+ This typing is often more specific than the actual invocation definition requires, because + fields may have values provided only by connections. + + For example, consider a ResizeImageInvocation with an `image: ImageField` field. + + `image` is required during the call to `invoke()`, but when the Python class is instantiated, + the field may not be present. This is fine, because that image field will be provided by + an ancestor node that outputs the image. + + So we'd like to type that `image` field as `Optional[ImageField]`. If we do that, however, then + we need to handle a lot of extra logic in the `invoke()` function to check if the field has a + value or not. This is very tedious. + + Ideally, the invocation definition would be able to specify that the field is required during + invocation, but optional during instantiation. So the field would be typed as `image: ImageField`, + but when calling the `invoke()` function, we raise an error if the field is not present. + + To do this, we need to do a bit of finagling to make the pydantic field optional, and then do + extra validation when calling `invoke()`. + + There is some additional logic here to cleanly create the pydantic field via the wrapper. + """ + + # Filter out field args not provided + provided_args = {k: v for (k, v) in field_args.items() if v is not PydanticUndefined} + + if (default is not PydanticUndefined) and (default_factory is not PydanticUndefined): + raise ValueError("Cannot specify both default and default_factory") + + # because we are manually making fields optional, we need to store the original required bool for reference later + if default is PydanticUndefined and default_factory is PydanticUndefined: + json_schema_extra_.update(dict(orig_required=True)) + else: + json_schema_extra_.update(dict(orig_required=False)) + + # make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one + if (input is Input.Any or input is Input.Connection) and default_factory is PydanticUndefined: + default_ = None if default is PydanticUndefined else default + provided_args.update(dict(default=default_)) + if default is not PydanticUndefined: + # before invoking, we'll grab the original default value and set it on the field if the field wasn't provided a value + json_schema_extra_.update(dict(default=default)) + json_schema_extra_.update(dict(orig_default=default)) + elif default is not PydanticUndefined and default_factory is PydanticUndefined: + default_ = default + provided_args.update(dict(default=default_)) + json_schema_extra_.update(dict(orig_default=default_)) + elif default_factory is not PydanticUndefined: + provided_args.update(dict(default_factory=default_factory)) + # TODO: cannot serialize default_factory...
+ # json_schema_extra_.update(dict(orig_default_factory=default_factory)) + + return Field( + **provided_args, + json_schema_extra=json_schema_extra_, ) def OutputField( - *args: Any, - default: Any = Undefined, - default_factory: Optional[NoArgAnyCallable] = None, - alias: Optional[str] = None, - title: Optional[str] = None, - description: Optional[str] = None, - exclude: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - include: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - const: Optional[bool] = None, - gt: Optional[float] = None, - ge: Optional[float] = None, - lt: Optional[float] = None, - le: Optional[float] = None, - multiple_of: Optional[float] = None, - allow_inf_nan: Optional[bool] = None, - max_digits: Optional[int] = None, - decimal_places: Optional[int] = None, - min_items: Optional[int] = None, - max_items: Optional[int] = None, - unique_items: Optional[bool] = None, - min_length: Optional[int] = None, - max_length: Optional[int] = None, - allow_mutation: bool = True, - regex: Optional[str] = None, - discriminator: Optional[str] = None, - repr: bool = True, + # copied from pydantic's Field + default: Any = _Unset, + default_factory: Callable[[], Any] | None = _Unset, + title: str | None = _Unset, + description: str | None = _Unset, + pattern: str | None = _Unset, + strict: bool | None = _Unset, + gt: float | None = _Unset, + ge: float | None = _Unset, + lt: float | None = _Unset, + le: float | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + # custom ui_type: Optional[UIType] = None, ui_hidden: bool = False, ui_order: Optional[int] = None, - **kwargs: Any, ) -> Any: """ Creates an output field for an invocation output. @@ -379,15 +416,12 @@ def OutputField( : param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \ """ return Field( - *args, default=default, default_factory=default_factory, - alias=alias, title=title, description=description, - exclude=exclude, - include=include, - const=const, + pattern=pattern, + strict=strict, gt=gt, ge=ge, lt=lt, @@ -396,19 +430,13 @@ def OutputField( allow_inf_nan=allow_inf_nan, max_digits=max_digits, decimal_places=decimal_places, - min_items=min_items, - max_items=max_items, - unique_items=unique_items, min_length=min_length, max_length=max_length, - allow_mutation=allow_mutation, - regex=regex, - discriminator=discriminator, - repr=repr, - ui_type=ui_type, - ui_hidden=ui_hidden, - ui_order=ui_order, - **kwargs, + json_schema_extra=dict( + ui_type=ui_type, + ui_hidden=ui_hidden, + ui_order=ui_order, + ), ) @@ -422,7 +450,13 @@ class UIConfigBase(BaseModel): title: Optional[str] = Field(default=None, description="The node's display name") category: Optional[str] = Field(default=None, description="The node's category") version: Optional[str] = Field( - default=None, description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".' + default=None, + description='The node\'s version. Should be a valid semver string e.g. 
"1.0.0" or "3.8.13".', + ) + + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, ) @@ -457,23 +491,38 @@ class BaseInvocationOutput(BaseModel): All invocation outputs must use the `@invocation_output` decorator to provide their unique type. """ - @classmethod - def get_all_subclasses_tuple(cls): - subclasses = [] - toprocess = [cls] - while len(toprocess) > 0: - next = toprocess.pop(0) - next_subclasses = next.__subclasses__() - subclasses.extend(next_subclasses) - toprocess.extend(next_subclasses) - return tuple(subclasses) + _output_classes: ClassVar[set[BaseInvocationOutput]] = set() - class Config: - @staticmethod - def schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: - if "required" not in schema or not isinstance(schema["required"], list): - schema["required"] = list() - schema["required"].extend(["type"]) + @classmethod + def register_output(cls, output: BaseInvocationOutput) -> None: + cls._output_classes.add(output) + + @classmethod + def get_outputs(cls) -> Iterable[BaseInvocationOutput]: + return cls._output_classes + + @classmethod + def get_outputs_union(cls) -> UnionType: + outputs_union = Union[tuple(cls._output_classes)] # type: ignore [valid-type] + return outputs_union # type: ignore [return-value] + + @classmethod + def get_output_types(cls) -> Iterable[str]: + return map(lambda i: get_type(i), BaseInvocationOutput.get_outputs()) + + @staticmethod + def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: + # Because we use a pydantic Literal field with default value for the invocation type, + # it will be typed as optional in the OpenAPI schema. Make it required manually. + if "required" not in schema or not isinstance(schema["required"], list): + schema["required"] = list() + schema["required"].extend(["type"]) + + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + json_schema_extra=json_schema_extra, + ) class RequiredConnectionException(Exception): @@ -498,104 +547,91 @@ class BaseInvocation(ABC, BaseModel): All invocations must use the `@invocation` decorator to provide their unique type. 
""" + _invocation_classes: ClassVar[set[BaseInvocation]] = set() + @classmethod - def get_all_subclasses(cls): + def register_invocation(cls, invocation: BaseInvocation) -> None: + cls._invocation_classes.add(invocation) + + @classmethod + def get_invocations_union(cls) -> UnionType: + invocations_union = Union[tuple(cls._invocation_classes)] # type: ignore [valid-type] + return invocations_union # type: ignore [return-value] + + @classmethod + def get_invocations(cls) -> Iterable[BaseInvocation]: app_config = InvokeAIAppConfig.get_config() - subclasses = [] - toprocess = [cls] - while len(toprocess) > 0: - next = toprocess.pop(0) - next_subclasses = next.__subclasses__() - subclasses.extend(next_subclasses) - toprocess.extend(next_subclasses) - allowed_invocations = [] - for sc in subclasses: + allowed_invocations: set[BaseInvocation] = set() + for sc in cls._invocation_classes: + invocation_type = get_type(sc) is_in_allowlist = ( - sc.__fields__.get("type").default in app_config.allow_nodes - if isinstance(app_config.allow_nodes, list) - else True + invocation_type in app_config.allow_nodes if isinstance(app_config.allow_nodes, list) else True ) - is_in_denylist = ( - sc.__fields__.get("type").default in app_config.deny_nodes - if isinstance(app_config.deny_nodes, list) - else False + invocation_type in app_config.deny_nodes if isinstance(app_config.deny_nodes, list) else False ) - if is_in_allowlist and not is_in_denylist: - allowed_invocations.append(sc) + allowed_invocations.add(sc) return allowed_invocations @classmethod - def get_invocations(cls): - return tuple(BaseInvocation.get_all_subclasses()) - - @classmethod - def get_invocations_map(cls): + def get_invocations_map(cls) -> dict[str, BaseInvocation]: # Get the type strings out of the literals and into a dictionary return dict( map( - lambda t: (get_args(get_type_hints(t)["type"])[0], t), - BaseInvocation.get_all_subclasses(), + lambda i: (get_type(i), i), + BaseInvocation.get_invocations(), ) ) @classmethod - def get_output_type(cls): + def get_invocation_types(cls) -> Iterable[str]: + return map(lambda i: get_type(i), BaseInvocation.get_invocations()) + + @classmethod + def get_output_type(cls) -> BaseInvocationOutput: return signature(cls.invoke).return_annotation - class Config: - validate_assignment = True - validate_all = True - - @staticmethod - def schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: - uiconfig = getattr(model_class, "UIConfig", None) - if uiconfig and hasattr(uiconfig, "title"): - schema["title"] = uiconfig.title - if uiconfig and hasattr(uiconfig, "tags"): - schema["tags"] = uiconfig.tags - if uiconfig and hasattr(uiconfig, "category"): - schema["category"] = uiconfig.category - if uiconfig and hasattr(uiconfig, "version"): - schema["version"] = uiconfig.version - if "required" not in schema or not isinstance(schema["required"], list): - schema["required"] = list() - schema["required"].extend(["type", "id"]) + @staticmethod + def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: + # Add the various UI-facing attributes to the schema. These are used to build the invocation templates. 
+ uiconfig = getattr(model_class, "UIConfig", None) + if uiconfig and hasattr(uiconfig, "title"): + schema["title"] = uiconfig.title + if uiconfig and hasattr(uiconfig, "tags"): + schema["tags"] = uiconfig.tags + if uiconfig and hasattr(uiconfig, "category"): + schema["category"] = uiconfig.category + if uiconfig and hasattr(uiconfig, "version"): + schema["version"] = uiconfig.version + if "required" not in schema or not isinstance(schema["required"], list): + schema["required"] = list() + schema["required"].extend(["type", "id"]) @abstractmethod def invoke(self, context: InvocationContext) -> BaseInvocationOutput: """Invoke with provided context and return outputs.""" pass - def __init__(self, **data): - # nodes may have required fields, that can accept input from connections - # on instantiation of the model, we need to exclude these from validation - restore = dict() - try: - field_names = list(self.__fields__.keys()) - for field_name in field_names: - # if the field is required and may get its value from a connection, exclude it from validation - field = self.__fields__[field_name] - _input = field.field_info.extra.get("input", None) - if _input in [Input.Connection, Input.Any] and field.required: - if field_name not in data: - restore[field_name] = self.__fields__.pop(field_name) - # instantiate the node, which will validate the data - super().__init__(**data) - finally: - # restore the removed fields - for field_name, field in restore.items(): - self.__fields__[field_name] = field - def invoke_internal(self, context: InvocationContext) -> BaseInvocationOutput: - for field_name, field in self.__fields__.items(): - _input = field.field_info.extra.get("input", None) - if field.required and not hasattr(self, field_name): - if _input == Input.Connection: - raise RequiredConnectionException(self.__fields__["type"].default, field_name) - elif _input == Input.Any: - raise MissingInputException(self.__fields__["type"].default, field_name) + for field_name, field in self.model_fields.items(): + if not field.json_schema_extra or callable(field.json_schema_extra): + # something has gone terribly awry, we should always have this and it should be a dict + continue + + # Here we handle the case where the field is optional in the pydantic class, but required + # in the `invoke()` method. + + orig_default = field.json_schema_extra.get("orig_default", PydanticUndefined) + orig_required = field.json_schema_extra.get("orig_required", True) + input_ = field.json_schema_extra.get("input", None) + if orig_default is not PydanticUndefined and not hasattr(self, field_name): + setattr(self, field_name, orig_default) + if orig_required and orig_default is PydanticUndefined and getattr(self, field_name) is None: + if input_ == Input.Connection: + raise RequiredConnectionException(self.model_fields["type"].default, field_name) + elif input_ == Input.Any: + raise MissingInputException(self.model_fields["type"].default, field_name) # skip node cache codepath if it's disabled if context.services.configuration.node_cache_size == 0: @@ -618,23 +654,31 @@ class BaseInvocation(ABC, BaseModel): return self.invoke(context) def get_type(self) -> str: - return self.__fields__["type"].default + return self.model_fields["type"].default id: str = Field( - description="The id of this instance of an invocation. Must be unique among all instances of invocations." + default_factory=uuid_string, + description="The id of this instance of an invocation. 
Must be unique among all instances of invocations.", ) - is_intermediate: bool = InputField( - default=False, description="Whether or not this is an intermediate invocation.", ui_type=UIType.IsIntermediate + is_intermediate: Optional[bool] = Field( + default=False, + description="Whether or not this is an intermediate invocation.", + json_schema_extra=dict(ui_type=UIType.IsIntermediate), ) - workflow: Optional[str] = InputField( + workflow: Optional[str] = Field( default=None, description="The workflow to save with the image", - ui_type=UIType.WorkflowField, + json_schema_extra=dict(ui_type=UIType.WorkflowField), + ) + use_cache: Optional[bool] = Field( + default=True, + description="Whether or not to use the cache", ) - use_cache: bool = InputField(default=True, description="Whether or not to use the cache") - @validator("workflow", pre=True) + @field_validator("workflow", mode="before") + @classmethod def validate_workflow_is_json(cls, v): + """We don't have a workflow schema in the backend, so we just check that it's valid JSON""" if v is None: return None try: @@ -645,8 +689,14 @@ class BaseInvocation(ABC, BaseModel): UIConfig: ClassVar[Type[UIConfigBase]] + model_config = ConfigDict( + validate_assignment=True, + json_schema_extra=json_schema_extra, + json_schema_serialization_defaults_required=True, + ) -GenericBaseInvocation = TypeVar("GenericBaseInvocation", bound=BaseInvocation) + +TBaseInvocation = TypeVar("TBaseInvocation", bound=BaseInvocation) def invocation( @@ -656,7 +706,7 @@ def invocation( category: Optional[str] = None, version: Optional[str] = None, use_cache: Optional[bool] = True, -) -> Callable[[Type[GenericBaseInvocation]], Type[GenericBaseInvocation]]: +) -> Callable[[Type[TBaseInvocation]], Type[TBaseInvocation]]: """ Adds metadata to an invocation. @@ -668,12 +718,15 @@ def invocation( :param Optional[bool] use_cache: Whether or not to use the invocation cache. Defaults to True. The user may override this in the workflow editor. """ - def wrapper(cls: Type[GenericBaseInvocation]) -> Type[GenericBaseInvocation]: + def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: # Validate invocation types on creation of invocation classes # TODO: ensure unique? if re.compile(r"^\S+$").match(invocation_type) is None: raise ValueError(f'"invocation_type" must consist of non-whitespace characters, got "{invocation_type}"') + if invocation_type in BaseInvocation.get_invocation_types(): + raise ValueError(f'Invocation type "{invocation_type}" already exists') + # Add OpenAPI schema extras uiconf_name = cls.__qualname__ + ".UIConfig" if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name: @@ -691,59 +744,83 @@ def invocation( raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e cls.UIConfig.version = version if use_cache is not None: - cls.__fields__["use_cache"].default = use_cache + cls.model_fields["use_cache"].default = use_cache + + # Add the invocation type to the model. + + # You'd be tempted to just add the type field and rebuild the model, like this: + # cls.model_fields.update(type=FieldInfo.from_annotated_attribute(Literal[invocation_type], invocation_type)) + # cls.model_rebuild() or cls.model_rebuild(force=True) + + # Unfortunately, because the `GraphInvocation` uses a forward ref in its `graph` field's annotation, this does + # not work. Instead, we have to create a new class with the type field and patch the original class with it. 
- # Add the invocation type to the pydantic model of the invocation invocation_type_annotation = Literal[invocation_type] # type: ignore - invocation_type_field = ModelField.infer( - name="type", - value=invocation_type, - annotation=invocation_type_annotation, - class_validators=None, - config=cls.__config__, + invocation_type_field = Field( + title="type", + default=invocation_type, ) - cls.__fields__.update({"type": invocation_type_field}) - # to support 3.9, 3.10 and 3.11, as described in https://docs.python.org/3/howto/annotations.html - if annotations := cls.__dict__.get("__annotations__", None): - annotations.update({"type": invocation_type_annotation}) + + docstring = cls.__doc__ + cls = create_model( + cls.__qualname__, + __base__=cls, + __module__=cls.__module__, + type=(invocation_type_annotation, invocation_type_field), + ) + cls.__doc__ = docstring + + # TODO: how to type this correctly? it's typed as ModelMetaclass, a private class in pydantic + BaseInvocation.register_invocation(cls) # type: ignore + return cls return wrapper -GenericBaseInvocationOutput = TypeVar("GenericBaseInvocationOutput", bound=BaseInvocationOutput) +TBaseInvocationOutput = TypeVar("TBaseInvocationOutput", bound=BaseInvocationOutput) def invocation_output( output_type: str, -) -> Callable[[Type[GenericBaseInvocationOutput]], Type[GenericBaseInvocationOutput]]: +) -> Callable[[Type[TBaseInvocationOutput]], Type[TBaseInvocationOutput]]: """ Adds metadata to an invocation output. :param str output_type: The type of the invocation output. Must be unique among all invocation outputs. """ - def wrapper(cls: Type[GenericBaseInvocationOutput]) -> Type[GenericBaseInvocationOutput]: + def wrapper(cls: Type[TBaseInvocationOutput]) -> Type[TBaseInvocationOutput]: # Validate output types on creation of invocation output classes # TODO: ensure unique? if re.compile(r"^\S+$").match(output_type) is None: raise ValueError(f'"output_type" must consist of non-whitespace characters, got "{output_type}"') - # Add the output type to the pydantic model of the invocation output - output_type_annotation = Literal[output_type] # type: ignore - output_type_field = ModelField.infer( - name="type", - value=output_type, - annotation=output_type_annotation, - class_validators=None, - config=cls.__config__, - ) - cls.__fields__.update({"type": output_type_field}) + if output_type in BaseInvocationOutput.get_output_types(): + raise ValueError(f'Invocation type "{output_type}" already exists') - # to support 3.9, 3.10 and 3.11, as described in https://docs.python.org/3/howto/annotations.html - if annotations := cls.__dict__.get("__annotations__", None): - annotations.update({"type": output_type_annotation}) + # Add the output type to the model. + + output_type_annotation = Literal[output_type] # type: ignore + output_type_field = Field( + title="type", + default=output_type, + ) + + docstring = cls.__doc__ + cls = create_model( + cls.__qualname__, + __base__=cls, + __module__=cls.__module__, + type=(output_type_annotation, output_type_field), + ) + cls.__doc__ = docstring + + BaseInvocationOutput.register_output(cls) # type: ignore # TODO: how to type this correctly? 
return cls return wrapper + + +GenericBaseModel = TypeVar("GenericBaseModel", bound=BaseModel) diff --git a/invokeai/app/invocations/collections.py b/invokeai/app/invocations/collections.py index 83863422f8..f26eebe1ff 100644 --- a/invokeai/app/invocations/collections.py +++ b/invokeai/app/invocations/collections.py @@ -2,7 +2,7 @@ import numpy as np -from pydantic import validator +from pydantic import ValidationInfo, field_validator from invokeai.app.invocations.primitives import IntegerCollectionOutput from invokeai.app.util.misc import SEED_MAX, get_random_seed @@ -20,9 +20,9 @@ class RangeInvocation(BaseInvocation): stop: int = InputField(default=10, description="The stop of the range") step: int = InputField(default=1, description="The step of the range") - @validator("stop") - def stop_gt_start(cls, v, values): - if "start" in values and v <= values["start"]: + @field_validator("stop") + def stop_gt_start(cls, v: int, info: ValidationInfo): + if "start" in info.data and v <= info.data["start"]: raise ValueError("stop must be greater than start") return v diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index b2634c2c56..b3ebc92320 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -1,6 +1,6 @@ import re from dataclasses import dataclass -from typing import List, Union +from typing import List, Optional, Union import torch from compel import Compel, ReturnedEmbeddingsType @@ -43,7 +43,13 @@ class ConditioningFieldData: # PerpNeg = "perp_neg" -@invocation("compel", title="Prompt", tags=["prompt", "compel"], category="conditioning", version="1.0.0") +@invocation( + "compel", + title="Prompt", + tags=["prompt", "compel"], + category="conditioning", + version="1.0.0", +) class CompelInvocation(BaseInvocation): """Parse prompt using compel package to conditioning.""" @@ -61,17 +67,19 @@ class CompelInvocation(BaseInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) -> ConditioningOutput: tokenizer_info = context.services.model_manager.get_model( - **self.clip.tokenizer.dict(), + **self.clip.tokenizer.model_dump(), context=context, ) text_encoder_info = context.services.model_manager.get_model( - **self.clip.text_encoder.dict(), + **self.clip.text_encoder.model_dump(), context=context, ) def _lora_loader(): for lora in self.clip.loras: - lora_info = context.services.model_manager.get_model(**lora.dict(exclude={"weight"}), context=context) + lora_info = context.services.model_manager.get_model( + **lora.model_dump(exclude={"weight"}), context=context + ) yield (lora_info.context.model, lora.weight) del lora_info return @@ -160,11 +168,11 @@ class SDXLPromptInvocationBase: zero_on_empty: bool, ): tokenizer_info = context.services.model_manager.get_model( - **clip_field.tokenizer.dict(), + **clip_field.tokenizer.model_dump(), context=context, ) text_encoder_info = context.services.model_manager.get_model( - **clip_field.text_encoder.dict(), + **clip_field.text_encoder.model_dump(), context=context, ) @@ -172,7 +180,11 @@ class SDXLPromptInvocationBase: if prompt == "" and zero_on_empty: cpu_text_encoder = text_encoder_info.context.model c = torch.zeros( - (1, cpu_text_encoder.config.max_position_embeddings, cpu_text_encoder.config.hidden_size), + ( + 1, + cpu_text_encoder.config.max_position_embeddings, + cpu_text_encoder.config.hidden_size, + ), dtype=text_encoder_info.context.cache.precision, ) if get_pooled: @@ -186,7 +198,9 @@ class SDXLPromptInvocationBase: def _lora_loader(): for lora in 
clip_field.loras: - lora_info = context.services.model_manager.get_model(**lora.dict(exclude={"weight"}), context=context) + lora_info = context.services.model_manager.get_model( + **lora.model_dump(exclude={"weight"}), context=context + ) yield (lora_info.context.model, lora.weight) del lora_info return @@ -273,8 +287,16 @@ class SDXLPromptInvocationBase: class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): """Parse prompt using compel package to conditioning.""" - prompt: str = InputField(default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea) - style: str = InputField(default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea) + prompt: str = InputField( + default="", + description=FieldDescriptions.compel_prompt, + ui_component=UIComponent.Textarea, + ) + style: str = InputField( + default="", + description=FieldDescriptions.compel_prompt, + ui_component=UIComponent.Textarea, + ) original_width: int = InputField(default=1024, description="") original_height: int = InputField(default=1024, description="") crop_top: int = InputField(default=0, description="") @@ -310,7 +332,9 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): [ c1, torch.zeros( - (c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2]), device=c1.device, dtype=c1.dtype + (c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2]), + device=c1.device, + dtype=c1.dtype, ), ], dim=1, @@ -321,7 +345,9 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): [ c2, torch.zeros( - (c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2]), device=c2.device, dtype=c2.dtype + (c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2]), + device=c2.device, + dtype=c2.dtype, ), ], dim=1, @@ -359,7 +385,9 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase """Parse prompt using compel package to conditioning.""" style: str = InputField( - default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea + default="", + description=FieldDescriptions.compel_prompt, + ui_component=UIComponent.Textarea, ) # TODO: ? 
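The .dict() to .model_dump() changes in this file (and repeated throughout the diff) are the mechanical part of the pydantic v2 migration; keyword arguments such as exclude keep their meaning. A small sketch, using a hypothetical LoraRef model rather than the real LoraInfo:

# Sketch only; not part of the diff. Shows the v1 -> v2 serialization rename.
from pydantic import BaseModel

class LoraRef(BaseModel):  # hypothetical stand-in for a lora reference
    name: str
    weight: float = 0.75

ref = LoraRef(name="some_lora")
print(ref.model_dump())                    # replaces ref.dict()
print(ref.model_dump(exclude={"weight"}))  # exclude/include keywords keep their meaning
print(ref.model_dump_json())               # replaces ref.json()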
original_width: int = InputField(default=1024, description="") original_height: int = InputField(default=1024, description="") @@ -403,10 +431,16 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase class ClipSkipInvocationOutput(BaseInvocationOutput): """Clip skip node output""" - clip: ClipField = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") + clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") -@invocation("clip_skip", title="CLIP Skip", tags=["clipskip", "clip", "skip"], category="conditioning", version="1.0.0") +@invocation( + "clip_skip", + title="CLIP Skip", + tags=["clipskip", "clip", "skip"], + category="conditioning", + version="1.0.0", +) class ClipSkipInvocation(BaseInvocation): """Skip layers in clip text_encoder model.""" @@ -421,7 +455,9 @@ class ClipSkipInvocation(BaseInvocation): def get_max_token_count( - tokenizer, prompt: Union[FlattenedPrompt, Blend, Conjunction], truncate_if_too_long=False + tokenizer, + prompt: Union[FlattenedPrompt, Blend, Conjunction], + truncate_if_too_long=False, ) -> int: if type(prompt) is Blend: blend: Blend = prompt diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 59a36935df..200c37d851 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -2,7 +2,7 @@ # initial implementation by Gregg Helt, 2023 # heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux from builtins import bool, float -from typing import Dict, List, Literal, Optional, Union +from typing import Dict, List, Literal, Union import cv2 import numpy as np @@ -24,7 +24,7 @@ from controlnet_aux import ( ) from controlnet_aux.util import HWC3, ade_palette from PIL import Image -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from invokeai.app.invocations.primitives import ImageField, ImageOutput from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin @@ -57,6 +57,8 @@ class ControlNetModelField(BaseModel): model_name: str = Field(description="Name of the ControlNet model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + class ControlField(BaseModel): image: ImageField = Field(description="The control image") @@ -71,7 +73,7 @@ class ControlField(BaseModel): control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use") resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use") - @validator("control_weight") + @field_validator("control_weight") def validate_control_weight(cls, v): """Validate that all control weights in the valid range""" if isinstance(v, list): @@ -124,9 +126,7 @@ class ControlNetInvocation(BaseInvocation): ) -@invocation( - "image_processor", title="Base Image Processor", tags=["controlnet"], category="controlnet", version="1.0.0" -) +# This invocation exists for other invocations to subclass it - do not register with @invocation! 
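The model_config = ConfigDict(protected_namespaces=()) line added to ControlNetModelField (and to the other *ModelField classes later in the diff) is needed because pydantic v2 reserves the "model_" prefix, so fields named model_name or model_type would otherwise trigger a protected-namespace warning. A sketch of the effect, with an illustrative class name:

# Sketch only; not part of the diff. Why protected_namespaces=() accompanies "model_*" fields.
from pydantic import BaseModel, ConfigDict, Field

class ModelRef(BaseModel):  # illustrative; mirrors the shape of ControlNetModelField
    # Without this line, pydantic v2 warns that "model_name" clashes with its reserved
    # "model_" namespace (model_dump, model_validate, ...).
    model_config = ConfigDict(protected_namespaces=())

    model_name: str = Field(description="Name of the model")

print(ModelRef(model_name="canny").model_name)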
class ImageProcessorInvocation(BaseInvocation): """Base class for invocations that preprocess images for ControlNet""" @@ -393,9 +393,9 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation): detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) - h: Optional[int] = InputField(default=512, ge=0, description="Content shuffle `h` parameter") - w: Optional[int] = InputField(default=512, ge=0, description="Content shuffle `w` parameter") - f: Optional[int] = InputField(default=256, ge=0, description="Content shuffle `f` parameter") + h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter") + w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter") + f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter") def run_processor(self, image): content_shuffle_processor = ContentShuffleDetector() @@ -575,14 +575,14 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation): def run_processor(self, image: Image.Image): image = image.convert("RGB") - image = np.array(image, dtype=np.uint8) - height, width = image.shape[:2] + np_image = np.array(image, dtype=np.uint8) + height, width = np_image.shape[:2] width_tile_size = min(self.color_map_tile_size, width) height_tile_size = min(self.color_map_tile_size, height) color_map = cv2.resize( - image, + np_image, (width // width_tile_size, height // height_tile_size), interpolation=cv2.INTER_CUBIC, ) diff --git a/invokeai/app/invocations/facetools.py b/invokeai/app/invocations/facetools.py index 31ab77bd1a..40e15e9476 100644 --- a/invokeai/app/invocations/facetools.py +++ b/invokeai/app/invocations/facetools.py @@ -8,7 +8,7 @@ import numpy as np from mediapipe.python.solutions.face_mesh import FaceMesh # type: ignore[import] from PIL import Image, ImageDraw, ImageFilter, ImageFont, ImageOps from PIL.Image import Image as ImageType -from pydantic import validator +from pydantic import field_validator import invokeai.assets.fonts as font_assets from invokeai.app.invocations.baseinvocation import ( @@ -550,7 +550,7 @@ class FaceMaskInvocation(BaseInvocation): ) invert_mask: bool = InputField(default=False, description="Toggle to invert the mask") - @validator("face_ids") + @field_validator("face_ids") def validate_comma_separated_ints(cls, v) -> str: comma_separated_ints_regex = re.compile(r"^\d*(,\d+)*$") if comma_separated_ints_regex.match(v) is None: diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 2d59a567c0..3a4f4eadac 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -36,7 +36,13 @@ class ShowImageInvocation(BaseInvocation): ) -@invocation("blank_image", title="Blank Image", tags=["image"], category="image", version="1.0.0") +@invocation( + "blank_image", + title="Blank Image", + tags=["image"], + category="image", + version="1.0.0", +) class BlankImageInvocation(BaseInvocation): """Creates a blank image and forwards it to the pipeline""" @@ -65,7 +71,13 @@ class BlankImageInvocation(BaseInvocation): ) -@invocation("img_crop", title="Crop Image", tags=["image", "crop"], category="image", version="1.0.0") +@invocation( + "img_crop", + title="Crop Image", + tags=["image", "crop"], + category="image", + version="1.0.0", +) class ImageCropInvocation(BaseInvocation): """Crops an image to a specified box. 
The box can be outside of the image.""" @@ -98,7 +110,13 @@ class ImageCropInvocation(BaseInvocation): ) -@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.0.1") +@invocation( + "img_paste", + title="Paste Image", + tags=["image", "paste"], + category="image", + version="1.0.1", +) class ImagePasteInvocation(BaseInvocation): """Pastes an image into another image.""" @@ -151,7 +169,13 @@ class ImagePasteInvocation(BaseInvocation): ) -@invocation("tomask", title="Mask from Alpha", tags=["image", "mask"], category="image", version="1.0.0") +@invocation( + "tomask", + title="Mask from Alpha", + tags=["image", "mask"], + category="image", + version="1.0.0", +) class MaskFromAlphaInvocation(BaseInvocation): """Extracts the alpha channel of an image as a mask.""" @@ -182,7 +206,13 @@ class MaskFromAlphaInvocation(BaseInvocation): ) -@invocation("img_mul", title="Multiply Images", tags=["image", "multiply"], category="image", version="1.0.0") +@invocation( + "img_mul", + title="Multiply Images", + tags=["image", "multiply"], + category="image", + version="1.0.0", +) class ImageMultiplyInvocation(BaseInvocation): """Multiplies two images together using `PIL.ImageChops.multiply()`.""" @@ -215,7 +245,13 @@ class ImageMultiplyInvocation(BaseInvocation): IMAGE_CHANNELS = Literal["A", "R", "G", "B"] -@invocation("img_chan", title="Extract Image Channel", tags=["image", "channel"], category="image", version="1.0.0") +@invocation( + "img_chan", + title="Extract Image Channel", + tags=["image", "channel"], + category="image", + version="1.0.0", +) class ImageChannelInvocation(BaseInvocation): """Gets a channel from an image.""" @@ -247,7 +283,13 @@ class ImageChannelInvocation(BaseInvocation): IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"] -@invocation("img_conv", title="Convert Image Mode", tags=["image", "convert"], category="image", version="1.0.0") +@invocation( + "img_conv", + title="Convert Image Mode", + tags=["image", "convert"], + category="image", + version="1.0.0", +) class ImageConvertInvocation(BaseInvocation): """Converts an image to a different mode.""" @@ -276,7 +318,13 @@ class ImageConvertInvocation(BaseInvocation): ) -@invocation("img_blur", title="Blur Image", tags=["image", "blur"], category="image", version="1.0.0") +@invocation( + "img_blur", + title="Blur Image", + tags=["image", "blur"], + category="image", + version="1.0.0", +) class ImageBlurInvocation(BaseInvocation): """Blurs an image""" @@ -330,7 +378,13 @@ PIL_RESAMPLING_MAP = { } -@invocation("img_resize", title="Resize Image", tags=["image", "resize"], category="image", version="1.0.0") +@invocation( + "img_resize", + title="Resize Image", + tags=["image", "resize"], + category="image", + version="1.0.0", +) class ImageResizeInvocation(BaseInvocation): """Resizes an image to specific dimensions""" @@ -359,7 +413,7 @@ class ImageResizeInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -370,7 +424,13 @@ class ImageResizeInvocation(BaseInvocation): ) -@invocation("img_scale", title="Scale Image", tags=["image", "scale"], category="image", version="1.0.0") +@invocation( + "img_scale", + title="Scale Image", + tags=["image", "scale"], + category="image", + version="1.0.0", +) class 
ImageScaleInvocation(BaseInvocation): """Scales an image by a factor""" @@ -411,7 +471,13 @@ class ImageScaleInvocation(BaseInvocation): ) -@invocation("img_lerp", title="Lerp Image", tags=["image", "lerp"], category="image", version="1.0.0") +@invocation( + "img_lerp", + title="Lerp Image", + tags=["image", "lerp"], + category="image", + version="1.0.0", +) class ImageLerpInvocation(BaseInvocation): """Linear interpolation of all pixels of an image""" @@ -444,7 +510,13 @@ class ImageLerpInvocation(BaseInvocation): ) -@invocation("img_ilerp", title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", version="1.0.0") +@invocation( + "img_ilerp", + title="Inverse Lerp Image", + tags=["image", "ilerp"], + category="image", + version="1.0.0", +) class ImageInverseLerpInvocation(BaseInvocation): """Inverse linear interpolation of all pixels of an image""" @@ -456,7 +528,7 @@ class ImageInverseLerpInvocation(BaseInvocation): image = context.services.images.get_pil_image(self.image.image_name) image_arr = numpy.asarray(image, dtype=numpy.float32) - image_arr = numpy.minimum(numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1) * 255 + image_arr = numpy.minimum(numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1) * 255 # type: ignore [assignment] ilerp_image = Image.fromarray(numpy.uint8(image_arr)) @@ -477,7 +549,13 @@ class ImageInverseLerpInvocation(BaseInvocation): ) -@invocation("img_nsfw", title="Blur NSFW Image", tags=["image", "nsfw"], category="image", version="1.0.0") +@invocation( + "img_nsfw", + title="Blur NSFW Image", + tags=["image", "nsfw"], + category="image", + version="1.0.0", +) class ImageNSFWBlurInvocation(BaseInvocation): """Add blur to NSFW-flagged images""" @@ -505,7 +583,7 @@ class ImageNSFWBlurInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -515,7 +593,7 @@ class ImageNSFWBlurInvocation(BaseInvocation): height=image_dto.height, ) - def _get_caution_img(self) -> Image: + def _get_caution_img(self) -> Image.Image: import invokeai.app.assets.images as image_assets caution = Image.open(Path(image_assets.__path__[0]) / "caution.png") @@ -523,7 +601,11 @@ class ImageNSFWBlurInvocation(BaseInvocation): @invocation( - "img_watermark", title="Add Invisible Watermark", tags=["image", "watermark"], category="image", version="1.0.0" + "img_watermark", + title="Add Invisible Watermark", + tags=["image", "watermark"], + category="image", + version="1.0.0", ) class ImageWatermarkInvocation(BaseInvocation): """Add an invisible watermark to an image""" @@ -544,7 +626,7 @@ class ImageWatermarkInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -555,7 +637,13 @@ class ImageWatermarkInvocation(BaseInvocation): ) -@invocation("mask_edge", title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", version="1.0.0") +@invocation( + "mask_edge", + title="Mask Edge", + tags=["image", "mask", "inpaint"], + category="image", + version="1.0.0", +) class MaskEdgeInvocation(BaseInvocation): """Applies an edge mask to an image""" @@ -601,7 +689,11 @@ class 
MaskEdgeInvocation(BaseInvocation): @invocation( - "mask_combine", title="Combine Masks", tags=["image", "mask", "multiply"], category="image", version="1.0.0" + "mask_combine", + title="Combine Masks", + tags=["image", "mask", "multiply"], + category="image", + version="1.0.0", ) class MaskCombineInvocation(BaseInvocation): """Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.""" @@ -632,7 +724,13 @@ class MaskCombineInvocation(BaseInvocation): ) -@invocation("color_correct", title="Color Correct", tags=["image", "color"], category="image", version="1.0.0") +@invocation( + "color_correct", + title="Color Correct", + tags=["image", "color"], + category="image", + version="1.0.0", +) class ColorCorrectInvocation(BaseInvocation): """ Shifts the colors of a target image to match the reference image, optionally @@ -742,7 +840,13 @@ class ColorCorrectInvocation(BaseInvocation): ) -@invocation("img_hue_adjust", title="Adjust Image Hue", tags=["image", "hue"], category="image", version="1.0.0") +@invocation( + "img_hue_adjust", + title="Adjust Image Hue", + tags=["image", "hue"], + category="image", + version="1.0.0", +) class ImageHueAdjustmentInvocation(BaseInvocation): """Adjusts the Hue of an image.""" @@ -980,7 +1084,7 @@ class SaveImageInvocation(BaseInvocation): image: ImageField = InputField(description=FieldDescriptions.image) board: Optional[BoardField] = InputField(default=None, description=FieldDescriptions.board, input=Input.Direct) - metadata: CoreMetadata = InputField( + metadata: Optional[CoreMetadata] = InputField( default=None, description=FieldDescriptions.core_metadata, ui_hidden=True, @@ -997,7 +1101,7 @@ class SaveImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py index 3e3a3d9b1f..81fd1f9f5d 100644 --- a/invokeai/app/invocations/ip_adapter.py +++ b/invokeai/app/invocations/ip_adapter.py @@ -2,7 +2,7 @@ import os from builtins import float from typing import List, Union -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -25,11 +25,15 @@ class IPAdapterModelField(BaseModel): model_name: str = Field(description="Name of the IP-Adapter model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + class CLIPVisionModelField(BaseModel): model_name: str = Field(description="Name of the CLIP Vision image encoder model") base_model: BaseModelType = Field(description="Base model (usually 'Any')") + model_config = ConfigDict(protected_namespaces=()) + class IPAdapterField(BaseModel): image: ImageField = Field(description="The IP-Adapter image prompt.") diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 7ca8cbbe6c..7ce0ae7a8a 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -19,7 +19,7 @@ from diffusers.models.attention_processor import ( ) from diffusers.schedulers import DPMSolverSDEScheduler from diffusers.schedulers import SchedulerMixin as Scheduler -from pydantic import validator +from pydantic import field_validator from torchvision.transforms.functional import resize as 
tv_resize from invokeai.app.invocations.ip_adapter import IPAdapterField @@ -84,12 +84,20 @@ class SchedulerOutput(BaseInvocationOutput): scheduler: SAMPLER_NAME_VALUES = OutputField(description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler) -@invocation("scheduler", title="Scheduler", tags=["scheduler"], category="latents", version="1.0.0") +@invocation( + "scheduler", + title="Scheduler", + tags=["scheduler"], + category="latents", + version="1.0.0", +) class SchedulerInvocation(BaseInvocation): """Selects a scheduler.""" scheduler: SAMPLER_NAME_VALUES = InputField( - default="euler", description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler + default="euler", + description=FieldDescriptions.scheduler, + ui_type=UIType.Scheduler, ) def invoke(self, context: InvocationContext) -> SchedulerOutput: @@ -97,7 +105,11 @@ class SchedulerInvocation(BaseInvocation): @invocation( - "create_denoise_mask", title="Create Denoise Mask", tags=["mask", "denoise"], category="latents", version="1.0.0" + "create_denoise_mask", + title="Create Denoise Mask", + tags=["mask", "denoise"], + category="latents", + version="1.0.0", ) class CreateDenoiseMaskInvocation(BaseInvocation): """Creates mask for denoising model run.""" @@ -106,7 +118,11 @@ class CreateDenoiseMaskInvocation(BaseInvocation): image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1) mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2) tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3) - fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32, ui_order=4) + fp32: bool = InputField( + default=DEFAULT_PRECISION == "float32", + description=FieldDescriptions.fp32, + ui_order=4, + ) def prep_mask_tensor(self, mask_image): if mask_image.mode != "L": @@ -134,7 +150,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation): if image is not None: vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), context=context, ) @@ -167,7 +183,7 @@ def get_scheduler( ) -> Scheduler: scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"]) orig_scheduler_info = context.services.model_manager.get_model( - **scheduler_info.dict(), + **scheduler_info.model_dump(), context=context, ) with orig_scheduler_info as orig_scheduler: @@ -209,34 +225,64 @@ class DenoiseLatentsInvocation(BaseInvocation): negative_conditioning: ConditioningField = InputField( description=FieldDescriptions.negative_cond, input=Input.Connection, ui_order=1 ) - noise: Optional[LatentsField] = InputField(description=FieldDescriptions.noise, input=Input.Connection, ui_order=3) + noise: Optional[LatentsField] = InputField( + default=None, + description=FieldDescriptions.noise, + input=Input.Connection, + ui_order=3, + ) steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps) cfg_scale: Union[float, List[float]] = InputField( default=7.5, ge=1, description=FieldDescriptions.cfg_scale, title="CFG Scale" ) - denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start) + denoising_start: float = InputField( + default=0.0, + ge=0, + le=1, + description=FieldDescriptions.denoising_start, + ) denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end) scheduler: SAMPLER_NAME_VALUES = InputField( - default="euler", 
description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler + default="euler", + description=FieldDescriptions.scheduler, + ui_type=UIType.Scheduler, ) - unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection, title="UNet", ui_order=2) - control: Union[ControlField, list[ControlField]] = InputField( + unet: UNetField = InputField( + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", + ui_order=2, + ) + control: Optional[Union[ControlField, list[ControlField]]] = InputField( default=None, input=Input.Connection, ui_order=5, ) ip_adapter: Optional[Union[IPAdapterField, list[IPAdapterField]]] = InputField( - description=FieldDescriptions.ip_adapter, title="IP-Adapter", default=None, input=Input.Connection, ui_order=6 + description=FieldDescriptions.ip_adapter, + title="IP-Adapter", + default=None, + input=Input.Connection, + ui_order=6, ) - t2i_adapter: Union[T2IAdapterField, list[T2IAdapterField]] = InputField( - description=FieldDescriptions.t2i_adapter, title="T2I-Adapter", default=None, input=Input.Connection, ui_order=7 + t2i_adapter: Optional[Union[T2IAdapterField, list[T2IAdapterField]]] = InputField( + description=FieldDescriptions.t2i_adapter, + title="T2I-Adapter", + default=None, + input=Input.Connection, + ui_order=7, + ) + latents: Optional[LatentsField] = InputField( + default=None, description=FieldDescriptions.latents, input=Input.Connection ) - latents: Optional[LatentsField] = InputField(description=FieldDescriptions.latents, input=Input.Connection) denoise_mask: Optional[DenoiseMaskField] = InputField( - default=None, description=FieldDescriptions.mask, input=Input.Connection, ui_order=8 + default=None, + description=FieldDescriptions.mask, + input=Input.Connection, + ui_order=8, ) - @validator("cfg_scale") + @field_validator("cfg_scale") def ge_one(cls, v): """validate that all cfg_scale values are >= 1""" if isinstance(v, list): @@ -259,7 +305,7 @@ class DenoiseLatentsInvocation(BaseInvocation): stable_diffusion_step_callback( context=context, intermediate_state=intermediate_state, - node=self.dict(), + node=self.model_dump(), source_node_id=source_node_id, base_model=base_model, ) @@ -451,9 +497,10 @@ class DenoiseLatentsInvocation(BaseInvocation): # models are needed in memory. This would help to reduce peak memory utilization in low-memory environments. with image_encoder_model_info as image_encoder_model: # Get image embeddings from CLIP and ImageProjModel. - image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds( - input_image, image_encoder_model - ) + ( + image_prompt_embeds, + uncond_image_prompt_embeds, + ) = ip_adapter_model.get_image_embeds(input_image, image_encoder_model) conditioning_data.ip_adapter_conditioning.append( IPAdapterConditioningInfo(image_prompt_embeds, uncond_image_prompt_embeds) ) @@ -628,7 +675,10 @@ class DenoiseLatentsInvocation(BaseInvocation): # TODO(ryand): I have hard-coded `do_classifier_free_guidance=True` to mirror the behaviour of ControlNets, # below. Investigate whether this is appropriate. 
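The validator changes here follow the same pydantic v2 recipe as the earlier stop_gt_start hunk: @validator becomes @field_validator, and a validator that needs sibling fields reads them from ValidationInfo.data instead of the old "values" dict. A minimal sketch; the Range model is illustrative, not from the repository.

# Sketch only; not part of the diff. The pydantic v2 validator style used in these hunks.
from pydantic import BaseModel, ValidationInfo, field_validator

class Range(BaseModel):  # illustrative example, shaped like RangeInvocation's fields
    start: int = 0
    stop: int = 10

    @field_validator("stop")
    def stop_gt_start(cls, v: int, info: ValidationInfo) -> int:
        # info.data holds the already-validated sibling fields (v1 passed a "values" dict)
        if "start" in info.data and v <= info.data["start"]:
            raise ValueError("stop must be greater than start")
        return v

Range(start=0, stop=10)   # ok
# Range(start=5, stop=5)  # would raise a ValidationError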
t2i_adapter_data = self.run_t2i_adapters( - context, self.t2i_adapter, latents.shape, do_classifier_free_guidance=True + context, + self.t2i_adapter, + latents.shape, + do_classifier_free_guidance=True, ) # Get the source node id (we are invoking the prepared node) @@ -641,7 +691,7 @@ class DenoiseLatentsInvocation(BaseInvocation): def _lora_loader(): for lora in self.unet.loras: lora_info = context.services.model_manager.get_model( - **lora.dict(exclude={"weight"}), + **lora.model_dump(exclude={"weight"}), context=context, ) yield (lora_info.context.model, lora.weight) @@ -649,7 +699,7 @@ class DenoiseLatentsInvocation(BaseInvocation): return unet_info = context.services.model_manager.get_model( - **self.unet.unet.dict(), + **self.unet.unet.model_dump(), context=context, ) with ( @@ -700,7 +750,10 @@ class DenoiseLatentsInvocation(BaseInvocation): denoising_end=self.denoising_end, ) - result_latents, result_attention_map_saver = pipeline.latents_from_embeddings( + ( + result_latents, + result_attention_map_saver, + ) = pipeline.latents_from_embeddings( latents=latents, timesteps=timesteps, init_timestep=init_timestep, @@ -728,7 +781,11 @@ class DenoiseLatentsInvocation(BaseInvocation): @invocation( - "l2i", title="Latents to Image", tags=["latents", "image", "vae", "l2i"], category="latents", version="1.0.0" + "l2i", + title="Latents to Image", + tags=["latents", "image", "vae", "l2i"], + category="latents", + version="1.0.0", ) class LatentsToImageInvocation(BaseInvocation): """Generates an image from latents.""" @@ -743,7 +800,7 @@ class LatentsToImageInvocation(BaseInvocation): ) tiled: bool = InputField(default=False, description=FieldDescriptions.tiled) fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32) - metadata: CoreMetadata = InputField( + metadata: Optional[CoreMetadata] = InputField( default=None, description=FieldDescriptions.core_metadata, ui_hidden=True, @@ -754,7 +811,7 @@ class LatentsToImageInvocation(BaseInvocation): latents = context.services.latents.get(self.latents.latents_name) vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), context=context, ) @@ -816,7 +873,7 @@ class LatentsToImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -830,7 +887,13 @@ class LatentsToImageInvocation(BaseInvocation): LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear", "bilinear", "bicubic", "trilinear", "area", "nearest-exact"] -@invocation("lresize", title="Resize Latents", tags=["latents", "resize"], category="latents", version="1.0.0") +@invocation( + "lresize", + title="Resize Latents", + tags=["latents", "resize"], + category="latents", + version="1.0.0", +) class ResizeLatentsInvocation(BaseInvocation): """Resizes latents to explicit width/height (in pixels). 
Provided dimensions are floor-divided by 8.""" @@ -876,7 +939,13 @@ class ResizeLatentsInvocation(BaseInvocation): return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed) -@invocation("lscale", title="Scale Latents", tags=["latents", "resize"], category="latents", version="1.0.0") +@invocation( + "lscale", + title="Scale Latents", + tags=["latents", "resize"], + category="latents", + version="1.0.0", +) class ScaleLatentsInvocation(BaseInvocation): """Scales latents by a given factor.""" @@ -915,7 +984,11 @@ class ScaleLatentsInvocation(BaseInvocation): @invocation( - "i2l", title="Image to Latents", tags=["latents", "image", "vae", "i2l"], category="latents", version="1.0.0" + "i2l", + title="Image to Latents", + tags=["latents", "image", "vae", "i2l"], + category="latents", + version="1.0.0", ) class ImageToLatentsInvocation(BaseInvocation): """Encodes an image into latents.""" @@ -979,7 +1052,7 @@ class ImageToLatentsInvocation(BaseInvocation): image = context.services.images.get_pil_image(self.image.image_name) vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), context=context, ) @@ -1007,7 +1080,13 @@ class ImageToLatentsInvocation(BaseInvocation): return vae.encode(image_tensor).latents -@invocation("lblend", title="Blend Latents", tags=["latents", "blend"], category="latents", version="1.0.0") +@invocation( + "lblend", + title="Blend Latents", + tags=["latents", "blend"], + category="latents", + version="1.0.0", +) class BlendLatentsInvocation(BaseInvocation): """Blend two latents using a given alpha. Latents must have same size.""" diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index b52cbb28bf..2aefa1def4 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -3,7 +3,7 @@ from typing import Literal import numpy as np -from pydantic import validator +from pydantic import field_validator from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput @@ -72,7 +72,14 @@ class RandomIntInvocation(BaseInvocation): return IntegerOutput(value=np.random.randint(self.low, self.high)) -@invocation("rand_float", title="Random Float", tags=["math", "float", "random"], category="math", version="1.0.0") +@invocation( + "rand_float", + title="Random Float", + tags=["math", "float", "random"], + category="math", + version="1.0.1", + use_cache=False, +) class RandomFloatInvocation(BaseInvocation): """Outputs a single random float""" @@ -178,7 +185,7 @@ class IntegerMathInvocation(BaseInvocation): a: int = InputField(default=0, description=FieldDescriptions.num_1) b: int = InputField(default=0, description=FieldDescriptions.num_2) - @validator("b") + @field_validator("b") def no_unrepresentable_results(cls, v, values): if values["operation"] == "DIV" and v == 0: raise ValueError("Cannot divide by zero") @@ -252,7 +259,7 @@ class FloatMathInvocation(BaseInvocation): a: float = InputField(default=0, description=FieldDescriptions.num_1) b: float = InputField(default=0, description=FieldDescriptions.num_2) - @validator("b") + @field_validator("b") def no_unrepresentable_results(cls, v, values): if values["operation"] == "DIV" and v == 0: raise ValueError("Cannot divide by zero") diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 449f332387..9578fc3ae9 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -223,4 +223,4 @@ class 
MetadataAccumulatorInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> MetadataAccumulatorOutput: """Collects and outputs a CoreMetadata object""" - return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.dict())) + return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.model_dump())) diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index 571cb2e730..dfa1075d6e 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -1,7 +1,7 @@ import copy from typing import List, Optional -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from ...backend.model_management import BaseModelType, ModelType, SubModelType from .baseinvocation import ( @@ -24,6 +24,8 @@ class ModelInfo(BaseModel): model_type: ModelType = Field(description="Info to load submodel") submodel: Optional[SubModelType] = Field(default=None, description="Info to load submodel") + model_config = ConfigDict(protected_namespaces=()) + class LoraInfo(ModelInfo): weight: float = Field(description="Lora's weight which to use when apply to model") @@ -65,6 +67,8 @@ class MainModelField(BaseModel): base_model: BaseModelType = Field(description="Base model") model_type: ModelType = Field(description="Model Type") + model_config = ConfigDict(protected_namespaces=()) + class LoRAModelField(BaseModel): """LoRA model field""" @@ -72,8 +76,16 @@ class LoRAModelField(BaseModel): model_name: str = Field(description="Name of the LoRA model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) -@invocation("main_model_loader", title="Main Model", tags=["model"], category="model", version="1.0.0") + +@invocation( + "main_model_loader", + title="Main Model", + tags=["model"], + category="model", + version="1.0.0", +) class MainModelLoaderInvocation(BaseInvocation): """Loads a main model, outputting its submodels.""" @@ -180,10 +192,16 @@ class LoraLoaderInvocation(BaseInvocation): lora: LoRAModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA") weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight) unet: Optional[UNetField] = InputField( - default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + default=None, + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", ) clip: Optional[ClipField] = InputField( - default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP" + default=None, + description=FieldDescriptions.clip, + input=Input.Connection, + title="CLIP", ) def invoke(self, context: InvocationContext) -> LoraLoaderOutput: @@ -244,20 +262,35 @@ class SDXLLoraLoaderOutput(BaseInvocationOutput): clip2: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 2") -@invocation("sdxl_lora_loader", title="SDXL LoRA", tags=["lora", "model"], category="model", version="1.0.0") +@invocation( + "sdxl_lora_loader", + title="SDXL LoRA", + tags=["lora", "model"], + category="model", + version="1.0.0", +) class SDXLLoraLoaderInvocation(BaseInvocation): """Apply selected lora to unet and text_encoder.""" lora: LoRAModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA") weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight) unet: Optional[UNetField] = InputField( - default=None, 
description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + default=None, + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", ) clip: Optional[ClipField] = InputField( - default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1" + default=None, + description=FieldDescriptions.clip, + input=Input.Connection, + title="CLIP 1", ) clip2: Optional[ClipField] = InputField( - default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2" + default=None, + description=FieldDescriptions.clip, + input=Input.Connection, + title="CLIP 2", ) def invoke(self, context: InvocationContext) -> SDXLLoraLoaderOutput: @@ -330,6 +363,8 @@ class VAEModelField(BaseModel): model_name: str = Field(description="Name of the model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + @invocation_output("vae_loader_output") class VaeLoaderOutput(BaseInvocationOutput): @@ -343,7 +378,10 @@ class VaeLoaderInvocation(BaseInvocation): """Loads a VAE model, outputting a VaeLoaderOutput""" vae_model: VAEModelField = InputField( - description=FieldDescriptions.vae_model, input=Input.Direct, ui_type=UIType.VaeModel, title="VAE" + description=FieldDescriptions.vae_model, + input=Input.Direct, + ui_type=UIType.VaeModel, + title="VAE", ) def invoke(self, context: InvocationContext) -> VaeLoaderOutput: @@ -372,19 +410,31 @@ class VaeLoaderInvocation(BaseInvocation): class SeamlessModeOutput(BaseInvocationOutput): """Modified Seamless Model output""" - unet: Optional[UNetField] = OutputField(description=FieldDescriptions.unet, title="UNet") - vae: Optional[VaeField] = OutputField(description=FieldDescriptions.vae, title="VAE") + unet: Optional[UNetField] = OutputField(default=None, description=FieldDescriptions.unet, title="UNet") + vae: Optional[VaeField] = OutputField(default=None, description=FieldDescriptions.vae, title="VAE") -@invocation("seamless", title="Seamless", tags=["seamless", "model"], category="model", version="1.0.0") +@invocation( + "seamless", + title="Seamless", + tags=["seamless", "model"], + category="model", + version="1.0.0", +) class SeamlessModeInvocation(BaseInvocation): """Applies the seamless transformation to the Model UNet and VAE.""" unet: Optional[UNetField] = InputField( - default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + default=None, + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", ) vae: Optional[VaeField] = InputField( - default=None, description=FieldDescriptions.vae_model, input=Input.Connection, title="VAE" + default=None, + description=FieldDescriptions.vae_model, + input=Input.Connection, + title="VAE", ) seamless_y: bool = InputField(default=True, input=Input.Any, description="Specify whether Y axis is seamless") seamless_x: bool = InputField(default=True, input=Input.Any, description="Specify whether X axis is seamless") diff --git a/invokeai/app/invocations/noise.py b/invokeai/app/invocations/noise.py index c46747aa89..3c1651a2f0 100644 --- a/invokeai/app/invocations/noise.py +++ b/invokeai/app/invocations/noise.py @@ -2,7 +2,7 @@ import torch -from pydantic import validator +from pydantic import field_validator from invokeai.app.invocations.latent import LatentsField from invokeai.app.util.misc import SEED_MAX, get_random_seed @@ -65,7 +65,7 @@ Nodes class NoiseOutput(BaseInvocationOutput): """Invocation noise output""" - noise: LatentsField = 
OutputField(default=None, description=FieldDescriptions.noise) + noise: LatentsField = OutputField(description=FieldDescriptions.noise) width: int = OutputField(description=FieldDescriptions.width) height: int = OutputField(description=FieldDescriptions.height) @@ -78,7 +78,13 @@ def build_noise_output(latents_name: str, latents: torch.Tensor, seed: int): ) -@invocation("noise", title="Noise", tags=["latents", "noise"], category="latents", version="1.0.0") +@invocation( + "noise", + title="Noise", + tags=["latents", "noise"], + category="latents", + version="1.0.0", +) class NoiseInvocation(BaseInvocation): """Generates latent noise.""" @@ -105,7 +111,7 @@ class NoiseInvocation(BaseInvocation): description="Use CPU for noise generation (for reproducible results across platforms)", ) - @validator("seed", pre=True) + @field_validator("seed", mode="before") def modulo_seed(cls, v): """Returns the seed modulo (SEED_MAX + 1) to ensure it is within the valid range.""" return v % (SEED_MAX + 1) diff --git a/invokeai/app/invocations/onnx.py b/invokeai/app/invocations/onnx.py index 35f8ed965e..3f4f688cf4 100644 --- a/invokeai/app/invocations/onnx.py +++ b/invokeai/app/invocations/onnx.py @@ -9,7 +9,7 @@ from typing import List, Literal, Optional, Union import numpy as np import torch from diffusers.image_processor import VaeImageProcessor -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from tqdm import tqdm from invokeai.app.invocations.metadata import CoreMetadata @@ -63,14 +63,17 @@ class ONNXPromptInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> ConditioningOutput: tokenizer_info = context.services.model_manager.get_model( - **self.clip.tokenizer.dict(), + **self.clip.tokenizer.model_dump(), ) text_encoder_info = context.services.model_manager.get_model( - **self.clip.text_encoder.dict(), + **self.clip.text_encoder.model_dump(), ) with tokenizer_info as orig_tokenizer, text_encoder_info as text_encoder: # , ExitStack() as stack: loras = [ - (context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) + ( + context.services.model_manager.get_model(**lora.model_dump(exclude={"weight"})).context.model, + lora.weight, + ) for lora in self.clip.loras ] @@ -175,14 +178,14 @@ class ONNXTextToLatentsInvocation(BaseInvocation): description=FieldDescriptions.unet, input=Input.Connection, ) - control: Optional[Union[ControlField, list[ControlField]]] = InputField( + control: Union[ControlField, list[ControlField]] = InputField( default=None, description=FieldDescriptions.control, ) # seamless: bool = InputField(default=False, description="Whether or not to generate an image that can tile without seams", ) # seamless_axes: str = InputField(default="", description="The axes to tile the image on, 'x' and/or 'y'") - @validator("cfg_scale") + @field_validator("cfg_scale") def ge_one(cls, v): """validate that all cfg_scale values are >= 1""" if isinstance(v, list): @@ -241,7 +244,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation): stable_diffusion_step_callback( context=context, intermediate_state=intermediate_state, - node=self.dict(), + node=self.model_dump(), source_node_id=source_node_id, ) @@ -254,12 +257,15 @@ class ONNXTextToLatentsInvocation(BaseInvocation): eta=0.0, ) - unet_info = context.services.model_manager.get_model(**self.unet.unet.dict()) + unet_info = context.services.model_manager.get_model(**self.unet.unet.model_dump()) with unet_info as unet: # , 
ExitStack() as stack: # loras = [(stack.enter_context(context.services.model_manager.get_model(**lora.dict(exclude={"weight"}))), lora.weight) for lora in self.unet.loras] loras = [ - (context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) + ( + context.services.model_manager.get_model(**lora.model_dump(exclude={"weight"})).context.model, + lora.weight, + ) for lora in self.unet.loras ] @@ -346,7 +352,7 @@ class ONNXLatentsToImageInvocation(BaseInvocation): raise Exception(f"Expected vae_decoder, found: {self.vae.vae.model_type}") vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), ) # clear memory as vae decode can request a lot @@ -375,7 +381,7 @@ class ONNXLatentsToImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -403,6 +409,8 @@ class OnnxModelField(BaseModel): base_model: BaseModelType = Field(description="Base model") model_type: ModelType = Field(description="Model Type") + model_config = ConfigDict(protected_namespaces=()) + @invocation("onnx_model_loader", title="ONNX Main Model", tags=["onnx", "model"], category="model", version="1.0.0") class OnnxModelLoaderInvocation(BaseInvocation): diff --git a/invokeai/app/invocations/param_easing.py b/invokeai/app/invocations/param_easing.py index 7c327a6657..0e86fb978b 100644 --- a/invokeai/app/invocations/param_easing.py +++ b/invokeai/app/invocations/param_easing.py @@ -44,13 +44,22 @@ from invokeai.app.invocations.primitives import FloatCollectionOutput from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation -@invocation("float_range", title="Float Range", tags=["math", "range"], category="math", version="1.0.0") +@invocation( + "float_range", + title="Float Range", + tags=["math", "range"], + category="math", + version="1.0.0", +) class FloatLinearRangeInvocation(BaseInvocation): """Creates a range""" start: float = InputField(default=5, description="The first value of the range") stop: float = InputField(default=10, description="The last value of the range") - steps: int = InputField(default=30, description="number of values to interpolate over (including start and stop)") + steps: int = InputField( + default=30, + description="number of values to interpolate over (including start and stop)", + ) def invoke(self, context: InvocationContext) -> FloatCollectionOutput: param_list = list(np.linspace(self.start, self.stop, self.steps)) @@ -95,7 +104,13 @@ EASING_FUNCTION_KEYS = Literal[tuple(list(EASING_FUNCTIONS_MAP.keys()))] # actually I think for now could just use CollectionOutput (which is list[Any] -@invocation("step_param_easing", title="Step Param Easing", tags=["step", "easing"], category="step", version="1.0.0") +@invocation( + "step_param_easing", + title="Step Param Easing", + tags=["step", "easing"], + category="step", + version="1.0.0", +) class StepParamEasingInvocation(BaseInvocation): """Experimental per-step parameter easing for denoising steps""" @@ -159,7 +174,9 @@ class StepParamEasingInvocation(BaseInvocation): context.services.logger.debug("base easing duration: " + str(base_easing_duration)) even_num_steps = num_easing_steps % 2 == 0 # even number of steps easing_function = easing_class( - start=self.start_value, end=self.end_value, 
duration=base_easing_duration - 1 + start=self.start_value, + end=self.end_value, + duration=base_easing_duration - 1, ) base_easing_vals = list() for step_index in range(base_easing_duration): @@ -199,7 +216,11 @@ class StepParamEasingInvocation(BaseInvocation): # else: # no mirroring (default) - easing_function = easing_class(start=self.start_value, end=self.end_value, duration=num_easing_steps - 1) + easing_function = easing_class( + start=self.start_value, + end=self.end_value, + duration=num_easing_steps - 1, + ) for step_index in range(num_easing_steps): step_val = easing_function.ease(step_index) easing_list.append(step_val) diff --git a/invokeai/app/invocations/prompt.py b/invokeai/app/invocations/prompt.py index b3d482b779..cb43a52447 100644 --- a/invokeai/app/invocations/prompt.py +++ b/invokeai/app/invocations/prompt.py @@ -3,7 +3,7 @@ from typing import Optional, Union import numpy as np from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator -from pydantic import validator +from pydantic import field_validator from invokeai.app.invocations.primitives import StringCollectionOutput @@ -21,7 +21,10 @@ from .baseinvocation import BaseInvocation, InputField, InvocationContext, UICom class DynamicPromptInvocation(BaseInvocation): """Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator""" - prompt: str = InputField(description="The prompt to parse with dynamicprompts", ui_component=UIComponent.Textarea) + prompt: str = InputField( + description="The prompt to parse with dynamicprompts", + ui_component=UIComponent.Textarea, + ) max_prompts: int = InputField(default=1, description="The number of prompts to generate") combinatorial: bool = InputField(default=False, description="Whether to use the combinatorial generator") @@ -36,21 +39,31 @@ class DynamicPromptInvocation(BaseInvocation): return StringCollectionOutput(collection=prompts) -@invocation("prompt_from_file", title="Prompts from File", tags=["prompt", "file"], category="prompt", version="1.0.0") +@invocation( + "prompt_from_file", + title="Prompts from File", + tags=["prompt", "file"], + category="prompt", + version="1.0.0", +) class PromptsFromFileInvocation(BaseInvocation): """Loads prompts from a text file""" file_path: str = InputField(description="Path to prompt text file") pre_prompt: Optional[str] = InputField( - default=None, description="String to prepend to each prompt", ui_component=UIComponent.Textarea + default=None, + description="String to prepend to each prompt", + ui_component=UIComponent.Textarea, ) post_prompt: Optional[str] = InputField( - default=None, description="String to append to each prompt", ui_component=UIComponent.Textarea + default=None, + description="String to append to each prompt", + ui_component=UIComponent.Textarea, ) start_line: int = InputField(default=1, ge=1, description="Line in the file to start start from") max_prompts: int = InputField(default=1, ge=0, description="Max lines to read from file (0=all)") - @validator("file_path") + @field_validator("file_path") def file_path_exists(cls, v): if not exists(v): raise ValueError(FileNotFoundError) @@ -79,6 +92,10 @@ class PromptsFromFileInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> StringCollectionOutput: prompts = self.promptsFromFile( - self.file_path, self.pre_prompt, self.post_prompt, self.start_line, self.max_prompts + self.file_path, + self.pre_prompt, + self.post_prompt, + self.start_line, + self.max_prompts, ) return 
StringCollectionOutput(collection=prompts) diff --git a/invokeai/app/invocations/t2i_adapter.py b/invokeai/app/invocations/t2i_adapter.py index e1bd8d0d04..76c250a552 100644 --- a/invokeai/app/invocations/t2i_adapter.py +++ b/invokeai/app/invocations/t2i_adapter.py @@ -1,6 +1,6 @@ from typing import Union -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -23,6 +23,8 @@ class T2IAdapterModelField(BaseModel): model_name: str = Field(description="Name of the T2I-Adapter model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + class T2IAdapterField(BaseModel): image: ImageField = Field(description="The T2I-Adapter image prompt.") diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index e26c1b9084..d30bb71d95 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -7,6 +7,7 @@ import numpy as np import torch from basicsr.archs.rrdbnet_arch import RRDBNet from PIL import Image +from pydantic import ConfigDict from realesrgan import RealESRGANer from invokeai.app.invocations.primitives import ImageField, ImageOutput @@ -38,6 +39,8 @@ class ESRGANInvocation(BaseInvocation): default=400, ge=0, description="Tile size for tiled ESRGAN upscaling (0=tiling disabled)" ) + model_config = ConfigDict(protected_namespaces=()) + def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get_pil_image(self.image.image_name) models_path = context.services.configuration.models_path diff --git a/invokeai/app/services/board_records/board_records_common.py b/invokeai/app/services/board_records/board_records_common.py index e0264dde0d..d08951b499 100644 --- a/invokeai/app/services/board_records/board_records_common.py +++ b/invokeai/app/services/board_records/board_records_common.py @@ -1,7 +1,7 @@ from datetime import datetime from typing import Optional, Union -from pydantic import BaseModel, Extra, Field +from pydantic import BaseModel, Field from invokeai.app.util.misc import get_iso_timestamp from invokeai.app.util.model_exclude_null import BaseModelExcludeNull @@ -18,9 +18,9 @@ class BoardRecord(BaseModelExcludeNull): """The created timestamp of the image.""" updated_at: Union[datetime, str] = Field(description="The updated timestamp of the board.") """The updated timestamp of the image.""" - deleted_at: Union[datetime, str, None] = Field(description="The deleted timestamp of the board.") + deleted_at: Optional[Union[datetime, str]] = Field(default=None, description="The deleted timestamp of the board.") """The updated timestamp of the image.""" - cover_image_name: Optional[str] = Field(description="The name of the cover image of the board.") + cover_image_name: Optional[str] = Field(default=None, description="The name of the cover image of the board.") """The name of the cover image of the board.""" @@ -46,9 +46,9 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord: ) -class BoardChanges(BaseModel, extra=Extra.forbid): - board_name: Optional[str] = Field(description="The board's new name.") - cover_image_name: Optional[str] = Field(description="The name of the board's new cover image.") +class BoardChanges(BaseModel, extra="forbid"): + board_name: Optional[str] = Field(default=None, description="The board's new name.") + cover_image_name: Optional[str] = Field(default=None, description="The name of the board's new 
cover image.") class BoardRecordNotFoundException(Exception): diff --git a/invokeai/app/services/boards/boards_common.py b/invokeai/app/services/boards/boards_common.py index e22e1915fe..0cb54102bb 100644 --- a/invokeai/app/services/boards/boards_common.py +++ b/invokeai/app/services/boards/boards_common.py @@ -17,7 +17,7 @@ class BoardDTO(BoardRecord): def board_record_to_dto(board_record: BoardRecord, cover_image_name: Optional[str], image_count: int) -> BoardDTO: """Converts a board record to a board DTO.""" return BoardDTO( - **board_record.dict(exclude={"cover_image_name"}), + **board_record.model_dump(exclude={"cover_image_name"}), cover_image_name=cover_image_name, image_count=image_count, ) diff --git a/invokeai/app/services/config/config_base.py b/invokeai/app/services/config/config_base.py index a07e14252a..9405c1dfae 100644 --- a/invokeai/app/services/config/config_base.py +++ b/invokeai/app/services/config/config_base.py @@ -18,7 +18,7 @@ from pathlib import Path from typing import ClassVar, Dict, List, Literal, Optional, Union, get_args, get_origin, get_type_hints from omegaconf import DictConfig, ListConfig, OmegaConf -from pydantic import BaseSettings +from pydantic_settings import BaseSettings, SettingsConfigDict from invokeai.app.services.config.config_common import PagingArgumentParser, int_or_float_or_str @@ -32,12 +32,14 @@ class InvokeAISettings(BaseSettings): initconf: ClassVar[Optional[DictConfig]] = None argparse_groups: ClassVar[Dict] = {} + model_config = SettingsConfigDict(env_file_encoding="utf-8", arbitrary_types_allowed=True, case_sensitive=True) + def parse_args(self, argv: Optional[list] = sys.argv[1:]): parser = self.get_parser() opt, unknown_opts = parser.parse_known_args(argv) if len(unknown_opts) > 0: print("Unknown args:", unknown_opts) - for name in self.__fields__: + for name in self.model_fields: if name not in self._excluded(): value = getattr(opt, name) if isinstance(value, ListConfig): @@ -54,10 +56,12 @@ class InvokeAISettings(BaseSettings): cls = self.__class__ type = get_args(get_type_hints(cls)["type"])[0] field_dict = dict({type: dict()}) - for name, field in self.__fields__.items(): + for name, field in self.model_fields.items(): if name in cls._excluded_from_yaml(): continue - category = field.field_info.extra.get("category") or "Uncategorized" + category = ( + field.json_schema_extra.get("category", "Uncategorized") if field.json_schema_extra else "Uncategorized" + ) value = getattr(self, name) if category not in field_dict[type]: field_dict[type][category] = dict() @@ -73,7 +77,7 @@ class InvokeAISettings(BaseSettings): else: settings_stanza = "Uncategorized" - env_prefix = getattr(cls.Config, "env_prefix", None) + env_prefix = getattr(cls.model_config, "env_prefix", None) env_prefix = env_prefix if env_prefix is not None else settings_stanza.upper() initconf = ( @@ -89,14 +93,18 @@ class InvokeAISettings(BaseSettings): for key, value in os.environ.items(): upcase_environ[key.upper()] = value - fields = cls.__fields__ + fields = cls.model_fields cls.argparse_groups = {} for name, field in fields.items(): if name not in cls._excluded(): current_default = field.default - category = field.field_info.extra.get("category", "Uncategorized") + category = ( + field.json_schema_extra.get("category", "Uncategorized") + if field.json_schema_extra + else "Uncategorized" + ) env_name = env_prefix + "_" + name if category in initconf and name in initconf.get(category): field.default = initconf.get(category).get(name) @@ -146,11 +154,6 @@ class 
InvokeAISettings(BaseSettings): "tiled_decode", ] - class Config: - env_file_encoding = "utf-8" - arbitrary_types_allowed = True - case_sensitive = True - @classmethod def add_field_argument(cls, command_parser, name: str, field, default_override=None): field_type = get_type_hints(cls).get(name) @@ -161,7 +164,7 @@ class InvokeAISettings(BaseSettings): if field.default_factory is None else field.default_factory() ) - if category := field.field_info.extra.get("category"): + if category := (field.json_schema_extra.get("category", None) if field.json_schema_extra else None): if category not in cls.argparse_groups: cls.argparse_groups[category] = command_parser.add_argument_group(category) argparse_group = cls.argparse_groups[category] @@ -169,7 +172,7 @@ class InvokeAISettings(BaseSettings): argparse_group = command_parser if get_origin(field_type) == Literal: - allowed_values = get_args(field.type_) + allowed_values = get_args(field.annotation) allowed_types = set() for val in allowed_values: allowed_types.add(type(val)) @@ -182,7 +185,7 @@ class InvokeAISettings(BaseSettings): type=field_type, default=default, choices=allowed_values, - help=field.field_info.description, + help=field.description, ) elif get_origin(field_type) == Union: @@ -191,7 +194,7 @@ class InvokeAISettings(BaseSettings): dest=name, type=int_or_float_or_str, default=default, - help=field.field_info.description, + help=field.description, ) elif get_origin(field_type) == list: @@ -199,17 +202,17 @@ class InvokeAISettings(BaseSettings): f"--{name}", dest=name, nargs="*", - type=field.type_, + type=field.annotation, default=default, - action=argparse.BooleanOptionalAction if field.type_ == bool else "store", - help=field.field_info.description, + action=argparse.BooleanOptionalAction if field.annotation == bool else "store", + help=field.description, ) else: argparse_group.add_argument( f"--{name}", dest=name, - type=field.type_, + type=field.annotation, default=default, - action=argparse.BooleanOptionalAction if field.type_ == bool else "store", - help=field.field_info.description, + action=argparse.BooleanOptionalAction if field.annotation == bool else "store", + help=field.description, ) diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py index 2a42c99bd8..df01b65882 100644 --- a/invokeai/app/services/config/config_default.py +++ b/invokeai/app/services/config/config_default.py @@ -144,8 +144,8 @@ which is set to the desired top-level name. 
For example, to create a class InvokeBatch(InvokeAISettings): type: Literal["InvokeBatch"] = "InvokeBatch" - node_count : int = Field(default=1, description="Number of nodes to run on", category='Resources') - cpu_count : int = Field(default=8, description="Number of GPUs to run on per node", category='Resources') + node_count : int = Field(default=1, description="Number of nodes to run on", json_schema_extra=dict(category='Resources')) + cpu_count : int = Field(default=8, description="Number of GPUs to run on per node", json_schema_extra=dict(category='Resources')) This will now read and write from the "InvokeBatch" section of the config file, look for environment variables named INVOKEBATCH_*, and @@ -175,7 +175,8 @@ from pathlib import Path from typing import ClassVar, Dict, List, Literal, Optional, Union, get_type_hints from omegaconf import DictConfig, OmegaConf -from pydantic import Field, parse_obj_as +from pydantic import Field, TypeAdapter +from pydantic_settings import SettingsConfigDict from .config_base import InvokeAISettings @@ -185,6 +186,21 @@ LEGACY_INIT_FILE = Path("invokeai.init") DEFAULT_MAX_VRAM = 0.5 +class Categories(object): + WebServer = dict(category="Web Server") + Features = dict(category="Features") + Paths = dict(category="Paths") + Logging = dict(category="Logging") + Development = dict(category="Development") + Other = dict(category="Other") + ModelCache = dict(category="Model Cache") + Device = dict(category="Device") + Generation = dict(category="Generation") + Queue = dict(category="Queue") + Nodes = dict(category="Nodes") + MemoryPerformance = dict(category="Memory/Performance") + + class InvokeAIAppConfig(InvokeAISettings): """ Generate images using Stable Diffusion. Use "invokeai" to launch @@ -201,86 +217,88 @@ class InvokeAIAppConfig(InvokeAISettings): type: Literal["InvokeAI"] = "InvokeAI" # WEB - host : str = Field(default="127.0.0.1", description="IP address to bind to", category='Web Server') - port : int = Field(default=9090, description="Port to bind to", category='Web Server') - allow_origins : List[str] = Field(default=[], description="Allowed CORS origins", category='Web Server') - allow_credentials : bool = Field(default=True, description="Allow CORS credentials", category='Web Server') - allow_methods : List[str] = Field(default=["*"], description="Methods allowed for CORS", category='Web Server') - allow_headers : List[str] = Field(default=["*"], description="Headers allowed for CORS", category='Web Server') + host : str = Field(default="127.0.0.1", description="IP address to bind to", json_schema_extra=Categories.WebServer) + port : int = Field(default=9090, description="Port to bind to", json_schema_extra=Categories.WebServer) + allow_origins : List[str] = Field(default=[], description="Allowed CORS origins", json_schema_extra=Categories.WebServer) + allow_credentials : bool = Field(default=True, description="Allow CORS credentials", json_schema_extra=Categories.WebServer) + allow_methods : List[str] = Field(default=["*"], description="Methods allowed for CORS", json_schema_extra=Categories.WebServer) + allow_headers : List[str] = Field(default=["*"], description="Headers allowed for CORS", json_schema_extra=Categories.WebServer) # FEATURES - esrgan : bool = Field(default=True, description="Enable/disable upscaling code", category='Features') - internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", category='Features') - log_tokenization : bool = 
Field(default=False, description="Enable logging of parsed prompt tokens.", category='Features') - patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", category='Features') - ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert', category='Features') + esrgan : bool = Field(default=True, description="Enable/disable upscaling code", json_schema_extra=Categories.Features) + internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", json_schema_extra=Categories.Features) + log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", json_schema_extra=Categories.Features) + patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", json_schema_extra=Categories.Features) + ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert', json_schema_extra=Categories.Features) # PATHS - root : Path = Field(default=None, description='InvokeAI runtime root directory', category='Paths') - autoimport_dir : Path = Field(default='autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths') - lora_dir : Path = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', category='Paths') - embedding_dir : Path = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', category='Paths') - controlnet_dir : Path = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', category='Paths') - conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths') - models_dir : Path = Field(default='models', description='Path to the models directory', category='Paths') - legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths') - db_dir : Path = Field(default='databases', description='Path to InvokeAI databases directory', category='Paths') - outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths') - use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths') - from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths') + root : Optional[Path] = Field(default=None, description='InvokeAI runtime root directory', json_schema_extra=Categories.Paths) + autoimport_dir : Optional[Path] = Field(default=Path('autoimport'), description='Path to a directory of models files to be imported on startup.', json_schema_extra=Categories.Paths) + lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths) + embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths) + controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths) + conf_path : 
Optional[Path] = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths) + models_dir : Optional[Path] = Field(default=Path('models'), description='Path to the models directory', json_schema_extra=Categories.Paths) + legacy_conf_dir : Optional[Path] = Field(default=Path('configs/stable-diffusion'), description='Path to directory of legacy checkpoint config files', json_schema_extra=Categories.Paths) + db_dir : Optional[Path] = Field(default=Path('databases'), description='Path to InvokeAI databases directory', json_schema_extra=Categories.Paths) + outdir : Optional[Path] = Field(default=Path('outputs'), description='Default folder for output images', json_schema_extra=Categories.Paths) + use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', json_schema_extra=Categories.Paths) + from_file : Optional[Path] = Field(default=None, description='Take command input from the indicated file (command-line client only)', json_schema_extra=Categories.Paths) # LOGGING - log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=", "syslog=path|address:host:port", "http="', category="Logging") + log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=", "syslog=path|address:host:port", "http="', json_schema_extra=Categories.Logging) # note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues - log_format : Literal['plain', 'color', 'syslog', 'legacy'] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging") - log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", category="Logging") - log_sql : bool = Field(default=False, description="Log SQL queries", category="Logging") + log_format : Literal['plain', 'color', 'syslog', 'legacy'] = Field(default="color", description='Log format. 
Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', json_schema_extra=Categories.Logging) + log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", json_schema_extra=Categories.Logging) + log_sql : bool = Field(default=False, description="Log SQL queries", json_schema_extra=Categories.Logging) - dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", category="Development") + dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", json_schema_extra=Categories.Development) - version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other") + version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other) # CACHE - ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", category="Model Cache", ) - vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", category="Model Cache", ) - lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", category="Model Cache", ) + ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) + vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) + lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, ) # DEVICE - device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", category="Device", ) - precision : Literal["auto", "float16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", category="Device", ) + device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device) + precision : Literal["auto", "float16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device) # GENERATION - sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category="Generation", ) - attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", category="Generation", ) - attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', category="Generation", ) - force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",) - force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",) - png_compress_level : int = Field(default=6, description="The 
compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", category="Generation", ) + sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", json_schema_extra=Categories.Generation) + attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation) + attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation) + force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation) + png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation) # QUEUE - max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", category="Queue", ) + max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue) # NODES - allow_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.", category="Nodes") - deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", category="Nodes") - node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", category="Nodes", ) + allow_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.", json_schema_extra=Categories.Nodes) + deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. 
Omit to deny none.", json_schema_extra=Categories.Nodes) + node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes) # DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAN VALUES FROM PRE-3.1 CONFIG FILES - always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance') - free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", category='Memory/Performance') - max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance') - max_vram_cache_size : Optional[float] = Field(default=None, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance') - xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance') - tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance') + always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance) + free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance) + max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance) + max_vram_cache_size : Optional[float] = Field(default=None, ge=0, description="Amount of VRAM reserved for model storage", json_schema_extra=Categories.MemoryPerformance) + xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", json_schema_extra=Categories.MemoryPerformance) + tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.MemoryPerformance) # See InvokeAIAppConfig subclass below for CACHE and DEVICE categories # fmt: on - class Config: - validate_assignment = True - env_prefix = "INVOKEAI" + model_config = SettingsConfigDict(validate_assignment=True, env_prefix="INVOKEAI") - def parse_args(self, argv: Optional[list[str]] = None, conf: Optional[DictConfig] = None, clobber=False): + def parse_args( + self, + argv: Optional[list[str]] = None, + conf: Optional[DictConfig] = None, + clobber=False, + ): """ Update settings with contents of init file, environment, and command-line settings. 
@@ -308,7 +326,11 @@ class InvokeAIAppConfig(InvokeAISettings): if self.singleton_init and not clobber: hints = get_type_hints(self.__class__) for k in self.singleton_init: - setattr(self, k, parse_obj_as(hints[k], self.singleton_init[k])) + setattr( + self, + k, + TypeAdapter(hints[k]).validate_python(self.singleton_init[k]), + ) @classmethod def get_config(cls, **kwargs) -> InvokeAIAppConfig: diff --git a/invokeai/app/services/events/events_base.py b/invokeai/app/services/events/events_base.py index 8685db3717..ad00815151 100644 --- a/invokeai/app/services/events/events_base.py +++ b/invokeai/app/services/events/events_base.py @@ -2,7 +2,6 @@ from typing import Any, Optional -from invokeai.app.invocations.model import ModelInfo from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage from invokeai.app.services.session_queue.session_queue_common import ( BatchStatus, @@ -11,6 +10,7 @@ from invokeai.app.services.session_queue.session_queue_common import ( SessionQueueStatus, ) from invokeai.app.util.misc import get_timestamp +from invokeai.backend.model_management.model_manager import ModelInfo from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType @@ -55,7 +55,7 @@ class EventServiceBase: graph_execution_state_id=graph_execution_state_id, node_id=node.get("id"), source_node_id=source_node_id, - progress_image=progress_image.dict() if progress_image is not None else None, + progress_image=progress_image.model_dump() if progress_image is not None else None, step=step, order=order, total_steps=total_steps, @@ -291,8 +291,8 @@ class EventServiceBase: started_at=str(session_queue_item.started_at) if session_queue_item.started_at else None, completed_at=str(session_queue_item.completed_at) if session_queue_item.completed_at else None, ), - batch_status=batch_status.dict(), - queue_status=queue_status.dict(), + batch_status=batch_status.model_dump(), + queue_status=queue_status.model_dump(), ), ) diff --git a/invokeai/app/services/image_files/image_files_base.py b/invokeai/app/services/image_files/image_files_base.py index d998f9024b..5dde7b05d6 100644 --- a/invokeai/app/services/image_files/image_files_base.py +++ b/invokeai/app/services/image_files/image_files_base.py @@ -1,4 +1,5 @@ from abc import ABC, abstractmethod +from pathlib import Path from typing import Optional from PIL.Image import Image as PILImageType @@ -13,7 +14,7 @@ class ImageFileStorageBase(ABC): pass @abstractmethod - def get_path(self, image_name: str, thumbnail: bool = False) -> str: + def get_path(self, image_name: str, thumbnail: bool = False) -> Path: """Gets the internal path to an image or thumbnail.""" pass diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py index 58db6feb23..107ff85f9b 100644 --- a/invokeai/app/services/image_records/image_records_base.py +++ b/invokeai/app/services/image_records/image_records_base.py @@ -34,8 +34,8 @@ class ImageRecordStorageBase(ABC): @abstractmethod def get_many( self, - offset: Optional[int] = None, - limit: Optional[int] = None, + offset: int = 0, + limit: int = 10, image_origin: Optional[ResourceOrigin] = None, categories: Optional[list[ImageCategory]] = None, is_intermediate: Optional[bool] = None, @@ -69,11 +69,11 @@ class ImageRecordStorageBase(ABC): image_category: ImageCategory, width: int, height: int, - session_id: Optional[str], - node_id: Optional[str], - metadata: Optional[dict], - is_intermediate: bool = 
False, - starred: bool = False, + is_intermediate: Optional[bool] = False, + starred: Optional[bool] = False, + session_id: Optional[str] = None, + node_id: Optional[str] = None, + metadata: Optional[dict] = None, ) -> datetime: """Saves an image record.""" pass diff --git a/invokeai/app/services/image_records/image_records_common.py b/invokeai/app/services/image_records/image_records_common.py index 39fac92048..5a6e5652c9 100644 --- a/invokeai/app/services/image_records/image_records_common.py +++ b/invokeai/app/services/image_records/image_records_common.py @@ -3,7 +3,7 @@ import datetime from enum import Enum from typing import Optional, Union -from pydantic import Extra, Field, StrictBool, StrictStr +from pydantic import Field, StrictBool, StrictStr from invokeai.app.util.metaenum import MetaEnum from invokeai.app.util.misc import get_iso_timestamp @@ -129,7 +129,9 @@ class ImageRecord(BaseModelExcludeNull): """The created timestamp of the image.""" updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the image.") """The updated timestamp of the image.""" - deleted_at: Union[datetime.datetime, str, None] = Field(description="The deleted timestamp of the image.") + deleted_at: Optional[Union[datetime.datetime, str]] = Field( + default=None, description="The deleted timestamp of the image." + ) """The deleted timestamp of the image.""" is_intermediate: bool = Field(description="Whether this is an intermediate image.") """Whether this is an intermediate image.""" @@ -147,7 +149,7 @@ class ImageRecord(BaseModelExcludeNull): """Whether this image is starred.""" -class ImageRecordChanges(BaseModelExcludeNull, extra=Extra.forbid): +class ImageRecordChanges(BaseModelExcludeNull, extra="allow"): """A set of changes to apply to an image record. 
Only limited changes are valid: diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py index 864f4eff00..9793236d9c 100644 --- a/invokeai/app/services/image_records/image_records_sqlite.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -2,7 +2,7 @@ import json import sqlite3 import threading from datetime import datetime -from typing import Optional, cast +from typing import Optional, Union, cast from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.shared.sqlite import SqliteDatabase @@ -117,7 +117,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): """ ) - def get(self, image_name: str) -> Optional[ImageRecord]: + def get(self, image_name: str) -> ImageRecord: try: self._lock.acquire() @@ -223,8 +223,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): def get_many( self, - offset: Optional[int] = None, - limit: Optional[int] = None, + offset: int = 0, + limit: int = 10, image_origin: Optional[ResourceOrigin] = None, categories: Optional[list[ImageCategory]] = None, is_intermediate: Optional[bool] = None, @@ -249,7 +249,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): """ query_conditions = "" - query_params = [] + query_params: list[Union[int, str, bool]] = [] if image_origin is not None: query_conditions += """--sql @@ -387,13 +387,13 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): image_name: str, image_origin: ResourceOrigin, image_category: ImageCategory, - session_id: Optional[str], width: int, height: int, - node_id: Optional[str], - metadata: Optional[dict], - is_intermediate: bool = False, - starred: bool = False, + is_intermediate: Optional[bool] = False, + starred: Optional[bool] = False, + session_id: Optional[str] = None, + node_id: Optional[str] = None, + metadata: Optional[dict] = None, ) -> datetime: try: metadata_json = None if metadata is None else json.dumps(metadata) diff --git a/invokeai/app/services/images/images_base.py b/invokeai/app/services/images/images_base.py index 71581099a3..a611e9485d 100644 --- a/invokeai/app/services/images/images_base.py +++ b/invokeai/app/services/images/images_base.py @@ -49,7 +49,7 @@ class ImageServiceABC(ABC): node_id: Optional[str] = None, session_id: Optional[str] = None, board_id: Optional[str] = None, - is_intermediate: bool = False, + is_intermediate: Optional[bool] = False, metadata: Optional[dict] = None, workflow: Optional[str] = None, ) -> ImageDTO: diff --git a/invokeai/app/services/images/images_common.py b/invokeai/app/services/images/images_common.py index f8b63a16c1..325cecdd26 100644 --- a/invokeai/app/services/images/images_common.py +++ b/invokeai/app/services/images/images_common.py @@ -20,7 +20,9 @@ class ImageUrlsDTO(BaseModelExcludeNull): class ImageDTO(ImageRecord, ImageUrlsDTO): """Deserialized image record, enriched for the frontend.""" - board_id: Optional[str] = Field(description="The id of the board the image belongs to, if one exists.") + board_id: Optional[str] = Field( + default=None, description="The id of the board the image belongs to, if one exists." 
+ ) """The id of the board the image belongs to, if one exists.""" pass @@ -34,7 +36,7 @@ def image_record_to_dto( ) -> ImageDTO: """Converts an image record to an image DTO.""" return ImageDTO( - **image_record.dict(), + **image_record.model_dump(), image_url=image_url, thumbnail_url=thumbnail_url, board_id=board_id, diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py index 9134b9a4f6..d4e473b8e4 100644 --- a/invokeai/app/services/images/images_default.py +++ b/invokeai/app/services/images/images_default.py @@ -41,7 +41,7 @@ class ImageService(ImageServiceABC): node_id: Optional[str] = None, session_id: Optional[str] = None, board_id: Optional[str] = None, - is_intermediate: bool = False, + is_intermediate: Optional[bool] = False, metadata: Optional[dict] = None, workflow: Optional[str] = None, ) -> ImageDTO: @@ -146,7 +146,7 @@ class ImageService(ImageServiceABC): self.__invoker.services.logger.error("Problem getting image DTO") raise e - def get_metadata(self, image_name: str) -> Optional[ImageMetadata]: + def get_metadata(self, image_name: str) -> ImageMetadata: try: image_record = self.__invoker.services.image_records.get(image_name) metadata = self.__invoker.services.image_records.get_metadata(image_name) @@ -174,7 +174,7 @@ class ImageService(ImageServiceABC): def get_path(self, image_name: str, thumbnail: bool = False) -> str: try: - return self.__invoker.services.image_files.get_path(image_name, thumbnail) + return str(self.__invoker.services.image_files.get_path(image_name, thumbnail)) except Exception as e: self.__invoker.services.logger.error("Problem getting image path") raise e diff --git a/invokeai/app/services/invocation_cache/invocation_cache_memory.py b/invokeai/app/services/invocation_cache/invocation_cache_memory.py index 817dbb958e..4a503b3c6b 100644 --- a/invokeai/app/services/invocation_cache/invocation_cache_memory.py +++ b/invokeai/app/services/invocation_cache/invocation_cache_memory.py @@ -58,7 +58,12 @@ class MemoryInvocationCache(InvocationCacheBase): # If the cache is full, we need to remove the least used number_to_delete = len(self._cache) + 1 - self._max_cache_size self._delete_oldest_access(number_to_delete) - self._cache[key] = CachedItem(invocation_output, invocation_output.json()) + self._cache[key] = CachedItem( + invocation_output, + invocation_output.model_dump_json( + warnings=False, exclude_defaults=True, exclude_unset=True, include={"type"} + ), + ) def _delete_oldest_access(self, number_to_delete: int) -> None: number_to_delete = min(number_to_delete, len(self._cache)) @@ -85,7 +90,7 @@ class MemoryInvocationCache(InvocationCacheBase): @staticmethod def create_key(invocation: BaseInvocation) -> int: - return hash(invocation.json(exclude={"id"})) + return hash(invocation.model_dump_json(exclude={"id"}, warnings=False)) def disable(self) -> None: with self._lock: diff --git a/invokeai/app/services/invocation_processor/invocation_processor_default.py b/invokeai/app/services/invocation_processor/invocation_processor_default.py index 349c4a03e4..c59fb678ef 100644 --- a/invokeai/app/services/invocation_processor/invocation_processor_default.py +++ b/invokeai/app/services/invocation_processor/invocation_processor_default.py @@ -89,7 +89,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC): queue_item_id=queue_item.session_queue_item_id, queue_id=queue_item.session_queue_id, graph_execution_state_id=graph_execution_state.id, - node=invocation.dict(), + node=invocation.model_dump(), 
source_node_id=source_node_id, ) @@ -127,9 +127,9 @@ class DefaultInvocationProcessor(InvocationProcessorABC): queue_item_id=queue_item.session_queue_item_id, queue_id=queue_item.session_queue_id, graph_execution_state_id=graph_execution_state.id, - node=invocation.dict(), + node=invocation.model_dump(), source_node_id=source_node_id, - result=outputs.dict(), + result=outputs.model_dump(), ) self.__invoker.services.performance_statistics.log_stats() @@ -157,7 +157,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC): queue_item_id=queue_item.session_queue_item_id, queue_id=queue_item.session_queue_id, graph_execution_state_id=graph_execution_state.id, - node=invocation.dict(), + node=invocation.model_dump(), source_node_id=source_node_id, error_type=e.__class__.__name__, error=error, @@ -187,7 +187,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC): queue_item_id=queue_item.session_queue_item_id, queue_id=queue_item.session_queue_id, graph_execution_state_id=graph_execution_state.id, - node=invocation.dict(), + node=invocation.model_dump(), source_node_id=source_node_id, error_type=e.__class__.__name__, error=traceback.format_exc(), diff --git a/invokeai/app/services/invocation_stats/invocation_stats_default.py b/invokeai/app/services/invocation_stats/invocation_stats_default.py index 2041ab6190..be019b6820 100644 --- a/invokeai/app/services/invocation_stats/invocation_stats_default.py +++ b/invokeai/app/services/invocation_stats/invocation_stats_default.py @@ -72,7 +72,7 @@ class InvocationStatsService(InvocationStatsServiceBase): ) self.collector.update_invocation_stats( graph_id=self.graph_id, - invocation_type=self.invocation.type, # type: ignore - `type` is not on the `BaseInvocation` model, but *is* on all invocations + invocation_type=self.invocation.type, # type: ignore # `type` is not on the `BaseInvocation` model, but *is* on all invocations time_used=time.time() - self.start_time, vram_used=torch.cuda.max_memory_allocated() / GIG if torch.cuda.is_available() else 0.0, ) diff --git a/invokeai/app/services/item_storage/item_storage_sqlite.py b/invokeai/app/services/item_storage/item_storage_sqlite.py index 1d6008e90f..1bb9429130 100644 --- a/invokeai/app/services/item_storage/item_storage_sqlite.py +++ b/invokeai/app/services/item_storage/item_storage_sqlite.py @@ -2,7 +2,7 @@ import sqlite3 import threading from typing import Generic, Optional, TypeVar, get_args -from pydantic import BaseModel, parse_raw_as +from pydantic import BaseModel, TypeAdapter from invokeai.app.services.shared.pagination import PaginatedResults from invokeai.app.services.shared.sqlite import SqliteDatabase @@ -18,6 +18,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): _cursor: sqlite3.Cursor _id_field: str _lock: threading.RLock + _adapter: Optional[TypeAdapter[T]] def __init__(self, db: SqliteDatabase, table_name: str, id_field: str = "id"): super().__init__() @@ -27,6 +28,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): self._table_name = table_name self._id_field = id_field # TODO: validate that T has this field self._cursor = self._conn.cursor() + self._adapter: Optional[TypeAdapter[T]] = None self._create_table() @@ -45,16 +47,21 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): self._lock.release() def _parse_item(self, item: str) -> T: - # __orig_class__ is technically an implementation detail of the typing module, not a supported API - item_type = get_args(self.__orig_class__)[0] # type: ignore - return parse_raw_as(item_type, item) + if self._adapter is 
None: + """ + We don't get access to `__orig_class__` in `__init__()`, and we need this before start(), so + we can create it when it is first needed instead. + __orig_class__ is technically an implementation detail of the typing module, not a supported API + """ + self._adapter = TypeAdapter(get_args(self.__orig_class__)[0]) # type: ignore [attr-defined] + return self._adapter.validate_json(item) def set(self, item: T): try: self._lock.acquire() self._cursor.execute( f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""", - (item.json(),), + (item.model_dump_json(warnings=False, exclude_none=True),), ) self._conn.commit() finally: diff --git a/invokeai/app/services/model_manager/model_manager_base.py b/invokeai/app/services/model_manager/model_manager_base.py index bb9110ba0a..4c2fc4c085 100644 --- a/invokeai/app/services/model_manager/model_manager_base.py +++ b/invokeai/app/services/model_manager/model_manager_base.py @@ -231,7 +231,7 @@ class ModelManagerServiceBase(ABC): def merge_models( self, model_names: List[str] = Field( - default=None, min_items=2, max_items=3, description="List of model names to merge" + default=None, min_length=2, max_length=3, description="List of model names to merge" ), base_model: Union[BaseModelType, str] = Field( default=None, description="Base model shared by all models to be merged" diff --git a/invokeai/app/services/model_manager/model_manager_default.py b/invokeai/app/services/model_manager/model_manager_default.py index 263f804b4d..cdb3e59a91 100644 --- a/invokeai/app/services/model_manager/model_manager_default.py +++ b/invokeai/app/services/model_manager/model_manager_default.py @@ -327,7 +327,7 @@ class ModelManagerService(ModelManagerServiceBase): def merge_models( self, model_names: List[str] = Field( - default=None, min_items=2, max_items=3, description="List of model names to merge" + default=None, min_length=2, max_length=3, description="List of model names to merge" ), base_model: Union[BaseModelType, str] = Field( default=None, description="Base model shared by all models to be merged" diff --git a/invokeai/app/services/session_queue/session_queue_common.py b/invokeai/app/services/session_queue/session_queue_common.py index 2d40a5b0c4..48e1da83b5 100644 --- a/invokeai/app/services/session_queue/session_queue_common.py +++ b/invokeai/app/services/session_queue/session_queue_common.py @@ -3,8 +3,8 @@ import json from itertools import chain, product from typing import Generator, Iterable, Literal, NamedTuple, Optional, TypeAlias, Union, cast -from pydantic import BaseModel, Field, StrictStr, parse_raw_as, root_validator, validator -from pydantic.json import pydantic_encoder +from pydantic import BaseModel, ConfigDict, Field, StrictStr, TypeAdapter, field_validator, model_validator +from pydantic_core import to_jsonable_python from invokeai.app.invocations.baseinvocation import BaseInvocation from invokeai.app.services.shared.graph import Graph, GraphExecutionState, NodeNotFoundError @@ -17,7 +17,7 @@ class BatchZippedLengthError(ValueError): """Raise when a batch has items of different lengths.""" -class BatchItemsTypeError(TypeError): +class BatchItemsTypeError(ValueError): # this cannot be a TypeError in pydantic v2 """Raise when a batch has items of different types.""" @@ -70,7 +70,7 @@ class Batch(BaseModel): default=1, ge=1, description="Int stating how many times to iterate through all possible batch indices" ) - @validator("data") + @field_validator("data") def validate_lengths(cls, v: Optional[BatchDataCollection]): if v 
is None: return v @@ -81,7 +81,7 @@ class Batch(BaseModel): raise BatchZippedLengthError("Zipped batch items must all have the same length") return v - @validator("data") + @field_validator("data") def validate_types(cls, v: Optional[BatchDataCollection]): if v is None: return v @@ -94,7 +94,7 @@ class Batch(BaseModel): raise BatchItemsTypeError("All items in a batch must have the same type") return v - @validator("data") + @field_validator("data") def validate_unique_field_mappings(cls, v: Optional[BatchDataCollection]): if v is None: return v @@ -107,34 +107,35 @@ class Batch(BaseModel): paths.add(pair) return v - @root_validator(skip_on_failure=True) + @model_validator(mode="after") def validate_batch_nodes_and_edges(cls, values): - batch_data_collection = cast(Optional[BatchDataCollection], values["data"]) + batch_data_collection = cast(Optional[BatchDataCollection], values.data) if batch_data_collection is None: return values - graph = cast(Graph, values["graph"]) + graph = cast(Graph, values.graph) for batch_data_list in batch_data_collection: for batch_data in batch_data_list: try: node = cast(BaseInvocation, graph.get_node(batch_data.node_path)) except NodeNotFoundError: raise NodeNotFoundError(f"Node {batch_data.node_path} not found in graph") - if batch_data.field_name not in node.__fields__: + if batch_data.field_name not in node.model_fields: raise NodeNotFoundError(f"Field {batch_data.field_name} not found in node {batch_data.node_path}") return values - @validator("graph") + @field_validator("graph") def validate_graph(cls, v: Graph): v.validate_self() return v - class Config: - schema_extra = { - "required": [ + model_config = ConfigDict( + json_schema_extra=dict( + required=[ "graph", "runs", ] - } + ) + ) # endregion Batch @@ -146,15 +147,21 @@ DEFAULT_QUEUE_ID = "default" QUEUE_ITEM_STATUS = Literal["pending", "in_progress", "completed", "failed", "canceled"] +adapter_NodeFieldValue = TypeAdapter(list[NodeFieldValue]) + def get_field_values(queue_item_dict: dict) -> Optional[list[NodeFieldValue]]: field_values_raw = queue_item_dict.get("field_values", None) - return parse_raw_as(list[NodeFieldValue], field_values_raw) if field_values_raw is not None else None + return adapter_NodeFieldValue.validate_json(field_values_raw) if field_values_raw is not None else None + + +adapter_GraphExecutionState = TypeAdapter(GraphExecutionState) def get_session(queue_item_dict: dict) -> GraphExecutionState: session_raw = queue_item_dict.get("session", "{}") - return parse_raw_as(GraphExecutionState, session_raw) + session = adapter_GraphExecutionState.validate_json(session_raw, strict=False) + return session class SessionQueueItemWithoutGraph(BaseModel): @@ -178,14 +185,14 @@ class SessionQueueItemWithoutGraph(BaseModel): ) @classmethod - def from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO": + def queue_item_dto_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO": # must parse these manually queue_item_dict["field_values"] = get_field_values(queue_item_dict) return SessionQueueItemDTO(**queue_item_dict) - class Config: - schema_extra = { - "required": [ + model_config = ConfigDict( + json_schema_extra=dict( + required=[ "item_id", "status", "batch_id", @@ -196,7 +203,8 @@ class SessionQueueItemWithoutGraph(BaseModel): "created_at", "updated_at", ] - } + ) + ) class SessionQueueItemDTO(SessionQueueItemWithoutGraph): @@ -207,15 +215,15 @@ class SessionQueueItem(SessionQueueItemWithoutGraph): session: GraphExecutionState = Field(description="The fully-populated 
session to be executed") @classmethod - def from_dict(cls, queue_item_dict: dict) -> "SessionQueueItem": + def queue_item_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItem": # must parse these manually queue_item_dict["field_values"] = get_field_values(queue_item_dict) queue_item_dict["session"] = get_session(queue_item_dict) return SessionQueueItem(**queue_item_dict) - class Config: - schema_extra = { - "required": [ + model_config = ConfigDict( + json_schema_extra=dict( + required=[ "item_id", "status", "batch_id", @@ -227,7 +235,8 @@ class SessionQueueItem(SessionQueueItemWithoutGraph): "created_at", "updated_at", ] - } + ) + ) # endregion Queue Items @@ -321,7 +330,7 @@ def populate_graph(graph: Graph, node_field_values: Iterable[NodeFieldValue]) -> """ Populates the given graph with the given batch data items. """ - graph_clone = graph.copy(deep=True) + graph_clone = graph.model_copy(deep=True) for item in node_field_values: node = graph_clone.get_node(item.node_path) if node is None: @@ -354,7 +363,7 @@ def create_session_nfv_tuples( for item in batch_datum.items ] node_field_values_to_zip.append(node_field_values) - data.append(list(zip(*node_field_values_to_zip))) + data.append(list(zip(*node_field_values_to_zip))) # type: ignore [arg-type] # create generator to yield session,nfv tuples count = 0 @@ -409,11 +418,11 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new values_to_insert.append( SessionQueueValueToInsert( queue_id, # queue_id - session.json(), # session (json) + session.model_dump_json(warnings=False, exclude_none=True), # session (json) session.id, # session_id batch.batch_id, # batch_id # must use pydantic_encoder bc field_values is a list of models - json.dumps(field_values, default=pydantic_encoder) if field_values else None, # field_values (json) + json.dumps(field_values, default=to_jsonable_python) if field_values else None, # field_values (json) priority, # priority ) ) @@ -421,3 +430,6 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new # endregion Util + +Batch.model_rebuild(force=True) +SessionQueueItem.model_rebuild(force=True) diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py index eb82667be5..4daab9cdbc 100644 --- a/invokeai/app/services/session_queue/session_queue_sqlite.py +++ b/invokeai/app/services/session_queue/session_queue_sqlite.py @@ -277,8 +277,8 @@ class SqliteSessionQueue(SessionQueueBase): if result is None: raise SessionQueueItemNotFoundError(f"No queue item with batch id {enqueue_result.batch.batch_id}") return EnqueueGraphResult( - **enqueue_result.dict(), - queue_item=SessionQueueItemDTO.from_dict(dict(result)), + **enqueue_result.model_dump(), + queue_item=SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)), ) def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult: @@ -351,7 +351,7 @@ class SqliteSessionQueue(SessionQueueBase): self.__lock.release() if result is None: return None - queue_item = SessionQueueItem.from_dict(dict(result)) + queue_item = SessionQueueItem.queue_item_from_dict(dict(result)) queue_item = self._set_queue_item_status(item_id=queue_item.item_id, status="in_progress") return queue_item @@ -380,7 +380,7 @@ class SqliteSessionQueue(SessionQueueBase): self.__lock.release() if result is None: return None - return SessionQueueItem.from_dict(dict(result)) + return SessionQueueItem.queue_item_from_dict(dict(result)) def 
get_current(self, queue_id: str) -> Optional[SessionQueueItem]: try: @@ -404,7 +404,7 @@ class SqliteSessionQueue(SessionQueueBase): self.__lock.release() if result is None: return None - return SessionQueueItem.from_dict(dict(result)) + return SessionQueueItem.queue_item_from_dict(dict(result)) def _set_queue_item_status( self, item_id: int, status: QUEUE_ITEM_STATUS, error: Optional[str] = None @@ -564,7 +564,7 @@ class SqliteSessionQueue(SessionQueueBase): queue_item = self.get_queue_item(item_id) if queue_item.status not in ["canceled", "failed", "completed"]: status = "failed" if error is not None else "canceled" - queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error) + queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error) # type: ignore [arg-type] # mypy seems to not narrow the Literals here self.__invoker.services.queue.cancel(queue_item.session_id) self.__invoker.services.events.emit_session_canceled( queue_item_id=queue_item.item_id, @@ -699,7 +699,7 @@ class SqliteSessionQueue(SessionQueueBase): self.__lock.release() if result is None: raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}") - return SessionQueueItem.from_dict(dict(result)) + return SessionQueueItem.queue_item_from_dict(dict(result)) def list_queue_items( self, @@ -751,7 +751,7 @@ class SqliteSessionQueue(SessionQueueBase): params.append(limit + 1) self.__cursor.execute(query, params) results = cast(list[sqlite3.Row], self.__cursor.fetchall()) - items = [SessionQueueItemDTO.from_dict(dict(result)) for result in results] + items = [SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)) for result in results] has_more = False if len(items) > limit: # remove the extra item diff --git a/invokeai/app/services/shared/default_graphs.py b/invokeai/app/services/shared/default_graphs.py index b2d0a1f0b6..9a6e2456cb 100644 --- a/invokeai/app/services/shared/default_graphs.py +++ b/invokeai/app/services/shared/default_graphs.py @@ -80,10 +80,10 @@ def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[Li # TODO: Uncomment this when we are ready to fix this up to prevent breaking changes graphs: list[LibraryGraph] = list() - # text_to_image = graph_library.get(default_text_to_image_graph_id) + text_to_image = graph_library.get(default_text_to_image_graph_id) - # # TODO: Check if the graph is the same as the default one, and if not, update it - # #if text_to_image is None: + # TODO: Check if the graph is the same as the default one, and if not, update it + # if text_to_image is None: text_to_image = create_text_to_image() graph_library.set(text_to_image) diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py index dab045af9d..8f974f7c6b 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -5,7 +5,7 @@ import itertools from typing import Annotated, Any, Optional, Union, get_args, get_origin, get_type_hints import networkx as nx -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, ConfigDict, field_validator, model_validator from pydantic.fields import Field # Importing * is bad karma but needed here for node detection @@ -235,7 +235,8 @@ class CollectInvocationOutput(BaseInvocationOutput): class CollectInvocation(BaseInvocation): """Collects values into a collection""" - item: Any = InputField( + item: Optional[Any] = InputField( + default=None, description="The item to collect (all inputs must be of the 
same type)", ui_type=UIType.CollectionItem, title="Collection Item", @@ -250,8 +251,8 @@ class CollectInvocation(BaseInvocation): return CollectInvocationOutput(collection=copy.copy(self.collection)) -InvocationsUnion = Union[BaseInvocation.get_invocations()] # type: ignore -InvocationOutputsUnion = Union[BaseInvocationOutput.get_all_subclasses_tuple()] # type: ignore +InvocationsUnion: Any = BaseInvocation.get_invocations_union() +InvocationOutputsUnion: Any = BaseInvocationOutput.get_outputs_union() class Graph(BaseModel): @@ -378,13 +379,13 @@ class Graph(BaseModel): raise NodeNotFoundError(f"Edge destination node {edge.destination.node_id} does not exist in the graph") # output fields are not on the node object directly, they are on the output type - if edge.source.field not in source_node.get_output_type().__fields__: + if edge.source.field not in source_node.get_output_type().model_fields: raise NodeFieldNotFoundError( f"Edge source field {edge.source.field} does not exist in node {edge.source.node_id}" ) # input fields are on the node - if edge.destination.field not in destination_node.__fields__: + if edge.destination.field not in destination_node.model_fields: raise NodeFieldNotFoundError( f"Edge destination field {edge.destination.field} does not exist in node {edge.destination.node_id}" ) @@ -395,24 +396,24 @@ class Graph(BaseModel): raise CyclicalGraphError("Graph contains cycles") # Validate all edge connections are valid - for e in self.edges: + for edge in self.edges: if not are_connections_compatible( - self.get_node(e.source.node_id), - e.source.field, - self.get_node(e.destination.node_id), - e.destination.field, + self.get_node(edge.source.node_id), + edge.source.field, + self.get_node(edge.destination.node_id), + edge.destination.field, ): raise InvalidEdgeError( - f"Invalid edge from {e.source.node_id}.{e.source.field} to {e.destination.node_id}.{e.destination.field}" + f"Invalid edge from {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}" ) # Validate all iterators & collectors # TODO: may need to validate all iterators & collectors in subgraphs so edge connections in parent graphs will be available - for n in self.nodes.values(): - if isinstance(n, IterateInvocation) and not self._is_iterator_connection_valid(n.id): - raise InvalidEdgeError(f"Invalid iterator node {n.id}") - if isinstance(n, CollectInvocation) and not self._is_collector_connection_valid(n.id): - raise InvalidEdgeError(f"Invalid collector node {n.id}") + for node in self.nodes.values(): + if isinstance(node, IterateInvocation) and not self._is_iterator_connection_valid(node.id): + raise InvalidEdgeError(f"Invalid iterator node {node.id}") + if isinstance(node, CollectInvocation) and not self._is_collector_connection_valid(node.id): + raise InvalidEdgeError(f"Invalid collector node {node.id}") return None @@ -594,7 +595,7 @@ class Graph(BaseModel): def _get_input_edges_and_graphs( self, node_path: str, prefix: Optional[str] = None - ) -> list[tuple["Graph", str, Edge]]: + ) -> list[tuple["Graph", Union[str, None], Edge]]: """Gets all input edges for a node along with the graph they are in and the graph's path""" edges = list() @@ -636,7 +637,7 @@ class Graph(BaseModel): def _get_output_edges_and_graphs( self, node_path: str, prefix: Optional[str] = None - ) -> list[tuple["Graph", str, Edge]]: + ) -> list[tuple["Graph", Union[str, None], Edge]]: """Gets all output edges for a node along with the graph they are in and the graph's path""" edges = list() @@ 
-817,15 +818,15 @@ class GraphExecutionState(BaseModel): default_factory=dict, ) - @validator("graph") + @field_validator("graph") def graph_is_valid(cls, v: Graph): """Validates that the graph is valid""" v.validate_self() return v - class Config: - schema_extra = { - "required": [ + model_config = ConfigDict( + json_schema_extra=dict( + required=[ "id", "graph", "execution_graph", @@ -836,7 +837,8 @@ class GraphExecutionState(BaseModel): "prepared_source_mapping", "source_prepared_mapping", ] - } + ) + ) def next(self) -> Optional[BaseInvocation]: """Gets the next node ready to execute.""" @@ -910,7 +912,7 @@ class GraphExecutionState(BaseModel): input_collection = getattr(input_collection_prepared_node_output, input_collection_edge.source.field) self_iteration_count = len(input_collection) - new_nodes = list() + new_nodes: list[str] = list() if self_iteration_count == 0: # TODO: should this raise a warning? It might just happen if an empty collection is input, and should be valid. return new_nodes @@ -920,7 +922,7 @@ class GraphExecutionState(BaseModel): # Create new edges for this iteration # For collect nodes, this may contain multiple inputs to the same field - new_edges = list() + new_edges: list[Edge] = list() for edge in input_edges: for input_node_id in (n[1] for n in iteration_node_map if n[0] == edge.source.node_id): new_edge = Edge( @@ -1179,18 +1181,18 @@ class LibraryGraph(BaseModel): description="The outputs exposed by this graph", default_factory=list ) - @validator("exposed_inputs", "exposed_outputs") - def validate_exposed_aliases(cls, v): + @field_validator("exposed_inputs", "exposed_outputs") + def validate_exposed_aliases(cls, v: list[Union[ExposedNodeInput, ExposedNodeOutput]]): if len(v) != len(set(i.alias for i in v)): raise ValueError("Duplicate exposed alias") return v - @root_validator + @model_validator(mode="after") def validate_exposed_nodes(cls, values): - graph = values["graph"] + graph = values.graph # Validate exposed inputs - for exposed_input in values["exposed_inputs"]: + for exposed_input in values.exposed_inputs: if not graph.has_node(exposed_input.node_path): raise ValueError(f"Exposed input node {exposed_input.node_path} does not exist") node = graph.get_node(exposed_input.node_path) @@ -1200,7 +1202,7 @@ class LibraryGraph(BaseModel): ) # Validate exposed outputs - for exposed_output in values["exposed_outputs"]: + for exposed_output in values.exposed_outputs: if not graph.has_node(exposed_output.node_path): raise ValueError(f"Exposed output node {exposed_output.node_path} does not exist") node = graph.get_node(exposed_output.node_path) @@ -1212,4 +1214,6 @@ class LibraryGraph(BaseModel): return values -GraphInvocation.update_forward_refs() +GraphInvocation.model_rebuild(force=True) +Graph.model_rebuild(force=True) +GraphExecutionState.model_rebuild(force=True) diff --git a/invokeai/app/services/shared/pagination.py b/invokeai/app/services/shared/pagination.py index 85c8fb984e..ea342b1101 100644 --- a/invokeai/app/services/shared/pagination.py +++ b/invokeai/app/services/shared/pagination.py @@ -1,12 +1,11 @@ from typing import Generic, TypeVar from pydantic import BaseModel, Field -from pydantic.generics import GenericModel GenericBaseModel = TypeVar("GenericBaseModel", bound=BaseModel) -class CursorPaginatedResults(GenericModel, Generic[GenericBaseModel]): +class CursorPaginatedResults(BaseModel, Generic[GenericBaseModel]): """ Cursor-paginated results Generic must be a Pydantic model @@ -17,7 +16,7 @@ class CursorPaginatedResults(GenericModel, 
Generic[GenericBaseModel]): items: list[GenericBaseModel] = Field(..., description="Items") -class OffsetPaginatedResults(GenericModel, Generic[GenericBaseModel]): +class OffsetPaginatedResults(BaseModel, Generic[GenericBaseModel]): """ Offset-paginated results Generic must be a Pydantic model @@ -29,7 +28,7 @@ class OffsetPaginatedResults(GenericModel, Generic[GenericBaseModel]): items: list[GenericBaseModel] = Field(description="Items") -class PaginatedResults(GenericModel, Generic[GenericBaseModel]): +class PaginatedResults(BaseModel, Generic[GenericBaseModel]): """ Paginated results Generic must be a Pydantic model diff --git a/invokeai/app/util/controlnet_utils.py b/invokeai/app/util/controlnet_utils.py index e6f34a4c44..51ceec2edd 100644 --- a/invokeai/app/util/controlnet_utils.py +++ b/invokeai/app/util/controlnet_utils.py @@ -265,7 +265,7 @@ def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device: def prepare_control_image( - image: Image, + image: Image.Image, width: int, height: int, num_channels: int = 3, diff --git a/invokeai/app/util/misc.py b/invokeai/app/util/misc.py index 6d56652ed4..910b05d8dd 100644 --- a/invokeai/app/util/misc.py +++ b/invokeai/app/util/misc.py @@ -1,4 +1,5 @@ import datetime +import typing import uuid import numpy as np @@ -27,3 +28,8 @@ def get_random_seed(): def uuid_string(): res = uuid.uuid4() return str(res) + + +def is_optional(value: typing.Any): + """Checks if a value is typed as Optional. Note that Optional is sugar for Union[x, None].""" + return typing.get_origin(value) is typing.Union and type(None) in typing.get_args(value) diff --git a/invokeai/app/util/model_exclude_null.py b/invokeai/app/util/model_exclude_null.py index b75f127ec7..6da41039b4 100644 --- a/invokeai/app/util/model_exclude_null.py +++ b/invokeai/app/util/model_exclude_null.py @@ -13,11 +13,11 @@ From https://github.com/tiangolo/fastapi/discussions/8882#discussioncomment-5154 class BaseModelExcludeNull(BaseModel): - def dict(self, *args, **kwargs) -> dict[str, Any]: + def model_dump(self, *args, **kwargs) -> dict[str, Any]: """ Override the default dict method to exclude None values in the response """ kwargs.pop("exclude_none", None) - return super().dict(*args, exclude_none=True, **kwargs) + return super().model_dump(*args, exclude_none=True, **kwargs) pass diff --git a/invokeai/assets/__init__.py b/invokeai/assets/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/backend/image_util/txt2mask.py b/invokeai/backend/image_util/txt2mask.py index 12db54b0db..de0c6a1652 100644 --- a/invokeai/backend/image_util/txt2mask.py +++ b/invokeai/backend/image_util/txt2mask.py @@ -41,18 +41,18 @@ config = InvokeAIAppConfig.get_config() class SegmentedGrayscale(object): - def __init__(self, image: Image, heatmap: torch.Tensor): + def __init__(self, image: Image.Image, heatmap: torch.Tensor): self.heatmap = heatmap self.image = image - def to_grayscale(self, invert: bool = False) -> Image: + def to_grayscale(self, invert: bool = False) -> Image.Image: return self._rescale(Image.fromarray(np.uint8(255 - self.heatmap * 255 if invert else self.heatmap * 255))) - def to_mask(self, threshold: float = 0.5) -> Image: + def to_mask(self, threshold: float = 0.5) -> Image.Image: discrete_heatmap = self.heatmap.lt(threshold).int() return self._rescale(Image.fromarray(np.uint8(discrete_heatmap * 255), mode="L")) - def to_transparent(self, invert: bool = False) -> Image: + def to_transparent(self, invert: bool = False) -> Image.Image: 
transparent_image = self.image.copy() # For img2img, we want the selected regions to be transparent, # but to_grayscale() returns the opposite. Thus invert. @@ -61,7 +61,7 @@ class SegmentedGrayscale(object): return transparent_image # unscales and uncrops the 352x352 heatmap so that it matches the image again - def _rescale(self, heatmap: Image) -> Image: + def _rescale(self, heatmap: Image.Image) -> Image.Image: size = self.image.width if (self.image.width > self.image.height) else self.image.height resized_image = heatmap.resize((size, size), resample=Image.Resampling.LANCZOS) return resized_image.crop((0, 0, self.image.width, self.image.height)) @@ -82,7 +82,7 @@ class Txt2Mask(object): self.model = CLIPSegForImageSegmentation.from_pretrained(CLIPSEG_MODEL, cache_dir=config.cache_dir) @torch.no_grad() - def segment(self, image, prompt: str) -> SegmentedGrayscale: + def segment(self, image: Image.Image, prompt: str) -> SegmentedGrayscale: """ Given a prompt string such as "a bagel", tries to identify the object in the provided image and returns a SegmentedGrayscale object in which the brighter @@ -99,7 +99,7 @@ class Txt2Mask(object): heatmap = torch.sigmoid(outputs.logits) return SegmentedGrayscale(image, heatmap) - def _scale_and_crop(self, image: Image) -> Image: + def _scale_and_crop(self, image: Image.Image) -> Image.Image: scaled_image = Image.new("RGB", (CLIPSEG_SIZE, CLIPSEG_SIZE)) if image.width > image.height: # width is constraint scale = CLIPSEG_SIZE / image.width diff --git a/invokeai/backend/image_util/util.py b/invokeai/backend/image_util/util.py index bc7fa01e3b..7eceb9be82 100644 --- a/invokeai/backend/image_util/util.py +++ b/invokeai/backend/image_util/util.py @@ -9,7 +9,7 @@ class InitImageResizer: def __init__(self, Image): self.image = Image - def resize(self, width=None, height=None) -> Image: + def resize(self, width=None, height=None) -> Image.Image: """ Return a copy of the image resized to fit within a box width x height. 
The aspect ratio is diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index d4bcea64d0..59cf1260ba 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -793,7 +793,11 @@ def migrate_init_file(legacy_format: Path): old = legacy_parser.parse_args([f"@{str(legacy_format)}"]) new = InvokeAIAppConfig.get_config() - fields = [x for x, y in InvokeAIAppConfig.__fields__.items() if y.field_info.extra.get("category") != "DEPRECATED"] + fields = [ + x + for x, y in InvokeAIAppConfig.model_fields.items() + if (y.json_schema_extra.get("category", None) if y.json_schema_extra else None) != "DEPRECATED" + ] for attr in fields: if hasattr(old, attr): try: diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index bdc9a6c6bb..38a7361c85 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -236,13 +236,13 @@ import types from dataclasses import dataclass from pathlib import Path from shutil import move, rmtree -from typing import Callable, Dict, List, Literal, Optional, Set, Tuple, Union +from typing import Callable, Dict, List, Literal, Optional, Set, Tuple, Union, cast import torch import yaml from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field import invokeai.backend.util.logging as logger from invokeai.app.services.config import InvokeAIAppConfig @@ -294,6 +294,8 @@ class AddModelResult(BaseModel): base_model: BaseModelType = Field(description="The base model") config: ModelConfigBase = Field(description="The configuration of the model") + model_config = ConfigDict(protected_namespaces=()) + MAX_CACHE_SIZE = 6.0 # GB @@ -576,7 +578,7 @@ class ModelManager(object): """ model_key = self.create_key(model_name, base_model, model_type) if model_key in self.models: - return self.models[model_key].dict(exclude_defaults=True) + return self.models[model_key].model_dump(exclude_defaults=True) else: return None # TODO: None or empty dict on not found @@ -632,7 +634,7 @@ class ModelManager(object): continue model_dict = dict( - **model_config.dict(exclude_defaults=True), + **model_config.model_dump(exclude_defaults=True), # OpenAPIModelInfoBase model_name=cur_model_name, base_model=cur_base_model, @@ -900,14 +902,16 @@ class ModelManager(object): Write current configuration out to the indicated file. """ data_to_save = dict() - data_to_save["__metadata__"] = self.config_meta.dict() + data_to_save["__metadata__"] = self.config_meta.model_dump() for model_key, model_config in self.models.items(): model_name, base_model, model_type = self.parse_key(model_key) model_class = self._get_implementation(base_model, model_type) if model_class.save_to_config: # TODO: or exclude_unset better fits here? 
- data_to_save[model_key] = model_config.dict(exclude_defaults=True, exclude={"error"}) + data_to_save[model_key] = cast(BaseModel, model_config).model_dump( + exclude_defaults=True, exclude={"error"}, mode="json" + ) # alias for config file data_to_save[model_key]["format"] = data_to_save[model_key].pop("model_format") diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py index bf4b208395..0afd731032 100644 --- a/invokeai/backend/model_management/models/__init__.py +++ b/invokeai/backend/model_management/models/__init__.py @@ -2,7 +2,7 @@ import inspect from enum import Enum from typing import Literal, get_origin -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict, create_model from .base import ( # noqa: F401 BaseModelType, @@ -106,6 +106,8 @@ class OpenAPIModelInfoBase(BaseModel): base_model: BaseModelType model_type: ModelType + model_config = ConfigDict(protected_namespaces=()) + for base_model, models in MODEL_CLASSES.items(): for model_type, model_class in models.items(): @@ -121,17 +123,11 @@ for base_model, models in MODEL_CLASSES.items(): if openapi_cfg_name in vars(): continue - api_wrapper = type( + api_wrapper = create_model( openapi_cfg_name, - (cfg, OpenAPIModelInfoBase), - dict( - __annotations__=dict( - model_type=Literal[model_type.value], - ), - ), + __base__=(cfg, OpenAPIModelInfoBase), + model_type=(Literal[model_type], model_type), # type: ignore ) - - # globals()[openapi_cfg_name] = api_wrapper vars()[openapi_cfg_name] = api_wrapper OPENAPI_MODEL_CONFIGS.append(api_wrapper) diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py index 6e507735d4..f735e37189 100644 --- a/invokeai/backend/model_management/models/base.py +++ b/invokeai/backend/model_management/models/base.py @@ -19,7 +19,7 @@ from diffusers import logging as diffusers_logging from onnx import numpy_helper from onnxruntime import InferenceSession, SessionOptions, get_available_providers from picklescan.scanner import scan_file_path -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from transformers import logging as transformers_logging @@ -86,14 +86,21 @@ class ModelError(str, Enum): NotFound = "not_found" +def model_config_json_schema_extra(schema: dict[str, Any]) -> None: + if "required" not in schema: + schema["required"] = [] + schema["required"].append("model_type") + + class ModelConfigBase(BaseModel): path: str # or Path description: Optional[str] = Field(None) model_format: Optional[str] = Field(None) error: Optional[ModelError] = Field(None) - class Config: - use_enum_values = True + model_config = ConfigDict( + use_enum_values=True, protected_namespaces=(), json_schema_extra=model_config_json_schema_extra + ) class EmptyConfigLoader(ConfigMixin): diff --git a/invokeai/backend/model_management/models/ip_adapter.py b/invokeai/backend/model_management/models/ip_adapter.py index 63694af0c8..c60edd0abe 100644 --- a/invokeai/backend/model_management/models/ip_adapter.py +++ b/invokeai/backend/model_management/models/ip_adapter.py @@ -58,14 +58,16 @@ class IPAdapterModel(ModelBase): def get_model( self, - torch_dtype: Optional[torch.dtype], + torch_dtype: torch.dtype, child_type: Optional[SubModelType] = None, ) -> typing.Union[IPAdapter, IPAdapterPlus]: if child_type is not None: raise ValueError("There are no child models in an IP-Adapter model.") model = build_ip_adapter( - 
ip_adapter_ckpt_path=os.path.join(self.model_path, "ip_adapter.bin"), device="cpu", dtype=torch_dtype + ip_adapter_ckpt_path=os.path.join(self.model_path, "ip_adapter.bin"), + device=torch.device("cpu"), + dtype=torch_dtype, ) self.model_size = model.calc_size() diff --git a/invokeai/backend/model_management/seamless.py b/invokeai/backend/model_management/seamless.py index 7138f2e123..bfdf9e0c53 100644 --- a/invokeai/backend/model_management/seamless.py +++ b/invokeai/backend/model_management/seamless.py @@ -96,7 +96,7 @@ def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axe finally: for module, orig_conv_forward in to_restore: module._conv_forward = orig_conv_forward - if hasattr(m, "asymmetric_padding_mode"): - del m.asymmetric_padding_mode - if hasattr(m, "asymmetric_padding"): - del m.asymmetric_padding + if hasattr(module, "asymmetric_padding_mode"): + del module.asymmetric_padding_mode + if hasattr(module, "asymmetric_padding"): + del module.asymmetric_padding diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py index abef979b1c..b5ea40185a 100644 --- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py +++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py @@ -1,7 +1,8 @@ import math +from typing import Optional -import PIL import torch +from PIL import Image from torchvision.transforms.functional import InterpolationMode from torchvision.transforms.functional import resize as tv_resize @@ -11,7 +12,7 @@ class AttentionMapSaver: self.token_ids = token_ids self.latents_shape = latents_shape # self.collated_maps = #torch.zeros([len(token_ids), latents_shape[0], latents_shape[1]]) - self.collated_maps = {} + self.collated_maps: dict[str, torch.Tensor] = {} def clear_maps(self): self.collated_maps = {} @@ -38,9 +39,10 @@ class AttentionMapSaver: def write_maps_to_disk(self, path: str): pil_image = self.get_stacked_maps_image() - pil_image.save(path, "PNG") + if pil_image is not None: + pil_image.save(path, "PNG") - def get_stacked_maps_image(self) -> PIL.Image: + def get_stacked_maps_image(self) -> Optional[Image.Image]: """ Scale all collected attention maps to the same size, blend them together and return as an image. :return: An image containing a vertical stack of blended attention maps, one for each requested token. @@ -95,4 +97,4 @@ class AttentionMapSaver: return None merged_bytes = merged.mul(0xFF).byte() - return PIL.Image.fromarray(merged_bytes.numpy(), mode="L") + return Image.fromarray(merged_bytes.numpy(), mode="L") diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts index 79a09c628f..bd5422841f 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts @@ -151,7 +151,9 @@ export const addRequestedSingleImageDeletionListener = () => { if (wasImageDeleted) { dispatch( - api.util.invalidateTags([{ type: 'Board', id: imageDTO.board_id }]) + api.util.invalidateTags([ + { type: 'Board', id: imageDTO.board_id ?? 
'none' }, + ]) ); } }, diff --git a/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx b/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx index 7c85b3557e..5ea17f788c 100644 --- a/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx @@ -6,7 +6,7 @@ import { useMantineMultiSelectStyles } from 'mantine-theme/hooks/useMantineMulti import { KeyboardEvent, RefObject, memo, useCallback } from 'react'; type IAIMultiSelectProps = Omit & { - tooltip?: string; + tooltip?: string | null; inputRef?: RefObject; label?: string; }; diff --git a/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx b/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx index 39fe7ead3c..675314b421 100644 --- a/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx @@ -12,7 +12,7 @@ export type IAISelectDataType = { }; type IAISelectProps = Omit & { - tooltip?: string; + tooltip?: string | null; label?: string; inputRef?: RefObject; }; diff --git a/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx b/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx index 8cc08d2304..9541015b65 100644 --- a/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx @@ -10,7 +10,7 @@ export type IAISelectDataType = { }; export type IAISelectProps = Omit & { - tooltip?: string; + tooltip?: string | null; inputRef?: RefObject; label?: string; }; diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts b/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts index 32e24845ea..4c2cd31eca 100644 --- a/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts +++ b/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts @@ -39,7 +39,10 @@ export const dynamicPromptsSlice = createSlice({ promptsChanged: (state, action: PayloadAction) => { state.prompts = action.payload; }, - parsingErrorChanged: (state, action: PayloadAction) => { + parsingErrorChanged: ( + state, + action: PayloadAction + ) => { state.parsingError = action.payload; }, isErrorChanged: (state, action: PayloadAction) => { diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index f7ef848211..87c716bb81 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -10,7 +10,7 @@ import { } from 'features/parameters/types/parameterSchemas'; import i18n from 'i18next'; import { has, keyBy } from 'lodash-es'; -import { OpenAPIV3 } from 'openapi-types'; +import { OpenAPIV3_1 } from 'openapi-types'; import { RgbaColor } from 'react-colorful'; import { Node } from 'reactflow'; import { Graph, _InputField, _OutputField } from 'services/api/types'; @@ -791,9 +791,9 @@ export type IntegerInputFieldTemplate = InputFieldTemplateBase & { default: number; multipleOf?: number; maximum?: number; - exclusiveMaximum?: boolean; + exclusiveMaximum?: number; minimum?: number; - exclusiveMinimum?: boolean; + exclusiveMinimum?: number; }; export type IntegerCollectionInputFieldTemplate = InputFieldTemplateBase & { @@ -814,9 +814,9 @@ export type FloatInputFieldTemplate = 
InputFieldTemplateBase & { default: number; multipleOf?: number; maximum?: number; - exclusiveMaximum?: boolean; + exclusiveMaximum?: number; minimum?: number; - exclusiveMinimum?: boolean; + exclusiveMinimum?: number; }; export type FloatCollectionInputFieldTemplate = InputFieldTemplateBase & { @@ -1163,20 +1163,20 @@ export type TypeHints = { }; export type InvocationSchemaExtra = { - output: OpenAPIV3.ReferenceObject; // the output of the invocation + output: OpenAPIV3_1.ReferenceObject; // the output of the invocation title: string; category?: string; tags?: string[]; version?: string; properties: Omit< - NonNullable & + NonNullable & (_InputField | _OutputField), 'type' > & { - type: Omit & { + type: Omit & { default: AnyInvocationType; }; - use_cache: Omit & { + use_cache: Omit & { default: boolean; }; }; @@ -1187,17 +1187,17 @@ export type InvocationSchemaType = { }; export type InvocationBaseSchemaObject = Omit< - OpenAPIV3.BaseSchemaObject, + OpenAPIV3_1.BaseSchemaObject, 'title' | 'type' | 'properties' > & InvocationSchemaExtra; export type InvocationOutputSchemaObject = Omit< - OpenAPIV3.SchemaObject, + OpenAPIV3_1.SchemaObject, 'properties' > & { - properties: OpenAPIV3.SchemaObject['properties'] & { - type: Omit & { + properties: OpenAPIV3_1.SchemaObject['properties'] & { + type: Omit & { default: string; }; } & { @@ -1205,14 +1205,18 @@ export type InvocationOutputSchemaObject = Omit< }; }; -export type InvocationFieldSchema = OpenAPIV3.SchemaObject & _InputField; +export type InvocationFieldSchema = OpenAPIV3_1.SchemaObject & _InputField; + +export type OpenAPIV3_1SchemaOrRef = + | OpenAPIV3_1.ReferenceObject + | OpenAPIV3_1.SchemaObject; export interface ArraySchemaObject extends InvocationBaseSchemaObject { - type: OpenAPIV3.ArraySchemaObjectType; - items: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject; + type: OpenAPIV3_1.ArraySchemaObjectType; + items: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject; } export interface NonArraySchemaObject extends InvocationBaseSchemaObject { - type?: OpenAPIV3.NonArraySchemaObjectType; + type?: OpenAPIV3_1.NonArraySchemaObjectType; } export type InvocationSchemaObject = ( @@ -1221,41 +1225,41 @@ export type InvocationSchemaObject = ( ) & { class: 'invocation' }; export const isSchemaObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.SchemaObject => Boolean(obj && !('$ref' in obj)); + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.SchemaObject => Boolean(obj && !('$ref' in obj)); export const isArraySchemaObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.ArraySchemaObject => + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.ArraySchemaObject => Boolean(obj && !('$ref' in obj) && obj.type === 'array'); export const isNonArraySchemaObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.NonArraySchemaObject => + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.NonArraySchemaObject => Boolean(obj && !('$ref' in obj) && obj.type !== 'array'); export const isRefObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.ReferenceObject => Boolean(obj && '$ref' in obj); + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.ReferenceObject => Boolean(obj && '$ref' 
in obj); export const isInvocationSchemaObject = ( obj: - | OpenAPIV3.ReferenceObject - | OpenAPIV3.SchemaObject + | OpenAPIV3_1.ReferenceObject + | OpenAPIV3_1.SchemaObject | InvocationSchemaObject ): obj is InvocationSchemaObject => 'class' in obj && obj.class === 'invocation'; export const isInvocationOutputSchemaObject = ( obj: - | OpenAPIV3.ReferenceObject - | OpenAPIV3.SchemaObject + | OpenAPIV3_1.ReferenceObject + | OpenAPIV3_1.SchemaObject | InvocationOutputSchemaObject ): obj is InvocationOutputSchemaObject => 'class' in obj && obj.class === 'output'; export const isInvocationFieldSchema = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject ): obj is InvocationFieldSchema => !('$ref' in obj); export type InvocationEdgeExtra = { type: 'default' | 'collapsed' }; diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts index 1f7fe81620..3fd44207c0 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts @@ -1,5 +1,12 @@ -import { isBoolean, isInteger, isNumber, isString } from 'lodash-es'; -import { OpenAPIV3 } from 'openapi-types'; +import { + isArray, + isBoolean, + isInteger, + isNumber, + isString, + startCase, +} from 'lodash-es'; +import { OpenAPIV3_1 } from 'openapi-types'; import { COLLECTION_MAP, POLYMORPHIC_TYPES, @@ -72,6 +79,7 @@ import { T2IAdapterCollectionInputFieldTemplate, BoardInputFieldTemplate, InputFieldTemplate, + OpenAPIV3_1SchemaOrRef, } from '../types/types'; import { ControlField } from 'services/api/types'; @@ -90,7 +98,7 @@ export type BuildInputFieldArg = { * @example * refObjectToFieldType({ "$ref": "#/components/schemas/ImageField" }) --> 'ImageField' */ -export const refObjectToSchemaName = (refObject: OpenAPIV3.ReferenceObject) => +export const refObjectToSchemaName = (refObject: OpenAPIV3_1.ReferenceObject) => refObject.$ref.split('/').slice(-1)[0]; const buildIntegerInputFieldTemplate = ({ @@ -111,7 +119,10 @@ const buildIntegerInputFieldTemplate = ({ template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -119,7 +130,10 @@ const buildIntegerInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } @@ -144,7 +158,10 @@ const buildIntegerPolymorphicInputFieldTemplate = ({ template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -152,7 +169,10 @@ const buildIntegerPolymorphicInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } @@ -195,7 +215,10 @@ const buildFloatInputFieldTemplate = ({ template.maximum = 
schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -203,7 +226,10 @@ const buildFloatInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } @@ -227,7 +253,10 @@ const buildFloatPolymorphicInputFieldTemplate = ({ template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -235,7 +264,10 @@ const buildFloatPolymorphicInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } return template; @@ -872,84 +904,106 @@ const buildSchedulerInputFieldTemplate = ({ }; export const getFieldType = ( - schemaObject: InvocationFieldSchema + schemaObject: OpenAPIV3_1SchemaOrRef ): string | undefined => { - if (schemaObject?.ui_type) { - return schemaObject.ui_type; - } else if (!schemaObject.type) { - // if schemaObject has no type, then it should have one of allOf, anyOf, oneOf + if (isSchemaObject(schemaObject)) { + if (!schemaObject.type) { + // if schemaObject has no type, then it should have one of allOf, anyOf, oneOf - if (schemaObject.allOf) { - const allOf = schemaObject.allOf; - if (allOf && allOf[0] && isRefObject(allOf[0])) { - return refObjectToSchemaName(allOf[0]); - } - } else if (schemaObject.anyOf) { - const anyOf = schemaObject.anyOf; - /** - * Handle Polymorphic inputs, eg string | string[]. In OpenAPI, this is: - * - an `anyOf` with two items - * - one is an `ArraySchemaObject` with a single `SchemaObject or ReferenceObject` of type T in its `items` - * - the other is a `SchemaObject` or `ReferenceObject` of type T - * - * Any other cases we ignore. 
- */ - - let firstType: string | undefined; - let secondType: string | undefined; - - if (isArraySchemaObject(anyOf[0])) { - // first is array, second is not - const first = anyOf[0].items; - const second = anyOf[1]; - if (isRefObject(first) && isRefObject(second)) { - firstType = refObjectToSchemaName(first); - secondType = refObjectToSchemaName(second); - } else if ( - isNonArraySchemaObject(first) && - isNonArraySchemaObject(second) - ) { - firstType = first.type; - secondType = second.type; + if (schemaObject.allOf) { + const allOf = schemaObject.allOf; + if (allOf && allOf[0] && isRefObject(allOf[0])) { + return refObjectToSchemaName(allOf[0]); } - } else if (isArraySchemaObject(anyOf[1])) { - // first is not array, second is - const first = anyOf[0]; - const second = anyOf[1].items; - if (isRefObject(first) && isRefObject(second)) { - firstType = refObjectToSchemaName(first); - secondType = refObjectToSchemaName(second); - } else if ( - isNonArraySchemaObject(first) && - isNonArraySchemaObject(second) - ) { - firstType = first.type; - secondType = second.type; + } else if (schemaObject.anyOf) { + // ignore null types + const anyOf = schemaObject.anyOf.filter((i) => { + if (isSchemaObject(i)) { + if (i.type === 'null') { + return false; + } + } + return true; + }); + if (anyOf.length === 1) { + if (isRefObject(anyOf[0])) { + return refObjectToSchemaName(anyOf[0]); + } else if (isSchemaObject(anyOf[0])) { + return getFieldType(anyOf[0]); + } + } + /** + * Handle Polymorphic inputs, eg string | string[]. In OpenAPI, this is: + * - an `anyOf` with two items + * - one is an `ArraySchemaObject` with a single `SchemaObject or ReferenceObject` of type T in its `items` + * - the other is a `SchemaObject` or `ReferenceObject` of type T + * + * Any other cases we ignore. + */ + + let firstType: string | undefined; + let secondType: string | undefined; + + if (isArraySchemaObject(anyOf[0])) { + // first is array, second is not + const first = anyOf[0].items; + const second = anyOf[1]; + if (isRefObject(first) && isRefObject(second)) { + firstType = refObjectToSchemaName(first); + secondType = refObjectToSchemaName(second); + } else if ( + isNonArraySchemaObject(first) && + isNonArraySchemaObject(second) + ) { + firstType = first.type; + secondType = second.type; + } + } else if (isArraySchemaObject(anyOf[1])) { + // first is not array, second is + const first = anyOf[0]; + const second = anyOf[1].items; + if (isRefObject(first) && isRefObject(second)) { + firstType = refObjectToSchemaName(first); + secondType = refObjectToSchemaName(second); + } else if ( + isNonArraySchemaObject(first) && + isNonArraySchemaObject(second) + ) { + firstType = first.type; + secondType = second.type; + } + } + if (firstType === secondType && isPolymorphicItemType(firstType)) { + return SINGLE_TO_POLYMORPHIC_MAP[firstType]; } } - if (firstType === secondType && isPolymorphicItemType(firstType)) { - return SINGLE_TO_POLYMORPHIC_MAP[firstType]; + } else if (schemaObject.enum) { + return 'enum'; + } else if (schemaObject.type) { + if (schemaObject.type === 'number') { + // floats are "number" in OpenAPI, while ints are "integer" - we need to distinguish them + return 'float'; + } else if (schemaObject.type === 'array') { + const itemType = isSchemaObject(schemaObject.items) + ? 
schemaObject.items.type + : refObjectToSchemaName(schemaObject.items); + + if (isArray(itemType)) { + // This is a nested array, which we don't support + return; + } + + if (isCollectionItemType(itemType)) { + return COLLECTION_MAP[itemType]; + } + + return; + } else if (!isArray(schemaObject.type)) { + return schemaObject.type; } } - } else if (schemaObject.enum) { - return 'enum'; - } else if (schemaObject.type) { - if (schemaObject.type === 'number') { - // floats are "number" in OpenAPI, while ints are "integer" - we need to distinguish them - return 'float'; - } else if (schemaObject.type === 'array') { - const itemType = isSchemaObject(schemaObject.items) - ? schemaObject.items.type - : refObjectToSchemaName(schemaObject.items); - - if (isCollectionItemType(itemType)) { - return COLLECTION_MAP[itemType]; - } - - return; - } else { - return schemaObject.type; - } + } else if (isRefObject(schemaObject)) { + return refObjectToSchemaName(schemaObject); } return; }; @@ -1025,7 +1079,15 @@ export const buildInputFieldTemplate = ( name: string, fieldType: FieldType ) => { - const { input, ui_hidden, ui_component, ui_type, ui_order } = fieldSchema; + const { + input, + ui_hidden, + ui_component, + ui_type, + ui_order, + ui_choice_labels, + item_default, + } = fieldSchema; const extra = { // TODO: Can we support polymorphic inputs in the UI? @@ -1035,11 +1097,13 @@ export const buildInputFieldTemplate = ( ui_type, required: nodeSchema.required?.includes(name) ?? false, ui_order, + ui_choice_labels, + item_default, }; const baseField = { name, - title: fieldSchema.title ?? '', + title: fieldSchema.title ?? (name ? startCase(name) : ''), description: fieldSchema.description ?? '', fieldKind: 'input' as const, ...extra, diff --git a/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts b/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts index 69d8d9dd4c..93cd75dd75 100644 --- a/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts +++ b/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts @@ -1,7 +1,7 @@ import { logger } from 'app/logging/logger'; import { parseify } from 'common/util/serialize'; -import { reduce } from 'lodash-es'; -import { OpenAPIV3 } from 'openapi-types'; +import { reduce, startCase } from 'lodash-es'; +import { OpenAPIV3_1 } from 'openapi-types'; import { AnyInvocationType } from 'services/events/types'; import { FieldType, @@ -60,7 +60,7 @@ const isNotInDenylist = (schema: InvocationSchemaObject) => !invocationDenylist.includes(schema.properties.type.default); export const parseSchema = ( - openAPI: OpenAPIV3.Document, + openAPI: OpenAPIV3_1.Document, nodesAllowlistExtra: string[] | undefined = undefined, nodesDenylistExtra: string[] | undefined = undefined ): Record => { @@ -110,7 +110,7 @@ export const parseSchema = ( return inputsAccumulator; } - const fieldType = getFieldType(property); + const fieldType = property.ui_type ?? getFieldType(property); if (!isFieldType(fieldType)) { logger('nodes').warn( @@ -209,7 +209,7 @@ export const parseSchema = ( return outputsAccumulator; } - const fieldType = getFieldType(property); + const fieldType = property.ui_type ?? getFieldType(property); if (!isFieldType(fieldType)) { logger('nodes').warn( @@ -222,7 +222,8 @@ export const parseSchema = ( outputsAccumulator[propertyName] = { fieldKind: 'output', name: propertyName, - title: property.title ?? '', + title: + property.title ?? (propertyName ? startCase(propertyName) : ''), description: property.description ?? 
'', type: fieldType, ui_hidden: property.ui_hidden ?? false, diff --git a/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx b/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx index 9cc991335e..d441be4ecb 100644 --- a/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx +++ b/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx @@ -7,7 +7,7 @@ const QueueItemCard = ({ session_queue_item, label, }: { - session_queue_item?: components['schemas']['SessionQueueItem']; + session_queue_item?: components['schemas']['SessionQueueItem'] | null; label: string; }) => { return ( diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx index 6837a2e853..e5c68ba6cf 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx @@ -112,7 +112,7 @@ export default function MergeModelsPanel() { } }); - const mergeModelsInfo: MergeModelConfig = { + const mergeModelsInfo: MergeModelConfig['body'] = { model_names: models_names, merged_model_name: mergedModelName !== '' ? mergedModelName : models_names.join('-'), @@ -125,7 +125,7 @@ export default function MergeModelsPanel() { mergeModels({ base_model: baseModel, - body: mergeModelsInfo, + body: { body: mergeModelsInfo }, }) .unwrap() .then((_) => { diff --git a/invokeai/frontend/web/src/services/api/endpoints/images.ts b/invokeai/frontend/web/src/services/api/endpoints/images.ts index 3fa606d4b6..99a5fc5f50 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/images.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/images.ts @@ -520,7 +520,7 @@ export const imagesApi = api.injectEndpoints({ // assume all images are on the same board/category if (images[0]) { const categories = getCategories(images[0]); - const boardId = images[0].board_id; + const boardId = images[0].board_id ?? undefined; return [ { @@ -637,7 +637,7 @@ export const imagesApi = api.injectEndpoints({ // assume all images are on the same board/category if (images[0]) { const categories = getCategories(images[0]); - const boardId = images[0].board_id; + const boardId = images[0].board_id ?? 
undefined; return [ { type: 'ImageList', diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index e476217e6c..d4678dc03b 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,80 +5,6 @@ export type paths = { - "/api/v1/sessions/": { - /** - * List Sessions - * @deprecated - * @description Gets a list of sessions, optionally searching - */ - get: operations["list_sessions"]; - /** - * Create Session - * @deprecated - * @description Creates a new session, optionally initializing it with an invocation graph - */ - post: operations["create_session"]; - }; - "/api/v1/sessions/{session_id}": { - /** - * Get Session - * @deprecated - * @description Gets a session - */ - get: operations["get_session"]; - }; - "/api/v1/sessions/{session_id}/nodes": { - /** - * Add Node - * @deprecated - * @description Adds a node to the graph - */ - post: operations["add_node"]; - }; - "/api/v1/sessions/{session_id}/nodes/{node_path}": { - /** - * Update Node - * @deprecated - * @description Updates a node in the graph and removes all linked edges - */ - put: operations["update_node"]; - /** - * Delete Node - * @deprecated - * @description Deletes a node in the graph and removes all linked edges - */ - delete: operations["delete_node"]; - }; - "/api/v1/sessions/{session_id}/edges": { - /** - * Add Edge - * @deprecated - * @description Adds an edge to the graph - */ - post: operations["add_edge"]; - }; - "/api/v1/sessions/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}": { - /** - * Delete Edge - * @deprecated - * @description Deletes an edge from the graph - */ - delete: operations["delete_edge"]; - }; - "/api/v1/sessions/{session_id}/invoke": { - /** - * Invoke Session - * @deprecated - * @description Invokes a session - */ - put: operations["invoke_session"]; - /** - * Cancel Session Invoke - * @deprecated - * @description Invokes a session - */ - delete: operations["cancel_session_invoke"]; - }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -481,18 +407,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -506,9 +432,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default add - * @enum {string} + * @constant */ type: "add"; }; @@ -551,7 +477,6 @@ export type components = { }; /** * BaseModelType - * @description An enumeration. * @enum {string} */ BaseModelType: "any" | "sd-1" | "sd-2" | "sdxl" | "sdxl-refiner"; @@ -566,11 +491,8 @@ export type components = { * Data * @description The batch data collection. */ - data?: components["schemas"]["BatchDatum"][][]; - /** - * Graph - * @description The graph to initialize the session with - */ + data?: components["schemas"]["BatchDatum"][][] | null; + /** @description The graph to initialize the session with */ graph: components["schemas"]["Graph"]; /** * Runs @@ -655,18 +577,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Width * @description The width of the image @@ -687,20 +609,19 @@ export type components = { */ mode?: "RGB" | "RGBA"; /** - * Color * @description The color of the image * @default { - * "r": 0, - * "g": 0, + * "a": 255, * "b": 0, - * "a": 255 + * "g": 0, + * "r": 0 * } */ color?: components["schemas"]["ColorField"]; /** - * Type + * type * @default blank_image - * @enum {string} + * @constant */ type: "blank_image"; }; @@ -719,27 +640,21 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents A - * @description Latents tensor - */ + use_cache?: boolean | null; + /** @description Latents tensor */ latents_a?: components["schemas"]["LatentsField"]; - /** - * Latents B - * @description Latents tensor - */ + /** @description Latents tensor */ latents_b?: components["schemas"]["LatentsField"]; /** * Alpha @@ -748,9 +663,9 @@ export type components = { */ alpha?: number; /** - * Type + * type * @default lblend - * @enum {string} + * @constant */ type: "lblend"; }; @@ -760,12 +675,12 @@ export type components = { * Board Name * @description The board's new name. */ - board_name?: string; + board_name?: string | null; /** * Cover Image Name * @description The name of the board's new cover image. */ - cover_image_name?: string; + cover_image_name?: string | null; }; /** * BoardDTO @@ -796,12 +711,12 @@ export type components = { * Deleted At * @description The deleted timestamp of the board. */ - deleted_at?: string; + deleted_at?: string | null; /** * Cover Image Name * @description The name of the board's cover image. */ - cover_image_name?: string; + cover_image_name: string | null; /** * Image Count * @description The number of images in the board. 
@@ -872,14 +787,11 @@ export type components = { * Board Id * @description The board from which image should be downloaded from */ - board_id?: string; + board_id?: string | null; }; /** Body_enqueue_batch */ Body_enqueue_batch: { - /** - * Batch - * @description Batch to process - */ + /** @description Batch to process */ batch: components["schemas"]["Batch"]; /** * Prepend @@ -890,10 +802,7 @@ export type components = { }; /** Body_enqueue_graph */ Body_enqueue_graph: { - /** - * Graph - * @description The graph to enqueue - */ + /** @description The graph to enqueue */ graph: components["schemas"]["Graph"]; /** * Prepend @@ -912,41 +821,13 @@ export type components = { /** * Prediction Type * @description Prediction type for SDv2 checkpoints and rare SDv1 checkpoints - * @enum {string} */ - prediction_type?: "v_prediction" | "epsilon" | "sample"; + prediction_type?: ("v_prediction" | "epsilon" | "sample") | null; }; /** Body_merge_models */ Body_merge_models: { - /** - * Model Names - * @description model name - */ - model_names: string[]; - /** - * Merged Model Name - * @description Name of destination model - */ - merged_model_name: string; - /** - * Alpha - * @description Alpha weighting strength to apply to 2d and 3d models - * @default 0.5 - */ - alpha?: number; - /** @description Interpolation method */ - interp: components["schemas"]["MergeInterpolationMethod"]; - /** - * Force - * @description Force merging of models created with different versions of diffusers - * @default false - */ - force?: boolean; - /** - * Merge Dest Directory - * @description Save the merged model to the designated directory (with 'merged_model_name' appended) - */ - merge_dest_directory?: string; + /** @description Model configuration */ + body: components["schemas"]["MergeModelsBody"]; }; /** Body_parse_dynamicprompts */ Body_parse_dynamicprompts: { @@ -1023,27 +904,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of boolean values */ collection?: boolean[]; /** - * Type + * type * @default boolean_collection - * @enum {string} + * @constant */ type: "boolean_collection"; }; @@ -1058,9 +939,9 @@ export type components = { */ collection: boolean[]; /** - * Type + * type * @default boolean_collection_output - * @enum {string} + * @constant */ type: "boolean_collection_output"; }; @@ -1079,18 +960,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The boolean value @@ -1098,9 +979,9 @@ export type components = { */ value?: boolean; /** - * Type + * type * @default boolean - * @enum {string} + * @constant */ type: "boolean"; }; @@ -1115,9 +996,9 @@ export type components = { */ value: boolean; /** - * Type + * type * @default boolean_output - * @enum {string} + * @constant */ type: "boolean_output"; }; @@ -1128,19 +1009,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default clip_vision + * @constant */ model_type: "clip_vision"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** CLIPVisionModelField */ CLIPVisionModelField: { @@ -1167,27 +1049,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default infill_cv2 - * @enum {string} + * @constant */ type: "infill_cv2"; }; @@ -1217,29 +1096,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default canny_image_processor - * @enum {string} - */ - type: "canny_image_processor"; /** * Low Threshold * @description The low threshold of the Canny pixel gradient (0-255) @@ -1252,6 +1122,12 @@ export type components = { * @default 200 */ high_threshold?: number; + /** + * type + * @default canny_image_processor + * @constant + */ + type: "canny_image_processor"; }; /** * ClearResult @@ -1266,15 +1142,9 @@ export type components = { }; /** ClipField */ ClipField: { - /** - * Tokenizer - * @description Info to load tokenizer submodel - */ + /** @description Info to load tokenizer submodel */ tokenizer: components["schemas"]["ModelInfo"]; - /** - * Text Encoder - * @description Info to load text_encoder submodel - */ + /** @description Info to load text_encoder submodel */ text_encoder: components["schemas"]["ModelInfo"]; /** * Skipped Layers @@ -1302,18 +1172,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count @@ -1326,9 +1196,9 @@ export type components = { */ skipped_layers?: number; /** - * Type + * type * @default clip_skip - * @enum {string} + * @constant */ type: "clip_skip"; }; @@ -1341,11 +1211,11 @@ export type components = { * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default clip_skip_output - * @enum {string} + * @constant */ type: "clip_skip_output"; }; @@ -1364,18 +1234,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection Item * @description The item to collect (all inputs must be of the same type) @@ -1387,18 +1257,13 @@ export type components = { */ collection?: unknown[]; /** - * Type + * type * @default collect - * @enum {string} + * @constant */ type: "collect"; }; - /** - * CollectInvocationOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. 
- */ + /** CollectInvocationOutput */ CollectInvocationOutput: { /** * Collection @@ -1406,9 +1271,9 @@ export type components = { */ collection: unknown[]; /** - * Type + * type * @default collect_output - * @enum {string} + * @constant */ type: "collect_output"; }; @@ -1423,9 +1288,9 @@ export type components = { */ collection: components["schemas"]["ColorField"][]; /** - * Type + * type * @default color_collection_output - * @enum {string} + * @constant */ type: "color_collection_output"; }; @@ -1445,33 +1310,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to color-correct - */ + use_cache?: boolean | null; + /** @description The image to color-correct */ image?: components["schemas"]["ImageField"]; - /** - * Reference - * @description Reference image for color-correction - */ + /** @description Reference image for color-correction */ reference?: components["schemas"]["ImageField"]; - /** - * Mask - * @description Mask to use when applying color-correction - */ - mask?: components["schemas"]["ImageField"]; + /** @description Mask to use when applying color-correction */ + mask?: components["schemas"]["ImageField"] | null; /** * Mask Blur Radius * @description Mask blur radius @@ -1479,9 +1335,9 @@ export type components = { */ mask_blur_radius?: number; /** - * Type + * type * @default color_correct - * @enum {string} + * @constant */ type: "color_correct"; }; @@ -1526,33 +1382,32 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** - * Color * @description The color value * @default { - * "r": 0, - * "g": 0, + * "a": 255, * "b": 0, - * "a": 255 + * "g": 0, + * "r": 0 * } */ color?: components["schemas"]["ColorField"]; /** - * Type + * type * @default color - * @enum {string} + * @constant */ type: "color"; }; @@ -1571,50 +1426,44 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default color_map_image_processor - * @enum {string} - */ - type: "color_map_image_processor"; /** * Color Map Tile Size * @description Tile size * @default 64 */ color_map_tile_size?: number; + /** + * type + * @default color_map_image_processor + * @constant + */ + type: "color_map_image_processor"; }; /** * ColorOutput * @description Base class for nodes that output a single color */ ColorOutput: { - /** - * Color - * @description The output color - */ + /** @description The output color */ color: components["schemas"]["ColorField"]; /** - * Type + * type * @default color_output - * @enum {string} + * @constant */ type: "color_output"; }; @@ -1633,35 +1482,35 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor * @default */ prompt?: string; - /** - * Type - * @default compel - * @enum {string} - */ - type: "compel"; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ clip?: components["schemas"]["ClipField"]; + /** + * type + * @default compel + * @constant + */ + type: "compel"; }; /** * Conditioning Collection Primitive @@ -1678,27 +1527,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of conditioning tensors */ collection?: components["schemas"]["ConditioningField"][]; /** - * Type + * type * @default conditioning_collection - * @enum {string} + * @constant */ type: "conditioning_collection"; }; @@ -1713,9 +1562,9 @@ export type components = { */ collection: components["schemas"]["ConditioningField"][]; /** - * Type + * type * @default conditioning_collection_output - * @enum {string} + * @constant */ type: "conditioning_collection_output"; }; @@ -1745,27 +1594,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Conditioning - * @description Conditioning tensor - */ + use_cache?: boolean | null; + /** @description Conditioning tensor */ conditioning?: components["schemas"]["ConditioningField"]; /** - * Type + * type * @default conditioning - * @enum {string} + * @constant */ type: "conditioning"; }; @@ -1774,15 +1620,12 @@ export type components = { * @description Base class for nodes that output a single conditioning tensor */ ConditioningOutput: { - /** - * Conditioning - * @description Conditioning tensor - */ + /** @description Conditioning tensor */ conditioning: components["schemas"]["ConditioningField"]; /** - * Type + * type * @default conditioning_output - * @enum {string} + * @constant */ type: "conditioning_output"; }; @@ -1801,29 +1644,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default content_shuffle_image_processor - * @enum {string} - */ - type: "content_shuffle_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -1854,18 +1688,18 @@ export type components = { * @default 256 */ f?: number; + /** + * type + * @default content_shuffle_image_processor + * @constant + */ + type: "content_shuffle_image_processor"; }; /** ControlField */ ControlField: { - /** - * Image - * @description The control image - */ + /** @description The control image */ image: components["schemas"]["ImageField"]; - /** - * Control Model - * @description The ControlNet model to use - */ + /** @description The ControlNet model to use */ control_model: components["schemas"]["ControlNetModelField"]; /** * Control Weight @@ -1915,27 +1749,21 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The control image - */ + use_cache?: boolean | null; + /** @description The control image */ image?: components["schemas"]["ImageField"]; - /** - * Control Model - * @description ControlNet model to load - */ + /** @description ControlNet model to load */ control_model: components["schemas"]["ControlNetModelField"]; /** * Control Weight @@ -1970,9 +1798,9 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** - * Type + * type * @default controlnet - * @enum {string} + * @constant */ type: "controlnet"; }; @@ -1983,19 +1811,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default controlnet + * @constant */ model_type: "controlnet"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Config */ config: string; }; @@ -2006,19 +1835,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default controlnet + * @constant */ model_type: "controlnet"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * ControlNetModelField @@ -2038,15 +1868,12 @@ export type components = { * @description node output for ControlNet info */ ControlOutput: { - /** - * Control - * @description ControlNet(s) to apply - */ + /** @description ControlNet(s) to apply */ control: components["schemas"]["ControlField"]; /** - * Type + * type * @default control_output - * @enum {string} + * @constant */ type: "control_output"; }; @@ -2065,147 +1892,138 @@ export type components = { * Generation Mode * @description The generation mode that output this image */ - generation_mode?: string; + generation_mode?: string | null; /** * Created By * @description The name of the creator of the image */ - created_by?: string; + created_by: string | null; /** * Positive Prompt * @description The positive prompt parameter */ - positive_prompt?: string; + positive_prompt?: string | null; /** * Negative Prompt * @description The negative prompt parameter */ - negative_prompt?: string; + negative_prompt?: string | null; /** * Width * @description The width parameter */ - width?: number; + width?: number | null; /** * Height * @description The height parameter */ - height?: number; + height?: number | null; /** * Seed * @description The seed used for noise generation */ - seed?: number; + seed?: number | null; /** * Rand Device * @description The device used for random number generation */ - rand_device?: string; + rand_device?: string | null; /** * Cfg Scale * @description The classifier-free guidance scale parameter */ - cfg_scale?: number; + cfg_scale?: number | null; /** * Steps * @description The number of steps used for 
inference */ - steps?: number; + steps?: number | null; /** * Scheduler * @description The scheduler used for inference */ - scheduler?: string; + scheduler?: string | null; /** * Clip Skip * @description The number of skipped CLIP layers */ - clip_skip?: number; - /** - * Model - * @description The main model used for inference - */ - model?: components["schemas"]["MainModelField"]; + clip_skip?: number | null; + /** @description The main model used for inference */ + model?: components["schemas"]["MainModelField"] | null; /** * Controlnets * @description The ControlNets used for inference */ - controlnets?: components["schemas"]["ControlField"][]; + controlnets?: components["schemas"]["ControlField"][] | null; /** * Ipadapters * @description The IP Adapters used for inference */ - ipAdapters?: components["schemas"]["IPAdapterMetadataField"][]; + ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; /** * T2Iadapters * @description The IP Adapters used for inference */ - t2iAdapters?: components["schemas"]["T2IAdapterField"][]; + t2iAdapters?: components["schemas"]["T2IAdapterField"][] | null; /** * Loras * @description The LoRAs used for inference */ - loras?: components["schemas"]["LoRAMetadataField"][]; - /** - * Vae - * @description The VAE used for decoding, if the main model's default was not used - */ - vae?: components["schemas"]["VAEModelField"]; + loras?: components["schemas"]["LoRAMetadataField"][] | null; + /** @description The VAE used for decoding, if the main model's default was not used */ + vae?: components["schemas"]["VAEModelField"] | null; /** * Strength * @description The strength used for latents-to-latents */ - strength?: number; + strength?: number | null; /** * Init Image * @description The name of the initial image */ - init_image?: string; + init_image?: string | null; /** * Positive Style Prompt * @description The positive style prompt parameter */ - positive_style_prompt?: string; + positive_style_prompt?: string | null; /** * Negative Style Prompt * @description The negative style prompt parameter */ - negative_style_prompt?: string; - /** - * Refiner Model - * @description The SDXL Refiner model used - */ - refiner_model?: components["schemas"]["MainModelField"]; + negative_style_prompt?: string | null; + /** @description The SDXL Refiner model used */ + refiner_model?: components["schemas"]["MainModelField"] | null; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner */ - refiner_cfg_scale?: number; + refiner_cfg_scale?: number | null; /** * Refiner Steps * @description The number of steps used for the refiner */ - refiner_steps?: number; + refiner_steps?: number | null; /** * Refiner Scheduler * @description The scheduler used for the refiner */ - refiner_scheduler?: string; + refiner_scheduler?: string | null; /** * Refiner Positive Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_positive_aesthetic_score?: number; + refiner_positive_aesthetic_score?: number | null; /** * Refiner Negative Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_negative_aesthetic_score?: number; + refiner_negative_aesthetic_score?: number | null; /** * Refiner Start * @description The start value used for refiner denoising */ - refiner_start?: number; + refiner_start?: number | null; }; /** * Create Denoise Mask @@ -2222,32 +2040,23 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Vae - * @description VAE - */ + use_cache?: boolean | null; + /** @description VAE */ vae?: components["schemas"]["VaeField"]; - /** - * Image - * @description Image which will be masked - */ - image?: components["schemas"]["ImageField"]; - /** - * Mask - * @description The mask to use when pasting - */ + /** @description Image which will be masked */ + image?: components["schemas"]["ImageField"] | null; + /** @description The mask to use when pasting */ mask?: components["schemas"]["ImageField"]; /** * Tiled @@ -2258,21 +2067,17 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default true + * @default false */ fp32?: boolean; /** - * Type + * type * @default create_denoise_mask - * @enum {string} + * @constant */ type: "create_denoise_mask"; }; - /** - * CursorPaginatedResults[SessionQueueItemDTO] - * @description Cursor-paginated results - * Generic must be a Pydantic model - */ + /** CursorPaginatedResults[SessionQueueItemDTO] */ CursorPaginatedResults_SessionQueueItemDTO_: { /** * Limit @@ -2305,32 +2110,26 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to inpaint - */ + use_cache?: boolean | null; + /** @description The image to inpaint */ image?: components["schemas"]["ImageField"]; - /** - * Mask - * @description The mask to use when inpainting - */ + /** @description The mask to use when inpainting */ mask?: components["schemas"]["ImageField"]; /** - * Type + * type * @default cv_inpaint - * @enum {string} + * @constant */ type: "cv_inpaint"; }; @@ -2372,23 +2171,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Noise - * @description Noise tensor - */ - noise?: components["schemas"]["LatentsField"]; + use_cache?: boolean | null; + /** @description Positive conditioning tensor */ + positive_conditioning?: components["schemas"]["ConditioningField"]; + /** @description Negative conditioning tensor */ + negative_conditioning?: components["schemas"]["ConditioningField"]; + /** @description Noise tensor */ + noise?: components["schemas"]["LatentsField"] | null; /** * Steps * @description Number of steps to run @@ -2420,49 +2220,33 @@ export type components = { * @enum {string} */ scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; - /** Control */ - control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; - /** - * IP-Adapter - * @description IP-Adapter to apply - */ - ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][]; - /** - * T2I-Adapter - * @description T2I-Adapter(s) to apply - */ - t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][]; - /** - * Latents - * @description Latents tensor - */ - latents?: components["schemas"]["LatentsField"]; - /** - * Denoise Mask - * @description The mask to use for the operation - */ - denoise_mask?: components["schemas"]["DenoiseMaskField"]; - /** - * Type - * @default denoise_latents - * @enum {string} - */ - type: "denoise_latents"; - /** - * Positive Conditioning - * @description Positive conditioning tensor - */ - positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning tensor - */ - negative_conditioning?: components["schemas"]["ConditioningField"]; /** * UNet * @description UNet (scheduler, LoRAs) */ unet?: components["schemas"]["UNetField"]; + /** Control */ + control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; + /** + * IP-Adapter + * @description IP-Adapter to apply + */ + ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + /** + * T2I-Adapter + * @description T2I-Adapter(s) to apply + */ + t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; + /** @description Latents tensor */ + latents?: components["schemas"]["LatentsField"] | null; + /** @description The mask to use for the operation */ + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + /** + * type + * @default denoise_latents + * @constant + */ + type: "denoise_latents"; }; /** * DenoiseMaskField @@ -2478,22 +2262,19 @@ export type components = { * Masked Latents Name * @description The name of the masked image latents */ - masked_latents_name?: string; + masked_latents_name: string | null; }; /** * DenoiseMaskOutput * @description Base class for nodes that output a single image */ DenoiseMaskOutput: { - /** - * Denoise Mask - * @description Mask for denoise model run - */ + /** @description 
Mask for denoise model run */ denoise_mask: components["schemas"]["DenoiseMaskField"]; /** - * Type + * type * @default denoise_mask_output - * @enum {string} + * @constant */ type: "denoise_mask_output"; }; @@ -2512,18 +2293,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -2537,9 +2318,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default div - * @enum {string} + * @constant */ type: "div"; }; @@ -2558,18 +2339,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description The prompt to parse with dynamicprompts @@ -2588,9 +2369,9 @@ export type components = { */ combinatorial?: boolean; /** - * Type + * type * @default dynamic_prompt - * @enum {string} + * @constant */ type: "dynamic_prompt"; }; @@ -2599,7 +2380,7 @@ export type components = { /** Prompts */ prompts: string[]; /** Error */ - error?: string; + error?: string | null; }; /** * Upscale (RealESRGAN) @@ -2616,22 +2397,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The input image - */ + use_cache?: boolean | null; + /** @description The input image */ image?: components["schemas"]["ImageField"]; /** * Model Name @@ -2647,23 +2425,17 @@ export type components = { */ tile_size?: number; /** - * Type + * type * @default esrgan - * @enum {string} + * @constant */ type: "esrgan"; }; /** Edge */ Edge: { - /** - * Source - * @description The connection for the edge's from node and field - */ + /** @description The connection for the edge's from node and field */ source: components["schemas"]["EdgeConnection"]; - /** - * Destination - * @description The connection for the edge's to node and field - */ + /** @description The connection for the edge's to node and field */ destination: components["schemas"]["EdgeConnection"]; }; /** EdgeConnection */ @@ -2696,10 +2468,7 @@ export type components = { * @description The total number of queue items requested to be enqueued */ requested: number; - /** - * Batch - * @description The batch that was enqueued - */ + /** @description The batch that was enqueued */ batch: components["schemas"]["Batch"]; /** * Priority @@ -2719,20 +2488,14 @@ export type components = { * @description The total number of queue items requested to be enqueued */ requested: number; - /** - * Batch - * @description The batch that was enqueued - */ + /** @description The batch that was enqueued */ batch: components["schemas"]["Batch"]; /** * Priority * @description The priority of the enqueued batch */ priority: number; - /** - * Queue Item - * @description The queue item that was enqueued - */ + /** @description The queue item that was enqueued */ queue_item: components["schemas"]["SessionQueueItemDTO"]; }; /** @@ -2750,22 +2513,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description Image to face detect - */ + use_cache?: boolean | null; + /** @description Image to face detect */ image?: components["schemas"]["ImageField"]; /** * Minimum Confidence @@ -2780,9 +2540,9 @@ export type components = { */ chunk?: boolean; /** - * Type + * type * @default face_identifier - * @enum {string} + * @constant */ type: "face_identifier"; }; @@ -2801,22 +2561,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description Image to face detect - */ + use_cache?: boolean | null; + /** @description Image to face detect */ image?: components["schemas"]["ImageField"]; /** * Face Ids @@ -2855,9 +2612,9 @@ export type components = { */ invert_mask?: boolean; /** - * Type + * type * @default face_mask_detection - * @enum {string} + * @constant */ type: "face_mask_detection"; }; @@ -2866,10 +2623,7 @@ export type components = { * @description Base class for FaceMask output */ FaceMaskOutput: { - /** - * Image - * @description The output image - */ + /** @description The output image */ image: components["schemas"]["ImageField"]; /** * Width @@ -2882,15 +2636,12 @@ export type components = { */ height: number; /** - * Type + * type * @default face_mask_output - * @enum {string} + * @constant */ type: "face_mask_output"; - /** - * Mask - * @description The output mask - */ + /** @description The output mask */ mask: components["schemas"]["ImageField"]; }; /** @@ -2908,22 +2659,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description Image for face detection - */ + use_cache?: boolean | null; + /** @description Image for face detection */ image?: components["schemas"]["ImageField"]; /** * Face Id @@ -2962,9 +2710,9 @@ export type components = { */ chunk?: boolean; /** - * Type + * type * @default face_off - * @enum {string} + * @constant */ type: "face_off"; }; @@ -2973,10 +2721,7 @@ export type components = { * @description Base class for FaceOff Output */ FaceOffOutput: { - /** - * Image - * @description The output image - */ + /** @description The output image */ image: components["schemas"]["ImageField"]; /** * Width @@ -2989,15 +2734,12 @@ export type components = { */ height: number; /** - * Type + * type * @default face_off_output - * @enum {string} + * @constant */ type: "face_off_output"; - /** - * Mask - * @description The output mask - */ + /** @description The output mask */ mask: components["schemas"]["ImageField"]; /** * X @@ -3025,27 +2767,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of float values */ collection?: number[]; /** - * Type + * type * @default float_collection - * @enum {string} + * @constant */ type: "float_collection"; }; @@ -3060,9 +2802,9 @@ export type components = { */ collection: number[]; /** - * Type + * type * @default float_collection_output - * @enum {string} + * @constant */ type: "float_collection_output"; }; @@ -3081,18 +2823,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The float value @@ -3100,9 +2842,9 @@ export type components = { */ value?: number; /** - * Type + * type * @default float - * @enum {string} + * @constant */ type: "float"; }; @@ -3121,18 +2863,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Start * @description The first value of the range @@ -3152,9 +2894,9 @@ export type components = { */ steps?: number; /** - * Type + * type * @default float_range - * @enum {string} + * @constant */ type: "float_range"; }; @@ -3173,18 +2915,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Operation * @description The operation to perform @@ -3205,9 +2947,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default float_math - * @enum {string} + * @constant */ type: "float_math"; }; @@ -3222,9 +2964,9 @@ export type components = { */ value: number; /** - * Type + * type * @default float_output - * @enum {string} + * @constant */ type: "float_output"; }; @@ -3243,18 +2985,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The value to round @@ -3275,9 +3017,9 @@ export type components = { */ method?: "Nearest" | "Floor" | "Ceiling" | "Truncate"; /** - * Type + * type * @default float_to_int - * @enum {string} + * @constant */ type: "float_to_int"; }; @@ -3293,7 +3035,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | 
components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; + [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | 
components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["RoundInvocation"] | 
components["schemas"]["IntegerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MainModelLoaderInvocation"]; }; /** * Edges @@ -3311,15 +3053,9 @@ export type components = { * @description The id of the execution state */ id: string; - /** - * Graph - * @description The graph being executed - */ + /** @description The graph being executed */ graph: components["schemas"]["Graph"]; - /** - * Execution Graph - * @description The expanded graph of activated and executed nodes - */ + /** @description The expanded graph of activated and executed nodes */ execution_graph: components["schemas"]["Graph"]; /** * Executed @@ -3336,7 +3072,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["T2IAdapterOutput"] | 
components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["String2Output"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"]; + [key: string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SchedulerOutput"]; }; /** * Errors @@ -3375,41 +3111,33 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Graph - * @description The graph to run - */ + use_cache?: boolean | null; + /** @description The graph to run */ graph?: components["schemas"]["Graph"]; /** - * Type + * type * @default graph - * @enum {string} + * @constant */ type: "graph"; }; - /** - * GraphInvocationOutput - * @description Base class for all invocation outputs. 
- * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. - */ + /** GraphInvocationOutput */ GraphInvocationOutput: { /** - * Type + * type * @default graph_output - * @enum {string} + * @constant */ type: "graph_output"; }; @@ -3433,29 +3161,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default hed_image_processor - * @enum {string} - */ - type: "hed_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -3474,23 +3193,20 @@ export type components = { * @default false */ scribble?: boolean; + /** + * type + * @default hed_image_processor + * @constant + */ + type: "hed_image_processor"; }; /** IPAdapterField */ IPAdapterField: { - /** - * Image - * @description The IP-Adapter image prompt. - */ + /** @description The IP-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** - * Ip Adapter Model - * @description The IP-Adapter model to use. - */ + /** @description The IP-Adapter model to use. */ ip_adapter_model: components["schemas"]["IPAdapterModelField"]; - /** - * Image Encoder Model - * @description The name of the CLIP image encoder model. - */ + /** @description The name of the CLIP image encoder model. */ image_encoder_model: components["schemas"]["CLIPVisionModelField"]; /** * Weight @@ -3526,22 +3242,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The IP-Adapter image prompt. - */ + use_cache?: boolean | null; + /** @description The IP-Adapter image prompt. */ image?: components["schemas"]["ImageField"]; /** * IP-Adapter Model @@ -3567,23 +3280,17 @@ export type components = { */ end_step_percent?: number; /** - * Type + * type * @default ip_adapter - * @enum {string} + * @constant */ type: "ip_adapter"; }; /** IPAdapterMetadataField */ IPAdapterMetadataField: { - /** - * Image - * @description The IP-Adapter image prompt. - */ + /** @description The IP-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** - * Ip Adapter Model - * @description The IP-Adapter model to use. - */ + /** @description The IP-Adapter model to use. 
*/ ip_adapter_model: components["schemas"]["IPAdapterModelField"]; /** * Weight @@ -3620,26 +3327,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default ip_adapter + * @constant */ model_type: "ip_adapter"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "invokeai"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; - /** - * IPAdapterOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. - */ + /** IPAdapterOutput */ IPAdapterOutput: { /** * IP-Adapter @@ -3647,9 +3350,9 @@ export type components = { */ ip_adapter: components["schemas"]["IPAdapterField"]; /** - * Type + * type * @default ip_adapter_output - * @enum {string} + * @constant */ type: "ip_adapter_output"; }; @@ -3668,22 +3371,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to blur - */ + use_cache?: boolean | null; + /** @description The image to blur */ image?: components["schemas"]["ImageField"]; /** * Radius @@ -3699,9 +3399,9 @@ export type components = { */ blur_type?: "gaussian" | "box"; /** - * Type + * type * @default img_blur - * @enum {string} + * @constant */ type: "img_blur"; }; @@ -3732,22 +3432,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to get the channel from - */ + use_cache?: boolean | null; + /** @description The image to get the channel from */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -3757,9 +3454,9 @@ export type components = { */ channel?: "A" | "R" | "G" | "B"; /** - * Type + * type * @default img_chan - * @enum {string} + * @constant */ type: "img_chan"; }; @@ -3778,22 +3475,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to adjust - */ + use_cache?: boolean | null; + /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -3814,9 +3508,9 @@ export type components = { */ invert_channel?: boolean; /** - * Type + * type * @default img_channel_multiply - * @enum {string} + * @constant */ type: "img_channel_multiply"; }; @@ -3835,22 +3529,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to adjust - */ + use_cache?: boolean | null; + /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -3865,9 +3556,9 @@ export type components = { */ offset?: number; /** - * Type + * type * @default img_channel_offset - * @enum {string} + * @constant */ type: "img_channel_offset"; }; @@ -3886,27 +3577,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of image values */ collection?: components["schemas"]["ImageField"][]; /** - * Type + * type * @default image_collection - * @enum {string} + * @constant */ type: "image_collection"; }; @@ -3921,9 +3612,9 @@ export type components = { */ collection: components["schemas"]["ImageField"][]; /** - * Type + * type * @default image_collection_output - * @enum {string} + * @constant */ type: "image_collection_output"; }; @@ -3942,22 +3633,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to convert - */ + use_cache?: boolean | null; + /** @description The image to convert */ image?: components["schemas"]["ImageField"]; /** * Mode @@ -3967,9 +3655,9 @@ export type components = { */ mode?: "L" | "RGB" | "RGBA" | "CMYK" | "YCbCr" | "LAB" | "HSV" | "I" | "F"; /** - * Type + * type * @default img_conv - * @enum {string} + * @constant */ type: "img_conv"; }; @@ -3988,22 +3676,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to crop - */ + use_cache?: boolean | null; + /** @description The image to crop */ image?: components["schemas"]["ImageField"]; /** * X @@ -4030,9 +3715,9 @@ export type components = { */ height?: number; /** - * Type + * type * @default img_crop - * @enum {string} + * @constant */ type: "img_crop"; }; @@ -4084,7 +3769,7 @@ export type components = { * Deleted At * @description The deleted timestamp of the image. */ - deleted_at?: string; + deleted_at?: string | null; /** * Is Intermediate * @description Whether this is an intermediate image. @@ -4094,12 +3779,12 @@ export type components = { * Session Id * @description The session ID that generated this image, if it is a generated image. */ - session_id?: string; + session_id?: string | null; /** * Node Id * @description The node ID that generated this image, if it is a generated image. */ - node_id?: string; + node_id?: string | null; /** * Starred * @description Whether this image is starred. @@ -4109,7 +3794,7 @@ export type components = { * Board Id * @description The id of the board the image belongs to, if one exists. */ - board_id?: string; + board_id?: string | null; }; /** * ImageField @@ -4137,22 +3822,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to adjust - */ + use_cache?: boolean | null; + /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** * Hue @@ -4161,9 +3843,9 @@ export type components = { */ hue?: number; /** - * Type + * type * @default img_hue_adjust - * @enum {string} + * @constant */ type: "img_hue_adjust"; }; @@ -4182,22 +3864,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to lerp - */ + use_cache?: boolean | null; + /** @description The image to lerp */ image?: components["schemas"]["ImageField"]; /** * Min @@ -4212,9 +3891,9 @@ export type components = { */ max?: number; /** - * Type + * type * @default img_ilerp - * @enum {string} + * @constant */ type: "img_ilerp"; }; @@ -4233,27 +3912,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to load - */ + use_cache?: boolean | null; + /** @description The image to load */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default image - * @enum {string} + * @constant */ type: "image"; }; @@ -4272,22 +3948,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to lerp - */ + use_cache?: boolean | null; + /** @description The image to lerp */ image?: components["schemas"]["ImageField"]; /** * Min @@ -4302,9 +3975,9 @@ export type components = { */ max?: number; /** - * Type + * type * @default img_lerp - * @enum {string} + * @constant */ type: "img_lerp"; }; @@ -4317,12 +3990,12 @@ export type components = { * Metadata * @description The image's core metadata, if it was created in the Linear or Canvas UI */ - metadata?: Record; + metadata?: Record | null; /** * Graph * @description The graph that created the image */ - graph?: Record; + graph?: Record | null; }; /** * Multiply Images @@ -4339,32 +4012,26 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image1 - * @description The first image to multiply - */ + use_cache?: boolean | null; + /** @description The first image to multiply */ image1?: components["schemas"]["ImageField"]; - /** - * Image2 - * @description The second image to multiply - */ + /** @description The second image to multiply */ image2?: components["schemas"]["ImageField"]; /** - * Type + * type * @default img_mul - * @enum {string} + * @constant */ type: "img_mul"; }; @@ -4383,44 +4050,35 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; + /** @description The image to check */ + image?: components["schemas"]["ImageField"]; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default img_nsfw - * @enum {string} + * @constant */ type: "img_nsfw"; - /** - * Image - * @description The image to check - */ - image?: components["schemas"]["ImageField"]; }; /** * ImageOutput * @description Base class for nodes that output a single image */ ImageOutput: { - /** - * Image - * @description The output image - */ + /** @description The output image */ image: components["schemas"]["ImageField"]; /** * Width @@ -4433,9 +4091,9 @@ export type components = { */ height: number; /** - * Type + * type * @default image_output - * @enum {string} + * @constant */ type: "image_output"; }; @@ -4454,33 +4112,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Base Image - * @description The base image - */ + use_cache?: boolean | null; + /** @description The base image */ base_image?: components["schemas"]["ImageField"]; - /** - * Image - * @description The image to paste - */ + /** @description The image to paste */ image?: components["schemas"]["ImageField"]; - /** - * Mask - * @description The mask to use when pasting - */ - mask?: components["schemas"]["ImageField"]; + /** @description The mask to use when pasting */ + mask?: components["schemas"]["ImageField"] | null; /** * X * @description The left x coordinate at which to paste the image @@ -4500,51 +4149,12 @@ export type components = { */ crop?: boolean; /** - * Type + * type * @default img_paste - * @enum {string} + * @constant */ type: "img_paste"; }; - /** - * Base Image Processor - * @description Base class for invocations that preprocess images for ControlNet - */ - ImageProcessorInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ - image?: components["schemas"]["ImageField"]; - /** - * Type - * @default image_processor - * @enum {string} - */ - type: "image_processor"; - }; /** * ImageRecordChanges * @description A set of changes to apply to an image record. 
@@ -4557,22 +4167,23 @@ export type components = { */ ImageRecordChanges: { /** @description The image's new category. */ - image_category?: components["schemas"]["ImageCategory"]; + image_category?: components["schemas"]["ImageCategory"] | null; /** * Session Id * @description The image's new session ID. */ - session_id?: string; + session_id?: string | null; /** * Is Intermediate * @description The image's new `is_intermediate` flag. */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Starred * @description The image's new `starred` state */ - starred?: boolean; + starred?: boolean | null; + [key: string]: unknown; }; /** * Resize Image @@ -4589,22 +4200,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to resize - */ + use_cache?: boolean | null; + /** @description The image to resize */ image?: components["schemas"]["ImageField"]; /** * Width @@ -4625,15 +4233,12 @@ export type components = { * @enum {string} */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default img_resize - * @enum {string} + * @constant */ type: "img_resize"; }; @@ -4652,22 +4257,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to scale - */ + use_cache?: boolean | null; + /** @description The image to scale */ image?: components["schemas"]["ImageField"]; /** * Scale Factor @@ -4683,9 +4285,9 @@ export type components = { */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; /** - * Type + * type * @default img_scale - * @enum {string} + * @constant */ type: "img_scale"; }; @@ -4704,27 +4306,21 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to encode - */ + use_cache?: boolean | null; + /** @description The image to encode */ image?: components["schemas"]["ImageField"]; - /** - * Vae - * @description VAE - */ + /** @description VAE */ vae?: components["schemas"]["VaeField"]; /** * Tiled @@ -4735,13 +4331,13 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default true + * @default false */ fp32?: boolean; /** - * Type + * type * @default i2l - * @enum {string} + * @constant */ type: "i2l"; }; @@ -4781,22 +4377,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to check - */ + use_cache?: boolean | null; + /** @description The image to check */ image?: components["schemas"]["ImageField"]; /** * Text @@ -4804,15 +4397,12 @@ export type components = { * @default InvokeAI */ text?: string; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default img_watermark - * @enum {string} + * @constant */ type: "img_watermark"; }; @@ -4822,7 +4412,7 @@ export type components = { * Response * @description If defined, the message to display to the user when images begin downloading */ - response?: string; + response: string | null; }; /** ImagesUpdatedFromListResult */ ImagesUpdatedFromListResult: { @@ -4847,38 +4437,34 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** - * Color * @description The color to use to infill * @default { - * "r": 127, - * "g": 127, + * "a": 255, * "b": 127, - * "a": 255 + * "g": 127, + * "r": 127 * } */ color?: components["schemas"]["ColorField"]; /** - * Type + * type * @default infill_rgba - * @enum {string} + * @constant */ type: "infill_rgba"; }; @@ -4897,22 +4483,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** * Downscale @@ -4928,9 +4511,9 @@ export type components = { */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; /** - * Type + * type * @default infill_patchmatch - * @enum {string} + * @constant */ type: "infill_patchmatch"; }; @@ -4949,22 +4532,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** * Tile Size @@ -4978,9 +4558,9 @@ export type components = { */ seed?: number; /** - * Type + * type * @default infill_tile - * @enum {string} + * @constant */ type: "infill_tile"; }; @@ -4999,27 +4579,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of integer values */ collection?: number[]; /** - * Type + * type * @default integer_collection - * @enum {string} + * @constant */ type: "integer_collection"; }; @@ -5034,9 +4614,9 @@ export type components = { */ collection: number[]; /** - * Type + * type * @default integer_collection_output - * @enum {string} + * @constant */ type: "integer_collection_output"; }; @@ -5055,18 +4635,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The integer value @@ -5074,9 +4654,9 @@ export type components = { */ value?: number; /** - * Type + * type * @default integer - * @enum {string} + * @constant */ type: "integer"; }; @@ -5095,18 +4675,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Operation * @description The operation to perform @@ -5127,9 +4707,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default integer_math - * @enum {string} + * @constant */ type: "integer_math"; }; @@ -5144,9 +4724,9 @@ export type components = { */ value: number; /** - * Type + * type * @default integer_output - * @enum {string} + * @constant */ type: "integer_output"; }; @@ -5193,18 +4773,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The list of items to iterate over @@ -5217,9 +4797,9 @@ export type components = { */ index?: number; /** - * Type + * type * @default iterate - * @enum {string} + * @constant */ type: "iterate"; }; @@ -5232,11 +4812,11 @@ export type components = { * Collection Item * @description The item being iterated over */ - item?: unknown; + item: unknown; /** - * Type + * type * @default iterate_output - * @enum {string} + * @constant */ type: "iterate_output"; }; @@ -5255,27 +4835,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default infill_lama - * @enum {string} + * @constant */ type: "infill_lama"; }; @@ -5294,27 +4871,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of latents tensors */ collection?: components["schemas"]["LatentsField"][]; /** - * Type + * type * @default latents_collection - * @enum {string} + * @constant */ type: "latents_collection"; }; @@ -5329,9 +4906,9 @@ export type components = { */ collection: components["schemas"]["LatentsField"][]; /** - * Type + * type * @default latents_collection_output - * @enum {string} + * @constant */ type: "latents_collection_output"; }; @@ -5349,7 +4926,7 @@ export type components = { * Seed * @description Seed used to generate this latents */ - seed?: number; + seed?: number | null; }; /** * Latents Primitive @@ -5366,27 +4943,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description The latents tensor - */ + use_cache?: boolean | null; + /** @description The latents tensor */ latents?: components["schemas"]["LatentsField"]; /** - * Type + * type * @default latents - * @enum {string} + * @constant */ type: "latents"; }; @@ -5395,10 +4969,7 @@ export type components = { * @description Base class for nodes that output a single latents tensor */ LatentsOutput: { - /** - * Latents - * @description Latents tensor - */ + /** @description Latents tensor */ latents: components["schemas"]["LatentsField"]; /** * Width @@ -5411,9 +4982,9 @@ export type components = { */ height: number; /** - * Type + * type * @default latents_output - * @enum {string} + * @constant */ type: "latents_output"; }; @@ -5432,18 +5003,22 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; + /** @description Latents tensor */ + latents?: components["schemas"]["LatentsField"]; + /** @description VAE */ + vae?: components["schemas"]["VaeField"]; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -5453,30 +5028,17 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default true + * @default false */ fp32?: boolean; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default l2i - * @enum {string} + * @constant */ type: "l2i"; - /** - * Latents - * @description Latents tensor - */ - latents?: components["schemas"]["LatentsField"]; - /** - * Vae - * @description VAE - */ - vae?: components["schemas"]["VaeField"]; }; /** * Leres (Depth) Processor @@ -5493,29 +5055,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default leres_image_processor - * @enum {string} - */ - type: "leres_image_processor"; /** * Thr A * @description Leres parameter `thr_a` @@ -5546,6 +5099,12 @@ export type components = { * @default 512 */ image_resolution?: number; + /** + * type + * @default leres_image_processor + * @constant + */ + type: "leres_image_processor"; }; /** * Lineart Anime Processor @@ -5562,29 +5121,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default lineart_anime_image_processor - * @enum {string} - */ - type: "lineart_anime_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -5597,6 +5147,12 @@ export type components = { * @default 512 */ image_resolution?: number; + /** + * type + * @default lineart_anime_image_processor + * @constant + */ + type: "lineart_anime_image_processor"; }; /** * Lineart Processor @@ -5613,29 +5169,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default lineart_image_processor - * @enum {string} - */ - type: "lineart_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -5654,16 +5201,19 @@ export type components = { * @default false */ coarse?: boolean; + /** + * type + * @default lineart_image_processor + * @constant + */ + type: "lineart_image_processor"; }; /** * LoRAMetadataField * @description LoRA metadata for an image generated in InvokeAI. */ LoRAMetadataField: { - /** - * Lora - * @description The LoRA model - */ + /** @description The LoRA model */ lora: components["schemas"]["LoRAModelField"]; /** * Weight @@ -5678,15 +5228,16 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default lora + * @constant */ model_type: "lora"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; model_format: components["schemas"]["LoRAModelFormat"]; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * LoRAModelField @@ -5703,13 +5254,11 @@ export type components = { }; /** * LoRAModelFormat - * @description An enumeration. * @enum {string} */ LoRAModelFormat: "lycoris" | "diffusers"; /** * LogLevel - * @description An enumeration. * @enum {integer} */ LogLevel: 0 | 10 | 20 | 30 | 40 | 50; @@ -5725,7 +5274,7 @@ export type components = { /** @description Info to load submodel */ model_type: components["schemas"]["ModelType"]; /** @description Info to load submodel */ - submodel?: components["schemas"]["SubModelType"]; + submodel?: components["schemas"]["SubModelType"] | null; /** * Weight * @description Lora's weight which to use when apply to model @@ -5747,18 +5296,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * LoRA * @description LoRA model to load @@ -5774,16 +5323,16 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default lora_loader - * @enum {string} + * @constant */ type: "lora_loader"; }; @@ -5796,16 +5345,16 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default lora_loader_output - * @enum {string} + * @constant */ type: "lora_loader_output"; }; @@ -5839,27 +5388,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description Main model (UNet, VAE, CLIP) to load - */ + use_cache?: boolean | null; + /** @description Main model (UNet, VAE, CLIP) to load */ model: components["schemas"]["MainModelField"]; /** - * Type + * type * @default main_model_loader - * @enum {string} + * @constant */ type: "main_model_loader"; }; @@ -5878,32 +5424,26 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Mask1 - * @description The first mask to combine - */ + use_cache?: boolean | null; + /** @description The first mask to combine */ mask1?: components["schemas"]["ImageField"]; - /** - * Mask2 - * @description The second image to combine - */ + /** @description The second image to combine */ mask2?: components["schemas"]["ImageField"]; /** - * Type + * type * @default mask_combine - * @enum {string} + * @constant */ type: "mask_combine"; }; @@ -5922,22 +5462,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to apply the mask to - */ + use_cache?: boolean | null; + /** @description The image to apply the mask to */ image?: components["schemas"]["ImageField"]; /** * Edge Size @@ -5960,9 +5497,9 @@ export type components = { */ high_threshold?: number; /** - * Type + * type * @default mask_edge - * @enum {string} + * @constant */ type: "mask_edge"; }; @@ -5981,22 +5518,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to create the mask from - */ + use_cache?: boolean | null; + /** @description The image to create the mask from */ image?: components["schemas"]["ImageField"]; /** * Invert @@ -6005,9 +5539,9 @@ export type components = { */ invert?: boolean; /** - * Type + * type * @default tomask - * @enum {string} + * @constant */ type: "tomask"; }; @@ -6026,29 +5560,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default mediapipe_face_processor - * @enum {string} - */ - type: "mediapipe_face_processor"; /** * Max Faces * @description Maximum number of faces to detect @@ -6061,13 +5586,50 @@ export type components = { * @default 0.5 */ min_confidence?: number; + /** + * type + * @default mediapipe_face_processor + * @constant + */ + type: "mediapipe_face_processor"; }; /** * MergeInterpolationMethod - * @description An enumeration. 
* @enum {string} */ MergeInterpolationMethod: "weighted_sum" | "sigmoid" | "inv_sigmoid" | "add_difference"; + /** MergeModelsBody */ + MergeModelsBody: { + /** + * Model Names + * @description model name + */ + model_names: string[]; + /** + * Merged Model Name + * @description Name of destination model + */ + merged_model_name: string | null; + /** + * Alpha + * @description Alpha weighting strength to apply to 2d and 3d models + * @default 0.5 + */ + alpha?: number | null; + /** @description Interpolation method */ + interp: components["schemas"]["MergeInterpolationMethod"] | null; + /** + * Force + * @description Force merging of models created with different versions of diffusers + * @default false + */ + force?: boolean | null; + /** + * Merge Dest Directory + * @description Save the merged model to the designated directory (with 'merged_model_name' appended) + */ + merge_dest_directory?: string | null; + }; /** * Metadata Accumulator * @description Outputs a Core Metadata Object @@ -6083,177 +5645,168 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Generation Mode * @description The generation mode that output this image */ - generation_mode?: string; + generation_mode?: string | null; /** * Positive Prompt * @description The positive prompt parameter */ - positive_prompt?: string; + positive_prompt?: string | null; /** * Negative Prompt * @description The negative prompt parameter */ - negative_prompt?: string; + negative_prompt?: string | null; /** * Width * @description The width parameter */ - width?: number; + width?: number | null; /** * Height * @description The height parameter */ - height?: number; + height?: number | null; /** * Seed * @description The seed used for noise generation */ - seed?: number; + seed?: number | null; /** * Rand Device * @description The device used for random number generation */ - rand_device?: string; + rand_device?: string | null; /** * Cfg Scale * @description The classifier-free guidance scale parameter */ - cfg_scale?: number; + cfg_scale?: number | null; /** * Steps * @description The number of steps used for inference */ - steps?: number; + steps?: number | null; /** * Scheduler * @description The scheduler used for inference */ - scheduler?: string; + scheduler?: string | null; /** * Clip Skip * @description The number of skipped CLIP layers */ - clip_skip?: number; - /** - * Model - * @description The main model used for inference - */ - model?: components["schemas"]["MainModelField"]; + clip_skip?: number | null; + /** @description The main model used for inference */ + model?: components["schemas"]["MainModelField"] | null; /** * Controlnets * @description The ControlNets used for inference */ - controlnets?: components["schemas"]["ControlField"][]; + controlnets?: components["schemas"]["ControlField"][] | null; /** * Ipadapters * @description The IP Adapters used for inference */ - ipAdapters?: components["schemas"]["IPAdapterMetadataField"][]; + ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; /** * T2Iadapters * @description The IP Adapters used for inference */ - t2iAdapters?: components["schemas"]["T2IAdapterField"][]; + 
t2iAdapters?: components["schemas"]["T2IAdapterField"][] | null; /** * Loras * @description The LoRAs used for inference */ - loras?: components["schemas"]["LoRAMetadataField"][]; + loras?: components["schemas"]["LoRAMetadataField"][] | null; /** * Strength * @description The strength used for latents-to-latents */ - strength?: number; + strength?: number | null; /** * Init Image * @description The name of the initial image */ - init_image?: string; - /** - * Vae - * @description The VAE used for decoding, if the main model's default was not used - */ - vae?: components["schemas"]["VAEModelField"]; + init_image?: string | null; + /** @description The VAE used for decoding, if the main model's default was not used */ + vae?: components["schemas"]["VAEModelField"] | null; /** * Hrf Width * @description The high resolution fix height and width multipler. */ - hrf_width?: number; + hrf_width?: number | null; /** * Hrf Height * @description The high resolution fix height and width multipler. */ - hrf_height?: number; + hrf_height?: number | null; /** * Hrf Strength * @description The high resolution fix img2img strength used in the upscale pass. */ - hrf_strength?: number; + hrf_strength?: number | null; /** * Positive Style Prompt * @description The positive style prompt parameter */ - positive_style_prompt?: string; + positive_style_prompt?: string | null; /** * Negative Style Prompt * @description The negative style prompt parameter */ - negative_style_prompt?: string; - /** - * Refiner Model - * @description The SDXL Refiner model used - */ - refiner_model?: components["schemas"]["MainModelField"]; + negative_style_prompt?: string | null; + /** @description The SDXL Refiner model used */ + refiner_model?: components["schemas"]["MainModelField"] | null; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner */ - refiner_cfg_scale?: number; + refiner_cfg_scale?: number | null; /** * Refiner Steps * @description The number of steps used for the refiner */ - refiner_steps?: number; + refiner_steps?: number | null; /** * Refiner Scheduler * @description The scheduler used for the refiner */ - refiner_scheduler?: string; + refiner_scheduler?: string | null; /** * Refiner Positive Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_positive_aesthetic_score?: number; + refiner_positive_aesthetic_score?: number | null; /** * Refiner Negative Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_negative_aesthetic_score?: number; + refiner_negative_aesthetic_score?: number | null; /** * Refiner Start * @description The start value used for refiner denoising */ - refiner_start?: number; + refiner_start?: number | null; /** - * Type + * type * @default metadata_accumulator - * @enum {string} + * @constant */ type: "metadata_accumulator"; }; @@ -6262,15 +5815,12 @@ export type components = { * @description The output of the MetadataAccumulator node */ MetadataAccumulatorOutput: { - /** - * Metadata - * @description The core metadata for the image - */ + /** @description The core metadata for the image */ metadata: components["schemas"]["CoreMetadata"]; /** - * Type + * type * @default metadata_accumulator_output - * @enum {string} + * @constant */ type: "metadata_accumulator_output"; }; @@ -6289,29 +5839,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default midas_depth_image_processor - * @enum {string} - */ - type: "midas_depth_image_processor"; /** * A Mult * @description Midas parameter `a_mult` (a = a_mult * PI) @@ -6324,6 +5865,12 @@ export type components = { * @default 0.1 */ bg_th?: number; + /** + * type + * @default midas_depth_image_processor + * @constant + */ + type: "midas_depth_image_processor"; }; /** * MLSD Processor @@ -6340,29 +5887,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default mlsd_image_processor - * @enum {string} - */ - type: "mlsd_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -6387,11 +5925,16 @@ export type components = { * @default 0.1 */ thr_d?: number; + /** + * type + * @default mlsd_image_processor + * @constant + */ + type: "mlsd_image_processor"; }; /** * ModelError - * @description An enumeration. - * @enum {string} + * @constant */ ModelError: "not_found"; /** ModelInfo */ @@ -6406,7 +5949,7 @@ export type components = { /** @description Info to load submodel */ model_type: components["schemas"]["ModelType"]; /** @description Info to load submodel */ - submodel?: components["schemas"]["SubModelType"]; + submodel?: components["schemas"]["SubModelType"] | null; }; /** * ModelLoaderOutput @@ -6429,21 +5972,19 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default model_loader_output - * @enum {string} + * @constant */ type: "model_loader_output"; }; /** * ModelType - * @description An enumeration. * @enum {string} */ ModelType: "onnx" | "main" | "vae" | "lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "t2i_adapter"; /** * ModelVariantType - * @description An enumeration. * @enum {string} */ ModelVariantType: "normal" | "inpaint" | "depth"; @@ -6467,18 +6008,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -6492,9 +6033,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default mul - * @enum {string} + * @constant */ type: "mul"; }; @@ -6531,18 +6072,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Seed * @description Seed for random number generation @@ -6567,9 +6108,9 @@ export type components = { */ use_cpu?: boolean; /** - * Type + * type * @default noise - * @enum {string} + * @constant */ type: "noise"; }; @@ -6578,11 +6119,8 @@ export type components = { * @description Invocation noise output */ NoiseOutput: { - /** - * Noise - * @description Noise tensor - */ - noise?: components["schemas"]["LatentsField"]; + /** @description Noise tensor */ + noise: components["schemas"]["LatentsField"]; /** * Width * @description Width of output (px) @@ -6594,9 +6132,9 @@ export type components = { */ height: number; /** - * Type + * type * @default noise_output - * @enum {string} + * @constant */ type: "noise_output"; }; @@ -6615,29 +6153,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default normalbae_image_processor - * @enum {string} - */ - type: "normalbae_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -6650,6 +6179,12 @@ export type components = { * @default 512 */ image_resolution?: number; + /** + * type + * @default normalbae_image_processor + * @constant + */ + type: "normalbae_image_processor"; }; /** * ONNX Latents to Image @@ -6666,37 +6201,28 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description Denoised latents tensor - */ + use_cache?: boolean | null; + /** @description Denoised latents tensor */ latents?: components["schemas"]["LatentsField"]; - /** - * Vae - * @description VAE - */ + /** @description VAE */ vae?: components["schemas"]["VaeField"]; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default l2i_onnx - * @enum {string} + * @constant */ type: "l2i_onnx"; }; @@ -6726,19 +6252,13 @@ export type components = { */ vae_encoder?: components["schemas"]["VaeField"]; /** - * Type + * type * @default model_loader_output_onnx - * @enum {string} + * @constant */ type: "model_loader_output_onnx"; }; - /** - * ONNX Prompt (Raw) - * @description A node to process inputs and produce outputs. - * May use dependency injection in __init__ to receive providers. - * - * All invocations must use the `@invocation` decorator to provide their unique type. - */ + /** ONNX Prompt (Raw) */ ONNXPromptInvocation: { /** * Id @@ -6750,33 +6270,30 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description Raw prompt text (no parsing) * @default */ prompt?: string; - /** - * Clip - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ + /** @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ clip?: components["schemas"]["ClipField"]; /** - * Type + * type * @default prompt_onnx - * @enum {string} + * @constant */ type: "prompt_onnx"; }; @@ -6787,19 +6304,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default onnx + * @constant */ model_type: "onnx"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "onnx"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; variant: components["schemas"]["ModelVariantType"]; }; /** ONNXStableDiffusion2ModelConfig */ @@ -6809,19 +6327,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default onnx + * @constant */ model_type: "onnx"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "onnx"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; variant: components["schemas"]["ModelVariantType"]; prediction_type: components["schemas"]["SchedulerPredictionType"]; /** 
Upcast Attention */ @@ -6842,32 +6361,23 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Positive Conditioning - * @description Positive conditioning tensor - */ + use_cache?: boolean | null; + /** @description Positive conditioning tensor */ positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning tensor - */ + /** @description Negative conditioning tensor */ negative_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Noise - * @description Noise tensor - */ + /** @description Noise tensor */ noise?: components["schemas"]["LatentsField"]; /** * Steps @@ -6895,10 +6405,7 @@ export type components = { * @enum {string} */ precision?: "tensor(bool)" | "tensor(int8)" | "tensor(uint8)" | "tensor(int16)" | "tensor(uint16)" | "tensor(int32)" | "tensor(uint32)" | "tensor(int64)" | "tensor(uint64)" | "tensor(float16)" | "tensor(float)" | "tensor(double)"; - /** - * Unet - * @description UNet (scheduler, LoRAs) - */ + /** @description UNet (scheduler, LoRAs) */ unet?: components["schemas"]["UNetField"]; /** * Control @@ -6906,17 +6413,13 @@ export type components = { */ control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; /** - * Type + * type * @default t2l_onnx - * @enum {string} + * @constant */ type: "t2l_onnx"; }; - /** - * OffsetPaginatedResults[BoardDTO] - * @description Offset-paginated results - * Generic must be a Pydantic model - */ + /** OffsetPaginatedResults[BoardDTO] */ OffsetPaginatedResults_BoardDTO_: { /** * Limit @@ -6939,11 +6442,7 @@ export type components = { */ items: components["schemas"]["BoardDTO"][]; }; - /** - * OffsetPaginatedResults[ImageDTO] - * @description Offset-paginated results - * Generic must be a Pydantic model - */ + /** OffsetPaginatedResults[ImageDTO] */ OffsetPaginatedResults_ImageDTO_: { /** * Limit @@ -6996,27 +6495,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description ONNX Main model (UNet, VAE, CLIP) to load - */ + use_cache?: boolean | null; + /** @description ONNX Main model (UNet, VAE, CLIP) to load */ model: components["schemas"]["OnnxModelField"]; /** - * Type + * type * @default onnx_model_loader - * @enum {string} + * @constant */ type: "onnx_model_loader"; }; @@ -7035,29 +6531,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default openpose_image_processor - * @enum {string} - */ - type: "openpose_image_processor"; /** * Hand And Face * @description Whether to use hands and face mode @@ -7076,38 +6563,12 @@ export type components = { * @default 512 */ image_resolution?: number; - }; - /** - * PaginatedResults[GraphExecutionState] - * @description Paginated results - * Generic must be a Pydantic model - */ - PaginatedResults_GraphExecutionState_: { /** - * Page - * @description Current Page + * type + * @default openpose_image_processor + * @constant */ - page: number; - /** - * Pages - * @description Total number of pages - */ - pages: number; - /** - * Per Page - * @description Number of items per page - */ - per_page: number; - /** - * Total - * @description Total number of items in result - */ - total: number; - /** - * Items - * @description Items - */ - items: components["schemas"]["GraphExecutionState"][]; + type: "openpose_image_processor"; }; /** * PIDI Processor @@ -7124,29 +6585,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default pidi_image_processor - * @enum {string} - */ - type: "pidi_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -7171,6 +6623,12 @@ export type components = { * @default false */ scribble?: boolean; + /** + * type + * @default pidi_image_processor + * @constant + */ + type: "pidi_image_processor"; }; /** * Prompts from File @@ -7187,18 +6645,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * File Path * @description Path to prompt text file @@ -7208,12 +6666,12 @@ export type components = { * Pre Prompt * @description String to prepend to each prompt */ - pre_prompt?: string; + pre_prompt?: string | null; /** * Post Prompt * @description String to append to each prompt */ - post_prompt?: string; + post_prompt?: string | null; /** * Start Line * @description Line in the file to start start from @@ -7227,9 +6685,9 @@ export type components = { */ max_prompts?: number; /** - * Type + * type * @default prompt_from_file - * @enum {string} + * @constant */ type: "prompt_from_file"; }; @@ -7259,18 +6717,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Low * @description The inclusive low value @@ -7290,9 +6748,9 @@ export type components = { */ decimals?: number; /** - * Type + * type * @default rand_float - * @enum {string} + * @constant */ type: "rand_float"; }; @@ -7311,18 +6769,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Low * @description The inclusive low value @@ -7336,9 +6794,9 @@ export type components = { */ high?: number; /** - * Type + * type * @default rand_int - * @enum {string} + * @constant */ type: "rand_int"; }; @@ -7357,18 +6815,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Low * @description The inclusive low value @@ -7393,9 +6851,9 @@ export type components = { */ seed?: number; /** - * Type + * type * @default random_range - * @enum {string} + * @constant */ type: "random_range"; }; @@ -7414,18 +6872,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Start * @description The start of the range @@ -7445,9 +6903,9 @@ export type components = { */ step?: number; /** - * Type + * type * @default range - * @enum {string} + * @constant */ type: "range"; }; @@ -7466,18 +6924,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Start * @description The start of the range @@ -7497,9 +6955,9 @@ export type components = { */ step?: number; /** - * Type + * type * @default range_of_size - * @enum {string} + * @constant */ type: "range_of_size"; }; @@ -7526,22 +6984,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description Latents tensor - */ + use_cache?: boolean | null; + /** @description Latents tensor */ latents?: components["schemas"]["LatentsField"]; /** * Width @@ -7567,9 +7022,9 @@ export type components = { */ antialias?: boolean; /** - * Type + * type * @default lresize - * @enum {string} + * @constant */ type: "lresize"; }; @@ -7598,18 +7053,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The float value @@ -7623,9 +7078,9 @@ export type components = { */ decimals?: number; /** - * Type + * type * @default round_float - * @enum {string} + * @constant */ type: "round_float"; }; @@ -7644,18 +7099,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -7709,9 +7164,9 @@ export type components = { */ clip2?: components["schemas"]["ClipField"]; /** - * Type + * type * @default sdxl_compel_prompt - * @enum {string} + * @constant */ type: "sdxl_compel_prompt"; }; @@ -7730,18 +7185,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * LoRA * @description LoRA model to load @@ -7757,21 +7212,21 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default sdxl_lora_loader - * @enum {string} + * @constant */ type: "sdxl_lora_loader"; }; @@ -7784,21 +7239,21 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default sdxl_lora_loader_output - * @enum {string} + * @constant */ type: "sdxl_lora_loader_output"; }; @@ -7817,27 +7272,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load - */ + use_cache?: boolean | null; + /** @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load */ model: components["schemas"]["MainModelField"]; /** - * Type + * type * @default sdxl_model_loader - * @enum {string} + * @constant */ type: "sdxl_model_loader"; }; @@ -7867,9 +7319,9 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default sdxl_model_loader_output - * @enum {string} + * @constant */ type: "sdxl_model_loader_output"; }; @@ -7888,18 +7340,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Style * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -7932,15 +7384,12 @@ export type components = { * @default 6 */ aesthetic_score?: number; - /** - * Clip2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ + /** @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ clip2?: components["schemas"]["ClipField"]; /** - * Type + * type * @default sdxl_refiner_compel_prompt - * @enum {string} + * @constant */ type: "sdxl_refiner_compel_prompt"; }; @@ -7959,27 +7408,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load - */ + use_cache?: boolean | null; + /** @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load */ model: components["schemas"]["MainModelField"]; /** - * Type + * type * @default sdxl_refiner_model_loader - * @enum {string} + * @constant */ type: "sdxl_refiner_model_loader"; }; @@ -8004,9 +7450,9 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default sdxl_refiner_model_loader_output - * @enum {string} + * @constant */ type: "sdxl_refiner_model_loader_output"; }; @@ -8025,37 +7471,28 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; + /** @description The board to save the image to */ + board?: components["schemas"]["BoardField"] | null; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Board - * @description The board to save the image to - */ - board?: components["schemas"]["BoardField"]; - /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default save_image - * @enum {string} + * @constant */ type: "save_image"; }; @@ -8074,22 +7511,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description Latents tensor - */ + use_cache?: boolean | null; + /** @description Latents tensor */ latents?: components["schemas"]["LatentsField"]; /** * Scale Factor @@ -8110,9 +7544,9 @@ export type components = { */ antialias?: boolean; /** - * Type + * type * @default lscale - * @enum {string} + * @constant */ type: "lscale"; }; @@ -8131,18 +7565,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Scheduler * @description Scheduler to use during inference @@ -8151,18 +7585,13 @@ export type components = { */ scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; /** - * Type + * type * @default scheduler - * @enum {string} + * @constant */ type: "scheduler"; }; - /** - * SchedulerOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. 
- */ + /** SchedulerOutput */ SchedulerOutput: { /** * Scheduler @@ -8171,15 +7600,14 @@ export type components = { */ scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; /** - * Type + * type * @default scheduler_output - * @enum {string} + * @constant */ type: "scheduler_output"; }; /** * SchedulerPredictionType - * @description An enumeration. * @enum {string} */ SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; @@ -8198,28 +7626,28 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * VAE * @description VAE model to load */ - vae?: components["schemas"]["VaeField"]; + vae?: components["schemas"]["VaeField"] | null; /** * Seamless Y * @description Specify whether Y axis is seamless @@ -8233,9 +7661,9 @@ export type components = { */ seamless_x?: boolean; /** - * Type + * type * @default seamless - * @enum {string} + * @constant */ type: "seamless"; }; @@ -8248,16 +7676,16 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * VAE * @description VAE */ - vae?: components["schemas"]["VaeField"]; + vae?: components["schemas"]["VaeField"] | null; /** - * Type + * type * @default seamless_output - * @enum {string} + * @constant */ type: "seamless_output"; }; @@ -8276,27 +7704,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default segment_anything_processor - * @enum {string} + * @constant */ type: "segment_anything_processor"; }; @@ -8321,10 +7746,7 @@ export type components = { queue: components["schemas"]["SessionQueueStatus"]; processor: components["schemas"]["SessionProcessorStatus"]; }; - /** - * SessionQueueItem - * @description Session queue item without the full graph. Used for serialization. 
- */ + /** SessionQueueItem */ SessionQueueItem: { /** * Item Id @@ -8358,7 +7780,7 @@ export type components = { * Error * @description The error message if this queue item errored */ - error?: string; + error?: string | null; /** * Created At * @description When this queue item was created @@ -8373,12 +7795,12 @@ export type components = { * Started At * @description When this queue item was started */ - started_at?: string; + started_at?: string | null; /** * Completed At * @description When this queue item was completed */ - completed_at?: string; + completed_at?: string | null; /** * Queue Id * @description The id of the queue with which this item is associated @@ -8388,17 +7810,11 @@ export type components = { * Field Values * @description The field values that were used for this queue item */ - field_values?: components["schemas"]["NodeFieldValue"][]; - /** - * Session - * @description The fully-populated session to be executed - */ + field_values?: components["schemas"]["NodeFieldValue"][] | null; + /** @description The fully-populated session to be executed */ session: components["schemas"]["GraphExecutionState"]; }; - /** - * SessionQueueItemDTO - * @description Session queue item without the full graph. Used for serialization. - */ + /** SessionQueueItemDTO */ SessionQueueItemDTO: { /** * Item Id @@ -8432,7 +7848,7 @@ export type components = { * Error * @description The error message if this queue item errored */ - error?: string; + error?: string | null; /** * Created At * @description When this queue item was created @@ -8447,12 +7863,12 @@ export type components = { * Started At * @description When this queue item was started */ - started_at?: string; + started_at?: string | null; /** * Completed At * @description When this queue item was completed */ - completed_at?: string; + completed_at?: string | null; /** * Queue Id * @description The id of the queue with which this item is associated @@ -8462,7 +7878,7 @@ export type components = { * Field Values * @description The field values that were used for this queue item */ - field_values?: components["schemas"]["NodeFieldValue"][]; + field_values?: components["schemas"]["NodeFieldValue"][] | null; }; /** SessionQueueStatus */ SessionQueueStatus: { @@ -8475,17 +7891,17 @@ export type components = { * Item Id * @description The current queue item id */ - item_id?: number; + item_id: number | null; /** * Batch Id * @description The current queue item's batch id */ - batch_id?: string; + batch_id: string | null; /** * Session Id * @description The current queue item's session id */ - session_id?: string; + session_id: string | null; /** * Pending * @description Number of queue items with status 'pending' @@ -8532,27 +7948,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to show - */ + use_cache?: boolean | null; + /** @description The image to show */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default show_image - * @enum {string} + * @constant */ type: "show_image"; }; @@ -8563,21 +7976,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; /** Config */ config: string; variant: components["schemas"]["ModelVariantType"]; @@ -8589,21 +8003,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusion2ModelCheckpointConfig */ @@ -8613,21 +8028,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; /** Config */ config: string; variant: components["schemas"]["ModelVariantType"]; @@ -8639,21 +8055,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusionXLModelCheckpointConfig */ @@ -8663,21 +8080,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: 
string; + vae?: string | null; /** Config */ config: string; variant: components["schemas"]["ModelVariantType"]; @@ -8689,21 +8107,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; variant: components["schemas"]["ModelVariantType"]; }; /** @@ -8721,18 +8140,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Easing * @description The easing function to use @@ -8774,12 +8193,12 @@ export type components = { * Pre Start Value * @description value before easing start */ - pre_start_value?: number; + pre_start_value?: number | null; /** * Post End Value * @description value after easing end */ - post_end_value?: number; + post_end_value?: number | null; /** * Mirror * @description include mirror of easing function @@ -8793,9 +8212,9 @@ export type components = { */ show_easing_plot?: boolean; /** - * Type + * type * @default step_param_easing - * @enum {string} + * @constant */ type: "step_param_easing"; }; @@ -8815,9 +8234,9 @@ export type components = { */ string_2: string; /** - * Type + * type * @default string_2_output - * @enum {string} + * @constant */ type: "string_2_output"; }; @@ -8836,27 +8255,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of string values */ collection?: string[]; /** - * Type + * type * @default string_collection - * @enum {string} + * @constant */ type: "string_collection"; }; @@ -8871,9 +8290,9 @@ export type components = { */ collection: string[]; /** - * Type + * type * @default string_collection_output - * @enum {string} + * @constant */ type: "string_collection_output"; }; @@ -8892,18 +8311,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The string value @@ -8911,9 +8330,9 @@ export type components = { */ value?: string; /** - * Type + * type * @default string - * @enum {string} + * @constant */ type: "string"; }; @@ -8932,18 +8351,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String Left * @description String Left @@ -8957,9 +8376,9 @@ export type components = { */ string_right?: string; /** - * Type + * type * @default string_join - * @enum {string} + * @constant */ type: "string_join"; }; @@ -8978,18 +8397,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String Left * @description String Left @@ -9009,9 +8428,9 @@ export type components = { */ string_right?: string; /** - * Type + * type * @default string_join_three - * @enum {string} + * @constant */ type: "string_join_three"; }; @@ -9026,9 +8445,9 @@ export type components = { */ value: string; /** - * Type + * type * @default string_output - * @enum {string} + * @constant */ type: "string_output"; }; @@ -9048,9 +8467,9 @@ export type components = { */ negative_string: string; /** - * Type + * type * @default string_pos_neg_output - * @enum {string} + * @constant */ type: "string_pos_neg_output"; }; @@ -9069,18 +8488,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String * @description String to work on @@ -9106,9 +8525,9 @@ export type components = { */ use_regex?: boolean; /** - * Type + * type * @default string_replace - * @enum {string} + * @constant */ type: "string_replace"; }; @@ -9127,18 +8546,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String * @description String to split @@ -9152,9 +8571,9 @@ export type components = { */ delimiter?: string; /** - * Type + * type * @default string_split - * @enum {string} + * @constant */ type: "string_split"; }; @@ -9173,18 +8592,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String * @description String to split @@ -9192,15 +8611,14 @@ export type components = { */ string?: string; /** - * Type + * type * @default string_split_neg - * @enum {string} + * @constant */ type: "string_split_neg"; }; /** * SubModelType - * @description An enumeration. * @enum {string} */ SubModelType: "unet" | "text_encoder" | "text_encoder_2" | "tokenizer" | "tokenizer_2" | "vae" | "vae_decoder" | "vae_encoder" | "scheduler" | "safety_checker"; @@ -9219,18 +8637,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -9244,23 +8662,17 @@ export type components = { */ b?: number; /** - * Type + * type * @default sub - * @enum {string} + * @constant */ type: "sub"; }; /** T2IAdapterField */ T2IAdapterField: { - /** - * Image - * @description The T2I-Adapter image prompt. - */ + /** @description The T2I-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** - * T2I Adapter Model - * @description The T2I-Adapter model to use. - */ + /** @description The T2I-Adapter model to use. */ t2i_adapter_model: components["schemas"]["T2IAdapterModelField"]; /** * Weight @@ -9303,22 +8715,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The IP-Adapter image prompt. - */ + use_cache?: boolean | null; + /** @description The IP-Adapter image prompt. 
*/ image?: components["schemas"]["ImageField"]; /** * T2I-Adapter Model @@ -9351,9 +8760,9 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** - * Type + * type * @default t2i_adapter - * @enum {string} + * @constant */ type: "t2i_adapter"; }; @@ -9364,19 +8773,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default t2i_adapter + * @constant */ model_type: "t2i_adapter"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** T2IAdapterModelField */ T2IAdapterModelField: { @@ -9388,12 +8798,7 @@ export type components = { /** @description Base model */ base_model: components["schemas"]["BaseModelType"]; }; - /** - * T2IAdapterOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. - */ + /** T2IAdapterOutput */ T2IAdapterOutput: { /** * T2I Adapter @@ -9401,9 +8806,9 @@ export type components = { */ t2i_adapter: components["schemas"]["T2IAdapterField"]; /** - * Type + * type * @default t2i_adapter_output - * @enum {string} + * @constant */ type: "t2i_adapter_output"; }; @@ -9414,16 +8819,17 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default embedding + * @constant */ model_type: "embedding"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** Model Format */ model_format: null; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * Tile Resample Processor @@ -9440,47 +8846,38 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default tile_image_processor - * @enum {string} - */ - type: "tile_image_processor"; /** * Down Sampling Rate * @description Down sampling rate * @default 1 */ down_sampling_rate?: number; + /** + * type + * @default tile_image_processor + * @constant + */ + type: "tile_image_processor"; }; /** UNetField */ UNetField: { - /** - * Unet - * @description Info to load unet submodel - */ + /** @description Info to load unet submodel */ unet: components["schemas"]["ModelInfo"]; - /** - * Scheduler - * @description Info to load scheduler submodel - */ + /** @description Info to load scheduler submodel */ scheduler: components["schemas"]["ModelInfo"]; /** * Loras @@ -9521,10 +8918,7 @@ export type components = { }; /** VaeField */ VaeField: { - /** - * Vae - * @description Info to load vae submodel - */ + /** @description Info to load vae submodel */ vae: components["schemas"]["ModelInfo"]; /** * Seamless Axes @@ -9547,27 +8941,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * VAE * @description VAE model to load */ vae_model: components["schemas"]["VAEModelField"]; /** - * Type + * type * @default vae_loader - * @enum {string} + * @constant */ type: "vae_loader"; }; @@ -9582,9 +8976,9 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default vae_loader_output - * @enum {string} + * @constant */ type: "vae_loader_output"; }; @@ -9595,19 +8989,19 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default vae + * @constant */ model_type: "vae"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; model_format: components["schemas"]["VaeModelFormat"]; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * VaeModelFormat - * @description An enumeration. * @enum {string} */ VaeModelFormat: "checkpoint" | "diffusers"; @@ -9635,57 +9029,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default zoe_depth_image_processor - * @enum {string} + * @constant */ type: "zoe_depth_image_processor"; }; - /** - * UIConfigBase - * @description Provides additional node configuration to the UI. - * This is used internally by the @invocation decorator logic. Do not use this directly. - */ - UIConfigBase: { - /** - * Tags - * @description The node's tags - */ - tags?: string[]; - /** - * Title - * @description The node's display name - */ - title?: string; - /** - * Category - * @description The node's category - */ - category?: string; - /** - * Version - * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". - */ - version?: string; - }; /** * Input * @description The type of input a field accepts. @@ -9695,6 +9059,42 @@ export type components = { * @enum {string} */ Input: "connection" | "direct" | "any"; + /** + * UIComponent + * @description The type of UI component to use for a field, used to override the default components, which are inferred from the field type. + * @enum {string} + */ + UIComponent: "none" | "textarea" | "slider"; + /** + * UIConfigBase + * @description Provides additional node configuration to the UI. + * This is used internally by the @invocation decorator logic. Do not use this directly. + */ + UIConfigBase: { + /** + * Tags + * @description The node's tags + */ + tags: string[] | null; + /** + * Title + * @description The node's display name + * @default null + */ + title: string | null; + /** + * Category + * @description The node's category + * @default null + */ + category: string | null; + /** + * Version + * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". + * @default null + */ + version: string | null; + }; /** * UIType * @description Type hints for the UI. @@ -9702,12 +9102,6 @@ export type components = { * @enum {string} */ UIType: "boolean" | "ColorField" | "ConditioningField" | "ControlField" | "float" | "ImageField" | "integer" | "LatentsField" | "string" | "BooleanCollection" | "ColorCollection" | "ConditioningCollection" | "ControlCollection" | "FloatCollection" | "ImageCollection" | "IntegerCollection" | "LatentsCollection" | "StringCollection" | "BooleanPolymorphic" | "ColorPolymorphic" | "ConditioningPolymorphic" | "ControlPolymorphic" | "FloatPolymorphic" | "ImagePolymorphic" | "IntegerPolymorphic" | "LatentsPolymorphic" | "StringPolymorphic" | "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VaeModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "UNetField" | "VaeField" | "ClipField" | "Collection" | "CollectionItem" | "enum" | "Scheduler" | "WorkflowField" | "IsIntermediate" | "MetadataField" | "BoardField"; - /** - * UIComponent - * @description The type of UI component to use for a field, used to override the default components, which are inferred from the field type. 
- * @enum {string} - */ - UIComponent: "none" | "textarea" | "slider"; /** * _InputField * @description *DO NOT USE* @@ -9719,16 +9113,16 @@ export type components = { input: components["schemas"]["Input"]; /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components["schemas"]["UIType"]; - ui_component?: components["schemas"]["UIComponent"]; + ui_type: components["schemas"]["UIType"] | null; + ui_component: components["schemas"]["UIComponent"] | null; /** Ui Order */ - ui_order?: number; + ui_order: number | null; /** Ui Choice Labels */ - ui_choice_labels?: { + ui_choice_labels: { [key: string]: string; - }; + } | null; /** Item Default */ - item_default?: unknown; + item_default: unknown; }; /** * _OutputField @@ -9740,10 +9134,46 @@ export type components = { _OutputField: { /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components["schemas"]["UIType"]; + ui_type: components["schemas"]["UIType"] | null; /** Ui Order */ - ui_order?: number; + ui_order: number | null; }; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; + /** + * CLIPVisionModelFormat + * @description An enumeration. + * @enum {string} + */ + CLIPVisionModelFormat: "diffusers"; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; /** * IPAdapterModelFormat * @description An enumeration. @@ -9756,42 +9186,6 @@ export type components = { * @enum {string} */ T2IAdapterModelFormat: "diffusers"; - /** - * StableDiffusion2ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * CLIPVisionModelFormat - * @description An enumeration. - * @enum {string} - */ - CLIPVisionModelFormat: "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. 
- * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9806,382 +9200,6 @@ export type external = Record; export type operations = { - /** - * List Sessions - * @deprecated - * @description Gets a list of sessions, optionally searching - */ - list_sessions: { - parameters: { - query?: { - /** @description The page of results to get */ - page?: number; - /** @description The number of results per page */ - per_page?: number; - /** @description The query string to search for */ - query?: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["PaginatedResults_GraphExecutionState_"]; - }; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Create Session - * @deprecated - * @description Creates a new session, optionally initializing it with an invocation graph - */ - create_session: { - parameters: { - query?: { - /** @description The id of the queue to associate the session with */ - queue_id?: string; - }; - }; - requestBody?: { - content: { - "application/json": components["schemas"]["Graph"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid json */ - 400: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Get Session - * @deprecated - * @description Gets a session - */ - get_session: { - parameters: { - path: { - /** @description The id of the session to get */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Add Node - * @deprecated - * @description Adds a node to the graph - */ - add_node: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | 
components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | 
components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": string; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Update Node - * @deprecated - * @description Updates a node in the graph and removes all linked edges - */ - update_node: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - /** @description The path to the node in the graph */ - node_path: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | 
components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | 
components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Delete Node - * @deprecated - * @description Deletes a node in the graph and removes all linked edges - */ - delete_node: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - /** @description The path to the node to delete */ - node_path: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Add Edge - * @deprecated - * @description Adds an edge to the graph - */ - add_edge: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Edge"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Delete Edge - * @deprecated - * @description Deletes an edge from the graph - */ - delete_edge: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - /** @description The id of the node the edge is coming from */ - from_node_id: string; - /** @description The field of the node the edge is coming from */ - from_field: string; - /** @description The id of the node the edge is going to */ - to_node_id: string; - /** @description The field of 
the node the edge is going to */ - to_field: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Invoke Session - * @deprecated - * @description Invokes a session - */ - invoke_session: { - parameters: { - query: { - /** @description The id of the queue to associate the session with */ - queue_id: string; - /** @description Whether or not to invoke all remaining invocations */ - all?: boolean; - }; - path: { - /** @description The id of the session to invoke */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": unknown; - }; - }; - /** @description The invocation is queued */ - 202: { - content: never; - }; - /** @description The session has no invocations ready to invoke */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Cancel Session Invoke - * @deprecated - * @description Invokes a session - */ - cancel_session_invoke: { - parameters: { - path: { - /** @description The id of the session to cancel */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": unknown; - }; - }; - /** @description The invocation is canceled */ - 202: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; /** * Parse Dynamicprompts * @description Creates a batch process @@ -10215,9 +9233,9 @@ export type operations = { parameters: { query?: { /** @description Base models to include */ - base_models?: components["schemas"]["BaseModelType"][]; + base_models?: components["schemas"]["BaseModelType"][] | null; /** @description The type of model to get */ - model_type?: components["schemas"]["ModelType"]; + model_type?: components["schemas"]["ModelType"] | null; }; }; responses: { @@ -10400,7 +9418,7 @@ export type operations = { parameters: { query?: { /** @description Save the converted model to the designated directory */ - convert_dest_directory?: string; + convert_dest_directory?: string | null; }; path: { /** @description Base model */ @@ -10541,11 +9559,11 @@ export type operations = { /** @description Whether this is an intermediate image */ is_intermediate: boolean; /** @description The board to add this image to, if any */ - board_id?: string; + board_id?: string | null; /** @description The session ID associated with this upload, if any */ - session_id?: string; + session_id?: string | null; /** @description Whether to crop the image */ - crop_visible?: boolean; + crop_visible?: boolean | null; }; }; requestBody: { @@ -10664,7 +9682,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + "application/json": number; }; }; }; @@ -10789,13 +9807,13 @@ export type operations = { parameters: { query?: { /** @description The origin of 
images to list. */ - image_origin?: components["schemas"]["ResourceOrigin"]; + image_origin?: components["schemas"]["ResourceOrigin"] | null; /** @description The categories of image to include. */ - categories?: components["schemas"]["ImageCategory"][]; + categories?: components["schemas"]["ImageCategory"][] | null; /** @description Whether to list intermediate images. */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** @description The board id to filter by. Use 'none' to find images without a board. */ - board_id?: string; + board_id?: string | null; /** @description The page offset */ offset?: number; /** @description The number of images per page */ @@ -10913,11 +9931,11 @@ export type operations = { parameters: { query?: { /** @description Whether to list all boards */ - all?: boolean; + all?: boolean | null; /** @description The page offset */ - offset?: number; + offset?: number | null; /** @description The number of boards per page */ - limit?: number; + limit?: number | null; }; }; responses: { @@ -10995,7 +10013,7 @@ export type operations = { parameters: { query?: { /** @description Permanently delete all images on the board */ - include_images?: boolean; + include_images?: boolean | null; }; path: { /** @description The id of board to delete */ @@ -11311,7 +10329,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + "application/json": components["schemas"]["EnqueueGraphResult"]; }; }; /** @description Created */ @@ -11348,7 +10366,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + "application/json": components["schemas"]["EnqueueBatchResult"]; }; }; /** @description Created */ @@ -11375,9 +10393,9 @@ export type operations = { /** @description The number of items to fetch */ limit?: number; /** @description The status of items to fetch */ - status?: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + status?: ("pending" | "in_progress" | "completed" | "failed" | "canceled") | null; /** @description The pagination cursor */ - cursor?: number; + cursor?: number | null; /** @description The pagination cursor priority */ priority?: number; }; @@ -11551,7 +10569,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueItem"]; + "application/json": components["schemas"]["SessionQueueItem"] | null; }; }; /** @description Validation Error */ @@ -11577,7 +10595,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueItem"]; + "application/json": components["schemas"]["SessionQueueItem"] | null; }; }; /** @description Validation Error */ diff --git a/pyproject.toml b/pyproject.toml index bab87172c2..4c8ec0f5e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,10 +35,10 @@ dependencies = [ "accelerate~=0.23.0", "albumentations", "click", - "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", + "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "compel~=2.0.2", "controlnet-aux>=0.0.6", - "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 + "timm==0.6.13", # needed to override timm latest in controlnet_aux, see 
https://github.com/isl-org/ZoeDepth/issues/26 "datasets", # When bumping diffusers beyond 0.21, make sure to address this: # https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513 @@ -48,19 +48,20 @@ dependencies = [ "easing-functions", "einops", "facexlib", - "fastapi==0.88.0", - "fastapi-events==0.8.0", + "fastapi~=0.103.2", + "fastapi-events~=0.9.1", "huggingface-hub~=0.16.4", - "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids - "matplotlib", # needed for plotting of Penner easing functions - "mediapipe", # needed for "mediapipeface" controlnet model + "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids + "matplotlib", # needed for plotting of Penner easing functions + "mediapipe", # needed for "mediapipeface" controlnet model "numpy", "npyscreen", "omegaconf", "onnx", "onnxruntime", "opencv-python", - "pydantic==1.*", + "pydantic~=2.4.2", + "pydantic-settings~=2.0.3", "picklescan", "pillow", "prompt-toolkit", @@ -95,33 +96,25 @@ dependencies = [ "mkdocs-git-revision-date-localized-plugin", "mkdocs-redirects==1.2.0", ] -"dev" = [ - "jurigged", - "pudb", -] +"dev" = ["jurigged", "pudb"] "test" = [ "black", "flake8", "Flake8-pyproject", "isort", + "mypy", "pre-commit", "pytest>6.0.0", "pytest-cov", "pytest-datadir", ] "xformers" = [ - "xformers~=0.0.19; sys_platform!='darwin'", - "triton; sys_platform=='linux'", -] -"onnx" = [ - "onnxruntime", -] -"onnx-cuda" = [ - "onnxruntime-gpu", -] -"onnx-directml" = [ - "onnxruntime-directml", + "xformers~=0.0.19; sys_platform!='darwin'", + "triton; sys_platform=='linux'", ] +"onnx" = ["onnxruntime"] +"onnx-cuda" = ["onnxruntime-gpu"] +"onnx-directml" = ["onnxruntime-directml"] [project.scripts] @@ -163,12 +156,15 @@ version = { attr = "invokeai.version.__version__" } [tool.setuptools.packages.find] "where" = ["."] "include" = [ - "invokeai.assets.fonts*","invokeai.version*", - "invokeai.generator*","invokeai.backend*", - "invokeai.frontend*", "invokeai.frontend.web.dist*", - "invokeai.frontend.web.static*", - "invokeai.configs*", - "invokeai.app*", + "invokeai.assets.fonts*", + "invokeai.version*", + "invokeai.generator*", + "invokeai.backend*", + "invokeai.frontend*", + "invokeai.frontend.web.dist*", + "invokeai.frontend.web.static*", + "invokeai.configs*", + "invokeai.app*", ] [tool.setuptools.package-data] @@ -182,7 +178,7 @@ version = { attr = "invokeai.version.__version__" } [tool.pytest.ini_options] addopts = "--cov-report term --cov-report html --cov-report xml --strict-markers -m \"not slow\"" markers = [ - "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\"." + "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\".", ] [tool.coverage.run] branch = true @@ -190,7 +186,7 @@ source = ["invokeai"] omit = ["*tests*", "*migrations*", ".venv/*", "*.env"] [tool.coverage.report] show_missing = true -fail_under = 85 # let's set something sensible on Day 1 ... +fail_under = 85 # let's set something sensible on Day 1 ... 
[tool.coverage.json] output = "coverage/coverage.json" pretty_print = true @@ -209,7 +205,7 @@ exclude = [ "__pycache__", "build", "dist", - "invokeai/frontend/web/node_modules/" + "invokeai/frontend/web/node_modules/", ] [tool.black] @@ -218,3 +214,53 @@ line-length = 120 [tool.isort] profile = "black" line_length = 120 + +[tool.mypy] +ignore_missing_imports = true # ignores missing types in third-party libraries + +[[tool.mypy.overrides]] +follow_imports = "skip" +module = [ + "invokeai.app.api.routers.models", + "invokeai.app.invocations.compel", + "invokeai.app.invocations.latent", + "invokeai.app.services.config.config_base", + "invokeai.app.services.config.config_default", + "invokeai.app.services.invocation_stats.invocation_stats_default", + "invokeai.app.services.model_manager.model_manager_base", + "invokeai.app.services.model_manager.model_manager_default", + "invokeai.app.util.controlnet_utils", + "invokeai.backend.image_util.txt2mask", + "invokeai.backend.image_util.safety_checker", + "invokeai.backend.image_util.patchmatch", + "invokeai.backend.image_util.invisible_watermark", + "invokeai.backend.install.model_install_backend", + "invokeai.backend.ip_adapter.ip_adapter", + "invokeai.backend.ip_adapter.resampler", + "invokeai.backend.ip_adapter.unet_patcher", + "invokeai.backend.model_management.convert_ckpt_to_diffusers", + "invokeai.backend.model_management.lora", + "invokeai.backend.model_management.model_cache", + "invokeai.backend.model_management.model_manager", + "invokeai.backend.model_management.model_merge", + "invokeai.backend.model_management.model_probe", + "invokeai.backend.model_management.model_search", + "invokeai.backend.model_management.models.*", # this is needed to ignore the module's `__init__.py` + "invokeai.backend.model_management.models.base", + "invokeai.backend.model_management.models.controlnet", + "invokeai.backend.model_management.models.ip_adapter", + "invokeai.backend.model_management.models.lora", + "invokeai.backend.model_management.models.sdxl", + "invokeai.backend.model_management.models.stable_diffusion", + "invokeai.backend.model_management.models.vae", + "invokeai.backend.model_management.seamless", + "invokeai.backend.model_management.util", + "invokeai.backend.stable_diffusion.diffusers_pipeline", + "invokeai.backend.stable_diffusion.diffusion.cross_attention_control", + "invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion", + "invokeai.backend.util.hotfixes", + "invokeai.backend.util.logging", + "invokeai.backend.util.mps_fixes", + "invokeai.backend.util.util", + "invokeai.frontend.install.model_install", +] diff --git a/tests/nodes/test_node_graph.py b/tests/nodes/test_node_graph.py index 822ffc1588..3c965895f9 100644 --- a/tests/nodes/test_node_graph.py +++ b/tests/nodes/test_node_graph.py @@ -1,4 +1,5 @@ import pytest +from pydantic import TypeAdapter from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -593,20 +594,21 @@ def test_graph_can_serialize(): g.add_edge(e) # Not throwing on this line is sufficient - _ = g.json() + _ = g.model_dump_json() def test_graph_can_deserialize(): g = Graph() n1 = TextToImageTestInvocation(id="1", prompt="Banana sushi") - n2 = ESRGANInvocation(id="2") + n2 = ImageToImageTestInvocation(id="2") g.add_node(n1) g.add_node(n2) e = create_edge(n1.id, "image", n2.id, "image") g.add_edge(e) - json = g.json() - g2 = Graph.parse_raw(json) + json = g.model_dump_json() + adapter_graph = TypeAdapter(Graph) + g2 = adapter_graph.validate_json(json) assert g2 is not None 
assert g2.nodes["1"] is not None @@ -619,7 +621,7 @@ def test_graph_can_deserialize(): def test_invocation_decorator(): - invocation_type = "test_invocation" + invocation_type = "test_invocation_decorator" title = "Test Invocation" tags = ["first", "second", "third"] category = "category" @@ -630,7 +632,7 @@ def test_invocation_decorator(): def invoke(self): pass - schema = TestInvocation.schema() + schema = TestInvocation.model_json_schema() assert schema.get("title") == title assert schema.get("tags") == tags @@ -640,18 +642,17 @@ def test_invocation_decorator(): def test_invocation_version_must_be_semver(): - invocation_type = "test_invocation" valid_version = "1.0.0" invalid_version = "not_semver" - @invocation(invocation_type, version=valid_version) + @invocation("test_invocation_version_valid", version=valid_version) class ValidVersionInvocation(BaseInvocation): def invoke(self): pass with pytest.raises(InvalidVersionError): - @invocation(invocation_type, version=invalid_version) + @invocation("test_invocation_version_invalid", version=invalid_version) class InvalidVersionInvocation(BaseInvocation): def invoke(self): pass @@ -694,4 +695,4 @@ def test_ints_do_not_accept_floats(): def test_graph_can_generate_schema(): # Not throwing on this line is sufficient # NOTE: if this test fails, it's PROBABLY because a new invocation type is breaking schema generation - _ = Graph.schema_json(indent=2) + _ = Graph.model_json_schema() diff --git a/tests/nodes/test_session_queue.py b/tests/nodes/test_session_queue.py index 6dd7c4845a..731316068c 100644 --- a/tests/nodes/test_session_queue.py +++ b/tests/nodes/test_session_queue.py @@ -1,5 +1,5 @@ import pytest -from pydantic import ValidationError, parse_raw_as +from pydantic import TypeAdapter, ValidationError from invokeai.app.services.session_queue.session_queue_common import ( Batch, @@ -150,8 +150,9 @@ def test_prepare_values_to_insert(batch_data_collection, batch_graph): values = prepare_values_to_insert(queue_id="default", batch=b, priority=0, max_new_queue_items=1000) assert len(values) == 8 + session_adapter = TypeAdapter(GraphExecutionState) # graph should be serialized - ges = parse_raw_as(GraphExecutionState, values[0].session) + ges = session_adapter.validate_json(values[0].session) # graph values should be populated assert ges.graph.get_node("1").prompt == "Banana sushi" @@ -160,15 +161,16 @@ def test_prepare_values_to_insert(batch_data_collection, batch_graph): assert ges.graph.get_node("4").prompt == "Nissan" # session ids should match deserialized graph - assert [v.session_id for v in values] == [parse_raw_as(GraphExecutionState, v.session).id for v in values] + assert [v.session_id for v in values] == [session_adapter.validate_json(v.session).id for v in values] # should unique session ids sids = [v.session_id for v in values] assert len(sids) == len(set(sids)) + nfv_list_adapter = TypeAdapter(list[NodeFieldValue]) # should have 3 node field values assert type(values[0].field_values) is str - assert len(parse_raw_as(list[NodeFieldValue], values[0].field_values)) == 3 + assert len(nfv_list_adapter.validate_json(values[0].field_values)) == 3 # should have batch id and priority assert all(v.batch_id == b.batch_id for v in values) diff --git a/tests/nodes/test_sqlite.py b/tests/nodes/test_sqlite.py index 6e4da8b36e..818f9d048f 100644 --- a/tests/nodes/test_sqlite.py +++ b/tests/nodes/test_sqlite.py @@ -15,7 +15,8 @@ class TestModel(BaseModel): @pytest.fixture def db() -> SqliteItemStorage[TestModel]: sqlite_db = 
SqliteDatabase(InvokeAIAppConfig(use_memory_db=True), InvokeAILogger.get_logger()) - return SqliteItemStorage[TestModel](db=sqlite_db, table_name="test", id_field="id") + sqlite_item_storage = SqliteItemStorage[TestModel](db=sqlite_db, table_name="test", id_field="id") + return sqlite_item_storage def test_sqlite_service_can_create_and_get(db: SqliteItemStorage[TestModel]): From 2c39557dc9e16470ae179c5dd5a541f1db9f09d3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:28:29 +1100 Subject: [PATCH 076/202] fix(nodes): fix metadata validation error --- invokeai/app/invocations/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 9578fc3ae9..4d76926aaa 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -48,7 +48,7 @@ class CoreMetadata(BaseModelExcludeNull): default=None, description="The generation mode that output this image", ) - created_by: Optional[str] = Field(description="The name of the creator of the image") + created_by: Optional[str] = Field(default=None, description="The name of the creator of the image") positive_prompt: Optional[str] = Field(default=None, description="The positive prompt parameter") negative_prompt: Optional[str] = Field(default=None, description="The negative prompt parameter") width: Optional[int] = Field(default=None, description="The width parameter") From 685cda89ff5d2448857d195d3f79a258bbe48c14 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:28:39 +1100 Subject: [PATCH 077/202] feat(api): restore get_session route --- invokeai/app/api/routers/sessions.py | 456 +++++++++++++-------------- invokeai/app/api_app.py | 4 +- 2 files changed, 226 insertions(+), 234 deletions(-) diff --git a/invokeai/app/api/routers/sessions.py b/invokeai/app/api/routers/sessions.py index cd93a267ad..fb850d0b2b 100644 --- a/invokeai/app/api/routers/sessions.py +++ b/invokeai/app/api/routers/sessions.py @@ -1,57 +1,50 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from typing import Annotated, Optional, Union -from fastapi import Body, HTTPException, Path, Query, Response +from fastapi import HTTPException, Path from fastapi.routing import APIRouter -from pydantic.fields import Field -from invokeai.app.services.shared.pagination import PaginatedResults - -# Importing * is bad karma but needed here for node detection -from ...invocations import * # noqa: F401 F403 -from ...invocations.baseinvocation import BaseInvocation -from ...services.shared.graph import Edge, EdgeConnection, Graph, GraphExecutionState, NodeAlreadyExecutedError +from ...services.shared.graph import GraphExecutionState from ..dependencies import ApiDependencies session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"]) -@session_router.post( - "/", - operation_id="create_session", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid json"}, - }, - deprecated=True, -) -async def create_session( - queue_id: str = Query(default="", description="The id of the queue to associate the session with"), - graph: Optional[Graph] = Body(default=None, description="The graph to initialize the session with"), -) -> GraphExecutionState: - """Creates a new session, optionally initializing it with an invocation graph""" - session = 
ApiDependencies.invoker.create_execution_state(queue_id=queue_id, graph=graph) - return session +# @session_router.post( +# "/", +# operation_id="create_session", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid json"}, +# }, +# deprecated=True, +# ) +# async def create_session( +# queue_id: str = Query(default="", description="The id of the queue to associate the session with"), +# graph: Optional[Graph] = Body(default=None, description="The graph to initialize the session with"), +# ) -> GraphExecutionState: +# """Creates a new session, optionally initializing it with an invocation graph""" +# session = ApiDependencies.invoker.create_execution_state(queue_id=queue_id, graph=graph) +# return session -@session_router.get( - "/", - operation_id="list_sessions", - responses={200: {"model": PaginatedResults[GraphExecutionState]}}, - deprecated=True, -) -async def list_sessions( - page: int = Query(default=0, description="The page of results to get"), - per_page: int = Query(default=10, description="The number of results per page"), - query: str = Query(default="", description="The query string to search for"), -) -> PaginatedResults[GraphExecutionState]: - """Gets a list of sessions, optionally searching""" - if query == "": - result = ApiDependencies.invoker.services.graph_execution_manager.list(page, per_page) - else: - result = ApiDependencies.invoker.services.graph_execution_manager.search(query, page, per_page) - return result +# @session_router.get( +# "/", +# operation_id="list_sessions", +# responses={200: {"model": PaginatedResults[GraphExecutionState]}}, +# deprecated=True, +# ) +# async def list_sessions( +# page: int = Query(default=0, description="The page of results to get"), +# per_page: int = Query(default=10, description="The number of results per page"), +# query: str = Query(default="", description="The query string to search for"), +# ) -> PaginatedResults[GraphExecutionState]: +# """Gets a list of sessions, optionally searching""" +# if query == "": +# result = ApiDependencies.invoker.services.graph_execution_manager.list(page, per_page) +# else: +# result = ApiDependencies.invoker.services.graph_execution_manager.search(query, page, per_page) +# return result @session_router.get( @@ -61,7 +54,6 @@ async def list_sessions( 200: {"model": GraphExecutionState}, 404: {"description": "Session not found"}, }, - deprecated=True, ) async def get_session( session_id: str = Path(description="The id of the session to get"), @@ -74,211 +66,211 @@ async def get_session( return session -@session_router.post( - "/{session_id}/nodes", - operation_id="add_node", - responses={ - 200: {"model": str}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def add_node( - session_id: str = Path(description="The id of the session"), - node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore - description="The node to add" - ), -) -> str: - """Adds a node to the graph""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.post( +# "/{session_id}/nodes", +# operation_id="add_node", +# responses={ +# 200: {"model": str}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def add_node( +# session_id: str = Path(description="The id of the 
session"), +# node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore +# description="The node to add" +# ), +# ) -> str: +# """Adds a node to the graph""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - session.add_node(node) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done automatically, or add node through an API? - return session.id - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# session.add_node(node) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? +# return session.id +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -@session_router.put( - "/{session_id}/nodes/{node_path}", - operation_id="update_node", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def update_node( - session_id: str = Path(description="The id of the session"), - node_path: str = Path(description="The path to the node in the graph"), - node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore - description="The new node" - ), -) -> GraphExecutionState: - """Updates a node in the graph and removes all linked edges""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.put( +# "/{session_id}/nodes/{node_path}", +# operation_id="update_node", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def update_node( +# session_id: str = Path(description="The id of the session"), +# node_path: str = Path(description="The path to the node in the graph"), +# node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore +# description="The new node" +# ), +# ) -> GraphExecutionState: +# """Updates a node in the graph and removes all linked edges""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - session.update_node(node_path, node) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done automatically, or add node through an API? - return session - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# session.update_node(node_path, node) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? 
+# return session +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -@session_router.delete( - "/{session_id}/nodes/{node_path}", - operation_id="delete_node", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def delete_node( - session_id: str = Path(description="The id of the session"), - node_path: str = Path(description="The path to the node to delete"), -) -> GraphExecutionState: - """Deletes a node in the graph and removes all linked edges""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.delete( +# "/{session_id}/nodes/{node_path}", +# operation_id="delete_node", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def delete_node( +# session_id: str = Path(description="The id of the session"), +# node_path: str = Path(description="The path to the node to delete"), +# ) -> GraphExecutionState: +# """Deletes a node in the graph and removes all linked edges""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - session.delete_node(node_path) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done automatically, or add node through an API? - return session - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# session.delete_node(node_path) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? 
+# return session +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -@session_router.post( - "/{session_id}/edges", - operation_id="add_edge", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def add_edge( - session_id: str = Path(description="The id of the session"), - edge: Edge = Body(description="The edge to add"), -) -> GraphExecutionState: - """Adds an edge to the graph""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.post( +# "/{session_id}/edges", +# operation_id="add_edge", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def add_edge( +# session_id: str = Path(description="The id of the session"), +# edge: Edge = Body(description="The edge to add"), +# ) -> GraphExecutionState: +# """Adds an edge to the graph""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - session.add_edge(edge) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done automatically, or add node through an API? - return session - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# session.add_edge(edge) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? 
+# return session +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -# TODO: the edge being in the path here is really ugly, find a better solution -@session_router.delete( - "/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}", - operation_id="delete_edge", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def delete_edge( - session_id: str = Path(description="The id of the session"), - from_node_id: str = Path(description="The id of the node the edge is coming from"), - from_field: str = Path(description="The field of the node the edge is coming from"), - to_node_id: str = Path(description="The id of the node the edge is going to"), - to_field: str = Path(description="The field of the node the edge is going to"), -) -> GraphExecutionState: - """Deletes an edge from the graph""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# # TODO: the edge being in the path here is really ugly, find a better solution +# @session_router.delete( +# "/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}", +# operation_id="delete_edge", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def delete_edge( +# session_id: str = Path(description="The id of the session"), +# from_node_id: str = Path(description="The id of the node the edge is coming from"), +# from_field: str = Path(description="The field of the node the edge is coming from"), +# to_node_id: str = Path(description="The id of the node the edge is going to"), +# to_field: str = Path(description="The field of the node the edge is going to"), +# ) -> GraphExecutionState: +# """Deletes an edge from the graph""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - edge = Edge( - source=EdgeConnection(node_id=from_node_id, field=from_field), - destination=EdgeConnection(node_id=to_node_id, field=to_field), - ) - session.delete_edge(edge) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done automatically, or add node through an API? - return session - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# edge = Edge( +# source=EdgeConnection(node_id=from_node_id, field=from_field), +# destination=EdgeConnection(node_id=to_node_id, field=to_field), +# ) +# session.delete_edge(edge) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? 
+# return session +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -@session_router.put( - "/{session_id}/invoke", - operation_id="invoke_session", - responses={ - 200: {"model": None}, - 202: {"description": "The invocation is queued"}, - 400: {"description": "The session has no invocations ready to invoke"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def invoke_session( - queue_id: str = Query(description="The id of the queue to associate the session with"), - session_id: str = Path(description="The id of the session to invoke"), - all: bool = Query(default=False, description="Whether or not to invoke all remaining invocations"), -) -> Response: - """Invokes a session""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.put( +# "/{session_id}/invoke", +# operation_id="invoke_session", +# responses={ +# 200: {"model": None}, +# 202: {"description": "The invocation is queued"}, +# 400: {"description": "The session has no invocations ready to invoke"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def invoke_session( +# queue_id: str = Query(description="The id of the queue to associate the session with"), +# session_id: str = Path(description="The id of the session to invoke"), +# all: bool = Query(default=False, description="Whether or not to invoke all remaining invocations"), +# ) -> Response: +# """Invokes a session""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - if session.is_complete(): - raise HTTPException(status_code=400) +# if session.is_complete(): +# raise HTTPException(status_code=400) - ApiDependencies.invoker.invoke(queue_id, session, invoke_all=all) - return Response(status_code=202) +# ApiDependencies.invoker.invoke(queue_id, session, invoke_all=all) +# return Response(status_code=202) -@session_router.delete( - "/{session_id}/invoke", - operation_id="cancel_session_invoke", - responses={202: {"description": "The invocation is canceled"}}, - deprecated=True, -) -async def cancel_session_invoke( - session_id: str = Path(description="The id of the session to cancel"), -) -> Response: - """Invokes a session""" - ApiDependencies.invoker.cancel(session_id) - return Response(status_code=202) +# @session_router.delete( +# "/{session_id}/invoke", +# operation_id="cancel_session_invoke", +# responses={202: {"description": "The invocation is canceled"}}, +# deprecated=True, +# ) +# async def cancel_session_invoke( +# session_id: str = Path(description="The id of the session to cancel"), +# ) -> Response: +# """Invokes a session""" +# ApiDependencies.invoker.cancel(session_id) +# return Response(status_code=202) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index 5bbd8150c1..fa68d1b3e7 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -31,7 +31,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from ..backend.util.logging import InvokeAILogger from .api.dependencies import ApiDependencies - from .api.routers import app_info, board_images, boards, images, models, session_queue, utilities + from .api.routers import app_info, board_images, boards, images, models, sessions, session_queue, utilities from .api.sockets import SocketIO from 
.invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField @@ -85,7 +85,7 @@ async def shutdown_event(): # Include all routers -# app.include_router(sessions.session_router, prefix="/api") +app.include_router(sessions.session_router, prefix="/api") app.include_router(utilities.utilities_router, prefix="/api") From 9d9592230a402fe9e632b52d07c510406e62bc86 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:29:47 +1100 Subject: [PATCH 078/202] chore: lint --- invokeai/app/api_app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index fa68d1b3e7..e07b037dd1 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -31,7 +31,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from ..backend.util.logging import InvokeAILogger from .api.dependencies import ApiDependencies - from .api.routers import app_info, board_images, boards, images, models, sessions, session_queue, utilities + from .api.routers import app_info, board_images, boards, images, models, session_queue, sessions, utilities from .api.sockets import SocketIO from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField From a094f4ca2b4e28445bf52388f60b296097ea693a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:37:53 +1100 Subject: [PATCH 079/202] fix: pin `python-socketio~=5.10.0` --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4c8ec0f5e8..03fc45c5dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,10 +35,10 @@ dependencies = [ "accelerate~=0.23.0", "albumentations", "click", - "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", + "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "compel~=2.0.2", "controlnet-aux>=0.0.6", - "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 + "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 "datasets", # When bumping diffusers beyond 0.21, make sure to address this: # https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513 @@ -70,7 +70,7 @@ dependencies = [ 'pyperclip', "pyreadline3", "python-multipart", - "python-socketio", + "python-socketio~=5.10.0", "pytorch-lightning", "realesrgan", "requests~=2.28.2", From c69715636dcd6bbb425d01a1a63d3a84b40bb212 Mon Sep 17 00:00:00 2001 From: Surisen Date: Tue, 17 Oct 2023 06:15:20 +0200 Subject: [PATCH 080/202] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 100.0% (1217 of 1217 strings) Co-authored-by: Surisen Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/zh_CN.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 23940542a9..3f896076d4 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ 
b/invokeai/frontend/web/public/locales/zh_CN.json @@ -1101,16 +1101,16 @@ "contentShuffle": "Content Shuffle", "f": "F", "h": "H", - "controlnet": "$t(controlnet.controlAdapter) #{{number}} ($t(common.controlNet))", + "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))", "control": "Control (普通控制)", "coarse": "Coarse", "depthMidas": "Depth (Midas)", "w": "W", - "ip_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.ipAdapter))", + "ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))", "mediapipeFace": "Mediapipe Face", "mlsd": "M-LSD", "lineart": "Lineart", - "t2i_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.t2iAdapter))", + "t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))", "megaControl": "Mega Control (超级控制)", "depthZoe": "Depth (Zoe)", "colorMap": "Color", From 9542883bb5b88e97a66dfe4da60d366238dcfb41 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 28 Sep 2023 09:28:41 -0400 Subject: [PATCH 081/202] update requirements to python 3.10-11 --- docs/installation/010_INSTALL_AUTOMATED.md | 4 ++-- docs/installation/020_INSTALL_MANUAL.md | 4 ++-- docs/installation/060_INSTALL_PATCHMATCH.md | 3 +-- installer/install.bat.in | 13 ++++++------- installer/install.sh.in | 4 ++-- installer/lib/installer.py | 10 +--------- installer/readme.txt | 8 ++++---- pyproject.toml | 2 +- 8 files changed, 19 insertions(+), 29 deletions(-) diff --git a/docs/installation/010_INSTALL_AUTOMATED.md b/docs/installation/010_INSTALL_AUTOMATED.md index 0937c07bca..52192f33c0 100644 --- a/docs/installation/010_INSTALL_AUTOMATED.md +++ b/docs/installation/010_INSTALL_AUTOMATED.md @@ -40,7 +40,7 @@ experimental versions later. this, open up a command-line window ("Terminal" on Linux and Macintosh, "Command" or "Powershell" on Windows) and type `python --version`. If Python is installed, it will print out the version - number. If it is version `3.9.*`, `3.10.*` or `3.11.*` you meet + number. If it is version `3.10.*` or `3.11.*` you meet requirements. !!! warning "What to do if you have an unsupported version" @@ -48,7 +48,7 @@ experimental versions later. Go to [Python Downloads](https://www.python.org/downloads/) and download the appropriate installer package for your platform. We recommend [Version - 3.10.9](https://www.python.org/downloads/release/python-3109/), + 3.10.12](https://www.python.org/downloads/release/python-3109/), which has been extensively tested with InvokeAI. _Please select your platform in the section below for platform-specific diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md index a19992d266..27484c0ffd 100644 --- a/docs/installation/020_INSTALL_MANUAL.md +++ b/docs/installation/020_INSTALL_MANUAL.md @@ -32,7 +32,7 @@ gaming): * **Python** - version 3.9 through 3.11 + version 3.10 through 3.11 * **CUDA Tools** @@ -65,7 +65,7 @@ gaming): To install InvokeAI with virtual environments and the PIP package manager, please follow these steps: -1. Please make sure you are using Python 3.9 through 3.11. The rest of the install +1. Please make sure you are using Python 3.10 through 3.11. 
The rest of the install procedure depends on this and will not work with other versions: ```bash diff --git a/docs/installation/060_INSTALL_PATCHMATCH.md b/docs/installation/060_INSTALL_PATCHMATCH.md index ccfd19d207..a9646f8b60 100644 --- a/docs/installation/060_INSTALL_PATCHMATCH.md +++ b/docs/installation/060_INSTALL_PATCHMATCH.md @@ -59,8 +59,7 @@ Prior to installing PyPatchMatch, you need to take the following steps: `from patchmatch import patch_match`: It should look like the following: ```py - Python 3.9.5 (default, Nov 23 2021, 15:27:38) - [GCC 9.3.0] on linux + Python 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> from patchmatch import patch_match Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch". diff --git a/installer/install.bat.in b/installer/install.bat.in index ffe96d4355..5fa76471de 100644 --- a/installer/install.bat.in +++ b/installer/install.bat.in @@ -1,7 +1,7 @@ @echo off setlocal EnableExtensions EnableDelayedExpansion -@rem This script requires the user to install Python 3.9 or higher. All other +@rem This script requires the user to install Python 3.10 or higher. All other @rem requirements are downloaded as needed. @rem change to the script's directory @@ -19,7 +19,7 @@ set INVOKEAI_VERSION=latest set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/ set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting set PYTHON_URL=https://www.python.org/downloads/windows/ -set MINIMUM_PYTHON_VERSION=3.9.0 +set MINIMUM_PYTHON_VERSION=3.10.0 set PYTHON_URL=https://www.python.org/downloads/release/python-3109/ set err_msg=An error has occurred and the script could not continue. @@ -28,8 +28,7 @@ set err_msg=An error has occurred and the script could not continue. echo This script will install InvokeAI and its dependencies. echo. echo BEFORE YOU START PLEASE MAKE SURE TO DO THE FOLLOWING -echo 1. Install python 3.9 or 3.10. Python version 3.11 and above are -echo not supported at the moment. +echo 1. Install python 3.10 or 3.11. Python version 3.9 is no longer supported. echo 2. Double-click on the file WinLongPathsEnabled.reg in order to echo enable long path support on your system. echo 3. Install the Visual C++ core libraries. @@ -46,19 +45,19 @@ echo ***** Checking and Updating Python ***** call python --version >.tmp1 2>.tmp2 if %errorlevel% == 1 ( - set err_msg=Please install Python 3.10. See %INSTRUCTIONS% for details. + set err_msg=Please install Python 3.10-11. See %INSTRUCTIONS% for details. goto err_exit ) for /f "tokens=2" %%i in (.tmp1) do set python_version=%%i if "%python_version%" == "" ( - set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.9 from %PYTHON_URL% + set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.12 from %PYTHON_URL% goto err_exit ) call :compareVersions %MINIMUM_PYTHON_VERSION% %python_version% if %errorlevel% == 1 ( - set err_msg=Your version of Python is too low. You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. We recommend Python 3.10.9 from %PYTHON_URL% + set err_msg=Your version of Python is too low. You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. 
We recommend Python 3.10.12 from %PYTHON_URL% goto err_exit ) diff --git a/installer/install.sh.in b/installer/install.sh.in index 1b8ba92ea6..9cf41192bf 100755 --- a/installer/install.sh.in +++ b/installer/install.sh.in @@ -8,10 +8,10 @@ cd $scriptdir function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; } -MINIMUM_PYTHON_VERSION=3.9.0 +MINIMUM_PYTHON_VERSION=3.10.0 MAXIMUM_PYTHON_VERSION=3.11.100 PYTHON="" -for candidate in python3.11 python3.10 python3.9 python3 python ; do +for candidate in python3.11 python3.10 python3 python ; do if ppath=`which $candidate`; then # when using `pyenv`, the executable for an inactive Python version will exist but will not be operational # we check that this found executable can actually run diff --git a/installer/lib/installer.py b/installer/lib/installer.py index 70ed4d4331..bf48e3b06d 100644 --- a/installer/lib/installer.py +++ b/installer/lib/installer.py @@ -13,7 +13,7 @@ from pathlib import Path from tempfile import TemporaryDirectory from typing import Union -SUPPORTED_PYTHON = ">=3.9.0,<=3.11.100" +SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100" INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"] BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp" @@ -67,7 +67,6 @@ class Installer: # Cleaning up temporary directories on Windows results in a race condition # and a stack trace. # `ignore_cleanup_errors` was only added in Python 3.10 - # users of Python 3.9 will see a gnarly stack trace on installer exit if OS == "Windows" and int(platform.python_version_tuple()[1]) >= 10: venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX, ignore_cleanup_errors=True) else: @@ -139,13 +138,6 @@ class Installer: except shutil.SameFileError: venv.create(venv_dir, with_pip=True, symlinks=True) - # upgrade pip in Python 3.9 environments - if int(platform.python_version_tuple()[1]) == 9: - from plumbum import FG, local - - pip = local[get_pip_from_venv(venv_dir)] - pip["install", "--upgrade", "pip"] & FG - return venv_dir def install( diff --git a/installer/readme.txt b/installer/readme.txt index b9a97e2093..ef040c3913 100644 --- a/installer/readme.txt +++ b/installer/readme.txt @@ -4,7 +4,7 @@ Project homepage: https://github.com/invoke-ai/InvokeAI Preparations: - You will need to install Python 3.9 or higher for this installer + You will need to install Python 3.10 or higher for this installer to work. Instructions are given here: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/ @@ -14,15 +14,15 @@ Preparations: python --version If all is well, it will print "Python 3.X.X", where the version number - is at least 3.9.*, and not higher than 3.11.*. + is at least 3.10.*, and not higher than 3.11.*. If this works, check the version of the Python package manager, pip: pip --version You should get a message that indicates that the pip package - installer was derived from Python 3.9 or 3.10. For example: - "pip 22.3.1 from /usr/bin/pip (python 3.9)" + installer was derived from Python 3.10 or 3.11. 
For example: + "pip 22.0.1 from /usr/bin/pip (python 3.10)" Long Paths on Windows: diff --git a/pyproject.toml b/pyproject.toml index 03fc45c5dc..67486e1120 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "InvokeAI" description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process" -requires-python = ">=3.9, <3.12" +requires-python = ">=3.10, <3.12" readme = { content-type = "text/markdown", file = "README.md" } keywords = ["stable-diffusion", "AI"] dynamic = ["version"] From f11ba81a8d5e200c2985658ad05d1b6f0ad4f593 Mon Sep 17 00:00:00 2001 From: user1 Date: Tue, 10 Oct 2023 11:29:19 -0700 Subject: [PATCH 082/202] Fixing some var and arg names. --- .../stable_diffusion/diffusers_pipeline.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 0943b78bf8..5681a04695 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -546,11 +546,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # Handle ControlNet(s) and T2I-Adapter(s) down_block_additional_residuals = None mid_block_additional_residual = None - if control_data is not None and t2i_adapter_data is not None: + down_intrablock_additional_residuals = None + # if control_data is not None and t2i_adapter_data is not None: # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. - raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") - elif control_data is not None: + # raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") + # elif control_data is not None: + if control_data is not None: down_block_additional_residuals, mid_block_additional_residual = self.invokeai_diffuser.do_controlnet_step( control_data=control_data, sample=latent_model_input, @@ -559,7 +561,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, ) - elif t2i_adapter_data is not None: + # elif t2i_adapter_data is not None: + if t2i_adapter_data is not None: accum_adapter_state = None for single_t2i_adapter_data in t2i_adapter_data: # Determine the T2I-Adapter weights for the current denoising step. 
@@ -584,7 +587,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): for idx, value in enumerate(single_t2i_adapter_data.adapter_state): accum_adapter_state[idx] += value * t2i_adapter_weight - down_block_additional_residuals = accum_adapter_state + # down_block_additional_residuals = accum_adapter_state + down_intrablock_additional_residuals = accum_adapter_state uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step( sample=latent_model_input, @@ -593,8 +597,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, # extra: - down_block_additional_residuals=down_block_additional_residuals, - mid_block_additional_residual=mid_block_additional_residual, + down_block_additional_residuals=down_block_additional_residuals, # for ControlNet + mid_block_additional_residual=mid_block_additional_residual, # for ControlNet + down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter ) guidance_scale = conditioning_data.guidance_scale From 378689a519eaebb0181c38d0ad8faf6e747ceb16 Mon Sep 17 00:00:00 2001 From: user1 Date: Tue, 10 Oct 2023 12:25:54 -0700 Subject: [PATCH 083/202] Changes to _apply_standard_conditioning_sequentially() and _apply_cross_attention_controlled_conditioning() to reflect changes to the T2I-Adapter implementation that allow T2I-Adapter and ControlNet to be used at the same time. Also, the PREVIOUS commit (@8d3885d, which was already pushed to the GitHub repo) had an inaccurate commit message, but it is too late to fix that without a force push or other history rewriting that I'm reluctant to do. That commit is actually the one that contains all the changes to diffusers_pipeline.py that use the additional arg down_intrablock_additional_residuals (introduced in diffusers PR https://github.com/huggingface/diffusers/pull/5362) to disentangle T2I-Adapter inputs from ControlNet inputs to the main UNet.
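For readers unfamiliar with the diffusers change referenced above: ControlNet residuals keep flowing through down_block_additional_residuals, T2I-Adapter residuals move to the new down_intrablock_additional_residuals argument, and each batched residual is chunked into unconditioned and conditioned halves before the two sequential UNet passes. Below is a minimal, illustrative sketch of that chunking step, not the actual InvokeAI or diffusers code; the helper name, tensor shapes, and the commented unet(...) call are assumptions.

```py
from typing import List, Optional, Tuple

import torch


def split_batched_residuals(
    residuals: Optional[List[torch.Tensor]],
) -> Tuple[Optional[List[torch.Tensor]], Optional[List[torch.Tensor]]]:
    """Split each batched residual tensor into (unconditioned, conditioned) halves."""
    if residuals is None:
        return None, None
    uncond: List[torch.Tensor] = []
    cond: List[torch.Tensor] = []
    for r in residuals:
        u, c = r.chunk(2)  # batch dimension is assumed to hold [uncond, cond]
        uncond.append(u)
        cond.append(c)
    return uncond, cond


if __name__ == "__main__":
    # Fake residuals with batch size 2 (uncond + cond stacked along dim 0).
    controlnet_residuals = [torch.randn(2, 320, 64, 64), torch.randn(2, 640, 32, 32)]
    t2i_adapter_residuals = [torch.randn(2, 320, 64, 64)]

    uncond_down, cond_down = split_batched_residuals(controlnet_residuals)    # ControlNet
    uncond_intra, cond_intra = split_batched_residuals(t2i_adapter_residuals) # T2I-Adapter

    # Each of the two sequential UNet passes would then receive its own halves, e.g.:
    #   unet(..., down_block_additional_residuals=uncond_down,
    #             down_intrablock_additional_residuals=uncond_intra)
    print(uncond_down[0].shape, cond_intra[0].shape)  # both torch.Size([1, 320, 64, 64])
```

The key design point is that the two residual streams no longer share a single keyword argument, which is what previously forced ControlNet and T2I-Adapter to be mutually exclusive.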
--- .../diffusion/shared_invokeai_diffusion.py | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index c12c86ed92..ef0f3ee261 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -260,7 +260,6 @@ class InvokeAIDiffuserComponent: conditioning_data, **kwargs, ) - else: ( unconditioned_next_x, @@ -407,6 +406,16 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) + uncond_down_intrablock, cond_down_intrablock = None, None + down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) + if down_intrablock_additional_residuals is not None: + uncond_down_intrablock, cond_down_intrablock = [], [] + for down_intrablock in down_intrablock_additional_residuals: + print("down_intrablock shape: ", down_intrablock.shape) + _uncond_down, _cond_down = down_intrablock.chunk(2) + uncond_down_intrablock.append(_uncond_down) + cond_down_intrablock.append(_cond_down) + uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -437,6 +446,7 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, + down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -465,6 +475,7 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -489,6 +500,15 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) + uncond_down_intrablock, cond_down_intrablock = None, None + down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) + if down_intrablock_additional_residuals is not None: + uncond_down_intrablock, cond_down_intrablock = [], [] + for down_intrablock in down_intrablock_additional_residuals: + _uncond_down, _cond_down = down_intrablock.chunk(2) + uncond_down_intrablock.append(_uncond_down) + cond_down_intrablock.append(_cond_down) + uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -517,6 +537,7 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, + down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -536,6 +557,7 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) From 06f8a3276d96c4d4f4f316d5714fcc885a77c5d2 Mon Sep 17 00:00:00 2001 From: user1 Date: Mon, 
16 Oct 2023 10:15:12 -0700 Subject: [PATCH 084/202] Cleaning up (removing diagnostic prints) --- .../stable_diffusion/diffusion/shared_invokeai_diffusion.py | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index ef0f3ee261..d2af522496 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -411,7 +411,6 @@ class InvokeAIDiffuserComponent: if down_intrablock_additional_residuals is not None: uncond_down_intrablock, cond_down_intrablock = [], [] for down_intrablock in down_intrablock_additional_residuals: - print("down_intrablock shape: ", down_intrablock.shape) _uncond_down, _cond_down = down_intrablock.chunk(2) uncond_down_intrablock.append(_uncond_down) cond_down_intrablock.append(_cond_down) From fff29d663db391307db82a84c1a7af644d5b6d45 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 18:54:15 +1100 Subject: [PATCH 085/202] chore: lint --- invokeai/backend/stable_diffusion/diffusers_pipeline.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 5681a04695..1b65326f6e 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -548,8 +548,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): mid_block_additional_residual = None down_intrablock_additional_residuals = None # if control_data is not None and t2i_adapter_data is not None: - # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility - # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. + # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility + # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. 
# raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") # elif control_data is not None: if control_data is not None: @@ -598,8 +598,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): conditioning_data=conditioning_data, # extra: down_block_additional_residuals=down_block_additional_residuals, # for ControlNet - mid_block_additional_residual=mid_block_additional_residual, # for ControlNet - down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter + mid_block_additional_residual=mid_block_additional_residual, # for ControlNet + down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter ) guidance_scale = conditioning_data.guidance_scale From b14699355317fdaf1eaff03b36a5ce85fedb2943 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 18:58:02 +1100 Subject: [PATCH 086/202] feat(ui): remove special handling for t2i vs controlnet --- .../middleware/listenerMiddleware/index.ts | 7 +- .../listeners/controlAdapterAddedOrEnabled.ts | 87 ------------------- .../store/controlAdaptersSlice.ts | 67 -------------- .../frontend/web/src/services/api/schema.d.ts | 71 +++++++++++---- 4 files changed, 55 insertions(+), 177 deletions(-) delete mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index cbc88966a7..772ea216c0 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -12,6 +12,7 @@ import { addFirstListImagesListener } from './listeners/addFirstListImagesListen import { addAnyEnqueuedListener } from './listeners/anyEnqueued'; import { addAppConfigReceivedListener } from './listeners/appConfigReceived'; import { addAppStartedListener } from './listeners/appStarted'; +import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted'; import { addBoardIdSelectedListener } from './listeners/boardIdSelected'; import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard'; @@ -71,8 +72,6 @@ import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSa import { addTabChangedListener } from './listeners/tabChanged'; import { addUpscaleRequestedListener } from './listeners/upscaleRequested'; import { addWorkflowLoadedListener } from './listeners/workflowLoaded'; -import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; -import { addControlAdapterAddedOrEnabledListener } from './listeners/controlAdapterAddedOrEnabled'; export const listenerMiddleware = createListenerMiddleware(); @@ -200,7 +199,3 @@ addTabChangedListener(); // Dynamic prompts addDynamicPromptsListener(); - -// Display toast when controlnet or t2i adapter enabled -// TODO: Remove when they can both be enabled at same time -addControlAdapterAddedOrEnabledListener(); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts deleted file mode 100644 index bc5387c1fb..0000000000 --- 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { isAnyOf } from '@reduxjs/toolkit'; -import { - controlAdapterAdded, - controlAdapterAddedFromImage, - controlAdapterIsEnabledChanged, - controlAdapterRecalled, - selectControlAdapterAll, - selectControlAdapterById, -} from 'features/controlAdapters/store/controlAdaptersSlice'; -import { ControlAdapterType } from 'features/controlAdapters/store/types'; -import { addToast } from 'features/system/store/systemSlice'; -import i18n from 'i18n'; -import { startAppListening } from '..'; - -const isAnyControlAdapterAddedOrEnabled = isAnyOf( - controlAdapterAdded, - controlAdapterAddedFromImage, - controlAdapterRecalled, - controlAdapterIsEnabledChanged -); - -/** - * Until we can have both controlnet and t2i adapter enabled at once, they are mutually exclusive - * This displays a toast when one is enabled and the other is already enabled, or one is added - * with the other enabled - */ -export const addControlAdapterAddedOrEnabledListener = () => { - startAppListening({ - matcher: isAnyControlAdapterAddedOrEnabled, - effect: async (action, { dispatch, getOriginalState }) => { - const controlAdapters = getOriginalState().controlAdapters; - - const hasEnabledControlNets = selectControlAdapterAll( - controlAdapters - ).some((ca) => ca.isEnabled && ca.type === 'controlnet'); - - const hasEnabledT2IAdapters = selectControlAdapterAll( - controlAdapters - ).some((ca) => ca.isEnabled && ca.type === 't2i_adapter'); - - let caType: ControlAdapterType | null = null; - - if (controlAdapterAdded.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterAddedFromImage.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterRecalled.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterIsEnabledChanged.match(action)) { - const _caType = selectControlAdapterById( - controlAdapters, - action.payload.id - )?.type; - if (!_caType) { - return; - } - caType = _caType; - } - - if ( - (caType === 'controlnet' && hasEnabledT2IAdapters) || - (caType === 't2i_adapter' && hasEnabledControlNets) - ) { - const title = - caType === 'controlnet' - ? i18n.t('controlnet.controlNetEnabledT2IDisabled') - : i18n.t('controlnet.t2iEnabledControlNetDisabled'); - - const description = i18n.t('controlnet.controlNetT2IMutexDesc'); - - dispatch( - addToast({ - title, - description, - status: 'warning', - }) - ); - } - }, - }); -}; diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts index a3645fad9d..9e293f1104 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts @@ -88,61 +88,6 @@ export const selectValidT2IAdapters = (controlAdapters: ControlAdaptersState) => (ca.processorType === 'none' && Boolean(ca.controlImage))) ); -// TODO: I think we can safely remove this? 
-// const disableAllIPAdapters = ( -// state: ControlAdaptersState, -// exclude?: string -// ) => { -// const updates: Update[] = selectAllIPAdapters(state) -// .filter((ca) => ca.id !== exclude) -// .map((ca) => ({ -// id: ca.id, -// changes: { isEnabled: false }, -// })); -// caAdapter.updateMany(state, updates); -// }; - -const disableAllControlNets = ( - state: ControlAdaptersState, - exclude?: string -) => { - const updates: Update[] = selectAllControlNets(state) - .filter((ca) => ca.id !== exclude) - .map((ca) => ({ - id: ca.id, - changes: { isEnabled: false }, - })); - caAdapter.updateMany(state, updates); -}; - -const disableAllT2IAdapters = ( - state: ControlAdaptersState, - exclude?: string -) => { - const updates: Update[] = selectAllT2IAdapters(state) - .filter((ca) => ca.id !== exclude) - .map((ca) => ({ - id: ca.id, - changes: { isEnabled: false }, - })); - caAdapter.updateMany(state, updates); -}; - -const disableIncompatibleControlAdapters = ( - state: ControlAdaptersState, - type: ControlAdapterType, - exclude?: string -) => { - if (type === 'controlnet') { - // we cannot do controlnet + t2i adapter, if we are enabled a controlnet, disable all t2is - disableAllT2IAdapters(state, exclude); - } - if (type === 't2i_adapter') { - // we cannot do controlnet + t2i adapter, if we are enabled a t2i, disable controlnets - disableAllControlNets(state, exclude); - } -}; - export const controlAdaptersSlice = createSlice({ name: 'controlAdapters', initialState: initialControlAdapterState, @@ -158,7 +103,6 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, type, overrides } = action.payload; caAdapter.addOne(state, buildControlAdapter(id, type, overrides)); - disableIncompatibleControlAdapters(state, type, id); }, prepare: ({ type, @@ -175,8 +119,6 @@ export const controlAdaptersSlice = createSlice({ action: PayloadAction ) => { caAdapter.addOne(state, action.payload); - const { type, id } = action.payload; - disableIncompatibleControlAdapters(state, type, id); }, controlAdapterDuplicated: { reducer: ( @@ -196,8 +138,6 @@ export const controlAdaptersSlice = createSlice({ isEnabled: true, }); caAdapter.addOne(state, newControlAdapter); - const { type } = newControlAdapter; - disableIncompatibleControlAdapters(state, type, newId); }, prepare: (id: string) => { return { payload: { id, newId: uuidv4() } }; @@ -217,7 +157,6 @@ export const controlAdaptersSlice = createSlice({ state, buildControlAdapter(id, type, { controlImage }) ); - disableIncompatibleControlAdapters(state, type, id); }, prepare: (payload: { type: ControlAdapterType; @@ -235,12 +174,6 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, isEnabled } = action.payload; caAdapter.updateOne(state, { id, changes: { isEnabled } }); - if (isEnabled) { - // we are enabling a control adapter. 
due to limitations in the current system, we may need to disable other adapters - // TODO: disable when multiple IP adapters are supported - const ca = selectControlAdapterById(state, id); - ca && disableIncompatibleControlAdapters(state, ca.type, id); - } }, controlAdapterImageChanged: ( state, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index d4678dc03b..e0da45c4c9 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,6 +5,13 @@ export type paths = { + "/api/v1/sessions/{session_id}": { + /** + * Get Session + * @description Gets a session + */ + get: operations["get_session"]; + }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -1897,7 +1904,7 @@ export type components = { * Created By * @description The name of the creator of the image */ - created_by: string | null; + created_by?: string | null; /** * Positive Prompt * @description The positive prompt parameter @@ -3035,7 +3042,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | 
components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MainModelLoaderInvocation"]; + [key: string]: 
components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["SchedulerInvocation"] | 
components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"]; }; /** * Edges @@ -3072,7 +3079,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | 
components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SchedulerOutput"]; + [key: string]: components["schemas"]["FaceOffOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["String2Output"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageCollectionOutput"]; }; /** * Errors @@ -9139,11 +9146,11 @@ export type components = { ui_order: number | null; }; /** - * StableDiffusionOnnxModelFormat + * IPAdapterModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; + IPAdapterModelFormat: "invokeai"; /** * StableDiffusion2ModelFormat * @description An enumeration. @@ -9156,36 +9163,36 @@ export type components = { * @enum {string} */ StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; /** * CLIPVisionModelFormat * @description An enumeration. * @enum {string} */ CLIPVisionModelFormat: "diffusers"; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * IPAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - IPAdapterModelFormat: "invokeai"; /** * T2IAdapterModelFormat * @description An enumeration. * @enum {string} */ T2IAdapterModelFormat: "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9200,6 +9207,36 @@ export type external = Record; export type operations = { + /** + * Get Session + * @description Gets a session + */ + get_session: { + parameters: { + path: { + /** @description The id of the session to get */ + session_id: string; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["GraphExecutionState"]; + }; + }; + /** @description Session not found */ + 404: { + content: never; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; /** * Parse Dynamicprompts * @description Creates a batch process From bdf4c4944cf0e9e827ee026fddfd1bd74f3c40e0 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 087/202] Revert "feat(ui): remove special handling for t2i vs controlnet" This reverts commit b14699355317fdaf1eaff03b36a5ce85fedb2943. --- .../middleware/listenerMiddleware/index.ts | 7 +- .../listeners/controlAdapterAddedOrEnabled.ts | 87 +++++++++++++++++++ .../store/controlAdaptersSlice.ts | 67 ++++++++++++++ .../frontend/web/src/services/api/schema.d.ts | 71 ++++----------- 4 files changed, 177 insertions(+), 55 deletions(-) create mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index 772ea216c0..cbc88966a7 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -12,7 +12,6 @@ import { addFirstListImagesListener } from './listeners/addFirstListImagesListen import { addAnyEnqueuedListener } from './listeners/anyEnqueued'; import { addAppConfigReceivedListener } from './listeners/appConfigReceived'; import { addAppStartedListener } from './listeners/appStarted'; -import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted'; import { addBoardIdSelectedListener } from './listeners/boardIdSelected'; import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard'; @@ -72,6 +71,8 @@ import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSa import { addTabChangedListener } from './listeners/tabChanged'; import { addUpscaleRequestedListener } from './listeners/upscaleRequested'; import { addWorkflowLoadedListener } from './listeners/workflowLoaded'; +import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; +import { addControlAdapterAddedOrEnabledListener } from 
'./listeners/controlAdapterAddedOrEnabled'; export const listenerMiddleware = createListenerMiddleware(); @@ -199,3 +200,7 @@ addTabChangedListener(); // Dynamic prompts addDynamicPromptsListener(); + +// Display toast when controlnet or t2i adapter enabled +// TODO: Remove when they can both be enabled at same time +addControlAdapterAddedOrEnabledListener(); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts new file mode 100644 index 0000000000..bc5387c1fb --- /dev/null +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts @@ -0,0 +1,87 @@ +import { isAnyOf } from '@reduxjs/toolkit'; +import { + controlAdapterAdded, + controlAdapterAddedFromImage, + controlAdapterIsEnabledChanged, + controlAdapterRecalled, + selectControlAdapterAll, + selectControlAdapterById, +} from 'features/controlAdapters/store/controlAdaptersSlice'; +import { ControlAdapterType } from 'features/controlAdapters/store/types'; +import { addToast } from 'features/system/store/systemSlice'; +import i18n from 'i18n'; +import { startAppListening } from '..'; + +const isAnyControlAdapterAddedOrEnabled = isAnyOf( + controlAdapterAdded, + controlAdapterAddedFromImage, + controlAdapterRecalled, + controlAdapterIsEnabledChanged +); + +/** + * Until we can have both controlnet and t2i adapter enabled at once, they are mutually exclusive + * This displays a toast when one is enabled and the other is already enabled, or one is added + * with the other enabled + */ +export const addControlAdapterAddedOrEnabledListener = () => { + startAppListening({ + matcher: isAnyControlAdapterAddedOrEnabled, + effect: async (action, { dispatch, getOriginalState }) => { + const controlAdapters = getOriginalState().controlAdapters; + + const hasEnabledControlNets = selectControlAdapterAll( + controlAdapters + ).some((ca) => ca.isEnabled && ca.type === 'controlnet'); + + const hasEnabledT2IAdapters = selectControlAdapterAll( + controlAdapters + ).some((ca) => ca.isEnabled && ca.type === 't2i_adapter'); + + let caType: ControlAdapterType | null = null; + + if (controlAdapterAdded.match(action)) { + caType = action.payload.type; + } + + if (controlAdapterAddedFromImage.match(action)) { + caType = action.payload.type; + } + + if (controlAdapterRecalled.match(action)) { + caType = action.payload.type; + } + + if (controlAdapterIsEnabledChanged.match(action)) { + const _caType = selectControlAdapterById( + controlAdapters, + action.payload.id + )?.type; + if (!_caType) { + return; + } + caType = _caType; + } + + if ( + (caType === 'controlnet' && hasEnabledT2IAdapters) || + (caType === 't2i_adapter' && hasEnabledControlNets) + ) { + const title = + caType === 'controlnet' + ? 
i18n.t('controlnet.controlNetEnabledT2IDisabled') + : i18n.t('controlnet.t2iEnabledControlNetDisabled'); + + const description = i18n.t('controlnet.controlNetT2IMutexDesc'); + + dispatch( + addToast({ + title, + description, + status: 'warning', + }) + ); + } + }, + }); +}; diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts index 9e293f1104..a3645fad9d 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts @@ -88,6 +88,61 @@ export const selectValidT2IAdapters = (controlAdapters: ControlAdaptersState) => (ca.processorType === 'none' && Boolean(ca.controlImage))) ); +// TODO: I think we can safely remove this? +// const disableAllIPAdapters = ( +// state: ControlAdaptersState, +// exclude?: string +// ) => { +// const updates: Update[] = selectAllIPAdapters(state) +// .filter((ca) => ca.id !== exclude) +// .map((ca) => ({ +// id: ca.id, +// changes: { isEnabled: false }, +// })); +// caAdapter.updateMany(state, updates); +// }; + +const disableAllControlNets = ( + state: ControlAdaptersState, + exclude?: string +) => { + const updates: Update[] = selectAllControlNets(state) + .filter((ca) => ca.id !== exclude) + .map((ca) => ({ + id: ca.id, + changes: { isEnabled: false }, + })); + caAdapter.updateMany(state, updates); +}; + +const disableAllT2IAdapters = ( + state: ControlAdaptersState, + exclude?: string +) => { + const updates: Update[] = selectAllT2IAdapters(state) + .filter((ca) => ca.id !== exclude) + .map((ca) => ({ + id: ca.id, + changes: { isEnabled: false }, + })); + caAdapter.updateMany(state, updates); +}; + +const disableIncompatibleControlAdapters = ( + state: ControlAdaptersState, + type: ControlAdapterType, + exclude?: string +) => { + if (type === 'controlnet') { + // we cannot do controlnet + t2i adapter, if we are enabled a controlnet, disable all t2is + disableAllT2IAdapters(state, exclude); + } + if (type === 't2i_adapter') { + // we cannot do controlnet + t2i adapter, if we are enabled a t2i, disable controlnets + disableAllControlNets(state, exclude); + } +}; + export const controlAdaptersSlice = createSlice({ name: 'controlAdapters', initialState: initialControlAdapterState, @@ -103,6 +158,7 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, type, overrides } = action.payload; caAdapter.addOne(state, buildControlAdapter(id, type, overrides)); + disableIncompatibleControlAdapters(state, type, id); }, prepare: ({ type, @@ -119,6 +175,8 @@ export const controlAdaptersSlice = createSlice({ action: PayloadAction ) => { caAdapter.addOne(state, action.payload); + const { type, id } = action.payload; + disableIncompatibleControlAdapters(state, type, id); }, controlAdapterDuplicated: { reducer: ( @@ -138,6 +196,8 @@ export const controlAdaptersSlice = createSlice({ isEnabled: true, }); caAdapter.addOne(state, newControlAdapter); + const { type } = newControlAdapter; + disableIncompatibleControlAdapters(state, type, newId); }, prepare: (id: string) => { return { payload: { id, newId: uuidv4() } }; @@ -157,6 +217,7 @@ export const controlAdaptersSlice = createSlice({ state, buildControlAdapter(id, type, { controlImage }) ); + disableIncompatibleControlAdapters(state, type, id); }, prepare: (payload: { type: ControlAdapterType; @@ -174,6 +235,12 @@ export const controlAdaptersSlice = createSlice({ ) => { const 
{ id, isEnabled } = action.payload; caAdapter.updateOne(state, { id, changes: { isEnabled } }); + if (isEnabled) { + // we are enabling a control adapter. due to limitations in the current system, we may need to disable other adapters + // TODO: disable when multiple IP adapters are supported + const ca = selectControlAdapterById(state, id); + ca && disableIncompatibleControlAdapters(state, ca.type, id); + } }, controlAdapterImageChanged: ( state, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index e0da45c4c9..d4678dc03b 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,13 +5,6 @@ export type paths = { - "/api/v1/sessions/{session_id}": { - /** - * Get Session - * @description Gets a session - */ - get: operations["get_session"]; - }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -1904,7 +1897,7 @@ export type components = { * Created By * @description The name of the creator of the image */ - created_by?: string | null; + created_by: string | null; /** * Positive Prompt * @description The positive prompt parameter @@ -3042,7 +3035,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["RandomRangeInvocation"] | 
components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"]; + [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | 
components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MainModelLoaderInvocation"]; }; /** * Edges @@ -3079,7 +3072,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["FaceOffOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | 
components["schemas"]["VaeLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["String2Output"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageCollectionOutput"]; + [key: string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SchedulerOutput"]; }; /** * Errors @@ -9146,11 +9139,11 @@ export type components = { ui_order: number | null; }; /** - * IPAdapterModelFormat + * StableDiffusionOnnxModelFormat * @description An enumeration. * @enum {string} */ - IPAdapterModelFormat: "invokeai"; + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusion2ModelFormat * @description An enumeration. @@ -9163,36 +9156,36 @@ export type components = { * @enum {string} */ StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; /** * CLIPVisionModelFormat * @description An enumeration. 
* @enum {string} */ CLIPVisionModelFormat: "diffusers"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + /** + * IPAdapterModelFormat + * @description An enumeration. + * @enum {string} + */ + IPAdapterModelFormat: "invokeai"; /** * T2IAdapterModelFormat * @description An enumeration. * @enum {string} */ T2IAdapterModelFormat: "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9207,36 +9200,6 @@ export type external = Record; export type operations = { - /** - * Get Session - * @description Gets a session - */ - get_session: { - parameters: { - path: { - /** @description The id of the session to get */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; /** * Parse Dynamicprompts * @description Creates a batch process From 38e7eb8878aa213426764b5aaa27532ef2b9db8a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 088/202] Revert "chore: lint" This reverts commit fff29d663db391307db82a84c1a7af644d5b6d45. --- invokeai/backend/stable_diffusion/diffusers_pipeline.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 1b65326f6e..5681a04695 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -548,8 +548,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): mid_block_additional_residual = None down_intrablock_additional_residuals = None # if control_data is not None and t2i_adapter_data is not None: - # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility - # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. + # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility + # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. 
# raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") # elif control_data is not None: if control_data is not None: @@ -598,8 +598,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): conditioning_data=conditioning_data, # extra: down_block_additional_residuals=down_block_additional_residuals, # for ControlNet - mid_block_additional_residual=mid_block_additional_residual, # for ControlNet - down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter + mid_block_additional_residual=mid_block_additional_residual, # for ControlNet + down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter ) guidance_scale = conditioning_data.guidance_scale From 6e697b7b6f0a7d7ae24455e15c15baa1bc0adaf6 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 089/202] Revert "Cleaning up (removing diagnostic prints)" This reverts commit 06f8a3276d96c4d4f4f316d5714fcc885a77c5d2. --- .../stable_diffusion/diffusion/shared_invokeai_diffusion.py | 1 + 1 file changed, 1 insertion(+) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index d2af522496..ef0f3ee261 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -411,6 +411,7 @@ class InvokeAIDiffuserComponent: if down_intrablock_additional_residuals is not None: uncond_down_intrablock, cond_down_intrablock = [], [] for down_intrablock in down_intrablock_additional_residuals: + print("down_intrablock shape: ", down_intrablock.shape) _uncond_down, _cond_down = down_intrablock.chunk(2) uncond_down_intrablock.append(_uncond_down) cond_down_intrablock.append(_cond_down) From c04fb451ee4603cad7751f890a0b3047c15df5b3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 090/202] Revert "Changes to _apply_standard_conditioning_sequentially() and _apply_cross_attention_controlled_conditioning() to reflect changes to T2I-Adapter implementation to allow usage of T2I-Adapter and ControlNet at the same time." This reverts commit 378689a519eaebb0181c38d0ad8faf6e747ceb16. 
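For reference, the block removed here (and restored again later in this series) exists because the adapter residuals handed to this component are computed for the combined [unconditioned, conditioned] batch, while the sequential guidance path invokes the UNet separately for each half, so every residual tensor has to be split along dim 0 first. A minimal sketch of that split (the tensor shape below is assumed for illustration only, not taken from the real pipeline):

    import torch

    # A residual computed for a combined [unconditioned, conditioned] batch.
    down_intrablock = torch.randn(2, 320, 64, 64)

    # chunk(2) on dim 0 yields one half per guidance pass.
    uncond_half, cond_half = down_intrablock.chunk(2)
    assert uncond_half.shape == cond_half.shape == (1, 320, 64, 64)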
--- .../diffusion/shared_invokeai_diffusion.py | 24 +------------------ 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index ef0f3ee261..c12c86ed92 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -260,6 +260,7 @@ class InvokeAIDiffuserComponent: conditioning_data, **kwargs, ) + else: ( unconditioned_next_x, @@ -406,16 +407,6 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) - uncond_down_intrablock, cond_down_intrablock = None, None - down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) - if down_intrablock_additional_residuals is not None: - uncond_down_intrablock, cond_down_intrablock = [], [] - for down_intrablock in down_intrablock_additional_residuals: - print("down_intrablock shape: ", down_intrablock.shape) - _uncond_down, _cond_down = down_intrablock.chunk(2) - uncond_down_intrablock.append(_uncond_down) - cond_down_intrablock.append(_cond_down) - uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -446,7 +437,6 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, - down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -475,7 +465,6 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, - down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -500,15 +489,6 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) - uncond_down_intrablock, cond_down_intrablock = None, None - down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) - if down_intrablock_additional_residuals is not None: - uncond_down_intrablock, cond_down_intrablock = [], [] - for down_intrablock in down_intrablock_additional_residuals: - _uncond_down, _cond_down = down_intrablock.chunk(2) - uncond_down_intrablock.append(_uncond_down) - cond_down_intrablock.append(_cond_down) - uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -537,7 +517,6 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, - down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -557,7 +536,6 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, - down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) From 58a0709c1ea19293e49f6c88d8836f0378a982cd Mon Sep 17 00:00:00 2001 From: psychedelicious 
<4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 091/202] Revert "Fixing some var and arg names." This reverts commit f11ba81a8d5e200c2985658ad05d1b6f0ad4f593. --- .../stable_diffusion/diffusers_pipeline.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 5681a04695..0943b78bf8 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -546,13 +546,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # Handle ControlNet(s) and T2I-Adapter(s) down_block_additional_residuals = None mid_block_additional_residual = None - down_intrablock_additional_residuals = None - # if control_data is not None and t2i_adapter_data is not None: + if control_data is not None and t2i_adapter_data is not None: # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. - # raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") - # elif control_data is not None: - if control_data is not None: + raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") + elif control_data is not None: down_block_additional_residuals, mid_block_additional_residual = self.invokeai_diffuser.do_controlnet_step( control_data=control_data, sample=latent_model_input, @@ -561,8 +559,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, ) - # elif t2i_adapter_data is not None: - if t2i_adapter_data is not None: + elif t2i_adapter_data is not None: accum_adapter_state = None for single_t2i_adapter_data in t2i_adapter_data: # Determine the T2I-Adapter weights for the current denoising step. @@ -587,8 +584,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): for idx, value in enumerate(single_t2i_adapter_data.adapter_state): accum_adapter_state[idx] += value * t2i_adapter_weight - # down_block_additional_residuals = accum_adapter_state - down_intrablock_additional_residuals = accum_adapter_state + down_block_additional_residuals = accum_adapter_state uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step( sample=latent_model_input, @@ -597,9 +593,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, # extra: - down_block_additional_residuals=down_block_additional_residuals, # for ControlNet - mid_block_additional_residual=mid_block_additional_residual, # for ControlNet - down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter + down_block_additional_residuals=down_block_additional_residuals, + mid_block_additional_residual=mid_block_additional_residual, ) guidance_scale = conditioning_data.guidance_scale From 282d36b6405409646e587e7ddbe510acaad1f01c Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> Date: Tue, 17 Oct 2023 11:59:19 -0400 Subject: [PATCH 092/202] Revert "Revert "Fixing some var and arg names."" This reverts commit 58a0709c1ea19293e49f6c88d8836f0378a982cd. 
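Re-applying this restores the variable and argument names that keep the two adapter families on separate UNet inputs: ControlNet residuals go to `down_block_additional_residuals` / `mid_block_additional_residual`, while accumulated T2I-Adapter state goes to `down_intrablock_additional_residuals`, so enabling both no longer has to raise. A rough, self-contained sketch of that routing (the helper and parameter names are invented for illustration; only the three UNet keyword names come from the diff below):

    from typing import Any, Dict, List, Optional

    import torch

    def build_adapter_unet_kwargs(
        controlnet_down_residuals: Optional[List[torch.Tensor]] = None,
        controlnet_mid_residual: Optional[torch.Tensor] = None,
        t2i_adapter_state: Optional[List[torch.Tensor]] = None,
    ) -> Dict[str, Any]:
        """Route ControlNet and T2I-Adapter residuals to their own UNet kwargs."""
        kwargs: Dict[str, Any] = {}
        if controlnet_down_residuals is not None:
            # ControlNet residuals are added to the down-block outputs and the mid block.
            kwargs["down_block_additional_residuals"] = controlnet_down_residuals
            kwargs["mid_block_additional_residual"] = controlnet_mid_residual
        if t2i_adapter_state is not None:
            # T2I-Adapter features are injected inside the down blocks instead.
            kwargs["down_intrablock_additional_residuals"] = t2i_adapter_state
        return kwargs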
--- .../stable_diffusion/diffusers_pipeline.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 0943b78bf8..5681a04695 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -546,11 +546,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # Handle ControlNet(s) and T2I-Adapter(s) down_block_additional_residuals = None mid_block_additional_residual = None - if control_data is not None and t2i_adapter_data is not None: + down_intrablock_additional_residuals = None + # if control_data is not None and t2i_adapter_data is not None: # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. - raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") - elif control_data is not None: + # raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") + # elif control_data is not None: + if control_data is not None: down_block_additional_residuals, mid_block_additional_residual = self.invokeai_diffuser.do_controlnet_step( control_data=control_data, sample=latent_model_input, @@ -559,7 +561,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, ) - elif t2i_adapter_data is not None: + # elif t2i_adapter_data is not None: + if t2i_adapter_data is not None: accum_adapter_state = None for single_t2i_adapter_data in t2i_adapter_data: # Determine the T2I-Adapter weights for the current denoising step. @@ -584,7 +587,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): for idx, value in enumerate(single_t2i_adapter_data.adapter_state): accum_adapter_state[idx] += value * t2i_adapter_weight - down_block_additional_residuals = accum_adapter_state + # down_block_additional_residuals = accum_adapter_state + down_intrablock_additional_residuals = accum_adapter_state uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step( sample=latent_model_input, @@ -593,8 +597,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, # extra: - down_block_additional_residuals=down_block_additional_residuals, - mid_block_additional_residual=mid_block_additional_residual, + down_block_additional_residuals=down_block_additional_residuals, # for ControlNet + mid_block_additional_residual=mid_block_additional_residual, # for ControlNet + down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter ) guidance_scale = conditioning_data.guidance_scale From a97ec88e06db8d50159f3bcd107e592fafdfc2c9 Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> Date: Tue, 17 Oct 2023 11:59:19 -0400 Subject: [PATCH 093/202] Revert "Revert "Changes to _apply_standard_conditioning_sequentially() and _apply_cross_attention_controlled_conditioning() to reflect changes to T2I-Adapter implementation to allow usage of T2I-Adapter and ControlNet at the same time."" This reverts commit c04fb451ee4603cad7751f890a0b3047c15df5b3. 
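The re-applied code pops the batched T2I-Adapter residuals out of `kwargs` and splits each tensor into its unconditioned and conditioned halves, because this component runs the UNet once per half on the sequential path. A minimal sketch of that pattern, assuming a list of tensors batched as [uncond, cond] along dim 0 (function and variable names here are illustrative, not the component's real API):

    from typing import List, Optional, Tuple

    import torch

    def split_batched_residuals(
        residuals: Optional[List[torch.Tensor]],
    ) -> Tuple[Optional[List[torch.Tensor]], Optional[List[torch.Tensor]]]:
        """Split each [uncond, cond] batched residual into per-pass lists."""
        if residuals is None:
            return None, None
        uncond, cond = [], []
        for r in residuals:
            _uncond, _cond = r.chunk(2)  # split along dim 0
            uncond.append(_uncond)
            cond.append(_cond)
        return uncond, cond

The unconditioned UNet call then receives the first list via `down_intrablock_additional_residuals` and the conditioned call receives the second, mirroring how the ControlNet residuals are already handled.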
--- .../diffusion/shared_invokeai_diffusion.py | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index c12c86ed92..ef0f3ee261 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -260,7 +260,6 @@ class InvokeAIDiffuserComponent: conditioning_data, **kwargs, ) - else: ( unconditioned_next_x, @@ -407,6 +406,16 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) + uncond_down_intrablock, cond_down_intrablock = None, None + down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) + if down_intrablock_additional_residuals is not None: + uncond_down_intrablock, cond_down_intrablock = [], [] + for down_intrablock in down_intrablock_additional_residuals: + print("down_intrablock shape: ", down_intrablock.shape) + _uncond_down, _cond_down = down_intrablock.chunk(2) + uncond_down_intrablock.append(_uncond_down) + cond_down_intrablock.append(_cond_down) + uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -437,6 +446,7 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, + down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -465,6 +475,7 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -489,6 +500,15 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) + uncond_down_intrablock, cond_down_intrablock = None, None + down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) + if down_intrablock_additional_residuals is not None: + uncond_down_intrablock, cond_down_intrablock = [], [] + for down_intrablock in down_intrablock_additional_residuals: + _uncond_down, _cond_down = down_intrablock.chunk(2) + uncond_down_intrablock.append(_uncond_down) + cond_down_intrablock.append(_cond_down) + uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -517,6 +537,7 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, + down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -536,6 +557,7 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) From 8afc47018b868a5f72bd67751ca29ff3ea141f2b Mon Sep 17 00:00:00 2001 From: Kent Keirsey 
<31807370+hipsterusername@users.noreply.github.com> Date: Tue, 17 Oct 2023 11:59:19 -0400 Subject: [PATCH 094/202] Revert "Revert "Cleaning up (removing diagnostic prints)"" This reverts commit 6e697b7b6f0a7d7ae24455e15c15baa1bc0adaf6. --- .../stable_diffusion/diffusion/shared_invokeai_diffusion.py | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index ef0f3ee261..d2af522496 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -411,7 +411,6 @@ class InvokeAIDiffuserComponent: if down_intrablock_additional_residuals is not None: uncond_down_intrablock, cond_down_intrablock = [], [] for down_intrablock in down_intrablock_additional_residuals: - print("down_intrablock shape: ", down_intrablock.shape) _uncond_down, _cond_down = down_intrablock.chunk(2) uncond_down_intrablock.append(_uncond_down) cond_down_intrablock.append(_cond_down) From b7555ddae868a29139892f944f9aa3e51ca19588 Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> Date: Tue, 17 Oct 2023 11:59:19 -0400 Subject: [PATCH 095/202] Revert "Revert "chore: lint"" This reverts commit 38e7eb8878aa213426764b5aaa27532ef2b9db8a. --- invokeai/backend/stable_diffusion/diffusers_pipeline.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 5681a04695..1b65326f6e 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -548,8 +548,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): mid_block_additional_residual = None down_intrablock_additional_residuals = None # if control_data is not None and t2i_adapter_data is not None: - # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility - # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. + # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility + # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. # raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") # elif control_data is not None: if control_data is not None: @@ -598,8 +598,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): conditioning_data=conditioning_data, # extra: down_block_additional_residuals=down_block_additional_residuals, # for ControlNet - mid_block_additional_residual=mid_block_additional_residual, # for ControlNet - down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter + mid_block_additional_residual=mid_block_additional_residual, # for ControlNet + down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter ) guidance_scale = conditioning_data.guidance_scale From 55ad4feb5c2f1e15db17756ce22994402f0cb0ca Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> Date: Tue, 17 Oct 2023 11:59:19 -0400 Subject: [PATCH 096/202] Revert "Revert "feat(ui): remove special handling for t2i vs controlnet"" This reverts commit bdf4c4944cf0e9e827ee026fddfd1bd74f3c40e0. 
--- .../middleware/listenerMiddleware/index.ts | 7 +- .../listeners/controlAdapterAddedOrEnabled.ts | 87 ------------------- .../store/controlAdaptersSlice.ts | 67 -------------- .../frontend/web/src/services/api/schema.d.ts | 71 +++++++++++---- 4 files changed, 55 insertions(+), 177 deletions(-) delete mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index cbc88966a7..772ea216c0 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -12,6 +12,7 @@ import { addFirstListImagesListener } from './listeners/addFirstListImagesListen import { addAnyEnqueuedListener } from './listeners/anyEnqueued'; import { addAppConfigReceivedListener } from './listeners/appConfigReceived'; import { addAppStartedListener } from './listeners/appStarted'; +import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted'; import { addBoardIdSelectedListener } from './listeners/boardIdSelected'; import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard'; @@ -71,8 +72,6 @@ import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSa import { addTabChangedListener } from './listeners/tabChanged'; import { addUpscaleRequestedListener } from './listeners/upscaleRequested'; import { addWorkflowLoadedListener } from './listeners/workflowLoaded'; -import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; -import { addControlAdapterAddedOrEnabledListener } from './listeners/controlAdapterAddedOrEnabled'; export const listenerMiddleware = createListenerMiddleware(); @@ -200,7 +199,3 @@ addTabChangedListener(); // Dynamic prompts addDynamicPromptsListener(); - -// Display toast when controlnet or t2i adapter enabled -// TODO: Remove when they can both be enabled at same time -addControlAdapterAddedOrEnabledListener(); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts deleted file mode 100644 index bc5387c1fb..0000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { isAnyOf } from '@reduxjs/toolkit'; -import { - controlAdapterAdded, - controlAdapterAddedFromImage, - controlAdapterIsEnabledChanged, - controlAdapterRecalled, - selectControlAdapterAll, - selectControlAdapterById, -} from 'features/controlAdapters/store/controlAdaptersSlice'; -import { ControlAdapterType } from 'features/controlAdapters/store/types'; -import { addToast } from 'features/system/store/systemSlice'; -import i18n from 'i18n'; -import { startAppListening } from '..'; - -const isAnyControlAdapterAddedOrEnabled = isAnyOf( - controlAdapterAdded, - controlAdapterAddedFromImage, - controlAdapterRecalled, - controlAdapterIsEnabledChanged -); - -/** - * Until we can have both controlnet and t2i adapter enabled at once, they are mutually exclusive - * This displays a toast when one is enabled and the other is already enabled, or one is added - * with the other enabled - */ 
-export const addControlAdapterAddedOrEnabledListener = () => { - startAppListening({ - matcher: isAnyControlAdapterAddedOrEnabled, - effect: async (action, { dispatch, getOriginalState }) => { - const controlAdapters = getOriginalState().controlAdapters; - - const hasEnabledControlNets = selectControlAdapterAll( - controlAdapters - ).some((ca) => ca.isEnabled && ca.type === 'controlnet'); - - const hasEnabledT2IAdapters = selectControlAdapterAll( - controlAdapters - ).some((ca) => ca.isEnabled && ca.type === 't2i_adapter'); - - let caType: ControlAdapterType | null = null; - - if (controlAdapterAdded.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterAddedFromImage.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterRecalled.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterIsEnabledChanged.match(action)) { - const _caType = selectControlAdapterById( - controlAdapters, - action.payload.id - )?.type; - if (!_caType) { - return; - } - caType = _caType; - } - - if ( - (caType === 'controlnet' && hasEnabledT2IAdapters) || - (caType === 't2i_adapter' && hasEnabledControlNets) - ) { - const title = - caType === 'controlnet' - ? i18n.t('controlnet.controlNetEnabledT2IDisabled') - : i18n.t('controlnet.t2iEnabledControlNetDisabled'); - - const description = i18n.t('controlnet.controlNetT2IMutexDesc'); - - dispatch( - addToast({ - title, - description, - status: 'warning', - }) - ); - } - }, - }); -}; diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts index a3645fad9d..9e293f1104 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts @@ -88,61 +88,6 @@ export const selectValidT2IAdapters = (controlAdapters: ControlAdaptersState) => (ca.processorType === 'none' && Boolean(ca.controlImage))) ); -// TODO: I think we can safely remove this? 
-// const disableAllIPAdapters = ( -// state: ControlAdaptersState, -// exclude?: string -// ) => { -// const updates: Update[] = selectAllIPAdapters(state) -// .filter((ca) => ca.id !== exclude) -// .map((ca) => ({ -// id: ca.id, -// changes: { isEnabled: false }, -// })); -// caAdapter.updateMany(state, updates); -// }; - -const disableAllControlNets = ( - state: ControlAdaptersState, - exclude?: string -) => { - const updates: Update[] = selectAllControlNets(state) - .filter((ca) => ca.id !== exclude) - .map((ca) => ({ - id: ca.id, - changes: { isEnabled: false }, - })); - caAdapter.updateMany(state, updates); -}; - -const disableAllT2IAdapters = ( - state: ControlAdaptersState, - exclude?: string -) => { - const updates: Update[] = selectAllT2IAdapters(state) - .filter((ca) => ca.id !== exclude) - .map((ca) => ({ - id: ca.id, - changes: { isEnabled: false }, - })); - caAdapter.updateMany(state, updates); -}; - -const disableIncompatibleControlAdapters = ( - state: ControlAdaptersState, - type: ControlAdapterType, - exclude?: string -) => { - if (type === 'controlnet') { - // we cannot do controlnet + t2i adapter, if we are enabled a controlnet, disable all t2is - disableAllT2IAdapters(state, exclude); - } - if (type === 't2i_adapter') { - // we cannot do controlnet + t2i adapter, if we are enabled a t2i, disable controlnets - disableAllControlNets(state, exclude); - } -}; - export const controlAdaptersSlice = createSlice({ name: 'controlAdapters', initialState: initialControlAdapterState, @@ -158,7 +103,6 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, type, overrides } = action.payload; caAdapter.addOne(state, buildControlAdapter(id, type, overrides)); - disableIncompatibleControlAdapters(state, type, id); }, prepare: ({ type, @@ -175,8 +119,6 @@ export const controlAdaptersSlice = createSlice({ action: PayloadAction ) => { caAdapter.addOne(state, action.payload); - const { type, id } = action.payload; - disableIncompatibleControlAdapters(state, type, id); }, controlAdapterDuplicated: { reducer: ( @@ -196,8 +138,6 @@ export const controlAdaptersSlice = createSlice({ isEnabled: true, }); caAdapter.addOne(state, newControlAdapter); - const { type } = newControlAdapter; - disableIncompatibleControlAdapters(state, type, newId); }, prepare: (id: string) => { return { payload: { id, newId: uuidv4() } }; @@ -217,7 +157,6 @@ export const controlAdaptersSlice = createSlice({ state, buildControlAdapter(id, type, { controlImage }) ); - disableIncompatibleControlAdapters(state, type, id); }, prepare: (payload: { type: ControlAdapterType; @@ -235,12 +174,6 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, isEnabled } = action.payload; caAdapter.updateOne(state, { id, changes: { isEnabled } }); - if (isEnabled) { - // we are enabling a control adapter. 
due to limitations in the current system, we may need to disable other adapters - // TODO: disable when multiple IP adapters are supported - const ca = selectControlAdapterById(state, id); - ca && disableIncompatibleControlAdapters(state, ca.type, id); - } }, controlAdapterImageChanged: ( state, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index d4678dc03b..e0da45c4c9 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,6 +5,13 @@ export type paths = { + "/api/v1/sessions/{session_id}": { + /** + * Get Session + * @description Gets a session + */ + get: operations["get_session"]; + }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -1897,7 +1904,7 @@ export type components = { * Created By * @description The name of the creator of the image */ - created_by: string | null; + created_by?: string | null; /** * Positive Prompt * @description The positive prompt parameter @@ -3035,7 +3042,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | 
components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MainModelLoaderInvocation"]; + [key: string]: 
components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["SchedulerInvocation"] | 
components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"]; }; /** * Edges @@ -3072,7 +3079,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | 
components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SchedulerOutput"]; + [key: string]: components["schemas"]["FaceOffOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["String2Output"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageCollectionOutput"]; }; /** * Errors @@ -9139,11 +9146,11 @@ export type components = { ui_order: number | null; }; /** - * StableDiffusionOnnxModelFormat + * IPAdapterModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; + IPAdapterModelFormat: "invokeai"; /** * StableDiffusion2ModelFormat * @description An enumeration. @@ -9156,36 +9163,36 @@ export type components = { * @enum {string} */ StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; /** * CLIPVisionModelFormat * @description An enumeration. * @enum {string} */ CLIPVisionModelFormat: "diffusers"; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * IPAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - IPAdapterModelFormat: "invokeai"; /** * T2IAdapterModelFormat * @description An enumeration. * @enum {string} */ T2IAdapterModelFormat: "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9200,6 +9207,36 @@ export type external = Record; export type operations = { + /** + * Get Session + * @description Gets a session + */ + get_session: { + parameters: { + path: { + /** @description The id of the session to get */ + session_id: string; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["GraphExecutionState"]; + }; + }; + /** @description Session not found */ + 404: { + content: never; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; /** * Parse Dynamicprompts * @description Creates a batch process From 284a257c2531195b3b25138eb959325a9c434ef2 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 05:00:40 +1100 Subject: [PATCH 097/202] feat: remove `enqueue_graph` routes/methods (#4922) This is totally extraneous - it's almost identical to `enqueue_batch`. --- invokeai/app/api/routers/session_queue.py | 19 --- .../session_queue/session_queue_base.py | 7 - .../session_queue/session_queue_common.py | 8 - .../session_queue/session_queue_sqlite.py | 28 --- .../listeners/anyEnqueued.ts | 8 +- .../listeners/controlNetImageProcessed.ts | 84 ++++----- .../listeners/upscaleRequested.ts | 32 ++-- .../hooks/useIsQueueMutationInProgress.ts | 6 - .../web/src/services/api/endpoints/queue.ts | 25 --- .../frontend/web/src/services/api/schema.d.ts | 160 +++++++----------- .../frontend/web/src/services/api/types.ts | 1 - 11 files changed, 126 insertions(+), 252 deletions(-) diff --git a/invokeai/app/api/routers/session_queue.py b/invokeai/app/api/routers/session_queue.py index 7ecb0504a3..40f1f2213b 100644 --- a/invokeai/app/api/routers/session_queue.py +++ b/invokeai/app/api/routers/session_queue.py @@ -12,13 +12,11 @@ from invokeai.app.services.session_queue.session_queue_common import ( CancelByBatchIDsResult, ClearResult, EnqueueBatchResult, - EnqueueGraphResult, PruneResult, SessionQueueItem, SessionQueueItemDTO, SessionQueueStatus, ) -from invokeai.app.services.shared.graph import Graph from invokeai.app.services.shared.pagination import CursorPaginatedResults from ..dependencies import ApiDependencies @@ -33,23 +31,6 @@ class SessionQueueAndProcessorStatus(BaseModel): processor: SessionProcessorStatus -@session_queue_router.post( - "/{queue_id}/enqueue_graph", - operation_id="enqueue_graph", - responses={ - 201: {"model": EnqueueGraphResult}, - }, -) -async def enqueue_graph( - queue_id: str = Path(description="The queue id to perform this operation on"), - graph: Graph = Body(description="The graph to enqueue"), - prepend: bool = Body(default=False, description="Whether or not to prepend this batch in the queue"), -) -> EnqueueGraphResult: - """Enqueues a graph for single execution.""" - - return 
ApiDependencies.invoker.services.session_queue.enqueue_graph(queue_id=queue_id, graph=graph, prepend=prepend) - - @session_queue_router.post( "/{queue_id}/enqueue_batch", operation_id="enqueue_batch", diff --git a/invokeai/app/services/session_queue/session_queue_base.py b/invokeai/app/services/session_queue/session_queue_base.py index b5272f1868..e0b6e4f528 100644 --- a/invokeai/app/services/session_queue/session_queue_base.py +++ b/invokeai/app/services/session_queue/session_queue_base.py @@ -9,7 +9,6 @@ from invokeai.app.services.session_queue.session_queue_common import ( CancelByQueueIDResult, ClearResult, EnqueueBatchResult, - EnqueueGraphResult, IsEmptyResult, IsFullResult, PruneResult, @@ -17,7 +16,6 @@ from invokeai.app.services.session_queue.session_queue_common import ( SessionQueueItemDTO, SessionQueueStatus, ) -from invokeai.app.services.shared.graph import Graph from invokeai.app.services.shared.pagination import CursorPaginatedResults @@ -29,11 +27,6 @@ class SessionQueueBase(ABC): """Dequeues the next session queue item.""" pass - @abstractmethod - def enqueue_graph(self, queue_id: str, graph: Graph, prepend: bool) -> EnqueueGraphResult: - """Enqueues a single graph for execution.""" - pass - @abstractmethod def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult: """Enqueues all permutations of a batch for execution.""" diff --git a/invokeai/app/services/session_queue/session_queue_common.py b/invokeai/app/services/session_queue/session_queue_common.py index 48e1da83b5..cbf2154b66 100644 --- a/invokeai/app/services/session_queue/session_queue_common.py +++ b/invokeai/app/services/session_queue/session_queue_common.py @@ -276,14 +276,6 @@ class EnqueueBatchResult(BaseModel): priority: int = Field(description="The priority of the enqueued batch") -class EnqueueGraphResult(BaseModel): - enqueued: int = Field(description="The total number of queue items enqueued") - requested: int = Field(description="The total number of queue items requested to be enqueued") - batch: Batch = Field(description="The batch that was enqueued") - priority: int = Field(description="The priority of the enqueued batch") - queue_item: SessionQueueItemDTO = Field(description="The queue item that was enqueued") - - class ClearResult(BaseModel): """Result of clearing the session queue""" diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py index 4daab9cdbc..7259a7bd0c 100644 --- a/invokeai/app/services/session_queue/session_queue_sqlite.py +++ b/invokeai/app/services/session_queue/session_queue_sqlite.py @@ -17,7 +17,6 @@ from invokeai.app.services.session_queue.session_queue_common import ( CancelByQueueIDResult, ClearResult, EnqueueBatchResult, - EnqueueGraphResult, IsEmptyResult, IsFullResult, PruneResult, @@ -28,7 +27,6 @@ from invokeai.app.services.session_queue.session_queue_common import ( calc_session_count, prepare_values_to_insert, ) -from invokeai.app.services.shared.graph import Graph from invokeai.app.services.shared.pagination import CursorPaginatedResults from invokeai.app.services.shared.sqlite import SqliteDatabase @@ -255,32 +253,6 @@ class SqliteSessionQueue(SessionQueueBase): ) return cast(Union[int, None], self.__cursor.fetchone()[0]) or 0 - def enqueue_graph(self, queue_id: str, graph: Graph, prepend: bool) -> EnqueueGraphResult: - enqueue_result = self.enqueue_batch(queue_id=queue_id, batch=Batch(graph=graph), prepend=prepend) - try: - self.__lock.acquire() - 
self.__cursor.execute( - """--sql - SELECT * - FROM session_queue - WHERE queue_id = ? - AND batch_id = ? - """, - (queue_id, enqueue_result.batch.batch_id), - ) - result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone()) - except Exception: - self.__conn.rollback() - raise - finally: - self.__lock.release() - if result is None: - raise SessionQueueItemNotFoundError(f"No queue item with batch id {enqueue_result.batch.batch_id}") - return EnqueueGraphResult( - **enqueue_result.model_dump(), - queue_item=SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)), - ) - def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult: try: self.__lock.acquire() diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts index ff11491b53..3f0e3342f9 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts @@ -1,15 +1,9 @@ -import { isAnyOf } from '@reduxjs/toolkit'; import { queueApi } from 'services/api/endpoints/queue'; import { startAppListening } from '..'; -const matcher = isAnyOf( - queueApi.endpoints.enqueueBatch.matchFulfilled, - queueApi.endpoints.enqueueGraph.matchFulfilled -); - export const addAnyEnqueuedListener = () => { startAppListening({ - matcher, + matcher: queueApi.endpoints.enqueueBatch.matchFulfilled, effect: async (_, { dispatch, getState }) => { const { data } = queueApi.endpoints.getQueueStatus.select()(getState()); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts index f3db0ea65f..a454e5ca48 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts @@ -1,22 +1,22 @@ import { logger } from 'app/logging/logger'; import { parseify } from 'common/util/serialize'; +import { controlAdapterImageProcessed } from 'features/controlAdapters/store/actions'; import { - pendingControlImagesCleared, controlAdapterImageChanged, - selectControlAdapterById, controlAdapterProcessedImageChanged, + pendingControlImagesCleared, + selectControlAdapterById, } from 'features/controlAdapters/store/controlAdaptersSlice'; +import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types'; import { SAVE_IMAGE } from 'features/nodes/util/graphBuilders/constants'; import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; import { queueApi } from 'services/api/endpoints/queue'; import { isImageOutput } from 'services/api/guards'; -import { Graph, ImageDTO } from 'services/api/types'; +import { BatchConfig, ImageDTO } from 'services/api/types'; import { socketInvocationComplete } from 'services/events/actions'; import { startAppListening } from '..'; -import { controlAdapterImageProcessed } from 'features/controlAdapters/store/actions'; -import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types'; export const addControlNetImageProcessedListener = () => { startAppListening({ @@ -37,41 +37,46 @@ export const 
addControlNetImageProcessedListener = () => { // ControlNet one-off procressing graph is just the processor node, no edges. // Also we need to grab the image. - const graph: Graph = { - nodes: { - [ca.processorNode.id]: { - ...ca.processorNode, - is_intermediate: true, - image: { image_name: ca.controlImage }, - }, - [SAVE_IMAGE]: { - id: SAVE_IMAGE, - type: 'save_image', - is_intermediate: true, - use_cache: false, + + const enqueueBatchArg: BatchConfig = { + prepend: true, + batch: { + graph: { + nodes: { + [ca.processorNode.id]: { + ...ca.processorNode, + is_intermediate: true, + image: { image_name: ca.controlImage }, + }, + [SAVE_IMAGE]: { + id: SAVE_IMAGE, + type: 'save_image', + is_intermediate: true, + use_cache: false, + }, + }, + edges: [ + { + source: { + node_id: ca.processorNode.id, + field: 'image', + }, + destination: { + node_id: SAVE_IMAGE, + field: 'image', + }, + }, + ], }, + runs: 1, }, - edges: [ - { - source: { - node_id: ca.processorNode.id, - field: 'image', - }, - destination: { - node_id: SAVE_IMAGE, - field: 'image', - }, - }, - ], }; + try { const req = dispatch( - queueApi.endpoints.enqueueGraph.initiate( - { graph, prepend: true }, - { - fixedCacheKey: 'enqueueGraph', - } - ) + queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, { + fixedCacheKey: 'enqueueBatch', + }) ); const enqueueResult = await req.unwrap(); req.reset(); @@ -83,8 +88,8 @@ export const addControlNetImageProcessedListener = () => { const [invocationCompleteAction] = await take( (action): action is ReturnType => socketInvocationComplete.match(action) && - action.payload.data.graph_execution_state_id === - enqueueResult.queue_item.session_id && + action.payload.data.queue_batch_id === + enqueueResult.batch.batch_id && action.payload.data.source_node_id === SAVE_IMAGE ); @@ -116,7 +121,10 @@ export const addControlNetImageProcessedListener = () => { ); } } catch (error) { - log.error({ graph: parseify(graph) }, t('queue.graphFailedToQueue')); + log.error( + { enqueueBatchArg: parseify(enqueueBatchArg) }, + t('queue.graphFailedToQueue') + ); // handle usage-related errors if (error instanceof Object) { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts index c252f412a6..9ddcdc9701 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts @@ -6,7 +6,7 @@ import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { queueApi } from 'services/api/endpoints/queue'; import { startAppListening } from '..'; -import { ImageDTO } from 'services/api/types'; +import { BatchConfig, ImageDTO } from 'services/api/types'; import { createIsAllowedToUpscaleSelector } from 'features/parameters/hooks/useIsAllowedToUpscale'; export const upscaleRequested = createAction<{ imageDTO: ImageDTO }>( @@ -44,20 +44,23 @@ export const addUpscaleRequestedListener = () => { const { esrganModelName } = state.postprocessing; const { autoAddBoardId } = state.gallery; - const graph = buildAdHocUpscaleGraph({ - image_name, - esrganModelName, - autoAddBoardId, - }); + const enqueueBatchArg: BatchConfig = { + prepend: true, + batch: { + graph: buildAdHocUpscaleGraph({ + image_name, + esrganModelName, + autoAddBoardId, + }), + runs: 1, + }, + }; try { const req = dispatch( - 
queueApi.endpoints.enqueueGraph.initiate( - { graph, prepend: true }, - { - fixedCacheKey: 'enqueueGraph', - } - ) + queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, { + fixedCacheKey: 'enqueueBatch', + }) ); const enqueueResult = await req.unwrap(); @@ -67,7 +70,10 @@ export const addUpscaleRequestedListener = () => { t('queue.graphQueued') ); } catch (error) { - log.error({ graph: parseify(graph) }, t('queue.graphFailedToQueue')); + log.error( + { enqueueBatchArg: parseify(enqueueBatchArg) }, + t('queue.graphFailedToQueue') + ); // handle usage-related errors if (error instanceof Object) { diff --git a/invokeai/frontend/web/src/features/queue/hooks/useIsQueueMutationInProgress.ts b/invokeai/frontend/web/src/features/queue/hooks/useIsQueueMutationInProgress.ts index abb3967b92..9947c17086 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useIsQueueMutationInProgress.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useIsQueueMutationInProgress.ts @@ -3,7 +3,6 @@ import { // useCancelByBatchIdsMutation, useClearQueueMutation, useEnqueueBatchMutation, - useEnqueueGraphMutation, usePruneQueueMutation, useResumeProcessorMutation, usePauseProcessorMutation, @@ -14,10 +13,6 @@ export const useIsQueueMutationInProgress = () => { useEnqueueBatchMutation({ fixedCacheKey: 'enqueueBatch', }); - const [_triggerEnqueueGraph, { isLoading: isLoadingEnqueueGraph }] = - useEnqueueGraphMutation({ - fixedCacheKey: 'enqueueGraph', - }); const [_triggerResumeProcessor, { isLoading: isLoadingResumeProcessor }] = useResumeProcessorMutation({ fixedCacheKey: 'resumeProcessor', @@ -44,7 +39,6 @@ export const useIsQueueMutationInProgress = () => { // }); return ( isLoadingEnqueueBatch || - isLoadingEnqueueGraph || isLoadingResumeProcessor || isLoadingPauseProcessor || isLoadingCancelQueue || diff --git a/invokeai/frontend/web/src/services/api/endpoints/queue.ts b/invokeai/frontend/web/src/services/api/endpoints/queue.ts index ab75964e89..d44e333850 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/queue.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/queue.ts @@ -83,30 +83,6 @@ export const queueApi = api.injectEndpoints({ } }, }), - enqueueGraph: build.mutation< - paths['/api/v1/queue/{queue_id}/enqueue_graph']['post']['responses']['201']['content']['application/json'], - paths['/api/v1/queue/{queue_id}/enqueue_graph']['post']['requestBody']['content']['application/json'] - >({ - query: (arg) => ({ - url: `queue/${$queueId.get()}/enqueue_graph`, - body: arg, - method: 'POST', - }), - invalidatesTags: [ - 'SessionQueueStatus', - 'CurrentSessionQueueItem', - 'NextSessionQueueItem', - ], - onQueryStarted: async (arg, api) => { - const { dispatch, queryFulfilled } = api; - try { - await queryFulfilled; - resetListQueryData(dispatch); - } catch { - // no-op - } - }, - }), resumeProcessor: build.mutation< paths['/api/v1/queue/{queue_id}/processor/resume']['put']['responses']['200']['content']['application/json'], void @@ -341,7 +317,6 @@ export const queueApi = api.injectEndpoints({ export const { useCancelByBatchIdsMutation, - useEnqueueGraphMutation, useEnqueueBatchMutation, usePauseProcessorMutation, useResumeProcessorMutation, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index d4678dc03b..6bc54f0e35 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,6 +5,13 @@ export type paths = { + "/api/v1/sessions/{session_id}": { + /** 
+ * Get Session + * @description Gets a session + */ + get: operations["get_session"]; + }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -275,13 +282,6 @@ export type paths = { */ get: operations["get_invocation_cache_status"]; }; - "/api/v1/queue/{queue_id}/enqueue_graph": { - /** - * Enqueue Graph - * @description Enqueues a graph for single execution. - */ - post: operations["enqueue_graph"]; - }; "/api/v1/queue/{queue_id}/enqueue_batch": { /** * Enqueue Batch @@ -800,17 +800,6 @@ export type components = { */ prepend?: boolean; }; - /** Body_enqueue_graph */ - Body_enqueue_graph: { - /** @description The graph to enqueue */ - graph: components["schemas"]["Graph"]; - /** - * Prepend - * @description Whether or not to prepend this batch in the queue - * @default false - */ - prepend?: boolean; - }; /** Body_import_model */ Body_import_model: { /** @@ -1897,7 +1886,7 @@ export type components = { * Created By * @description The name of the creator of the image */ - created_by: string | null; + created_by?: string | null; /** * Positive Prompt * @description The positive prompt parameter @@ -2476,28 +2465,6 @@ export type components = { */ priority: number; }; - /** EnqueueGraphResult */ - EnqueueGraphResult: { - /** - * Enqueued - * @description The total number of queue items enqueued - */ - enqueued: number; - /** - * Requested - * @description The total number of queue items requested to be enqueued - */ - requested: number; - /** @description The batch that was enqueued */ - batch: components["schemas"]["Batch"]; - /** - * Priority - * @description The priority of the enqueued batch - */ - priority: number; - /** @description The queue item that was enqueued */ - queue_item: components["schemas"]["SessionQueueItemDTO"]; - }; /** * FaceIdentifier * @description Outputs an image with detected face IDs printed on each face. For use with other FaceTools. 
@@ -3035,7 +3002,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | 
components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MainModelLoaderInvocation"]; + [key: string]: components["schemas"]["IntegerInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | 
components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ImageChannelInvocation"] | 
components["schemas"]["MaskCombineInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["InfillColorInvocation"]; }; /** * Edges @@ -3072,7 +3039,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SchedulerOutput"]; + [key: string]: components["schemas"]["String2Output"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | 
components["schemas"]["FaceMaskOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["StringPosNegOutput"]; }; /** * Errors @@ -9138,6 +9105,18 @@ export type components = { /** Ui Order */ ui_order: number | null; }; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionOnnxModelFormat * @description An enumeration. @@ -9151,41 +9130,29 @@ export type components = { */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; /** - * StableDiffusion1ModelFormat + * IPAdapterModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; + IPAdapterModelFormat: "invokeai"; /** * CLIPVisionModelFormat * @description An enumeration. * @enum {string} */ CLIPVisionModelFormat: "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * IPAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - IPAdapterModelFormat: "invokeai"; /** * T2IAdapterModelFormat * @description An enumeration. * @enum {string} */ T2IAdapterModelFormat: "diffusers"; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9200,6 +9167,36 @@ export type external = Record; export type operations = { + /** + * Get Session + * @description Gets a session + */ + get_session: { + parameters: { + path: { + /** @description The id of the session to get */ + session_id: string; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["GraphExecutionState"]; + }; + }; + /** @description Session not found */ + 404: { + content: never; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; /** * Parse Dynamicprompts * @description Creates a batch process @@ -10309,43 +10306,6 @@ export type operations = { }; }; }; - /** - * Enqueue Graph - * @description Enqueues a graph for single execution. - */ - enqueue_graph: { - parameters: { - path: { - /** @description The queue id to perform this operation on */ - queue_id: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_enqueue_graph"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["EnqueueGraphResult"]; - }; - }; - /** @description Created */ - 201: { - content: { - "application/json": components["schemas"]["EnqueueGraphResult"]; - }; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; /** * Enqueue Batch * @description Processes a batch and enqueues the output graphs for execution. diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 6fda849b89..63617a4eb5 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -26,7 +26,6 @@ export type BatchConfig = paths['/api/v1/queue/{queue_id}/enqueue_batch']['post']['requestBody']['content']['application/json']; export type EnqueueBatchResult = components['schemas']['EnqueueBatchResult']; -export type EnqueueGraphResult = components['schemas']['EnqueueGraphResult']; /** * This is an unsafe type; the object inside is not guaranteed to be valid. 
From 975ba6b74f51394f78327431062c960e676cf9a9 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 08:43:56 +1100 Subject: [PATCH 098/202] fix(ui): use pidi processor for sketch --- .../web/src/features/controlAdapters/store/constants.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/constants.ts b/invokeai/frontend/web/src/features/controlAdapters/store/constants.ts index c35847d323..db2311f3f5 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/constants.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/constants.ts @@ -246,7 +246,7 @@ export const CONTROLNET_MODEL_DEFAULT_PROCESSORS: { mlsd: 'mlsd_image_processor', depth: 'midas_depth_image_processor', bae: 'normalbae_image_processor', - sketch: 'lineart_image_processor', + sketch: 'pidi_image_processor', scribble: 'lineart_image_processor', lineart: 'lineart_image_processor', lineart_anime: 'lineart_anime_image_processor', From 252c9a5f5ab3d3301f8ae912137135a9b66b2c1a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 16:50:52 +1100 Subject: [PATCH 099/202] fix(backend): fix nsfw/watermarker util types --- .../backend/image_util/invisible_watermark.py | 6 ++--- invokeai/backend/image_util/safety_checker.py | 26 +++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/invokeai/backend/image_util/invisible_watermark.py b/invokeai/backend/image_util/invisible_watermark.py index 3e8604f9c3..37b3ca918c 100644 --- a/invokeai/backend/image_util/invisible_watermark.py +++ b/invokeai/backend/image_util/invisible_watermark.py @@ -20,12 +20,12 @@ class InvisibleWatermark: """ @classmethod - def invisible_watermark_available(self) -> bool: + def invisible_watermark_available(cls) -> bool: return config.invisible_watermark @classmethod - def add_watermark(self, image: Image, watermark_text: str) -> Image: - if not self.invisible_watermark_available(): + def add_watermark(cls, image: Image.Image, watermark_text: str) -> Image.Image: + if not cls.invisible_watermark_available(): return image logger.debug(f'Applying invisible watermark "{watermark_text}"') bgr = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR) diff --git a/invokeai/backend/image_util/safety_checker.py b/invokeai/backend/image_util/safety_checker.py index fd1f05f10e..b9649925e1 100644 --- a/invokeai/backend/image_util/safety_checker.py +++ b/invokeai/backend/image_util/safety_checker.py @@ -26,8 +26,8 @@ class SafetyChecker: tried_load: bool = False @classmethod - def _load_safety_checker(self): - if self.tried_load: + def _load_safety_checker(cls): + if cls.tried_load: return if config.nsfw_checker: @@ -35,31 +35,31 @@ class SafetyChecker: from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from transformers import AutoFeatureExtractor - self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(config.models_path / CHECKER_PATH) - self.feature_extractor = AutoFeatureExtractor.from_pretrained(config.models_path / CHECKER_PATH) + cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(config.models_path / CHECKER_PATH) + cls.feature_extractor = AutoFeatureExtractor.from_pretrained(config.models_path / CHECKER_PATH) logger.info("NSFW checker initialized") except Exception as e: logger.warning(f"Could not load NSFW checker: {str(e)}") else: 
logger.info("NSFW checker loading disabled") - self.tried_load = True + cls.tried_load = True @classmethod - def safety_checker_available(self) -> bool: - self._load_safety_checker() - return self.safety_checker is not None + def safety_checker_available(cls) -> bool: + cls._load_safety_checker() + return cls.safety_checker is not None @classmethod - def has_nsfw_concept(self, image: Image) -> bool: - if not self.safety_checker_available(): + def has_nsfw_concept(cls, image: Image.Image) -> bool: + if not cls.safety_checker_available(): return False device = choose_torch_device() - features = self.feature_extractor([image], return_tensors="pt") + features = cls.feature_extractor([image], return_tensors="pt") features.to(device) - self.safety_checker.to(device) + cls.safety_checker.to(device) x_image = np.array(image).astype(np.float32) / 255.0 x_image = x_image[None].transpose(0, 3, 1, 2) with SilenceWarnings(): - checked_image, has_nsfw_concept = self.safety_checker(images=x_image, clip_input=features.pixel_values) + checked_image, has_nsfw_concept = cls.safety_checker(images=x_image, clip_input=features.pixel_values) return has_nsfw_concept[0] From d27392cc2d9c4ac2ab2b9dd1703cc969daf85884 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 17 Oct 2023 12:59:48 -0400 Subject: [PATCH 100/202] remove all references to CLI --- docs/CHANGELOG.md | 4 +- CODE_OF_CONDUCT.md => docs/CODE_OF_CONDUCT.md | 0 .../contribution_guides/development.md | 2 +- docs/deprecated/CLI.md | 4 +- docs/{other => deprecated}/TRANSLATION.md | 0 docs/{features => deprecated}/VARIATIONS.md | 2 +- docs/features/CONCEPTS.md | 5 +- docs/features/CONFIGURATION.md | 2 +- docs/features/CONTROLNET.md | 6 +- docs/features/MODEL_MERGING.md | 7 +- docs/features/UTILITIES.md | 10 +- docs/features/index.md | 2 +- docs/index.md | 7 +- docs/installation/050_INSTALLING_MODELS.md | 2 +- .../deprecated_documentation/INSTALL_LINUX.md | 4 +- .../deprecated_documentation/INSTALL_MAC.md | 2 +- .../INSTALL_SOURCE.md | 2 +- .../INSTALL_WINDOWS.md | 4 +- installer/templates/invoke.bat.in | 40 +- installer/templates/invoke.sh.in | 61 +-- invokeai/app/cli/__init__.py | 0 invokeai/app/cli/commands.py | 312 ----------- invokeai/app/cli/completer.py | 171 ------- invokeai/app/cli_app.py | 484 ------------------ mkdocs.yml | 9 +- pyproject.toml | 3 +- 26 files changed, 86 insertions(+), 1059 deletions(-) rename CODE_OF_CONDUCT.md => docs/CODE_OF_CONDUCT.md (100%) rename docs/{other => deprecated}/TRANSLATION.md (100%) rename docs/{features => deprecated}/VARIATIONS.md (97%) delete mode 100644 invokeai/app/cli/__init__.py delete mode 100644 invokeai/app/cli/commands.py delete mode 100644 invokeai/app/cli/completer.py delete mode 100644 invokeai/app/cli_app.py diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index ca765b3ca6..24bd5ad7dd 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -488,7 +488,7 @@ sections describe what's new for InvokeAI. - A choice of installer scripts that automate installation and configuration. See - [Installation](installation/index.md). + [Installation](installation/INSTALLATION.md). - A streamlined manual installation process that works for both Conda and PIP-only installs. See [Manual Installation](installation/020_INSTALL_MANUAL.md). @@ -657,7 +657,7 @@ sections describe what's new for InvokeAI. 
## v1.13 (3 September 2022) -- Support image variations (see [VARIATIONS](features/VARIATIONS.md) +- Support image variations (see [VARIATIONS](deprecated/VARIATIONS.md) ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers) - Supports a Google Colab notebook for a standalone server running on Google diff --git a/CODE_OF_CONDUCT.md b/docs/CODE_OF_CONDUCT.md similarity index 100% rename from CODE_OF_CONDUCT.md rename to docs/CODE_OF_CONDUCT.md diff --git a/docs/contributing/contribution_guides/development.md b/docs/contributing/contribution_guides/development.md index 086fd6e90d..2f50d7f579 100644 --- a/docs/contributing/contribution_guides/development.md +++ b/docs/contributing/contribution_guides/development.md @@ -45,5 +45,5 @@ For backend related work, please reach out to **@blessedcoolant**, **@lstein**, ## **What does the Code of Conduct mean for me?** -Our [Code of Conduct](CODE_OF_CONDUCT.md) means that you are responsible for treating everyone on the project with respect and courtesy regardless of their identity. If you are the victim of any inappropriate behavior or comments as described in our Code of Conduct, we are here for you and will do the best to ensure that the abuser is reprimanded appropriately, per our code. +Our [Code of Conduct](../../CODE_OF_CONDUCT.md) means that you are responsible for treating everyone on the project with respect and courtesy regardless of their identity. If you are the victim of any inappropriate behavior or comments as described in our Code of Conduct, we are here for you and will do the best to ensure that the abuser is reprimanded appropriately, per our code. diff --git a/docs/deprecated/CLI.md b/docs/deprecated/CLI.md index eaa215c8dd..b40aeffc37 100644 --- a/docs/deprecated/CLI.md +++ b/docs/deprecated/CLI.md @@ -211,8 +211,8 @@ Here are the invoke> command that apply to txt2img: | `--facetool ` | `-ft ` | `-ft gfpgan` | Select face restoration algorithm to use: gfpgan, codeformer | | `--codeformer_fidelity` | `-cf ` | `0.75` | Used along with CodeFormer. Takes values between 0 and 1. 0 produces high quality but low accuracy. 1 produces high accuracy but low quality | | `--save_original` | `-save_orig` | `False` | When upscaling or fixing faces, this will cause the original image to be saved rather than replaced. | -| `--variation ` | `-v` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S` and `-n` to generate a series a riffs on a starting image. See [Variations](../features/VARIATIONS.md). | -| `--with_variations ` | | `None` | Combine two or more variations. See [Variations](../features/VARIATIONS.md) for now to use this. | +| `--variation ` | `-v` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S` and `-n` to generate a series a riffs on a starting image. See [Variations](VARIATIONS.md). | +| `--with_variations ` | | `None` | Combine two or more variations. See [Variations](VARIATIONS.md) for now to use this. | | `--save_intermediates ` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory | | `--h_symmetry_time_pct ` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) 
| | `--v_symmetry_time_pct ` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) | diff --git a/docs/other/TRANSLATION.md b/docs/deprecated/TRANSLATION.md similarity index 100% rename from docs/other/TRANSLATION.md rename to docs/deprecated/TRANSLATION.md diff --git a/docs/features/VARIATIONS.md b/docs/deprecated/VARIATIONS.md similarity index 97% rename from docs/features/VARIATIONS.md rename to docs/deprecated/VARIATIONS.md index e6e21490c8..0c09b71836 100644 --- a/docs/features/VARIATIONS.md +++ b/docs/deprecated/VARIATIONS.md @@ -126,6 +126,6 @@ amounts of image-to-image variation even when the seed is fixed and the `-v` argument is very low. Others are more deterministic. Feel free to experiment until you find the combination that you like. -Also be aware of the [Perlin Noise](OTHER.md#thresholding-and-perlin-noise-initialization-options) +Also be aware of the [Perlin Noise](../features/OTHER.md#thresholding-and-perlin-noise-initialization-options) feature, which provides another way of introducing variability into your image generation requests. diff --git a/docs/features/CONCEPTS.md b/docs/features/CONCEPTS.md index df9ee5bd26..5f3d2d961f 100644 --- a/docs/features/CONCEPTS.md +++ b/docs/features/CONCEPTS.md @@ -28,8 +28,9 @@ by placing them in the designated directory for the compatible model type ### An Example -Here are a few examples to illustrate how it works. All these images were -generated using the command-line client and the Stable Diffusion 1.5 model: +Here are a few examples to illustrate how it works. All these images +were generated using the legacy command-line client and the Stable +Diffusion 1.5 model: | Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> | | :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: | diff --git a/docs/features/CONFIGURATION.md b/docs/features/CONFIGURATION.md index cfd65f8a61..f83caf522d 100644 --- a/docs/features/CONFIGURATION.md +++ b/docs/features/CONFIGURATION.md @@ -82,7 +82,7 @@ format of YAML files can be found [here](https://circleci.com/blog/what-is-yaml-a-beginner-s-guide/). You can fix a broken `invokeai.yaml` by deleting it and running the -configuration script again -- option [7] in the launcher, "Re-run the +configuration script again -- option [6] in the launcher, "Re-run the configure script". #### Reading Environment Variables diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md index 8284ddf75d..d287e6cb19 100644 --- a/docs/features/CONTROLNET.md +++ b/docs/features/CONTROLNET.md @@ -46,7 +46,7 @@ Diffuser-style ControlNet models are available at HuggingFace (http://huggingface.co) and accessed via their repo IDs (identifiers in the format "author/modelname"). The easiest way to install them is to use the InvokeAI model installer application. Use the -`invoke.sh`/`invoke.bat` launcher to select item [5] and then navigate +`invoke.sh`/`invoke.bat` launcher to select item [4] and then navigate to the CONTROLNETS section. Select the models you wish to install and press "APPLY CHANGES". 
You may also enter additional HuggingFace repo_ids in the "Additional models" textbox: @@ -145,8 +145,8 @@ Additionally, each ControlNet section can be expanded in order to manipulate set #### Installation There are several ways to install IP-Adapter models with an existing InvokeAI installation: -1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [5] to download models. -2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models. +1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [4] to download models. +2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](https://www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models. 3. **Advanced -- Not recommended ** Manually downloading the IP-Adapter and Image Encoder files - Image Encoder folders shouid be placed in the `models\any\clip_vision` folders. IP Adapter Model folders should be placed in the relevant `ip-adapter` folder of relevant base model folder of Invoke root directory. For example, for the SDXL IP-Adapter, files should be added to the `model/sdxl/ip_adapter/` folder. #### Using IP-Adapter diff --git a/docs/features/MODEL_MERGING.md b/docs/features/MODEL_MERGING.md index 6adf4db16a..e384662ef5 100644 --- a/docs/features/MODEL_MERGING.md +++ b/docs/features/MODEL_MERGING.md @@ -16,9 +16,10 @@ Model Merging can be be done by navigating to the Model Manager and clicking the display all the diffusers-style models that InvokeAI knows about. If you do not see the model you are looking for, then it is probably a legacy checkpoint model and needs to be converted using the - `invoke` command-line client and its `!optimize` command. You - must select at least two models to merge. The third can be left at - "None" if you desire. + "Convert" option in the Web-based Model Manager tab. + + You must select at least two models to merge. The third can be left + at "None" if you desire. * Alpha: This is the ratio to use when combining models. It ranges from 0 to 1. The higher the value, the more weight is given to the diff --git a/docs/features/UTILITIES.md b/docs/features/UTILITIES.md index a73118d55a..2d62fe3a79 100644 --- a/docs/features/UTILITIES.md +++ b/docs/features/UTILITIES.md @@ -8,7 +8,7 @@ title: Command-line Utilities InvokeAI comes with several scripts that are accessible via the command line. To access these commands, start the "developer's -console" from the launcher (`invoke.bat` menu item [8]). Users who are +console" from the launcher (`invoke.bat` menu item [7]). 
Users who are familiar with Python can alternatively activate InvokeAI's virtual environment (typically, but not necessarily `invokeai/.venv`). @@ -34,7 +34,7 @@ invokeai-web --ram 7 ## **invokeai-merge** -This is the model merge script, the same as launcher option [4]. Call +This is the model merge script, the same as launcher option [3]. Call it with the `--gui` command-line argument to start the interactive console-based GUI. Alternatively, you can run it non-interactively using command-line arguments as illustrated in the example below which @@ -48,7 +48,7 @@ invokeai-merge --force --base-model sd-1 --models stable-diffusion-1.5 inkdiffus ## **invokeai-ti** This is the textual inversion training script that is run by launcher -option [3]. Call it with `--gui` to run the interactive console-based +option [2]. Call it with `--gui` to run the interactive console-based front end. It can also be run non-interactively. It has about a zillion arguments, but a typical training session can be launched with: @@ -68,7 +68,7 @@ in Windows). ## **invokeai-install** This is the console-based model install script that is run by launcher -option [5]. If called without arguments, it will launch the +option [4]. If called without arguments, it will launch the interactive console-based interface. It can also be used non-interactively to list, add and remove models as shown by these examples: @@ -148,7 +148,7 @@ launch the web server against it with `invokeai-web --root InvokeAI-New`. ## **invokeai-update** This is the interactive console-based script that is run by launcher -menu item [9] to update to a new version of InvokeAI. It takes no +menu item [8] to update to a new version of InvokeAI. It takes no command-line arguments. ## **invokeai-metadata** diff --git a/docs/features/index.md b/docs/features/index.md index bd37366314..6315b20ca5 100644 --- a/docs/features/index.md +++ b/docs/features/index.md @@ -28,7 +28,7 @@ Learn how to install and use ControlNet models for fine control over image output. ### * [Image-to-Image Guide](IMG2IMG.md) -Use a seed image to build new creations in the CLI. +Use a seed image to build new creations. ## Model Management diff --git a/docs/index.md b/docs/index.md index 9a426e5684..8c9ed5b7f8 100644 --- a/docs/index.md +++ b/docs/index.md @@ -143,7 +143,6 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM. ### Prompt Engineering - [Prompt Syntax](features/PROMPTS.md) -- [Generating Variations](features/VARIATIONS.md) ### InvokeAI Configuration - [Guide to InvokeAI Runtime Settings](features/CONFIGURATION.md) @@ -166,10 +165,8 @@ still a work in progress, but coming soon. ### Command-Line Interface Retired -The original "invokeai" command-line interface has been retired. The -`invokeai` command will now launch a new command-line client that can -be used by developers to create and test nodes. It is not intended to -be used for routine image generation or manipulation. +All "invokeai" command-line interfaces have been retired as of version +3.4. To launch the Web GUI from the command-line, use the command `invokeai-web` rather than the traditional `invokeai --web`. diff --git a/docs/installation/050_INSTALLING_MODELS.md b/docs/installation/050_INSTALLING_MODELS.md index d455d2146f..5333e2aa88 100644 --- a/docs/installation/050_INSTALLING_MODELS.md +++ b/docs/installation/050_INSTALLING_MODELS.md @@ -84,7 +84,7 @@ InvokeAI root directory's `autoimport` folder. 
### Installation via `invokeai-model-install` -From the `invoke` launcher, choose option [5] "Download and install +From the `invoke` launcher, choose option [4] "Download and install models." This will launch the same script that prompted you to select models at install time. You can use this to add models that you skipped the first time around. It is all right to specify a model that diff --git a/docs/installation/deprecated_documentation/INSTALL_LINUX.md b/docs/installation/deprecated_documentation/INSTALL_LINUX.md index 1e66698ec2..97060f85ad 100644 --- a/docs/installation/deprecated_documentation/INSTALL_LINUX.md +++ b/docs/installation/deprecated_documentation/INSTALL_LINUX.md @@ -79,7 +79,7 @@ title: Manual Installation, Linux and obtaining an access token for downloading. It will then download and install the weights files for you. - Please look [here](../INSTALL_MANUAL.md) for a manual process for doing + Please look [here](../020_INSTALL_MANUAL.md) for a manual process for doing the same thing. 7. Start generating images! @@ -112,7 +112,7 @@ title: Manual Installation, Linux To use an alternative model you may invoke the `!switch` command in the CLI, or pass `--model ` during `invoke.py` launch for either the CLI or the Web UI. See [Command Line - Client](../../features/CLI.md#model-selection-and-importation). The + Client](../../deprecated/CLI.md#model-selection-and-importation). The model names are defined in `configs/models.yaml`. 8. Subsequently, to relaunch the script, be sure to run "conda activate diff --git a/docs/installation/deprecated_documentation/INSTALL_MAC.md b/docs/installation/deprecated_documentation/INSTALL_MAC.md index 7a3c5b564f..dea3c329a7 100644 --- a/docs/installation/deprecated_documentation/INSTALL_MAC.md +++ b/docs/installation/deprecated_documentation/INSTALL_MAC.md @@ -150,7 +150,7 @@ will do our best to help. To use an alternative model you may invoke the `!switch` command in the CLI, or pass `--model ` during `invoke.py` launch for either the CLI or the Web UI. See [Command Line - Client](../../features/CLI.md#model-selection-and-importation). The + Client](../../deprecated/CLI.md#model-selection-and-importation). The model names are defined in `configs/models.yaml`. --- diff --git a/docs/installation/deprecated_documentation/INSTALL_SOURCE.md b/docs/installation/deprecated_documentation/INSTALL_SOURCE.md index 2b1b750fbf..b71cd68ab7 100644 --- a/docs/installation/deprecated_documentation/INSTALL_SOURCE.md +++ b/docs/installation/deprecated_documentation/INSTALL_SOURCE.md @@ -128,7 +128,7 @@ python scripts/invoke.py --web --max_load_models=3 \ ``` These options are described in detail in the -[Command-Line Interface](../../features/CLI.md) documentation. +[Command-Line Interface](../../deprecated/CLI.md) documentation. ## Troubleshooting diff --git a/docs/installation/deprecated_documentation/INSTALL_WINDOWS.md b/docs/installation/deprecated_documentation/INSTALL_WINDOWS.md index 19acb832e4..9536f09db2 100644 --- a/docs/installation/deprecated_documentation/INSTALL_WINDOWS.md +++ b/docs/installation/deprecated_documentation/INSTALL_WINDOWS.md @@ -75,7 +75,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan obtaining an access token for downloading. It will then download and install the weights files for you. - Please look [here](../INSTALL_MANUAL.md) for a manual process for doing the + Please look [here](../020_INSTALL_MANUAL.md) for a manual process for doing the same thing. 8. Start generating images! 
@@ -108,7 +108,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan To use an alternative model you may invoke the `!switch` command in the CLI, or pass `--model ` during `invoke.py` launch for either the CLI or the Web UI. See [Command Line - Client](../../features/CLI.md#model-selection-and-importation). The + Client](../../deprecated/CLI.md#model-selection-and-importation). The model names are defined in `configs/models.yaml`. 9. Subsequently, to relaunch the script, first activate the Anaconda diff --git a/installer/templates/invoke.bat.in b/installer/templates/invoke.bat.in index 227091b33a..ee6d56fc56 100644 --- a/installer/templates/invoke.bat.in +++ b/installer/templates/invoke.bat.in @@ -9,41 +9,37 @@ set INVOKEAI_ROOT=. :start echo Desired action: echo 1. Generate images with the browser-based interface -echo 2. Explore InvokeAI nodes using a command-line interface -echo 3. Run textual inversion training -echo 4. Merge models (diffusers type only) -echo 5. Download and install models -echo 6. Change InvokeAI startup options -echo 7. Re-run the configure script to fix a broken install or to complete a major upgrade -echo 8. Open the developer console -echo 9. Update InvokeAI -echo 10. Run the InvokeAI image database maintenance script -echo 11. Command-line help +echo 2. Run textual inversion training +echo 3. Merge models (diffusers type only) +echo 4. Download and install models +echo 5. Change InvokeAI startup options +echo 6. Re-run the configure script to fix a broken install or to complete a major upgrade +echo 7. Open the developer console +echo 8. Update InvokeAI +echo 9. Run the InvokeAI image database maintenance script +echo 10. Command-line help echo Q - Quit -set /P choice="Please enter 1-11, Q: [1] " +set /P choice="Please enter 1-10, Q: [1] " if not defined choice set choice=1 IF /I "%choice%" == "1" ( echo Starting the InvokeAI browser-based UI.. python .venv\Scripts\invokeai-web.exe %* ) ELSE IF /I "%choice%" == "2" ( - echo Starting the InvokeAI command-line.. - python .venv\Scripts\invokeai.exe %* -) ELSE IF /I "%choice%" == "3" ( echo Starting textual inversion training.. python .venv\Scripts\invokeai-ti.exe --gui -) ELSE IF /I "%choice%" == "4" ( +) ELSE IF /I "%choice%" == "3" ( echo Starting model merging script.. python .venv\Scripts\invokeai-merge.exe --gui -) ELSE IF /I "%choice%" == "5" ( +) ELSE IF /I "%choice%" == "4" ( echo Running invokeai-model-install... python .venv\Scripts\invokeai-model-install.exe -) ELSE IF /I "%choice%" == "6" ( +) ELSE IF /I "%choice%" == "5" ( echo Running invokeai-configure... python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models -) ELSE IF /I "%choice%" == "7" ( +) ELSE IF /I "%choice%" == "6" ( echo Running invokeai-configure... python .venv\Scripts\invokeai-configure.exe --yes --skip-sd-weight -) ELSE IF /I "%choice%" == "8" ( +) ELSE IF /I "%choice%" == "7" ( echo Developer Console echo Python command is: where python @@ -55,13 +51,13 @@ IF /I "%choice%" == "1" ( echo ************************* echo *** Type `exit` to quit this shell and deactivate the Python virtual environment *** call cmd /k -) ELSE IF /I "%choice%" == "9" ( +) ELSE IF /I "%choice%" == "8" ( echo Running invokeai-update... python -m invokeai.frontend.install.invokeai_update -) ELSE IF /I "%choice%" == "10" ( +) ELSE IF /I "%choice%" == "9" ( echo Running the db maintenance script... 
python .venv\Scripts\invokeai-db-maintenance.exe -) ELSE IF /I "%choice%" == "11" ( +) ELSE IF /I "%choice%" == "10" ( echo Displaying command line help... python .venv\Scripts\invokeai-web.exe --help %* pause diff --git a/installer/templates/invoke.sh.in b/installer/templates/invoke.sh.in index 6cf6967608..3230c9f442 100644 --- a/installer/templates/invoke.sh.in +++ b/installer/templates/invoke.sh.in @@ -58,52 +58,47 @@ do_choice() { invokeai-web $PARAMS ;; 2) - clear - printf "Explore InvokeAI nodes using a command-line interface\n" - invokeai $PARAMS - ;; - 3) clear printf "Textual inversion training\n" invokeai-ti --gui $PARAMS ;; - 4) + 3) clear printf "Merge models (diffusers type only)\n" invokeai-merge --gui $PARAMS ;; - 5) + 4) clear printf "Download and install models\n" invokeai-model-install --root ${INVOKEAI_ROOT} ;; - 6) + 5) clear printf "Change InvokeAI startup options\n" invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models ;; - 7) + 6) clear printf "Re-run the configure script to fix a broken install or to complete a major upgrade\n" invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only --skip-sd-weights ;; - 8) + 7) clear printf "Open the developer console\n" file_name=$(basename "${BASH_SOURCE[0]}") bash --init-file "$file_name" ;; - 9) + 8) clear printf "Update InvokeAI\n" python -m invokeai.frontend.install.invokeai_update ;; - 10) + 9) clear printf "Running the db maintenance script\n" invokeai-db-maintenance --root ${INVOKEAI_ROOT} ;; - 11) + 10) clear printf "Command-line help\n" invokeai-web --help @@ -121,16 +116,15 @@ do_choice() { do_dialog() { options=( 1 "Generate images with a browser-based interface" - 2 "Explore InvokeAI nodes using a command-line interface" - 3 "Textual inversion training" - 4 "Merge models (diffusers type only)" - 5 "Download and install models" - 6 "Change InvokeAI startup options" - 7 "Re-run the configure script to fix a broken install or to complete a major upgrade" - 8 "Open the developer console" - 9 "Update InvokeAI" - 10 "Run the InvokeAI image database maintenance script" - 11 "Command-line help" + 2 "Textual inversion training" + 3 "Merge models (diffusers type only)" + 4 "Download and install models" + 5 "Change InvokeAI startup options" + 6 "Re-run the configure script to fix a broken install or to complete a major upgrade" + 7 "Open the developer console" + 8 "Update InvokeAI" + 9 "Run the InvokeAI image database maintenance script" + 10 "Command-line help" ) choice=$(dialog --clear \ @@ -155,18 +149,17 @@ do_line_input() { printf " ** For a more attractive experience, please install the 'dialog' utility using your package manager. 
**\n\n" printf "What would you like to do?\n" printf "1: Generate images using the browser-based interface\n" - printf "2: Explore InvokeAI nodes using the command-line interface\n" - printf "3: Run textual inversion training\n" - printf "4: Merge models (diffusers type only)\n" - printf "5: Download and install models\n" - printf "6: Change InvokeAI startup options\n" - printf "7: Re-run the configure script to fix a broken install\n" - printf "8: Open the developer console\n" - printf "9: Update InvokeAI\n" - printf "10: Run the InvokeAI image database maintenance script\n" - printf "11: Command-line help\n" + printf "2: Run textual inversion training\n" + printf "3: Merge models (diffusers type only)\n" + printf "4: Download and install models\n" + printf "5: Change InvokeAI startup options\n" + printf "6: Re-run the configure script to fix a broken install\n" + printf "7: Open the developer console\n" + printf "8: Update InvokeAI\n" + printf "9: Run the InvokeAI image database maintenance script\n" + printf "10: Command-line help\n" printf "Q: Quit\n\n" - read -p "Please enter 1-11, Q: [1] " yn + read -p "Please enter 1-10, Q: [1] " yn choice=${yn:='1'} do_choice $choice clear diff --git a/invokeai/app/cli/__init__.py b/invokeai/app/cli/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/invokeai/app/cli/commands.py b/invokeai/app/cli/commands.py deleted file mode 100644 index c21c6315ed..0000000000 --- a/invokeai/app/cli/commands.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) - -import argparse -from abc import ABC, abstractmethod -from typing import Any, Callable, Iterable, Literal, Union, get_args, get_origin, get_type_hints - -import matplotlib.pyplot as plt -import networkx as nx -from pydantic import BaseModel, Field - -import invokeai.backend.util.logging as logger - -from ..invocations.baseinvocation import BaseInvocation -from ..invocations.image import ImageField -from ..services.graph import Edge, GraphExecutionState, LibraryGraph -from ..services.invoker import Invoker - - -def add_field_argument(command_parser, name: str, field, default_override=None): - default = ( - default_override - if default_override is not None - else field.default - if field.default_factory is None - else field.default_factory() - ) - if get_origin(field.annotation) == Literal: - allowed_values = get_args(field.annotation) - allowed_types = set() - for val in allowed_values: - allowed_types.add(type(val)) - allowed_types_list = list(allowed_types) - field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list] # type: ignore - - command_parser.add_argument( - f"--{name}", - dest=name, - type=field_type, - default=default, - choices=allowed_values, - help=field.description, - ) - else: - command_parser.add_argument( - f"--{name}", - dest=name, - type=field.annotation, - default=default, - help=field.description, - ) - - -def add_parsers( - subparsers, - commands: list[type], - command_field: str = "type", - exclude_fields: list[str] = ["id", "type"], - add_arguments: Union[Callable[[argparse.ArgumentParser], None], None] = None, -): - """Adds parsers for each command to the subparsers""" - - # Create subparsers for each command - for command in commands: - hints = get_type_hints(command) - cmd_name = get_args(hints[command_field])[0] - command_parser = subparsers.add_parser(cmd_name, help=command.__doc__) - - if add_arguments is not None: - add_arguments(command_parser) - - # Convert 
all fields to arguments - fields = command.__fields__ # type: ignore - for name, field in fields.items(): - if name in exclude_fields: - continue - - add_field_argument(command_parser, name, field) - - -def add_graph_parsers( - subparsers, graphs: list[LibraryGraph], add_arguments: Union[Callable[[argparse.ArgumentParser], None], None] = None -): - for graph in graphs: - command_parser = subparsers.add_parser(graph.name, help=graph.description) - - if add_arguments is not None: - add_arguments(command_parser) - - # Add arguments for inputs - for exposed_input in graph.exposed_inputs: - node = graph.graph.get_node(exposed_input.node_path) - field = node.__fields__[exposed_input.field] - default_override = getattr(node, exposed_input.field) - add_field_argument(command_parser, exposed_input.alias, field, default_override) - - -class CliContext: - invoker: Invoker - session: GraphExecutionState - parser: argparse.ArgumentParser - defaults: dict[str, Any] - graph_nodes: dict[str, str] - nodes_added: list[str] - - def __init__(self, invoker: Invoker, session: GraphExecutionState, parser: argparse.ArgumentParser): - self.invoker = invoker - self.session = session - self.parser = parser - self.defaults = dict() - self.graph_nodes = dict() - self.nodes_added = list() - - def get_session(self): - self.session = self.invoker.services.graph_execution_manager.get(self.session.id) - return self.session - - def reset(self): - self.session = self.invoker.create_execution_state() - self.graph_nodes = dict() - self.nodes_added = list() - # Leave defaults unchanged - - def add_node(self, node: BaseInvocation): - self.get_session() - self.session.graph.add_node(node) - self.nodes_added.append(node.id) - self.invoker.services.graph_execution_manager.set(self.session) - - def add_edge(self, edge: Edge): - self.get_session() - self.session.add_edge(edge) - self.invoker.services.graph_execution_manager.set(self.session) - - -class ExitCli(Exception): - """Exception to exit the CLI""" - - pass - - -class BaseCommand(ABC, BaseModel): - """A CLI command""" - - # All commands must include a type name like this: - - @classmethod - def get_all_subclasses(cls): - subclasses = [] - toprocess = [cls] - while len(toprocess) > 0: - next = toprocess.pop(0) - next_subclasses = next.__subclasses__() - subclasses.extend(next_subclasses) - toprocess.extend(next_subclasses) - return subclasses - - @classmethod - def get_commands(cls): - return tuple(BaseCommand.get_all_subclasses()) - - @classmethod - def get_commands_map(cls): - # Get the type strings out of the literals and into a dictionary - return dict(map(lambda t: (get_args(get_type_hints(t)["type"])[0], t), BaseCommand.get_all_subclasses())) - - @abstractmethod - def run(self, context: CliContext) -> None: - """Run the command. 
Raise ExitCli to exit.""" - pass - - -class ExitCommand(BaseCommand): - """Exits the CLI""" - - type: Literal["exit"] = "exit" - - def run(self, context: CliContext) -> None: - raise ExitCli() - - -class HelpCommand(BaseCommand): - """Shows help""" - - type: Literal["help"] = "help" - - def run(self, context: CliContext) -> None: - context.parser.print_help() - - -def get_graph_execution_history( - graph_execution_state: GraphExecutionState, -) -> Iterable[str]: - """Gets the history of fully-executed invocations for a graph execution""" - return (n for n in reversed(graph_execution_state.executed_history) if n in graph_execution_state.graph.nodes) - - -def get_invocation_command(invocation) -> str: - fields = invocation.__fields__.items() - type_hints = get_type_hints(type(invocation)) - command = [invocation.type] - for name, field in fields: - if name in ["id", "type"]: - continue - - # TODO: add links - - # Skip image fields when serializing command - type_hint = type_hints.get(name) or None - if type_hint is ImageField or ImageField in get_args(type_hint): - continue - - field_value = getattr(invocation, name) - field_default = field.default - if field_value != field_default: - if type_hint is str or str in get_args(type_hint): - command.append(f'--{name} "{field_value}"') - else: - command.append(f"--{name} {field_value}") - - return " ".join(command) - - -class HistoryCommand(BaseCommand): - """Shows the invocation history""" - - type: Literal["history"] = "history" - - # Inputs - # fmt: off - count: int = Field(default=5, gt=0, description="The number of history entries to show") - # fmt: on - - def run(self, context: CliContext) -> None: - history = list(get_graph_execution_history(context.get_session())) - for i in range(min(self.count, len(history))): - entry_id = history[-1 - i] - entry = context.get_session().graph.get_node(entry_id) - logger.info(f"{entry_id}: {get_invocation_command(entry)}") - - -class SetDefaultCommand(BaseCommand): - """Sets a default value for a field""" - - type: Literal["default"] = "default" - - # Inputs - # fmt: off - field: str = Field(description="The field to set the default for") - value: str = Field(description="The value to set the default to, or None to clear the default") - # fmt: on - - def run(self, context: CliContext) -> None: - if self.value is None: - if self.field in context.defaults: - del context.defaults[self.field] - else: - context.defaults[self.field] = self.value - - -class DrawGraphCommand(BaseCommand): - """Debugs a graph""" - - type: Literal["draw_graph"] = "draw_graph" - - def run(self, context: CliContext) -> None: - session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id) - nxgraph = session.graph.nx_graph_flat() - - # Draw the networkx graph - plt.figure(figsize=(20, 20)) - pos = nx.spectral_layout(nxgraph) - nx.draw_networkx_nodes(nxgraph, pos, node_size=1000) - nx.draw_networkx_edges(nxgraph, pos, width=2) - nx.draw_networkx_labels(nxgraph, pos, font_size=20, font_family="sans-serif") - plt.axis("off") - plt.show() - - -class DrawExecutionGraphCommand(BaseCommand): - """Debugs an execution graph""" - - type: Literal["draw_xgraph"] = "draw_xgraph" - - def run(self, context: CliContext) -> None: - session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id) - nxgraph = session.execution_graph.nx_graph_flat() - - # Draw the networkx graph - plt.figure(figsize=(20, 20)) - pos = nx.spectral_layout(nxgraph) - 
nx.draw_networkx_nodes(nxgraph, pos, node_size=1000) - nx.draw_networkx_edges(nxgraph, pos, width=2) - nx.draw_networkx_labels(nxgraph, pos, font_size=20, font_family="sans-serif") - plt.axis("off") - plt.show() - - -class SortedHelpFormatter(argparse.HelpFormatter): - def _iter_indented_subactions(self, action): - try: - get_subactions = action._get_subactions - except AttributeError: - pass - else: - self._indent() - if isinstance(action, argparse._SubParsersAction): - for subaction in sorted(get_subactions(), key=lambda x: x.dest): - yield subaction - else: - for subaction in get_subactions(): - yield subaction - self._dedent() diff --git a/invokeai/app/cli/completer.py b/invokeai/app/cli/completer.py deleted file mode 100644 index 5aece8a058..0000000000 --- a/invokeai/app/cli/completer.py +++ /dev/null @@ -1,171 +0,0 @@ -""" -Readline helper functions for cli_app.py -You may import the global singleton `completer` to get access to the -completer object. -""" -import atexit -import readline -import shlex -from pathlib import Path -from typing import Dict, List, Literal, get_args, get_origin, get_type_hints - -import invokeai.backend.util.logging as logger - -from ...backend import ModelManager -from ..invocations.baseinvocation import BaseInvocation -from ..services.invocation_services import InvocationServices -from .commands import BaseCommand - -# singleton object, class variable -completer = None - - -class Completer(object): - def __init__(self, model_manager: ModelManager): - self.commands = self.get_commands() - self.matches = None - self.linebuffer = None - self.manager = model_manager - return - - def complete(self, text, state): - """ - Complete commands and switches fromm the node CLI command line. - Switches are determined in a context-specific manner. - """ - - buffer = readline.get_line_buffer() - if state == 0: - options = None - try: - current_command, current_switch = self.get_current_command(buffer) - options = self.get_command_options(current_command, current_switch) - except IndexError: - pass - options = options or list(self.parse_commands().keys()) - - if not text: # first time - self.matches = options - else: - self.matches = [s for s in options if s and s.startswith(text)] - - try: - match = self.matches[state] - except IndexError: - match = None - return match - - @classmethod - def get_commands(self) -> List[object]: - """ - Return a list of all the client commands and invocations. - """ - return BaseCommand.get_commands() + BaseInvocation.get_invocations() - - def get_current_command(self, buffer: str) -> tuple[str, str]: - """ - Parse the readline buffer to find the most recent command and its switch. - """ - if len(buffer) == 0: - return None, None - tokens = shlex.split(buffer) - command = None - switch = None - for t in tokens: - if t[0].isalpha(): - if switch is None: - command = t - else: - switch = t - # don't try to autocomplete switches that are already complete - if switch and buffer.endswith(" "): - switch = None - return command or "", switch or "" - - def parse_commands(self) -> Dict[str, List[str]]: - """ - Return a dict in which the keys are the command name - and the values are the parameters the command takes. 
- """ - result = dict() - for command in self.commands: - hints = get_type_hints(command) - name = get_args(hints["type"])[0] - result.update({name: hints}) - return result - - def get_command_options(self, command: str, switch: str) -> List[str]: - """ - Return all the parameters that can be passed to the command as - command-line switches. Returns None if the command is unrecognized. - """ - parsed_commands = self.parse_commands() - if command not in parsed_commands: - return None - - # handle switches in the format "-foo=bar" - argument = None - if switch and "=" in switch: - switch, argument = switch.split("=") - - parameter = switch.strip("-") - if parameter in parsed_commands[command]: - if argument is None: - return self.get_parameter_options(parameter, parsed_commands[command][parameter]) - else: - return [ - f"--{parameter}={x}" - for x in self.get_parameter_options(parameter, parsed_commands[command][parameter]) - ] - else: - return [f"--{x}" for x in parsed_commands[command].keys()] - - def get_parameter_options(self, parameter: str, typehint) -> List[str]: - """ - Given a parameter type (such as Literal), offers autocompletions. - """ - if get_origin(typehint) == Literal: - return get_args(typehint) - if parameter == "model": - return self.manager.model_names() - - def _pre_input_hook(self): - if self.linebuffer: - readline.insert_text(self.linebuffer) - readline.redisplay() - self.linebuffer = None - - -def set_autocompleter(services: InvocationServices) -> Completer: - global completer - - if completer: - return completer - - completer = Completer(services.model_manager) - - readline.set_completer(completer.complete) - try: - readline.set_auto_history(True) - except AttributeError: - # pyreadline3 does not have a set_auto_history() method - pass - readline.set_pre_input_hook(completer._pre_input_hook) - readline.set_completer_delims(" ") - readline.parse_and_bind("tab: complete") - readline.parse_and_bind("set print-completions-horizontally off") - readline.parse_and_bind("set page-completions on") - readline.parse_and_bind("set skip-completed-text on") - readline.parse_and_bind("set show-all-if-ambiguous on") - - histfile = Path(services.configuration.root_dir / ".invoke_history") - try: - readline.read_history_file(histfile) - readline.set_history_length(1000) - except FileNotFoundError: - pass - except OSError: # file likely corrupted - newname = f"{histfile}.old" - logger.error(f"Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}") - histfile.replace(Path(newname)) - atexit.register(readline.write_history_file, histfile) diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py deleted file mode 100644 index 2f8a4d2cbd..0000000000 --- a/invokeai/app/cli_app.py +++ /dev/null @@ -1,484 +0,0 @@ -# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team - -from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache - -from .services.config import InvokeAIAppConfig - -# parse_args() must be called before any other imports. if it is not called first, consumers of the config -# which are imported/used before parse_args() is called will get the default config values instead of the -# values from the command line or config file. 
- -if True: # hack to make flake8 happy with imports coming after setting up the config - import argparse - import re - import shlex - import sqlite3 - import sys - import time - from typing import Optional, Union, get_type_hints - - import torch - from pydantic import BaseModel, ValidationError - from pydantic.fields import Field - - import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import) - from invokeai.app.services.board_image_record_storage import SqliteBoardImageRecordStorage - from invokeai.app.services.board_images import BoardImagesService, BoardImagesServiceDependencies - from invokeai.app.services.board_record_storage import SqliteBoardRecordStorage - from invokeai.app.services.boards import BoardService, BoardServiceDependencies - from invokeai.app.services.image_record_storage import SqliteImageRecordStorage - from invokeai.app.services.images import ImageService, ImageServiceDependencies - from invokeai.app.services.invocation_stats import InvocationStatsService - from invokeai.app.services.resource_name import SimpleNameService - from invokeai.app.services.urls import LocalUrlService - from invokeai.backend.util.logging import InvokeAILogger - from invokeai.version.invokeai_version import __version__ - - from .cli.commands import BaseCommand, CliContext, ExitCli, SortedHelpFormatter, add_graph_parsers, add_parsers - from .cli.completer import set_autocompleter - from .invocations.baseinvocation import BaseInvocation - from .services.default_graphs import create_system_graphs, default_text_to_image_graph_id - from .services.events import EventServiceBase - from .services.graph import ( - Edge, - EdgeConnection, - GraphExecutionState, - GraphInvocation, - LibraryGraph, - are_connection_types_compatible, - ) - from .services.image_file_storage import DiskImageFileStorage - from .services.invocation_queue import MemoryInvocationQueue - from .services.invocation_services import InvocationServices - from .services.invoker import Invoker - from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage - from .services.model_manager_service import ModelManagerService - from .services.processor import DefaultInvocationProcessor - from .services.sqlite import SqliteItemStorage - - if torch.backends.mps.is_available(): - import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import) - -config = InvokeAIAppConfig.get_config() -config.parse_args() -logger = InvokeAILogger().get_logger(config=config) - - -class CliCommand(BaseModel): - command: Union[BaseCommand.get_commands() + BaseInvocation.get_invocations()] = Field(discriminator="type") # type: ignore - - -class InvalidArgs(Exception): - pass - - -def add_invocation_args(command_parser): - # Add linking capability - command_parser.add_argument( - "--link", - "-l", - action="append", - nargs=3, - help="A link in the format 'source_node source_field dest_field'. source_node can be relative to history (e.g. -1)", - ) - - command_parser.add_argument( - "--link_node", - "-ln", - action="append", - help="A link from all fields in the specified node. Node can be relative to history (e.g. 
-1)", - ) - - -def get_command_parser(services: InvocationServices) -> argparse.ArgumentParser: - # Create invocation parser - parser = argparse.ArgumentParser(formatter_class=SortedHelpFormatter) - - def exit(*args, **kwargs): - raise InvalidArgs - - parser.exit = exit - subparsers = parser.add_subparsers(dest="type") - - # Create subparsers for each invocation - invocations = BaseInvocation.get_all_subclasses() - add_parsers(subparsers, invocations, add_arguments=add_invocation_args) - - # Create subparsers for each command - commands = BaseCommand.get_all_subclasses() - add_parsers(subparsers, commands, exclude_fields=["type"]) - - # Create subparsers for exposed CLI graphs - # TODO: add a way to identify these graphs - text_to_image = services.graph_library.get(default_text_to_image_graph_id) - add_graph_parsers(subparsers, [text_to_image], add_arguments=add_invocation_args) - - return parser - - -class NodeField: - alias: str - node_path: str - field: str - field_type: type - - def __init__(self, alias: str, node_path: str, field: str, field_type: type): - self.alias = alias - self.node_path = node_path - self.field = field - self.field_type = field_type - - -def fields_from_type_hints(hints: dict[str, type], node_path: str) -> dict[str, NodeField]: - return {k: NodeField(alias=k, node_path=node_path, field=k, field_type=v) for k, v in hints.items()} - - -def get_node_input_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField: - """Gets the node field for the specified field alias""" - exposed_input = next(e for e in graph.exposed_inputs if e.alias == field_alias) - node_type = type(graph.graph.get_node(exposed_input.node_path)) - return NodeField( - alias=exposed_input.alias, - node_path=f"{node_id}.{exposed_input.node_path}", - field=exposed_input.field, - field_type=get_type_hints(node_type)[exposed_input.field], - ) - - -def get_node_output_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField: - """Gets the node field for the specified field alias""" - exposed_output = next(e for e in graph.exposed_outputs if e.alias == field_alias) - node_type = type(graph.graph.get_node(exposed_output.node_path)) - node_output_type = node_type.get_output_type() - return NodeField( - alias=exposed_output.alias, - node_path=f"{node_id}.{exposed_output.node_path}", - field=exposed_output.field, - field_type=get_type_hints(node_output_type)[exposed_output.field], - ) - - -def get_node_inputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]: - """Gets the inputs for the specified invocation from the context""" - node_type = type(invocation) - if node_type is not GraphInvocation: - return fields_from_type_hints(get_type_hints(node_type), invocation.id) - else: - graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id]) - return {e.alias: get_node_input_field(graph, e.alias, invocation.id) for e in graph.exposed_inputs} - - -def get_node_outputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]: - """Gets the outputs for the specified invocation from the context""" - node_type = type(invocation) - if node_type is not GraphInvocation: - return fields_from_type_hints(get_type_hints(node_type.get_output_type()), invocation.id) - else: - graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id]) - return {e.alias: get_node_output_field(graph, e.alias, invocation.id) for e in graph.exposed_outputs} - - -def generate_matching_edges(a: 
BaseInvocation, b: BaseInvocation, context: CliContext) -> list[Edge]: - """Generates all possible edges between two invocations""" - afields = get_node_outputs(a, context) - bfields = get_node_inputs(b, context) - - matching_fields = set(afields.keys()).intersection(bfields.keys()) - - # Remove invalid fields - invalid_fields = set(["type", "id"]) - matching_fields = matching_fields.difference(invalid_fields) - - # Validate types - matching_fields = [ - f for f in matching_fields if are_connection_types_compatible(afields[f].field_type, bfields[f].field_type) - ] - - edges = [ - Edge( - source=EdgeConnection(node_id=afields[alias].node_path, field=afields[alias].field), - destination=EdgeConnection(node_id=bfields[alias].node_path, field=bfields[alias].field), - ) - for alias in matching_fields - ] - return edges - - -class SessionError(Exception): - """Raised when a session error has occurred""" - - pass - - -def invoke_all(context: CliContext): - """Runs all invocations in the specified session""" - context.invoker.invoke(context.session, invoke_all=True) - while not context.get_session().is_complete(): - # Wait some time - time.sleep(0.1) - - # Print any errors - if context.session.has_error(): - for n in context.session.errors: - context.invoker.services.logger.error( - f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}" - ) - - raise SessionError() - - -def invoke_cli(): - logger.info(f"InvokeAI version {__version__}") - # get the optional list of invocations to execute on the command line - parser = config.get_parser() - parser.add_argument("commands", nargs="*") - invocation_commands = parser.parse_args().commands - - # get the optional file to read commands from. - # Simplest is to use it for STDIN - if infile := config.from_file: - sys.stdin = open(infile, "r") - - model_manager = ModelManagerService(config, logger) - - events = EventServiceBase() - output_folder = config.output_path - - # TODO: build a file/path manager? 
- if config.use_memory_db: - db_location = ":memory:" - else: - db_location = config.db_path - db_location.parent.mkdir(parents=True, exist_ok=True) - - db_conn = sqlite3.connect(db_location, check_same_thread=False) # TODO: figure out a better threading solution - logger.info(f'InvokeAI database location is "{db_location}"') - - graph_execution_manager = SqliteItemStorage[GraphExecutionState](conn=db_conn, table_name="graph_executions") - - urls = LocalUrlService() - image_record_storage = SqliteImageRecordStorage(conn=db_conn) - image_file_storage = DiskImageFileStorage(f"{output_folder}/images") - names = SimpleNameService() - - board_record_storage = SqliteBoardRecordStorage(conn=db_conn) - board_image_record_storage = SqliteBoardImageRecordStorage(conn=db_conn) - - boards = BoardService( - services=BoardServiceDependencies( - board_image_record_storage=board_image_record_storage, - board_record_storage=board_record_storage, - image_record_storage=image_record_storage, - url=urls, - logger=logger, - ) - ) - - board_images = BoardImagesService( - services=BoardImagesServiceDependencies( - board_image_record_storage=board_image_record_storage, - board_record_storage=board_record_storage, - image_record_storage=image_record_storage, - url=urls, - logger=logger, - ) - ) - - images = ImageService( - services=ImageServiceDependencies( - board_image_record_storage=board_image_record_storage, - image_record_storage=image_record_storage, - image_file_storage=image_file_storage, - url=urls, - logger=logger, - names=names, - graph_execution_manager=graph_execution_manager, - ) - ) - - services = InvocationServices( - model_manager=model_manager, - events=events, - latents=ForwardCacheLatentsStorage(DiskLatentsStorage(f"{output_folder}/latents")), - images=images, - boards=boards, - board_images=board_images, - queue=MemoryInvocationQueue(), - graph_library=SqliteItemStorage[LibraryGraph](conn=db_conn, table_name="graphs"), - graph_execution_manager=graph_execution_manager, - processor=DefaultInvocationProcessor(), - performance_statistics=InvocationStatsService(graph_execution_manager), - logger=logger, - configuration=config, - invocation_cache=MemoryInvocationCache(max_cache_size=config.node_cache_size), - ) - - system_graphs = create_system_graphs(services.graph_library) - system_graph_names = set([g.name for g in system_graphs]) - set_autocompleter(services) - - invoker = Invoker(services) - session: GraphExecutionState = invoker.create_execution_state() - parser = get_command_parser(services) - - re_negid = re.compile("^-[0-9]+$") - - # Uncomment to print out previous sessions at startup - # print(services.session_manager.list()) - - context = CliContext(invoker, session, parser) - set_autocompleter(services) - - command_line_args_exist = len(invocation_commands) > 0 - done = False - - while not done: - try: - if command_line_args_exist: - cmd_input = invocation_commands.pop(0) - done = len(invocation_commands) == 0 - else: - cmd_input = input("invoke> ") - except (KeyboardInterrupt, EOFError): - # Ctrl-c exits - break - - try: - # Refresh the state of the session - # history = list(get_graph_execution_history(context.session)) - history = list(reversed(context.nodes_added)) - - # Split the command for piping - cmds = cmd_input.split("|") - start_id = len(context.nodes_added) - current_id = start_id - new_invocations = list() - for cmd in cmds: - if cmd is None or cmd.strip() == "": - raise InvalidArgs("Empty command") - - # Parse args to create invocation - args = 
vars(context.parser.parse_args(shlex.split(cmd.strip()))) - - # Override defaults - for field_name, field_default in context.defaults.items(): - if field_name in args: - args[field_name] = field_default - - # Parse invocation - command: CliCommand = None # type:ignore - system_graph: Optional[LibraryGraph] = None - if args["type"] in system_graph_names: - system_graph = next(filter(lambda g: g.name == args["type"], system_graphs)) - invocation = GraphInvocation(graph=system_graph.graph, id=str(current_id)) - for exposed_input in system_graph.exposed_inputs: - if exposed_input.alias in args: - node = invocation.graph.get_node(exposed_input.node_path) - field = exposed_input.field - setattr(node, field, args[exposed_input.alias]) - command = CliCommand(command=invocation) - context.graph_nodes[invocation.id] = system_graph.id - else: - args["id"] = current_id - command = CliCommand(command=args) - - if command is None: - continue - - # Run any CLI commands immediately - if isinstance(command.command, BaseCommand): - # Invoke all current nodes to preserve operation order - invoke_all(context) - - # Run the command - command.command.run(context) - continue - - # TODO: handle linking with library graphs - # Pipe previous command output (if there was a previous command) - edges: list[Edge] = list() - if len(history) > 0 or current_id != start_id: - from_id = history[0] if current_id == start_id else str(current_id - 1) - from_node = ( - next(filter(lambda n: n[0].id == from_id, new_invocations))[0] - if current_id != start_id - else context.session.graph.get_node(from_id) - ) - matching_edges = generate_matching_edges(from_node, command.command, context) - edges.extend(matching_edges) - - # Parse provided links - if "link_node" in args and args["link_node"]: - for link in args["link_node"]: - node_id = link - if re_negid.match(node_id): - node_id = str(current_id + int(node_id)) - - link_node = context.session.graph.get_node(node_id) - matching_edges = generate_matching_edges(link_node, command.command, context) - matching_destinations = [e.destination for e in matching_edges] - edges = [e for e in edges if e.destination not in matching_destinations] - edges.extend(matching_edges) - - if "link" in args and args["link"]: - for link in args["link"]: - edges = [ - e - for e in edges - if e.destination.node_id != command.command.id or e.destination.field != link[2] - ] - - node_id = link[0] - if re_negid.match(node_id): - node_id = str(current_id + int(node_id)) - - # TODO: handle missing input/output - node_output = get_node_outputs(context.session.graph.get_node(node_id), context)[link[1]] - node_input = get_node_inputs(command.command, context)[link[2]] - - edges.append( - Edge( - source=EdgeConnection(node_id=node_output.node_path, field=node_output.field), - destination=EdgeConnection(node_id=node_input.node_path, field=node_input.field), - ) - ) - - new_invocations.append((command.command, edges)) - - current_id = current_id + 1 - - # Add the node to the session - context.add_node(command.command) - for edge in edges: - print(edge) - context.add_edge(edge) - - # Execute all remaining nodes - invoke_all(context) - - except InvalidArgs: - invoker.services.logger.warning('Invalid command, use "help" to list commands') - continue - - except ValidationError: - invoker.services.logger.warning('Invalid command arguments, run " --help" for summary') - - except SessionError: - # Start a new session - invoker.services.logger.warning("Session error: creating a new session") - context.reset() - - except 
ExitCli: - break - - except SystemExit: - continue - - invoker.stop() - - -if __name__ == "__main__": - if config.version: - print(f"InvokeAI version {__version__}") - else: - invoke_cli() diff --git a/mkdocs.yml b/mkdocs.yml index f95d83ac8f..97b2a16f19 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -134,6 +134,7 @@ nav: - List of Default Nodes: 'nodes/defaultNodes.md' - Workflow Editor Usage: 'nodes/NODES.md' - ComfyUI to InvokeAI: 'nodes/comfyToInvoke.md' + - Facetool Node: 'nodes/detailedNodes/faceTools.md' - Contributing Nodes: 'nodes/contributingNodes.md' - Features: - Overview: 'features/index.md' @@ -144,7 +145,7 @@ nav: - Image-to-Image: 'features/IMG2IMG.md' - Controlling Logging: 'features/LOGGING.md' - Model Merging: 'features/MODEL_MERGING.md' - - Using Nodes : './nodes/overview' + - Using Nodes : 'nodes/overview.md' - NSFW Checker: 'features/WATERMARK+NSFW.md' - Postprocessing: 'features/POSTPROCESS.md' - Prompting Features: 'features/PROMPTS.md' @@ -152,15 +153,18 @@ nav: - Unified Canvas: 'features/UNIFIED_CANVAS.md' - InvokeAI Web Server: 'features/WEB.md' - WebUI Hotkeys: "features/WEBUIHOTKEYS.md" + - Maintenance Utilities: "features/UTILITIES.md" - Other: 'features/OTHER.md' - Contributing: - How to Contribute: 'contributing/CONTRIBUTING.md' + - InvokeAI Code of Conduct: 'CODE_OF_CONDUCT.md' - Development: - Overview: 'contributing/contribution_guides/development.md' - New Contributors: 'contributing/contribution_guides/newContributorChecklist.md' - InvokeAI Architecture: 'contributing/ARCHITECTURE.md' - Frontend Documentation: 'contributing/contribution_guides/contributingToFrontend.md' - Local Development: 'contributing/LOCAL_DEVELOPMENT.md' + - Adding Tests: 'contributing/TESTS.md' - Documentation: 'contributing/contribution_guides/documentation.md' - Nodes: 'contributing/INVOCATIONS.md' - Translation: 'contributing/contribution_guides/translation.md' @@ -168,9 +172,12 @@ nav: - Changelog: 'CHANGELOG.md' - Deprecated: - Command Line Interface: 'deprecated/CLI.md' + - Variations: 'deprecated/VARIATIONS.md' + - Translations: 'deprecated/TRANSLATION.md' - Embiggen: 'deprecated/EMBIGGEN.md' - Inpainting: 'deprecated/INPAINTING.md' - Outpainting: 'deprecated/OUTPAINTING.md' + - Troubleshooting: 'help/deprecated/TROUBLESHOOT.md' - Help: - Getting Started: 'help/gettingStartedWithAI.md' - Diffusion Overview: 'help/diffusion.md' diff --git a/pyproject.toml b/pyproject.toml index 67486e1120..96c6c3dd73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -125,7 +125,7 @@ dependencies = [ # shortcut commands to start cli and web # "invokeai --web" will launch the web interface # "invokeai" will launch the CLI -"invokeai" = "invokeai.frontend.legacy_launch_invokeai:main" +# "invokeai" = "invokeai.frontend.legacy_launch_invokeai:main" # new shortcut to launch web interface "invokeai-web" = "invokeai.app.api_app:invoke_api" @@ -138,7 +138,6 @@ dependencies = [ "invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main" "invokeai-update" = "invokeai.frontend.install.invokeai_update:main" "invokeai-metadata" = "invokeai.backend.image_util.invoke_metadata:main" -"invokeai-node-cli" = "invokeai.app.cli_app:invoke_cli" "invokeai-node-web" = "invokeai.app.api_app:invoke_api" "invokeai-import-images" = "invokeai.frontend.install.import_images:main" "invokeai-db-maintenance" = "invokeai.backend.util.db_maintenance:main" From 67a343b3e45bfafec0b5deb62f0117610f3acab6 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 
11:20:06 +1100 Subject: [PATCH 101/202] Update pyproject.toml --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 96c6c3dd73..2bcaea2efa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -122,9 +122,8 @@ dependencies = [ "configure_invokeai.py" = "invokeai.frontend.install.invokeai_configure:invokeai_configure" "textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion" -# shortcut commands to start cli and web +# shortcut commands to start web ui # "invokeai --web" will launch the web interface -# "invokeai" will launch the CLI # "invokeai" = "invokeai.frontend.legacy_launch_invokeai:main" # new shortcut to launch web interface From 024aa5eb90a72eb99125895bba2397a4f23886bd Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:19:06 +1100 Subject: [PATCH 102/202] fix(ui): fix field sorting closes #4934 --- .../hooks/useAnyOrDirectInputFieldNames.ts | 19 +++++++--------- .../hooks/useConnectionInputFieldNames.ts | 22 +++++++++---------- .../nodes/hooks/useOutputFieldNames.ts | 8 +++---- .../nodes/util/getSortedFilteredFieldNames.ts | 20 +++++++++++++++++ 4 files changed, 42 insertions(+), 27 deletions(-) create mode 100644 invokeai/frontend/web/src/features/nodes/util/getSortedFilteredFieldNames.ts diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useAnyOrDirectInputFieldNames.ts b/invokeai/frontend/web/src/features/nodes/hooks/useAnyOrDirectInputFieldNames.ts index 36f2e8a62c..dda2efc156 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useAnyOrDirectInputFieldNames.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useAnyOrDirectInputFieldNames.ts @@ -9,6 +9,7 @@ import { POLYMORPHIC_TYPES, TYPES_WITH_INPUT_COMPONENTS, } from '../types/constants'; +import { getSortedFilteredFieldNames } from '../util/getSortedFilteredFieldNames'; export const useAnyOrDirectInputFieldNames = (nodeId: string) => { const selector = useMemo( @@ -24,17 +25,13 @@ export const useAnyOrDirectInputFieldNames = (nodeId: string) => { if (!nodeTemplate) { return []; } - return map(nodeTemplate.inputs) - .filter( - (field) => - (['any', 'direct'].includes(field.input) || - POLYMORPHIC_TYPES.includes(field.type)) && - TYPES_WITH_INPUT_COMPONENTS.includes(field.type) - ) - .filter((field) => !field.ui_hidden) - .sort((a, b) => (a.ui_order ?? 0) - (b.ui_order ?? 
0)) - .map((field) => field.name) - .filter((fieldName) => fieldName !== 'is_intermediate'); + const fields = map(nodeTemplate.inputs).filter( + (field) => + (['any', 'direct'].includes(field.input) || + POLYMORPHIC_TYPES.includes(field.type)) && + TYPES_WITH_INPUT_COMPONENTS.includes(field.type) + ); + return getSortedFilteredFieldNames(fields); }, defaultSelectorOptions ), diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useConnectionInputFieldNames.ts b/invokeai/frontend/web/src/features/nodes/hooks/useConnectionInputFieldNames.ts index eea874cc87..9fb31df801 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useConnectionInputFieldNames.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useConnectionInputFieldNames.ts @@ -9,6 +9,7 @@ import { TYPES_WITH_INPUT_COMPONENTS, } from '../types/constants'; import { isInvocationNode } from '../types/types'; +import { getSortedFilteredFieldNames } from '../util/getSortedFilteredFieldNames'; export const useConnectionInputFieldNames = (nodeId: string) => { const selector = useMemo( @@ -24,17 +25,16 @@ export const useConnectionInputFieldNames = (nodeId: string) => { if (!nodeTemplate) { return []; } - return map(nodeTemplate.inputs) - .filter( - (field) => - (field.input === 'connection' && - !POLYMORPHIC_TYPES.includes(field.type)) || - !TYPES_WITH_INPUT_COMPONENTS.includes(field.type) - ) - .filter((field) => !field.ui_hidden) - .sort((a, b) => (a.ui_order ?? 0) - (b.ui_order ?? 0)) - .map((field) => field.name) - .filter((fieldName) => fieldName !== 'is_intermediate'); + + // get the visible fields + const fields = map(nodeTemplate.inputs).filter( + (field) => + (field.input === 'connection' && + !POLYMORPHIC_TYPES.includes(field.type)) || + !TYPES_WITH_INPUT_COMPONENTS.includes(field.type) + ); + + return getSortedFilteredFieldNames(fields); }, defaultSelectorOptions ), diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useOutputFieldNames.ts b/invokeai/frontend/web/src/features/nodes/hooks/useOutputFieldNames.ts index 97956a4889..e0a1e5433e 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useOutputFieldNames.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useOutputFieldNames.ts @@ -5,6 +5,7 @@ import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; import { map } from 'lodash-es'; import { useMemo } from 'react'; import { isInvocationNode } from '../types/types'; +import { getSortedFilteredFieldNames } from '../util/getSortedFilteredFieldNames'; export const useOutputFieldNames = (nodeId: string) => { const selector = useMemo( @@ -20,11 +21,8 @@ export const useOutputFieldNames = (nodeId: string) => { if (!nodeTemplate) { return []; } - return map(nodeTemplate.outputs) - .filter((field) => !field.ui_hidden) - .sort((a, b) => (a.ui_order ?? 0) - (b.ui_order ?? 
0)) - .map((field) => field.name) - .filter((fieldName) => fieldName !== 'is_intermediate'); + + return getSortedFilteredFieldNames(map(nodeTemplate.outputs)); }, defaultSelectorOptions ), diff --git a/invokeai/frontend/web/src/features/nodes/util/getSortedFilteredFieldNames.ts b/invokeai/frontend/web/src/features/nodes/util/getSortedFilteredFieldNames.ts new file mode 100644 index 0000000000..b235fe8a07 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/getSortedFilteredFieldNames.ts @@ -0,0 +1,20 @@ +import { isNil } from 'lodash-es'; +import { InputFieldTemplate, OutputFieldTemplate } from '../types/types'; + +export const getSortedFilteredFieldNames = ( + fields: InputFieldTemplate[] | OutputFieldTemplate[] +): string[] => { + const visibleFields = fields.filter((field) => !field.ui_hidden); + + // we want explicitly ordered fields to be before unordered fields; split the list + const orderedFields = visibleFields + .filter((f) => !isNil(f.ui_order)) + .sort((a, b) => (a.ui_order ?? 0) - (b.ui_order ?? 0)); + const unorderedFields = visibleFields.filter((f) => isNil(f.ui_order)); + + // concat the lists, and return the field names, skipping `is_intermediate` + return orderedFields + .concat(unorderedFields) + .map((f) => f.name) + .filter((fieldName) => fieldName !== 'is_intermediate'); +}; From 5e6df975fd0245f1a0ccbc9f96e05f7c362311a3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 23:31:29 +1100 Subject: [PATCH 103/202] fix(nodes): fix math node validation Update field_validator api for pydantic v2 --- invokeai/app/invocations/math.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index 2aefa1def4..3ed325802e 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -3,7 +3,7 @@ from typing import Literal import numpy as np -from pydantic import field_validator +from pydantic import ValidationInfo, field_validator from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput @@ -186,12 +186,12 @@ class IntegerMathInvocation(BaseInvocation): b: int = InputField(default=0, description=FieldDescriptions.num_2) @field_validator("b") - def no_unrepresentable_results(cls, v, values): - if values["operation"] == "DIV" and v == 0: + def no_unrepresentable_results(cls, v: int, info: ValidationInfo): + if info.data["operation"] == "DIV" and v == 0: raise ValueError("Cannot divide by zero") - elif values["operation"] == "MOD" and v == 0: + elif info.data["operation"] == "MOD" and v == 0: raise ValueError("Cannot divide by zero") - elif values["operation"] == "EXP" and v < 0: + elif info.data["operation"] == "EXP" and v < 0: raise ValueError("Result of exponentiation is not an integer") return v @@ -260,12 +260,12 @@ class FloatMathInvocation(BaseInvocation): b: float = InputField(default=0, description=FieldDescriptions.num_2) @field_validator("b") - def no_unrepresentable_results(cls, v, values): - if values["operation"] == "DIV" and v == 0: + def no_unrepresentable_results(cls, v: float, info: ValidationInfo): + if info.data["operation"] == "DIV" and v == 0: raise ValueError("Cannot divide by zero") - elif values["operation"] == "EXP" and values["a"] == 0 and v < 0: + elif info.data["operation"] == "EXP" and info.data["a"] == 0 and v < 0: raise ValueError("Cannot raise zero to a negative power") - elif values["operation"] == "EXP" and type(values["a"] ** v) is 
complex: + elif info.data["operation"] == "EXP" and type(info.data["a"] ** v) is complex: raise ValueError("Root operation resulted in a complex number") return v From 0a01d86ab13293a0f2ac4d83d40fb93ebd85c6f8 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 22:36:25 +1100 Subject: [PATCH 104/202] fix(ui): fix multiple control adapters on canvas We were making an edges for each adapter where we should isntead have one from the adapter's collect node into the denoising node --- .../addControlNetToLinearGraph.ts | 20 ++++++++-------- .../addIPAdapterToLinearGraph.ts | 24 +++++++++---------- .../addT2IAdapterToLinearGraph.ts | 20 ++++++++-------- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addControlNetToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addControlNetToLinearGraph.ts index 552a9c6c9b..37bd82d4f8 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addControlNetToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addControlNetToLinearGraph.ts @@ -43,6 +43,16 @@ export const addControlNetToLinearGraph = ( }, }); + if (CANVAS_COHERENCE_DENOISE_LATENTS in graph.nodes) { + graph.edges.push({ + source: { node_id: CONTROL_NET_COLLECT, field: 'collection' }, + destination: { + node_id: CANVAS_COHERENCE_DENOISE_LATENTS, + field: 'control', + }, + }); + } + validControlNets.forEach((controlNet) => { if (!controlNet.model) { return; @@ -106,16 +116,6 @@ export const addControlNetToLinearGraph = ( field: 'item', }, }); - - if (CANVAS_COHERENCE_DENOISE_LATENTS in graph.nodes) { - graph.edges.push({ - source: { node_id: controlNetNode.id, field: 'control' }, - destination: { - node_id: CANVAS_COHERENCE_DENOISE_LATENTS, - field: 'control', - }, - }); - } }); } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addIPAdapterToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addIPAdapterToLinearGraph.ts index f96b2e52ed..19bf7d8338 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addIPAdapterToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addIPAdapterToLinearGraph.ts @@ -32,15 +32,25 @@ export const addIPAdapterToLinearGraph = ( type: 'collect', is_intermediate: true, }; - graph.nodes[ipAdapterCollectNode.id] = ipAdapterCollectNode; + graph.nodes[IP_ADAPTER_COLLECT] = ipAdapterCollectNode; graph.edges.push({ - source: { node_id: ipAdapterCollectNode.id, field: 'collection' }, + source: { node_id: IP_ADAPTER_COLLECT, field: 'collection' }, destination: { node_id: baseNodeId, field: 'ip_adapter', }, }); + if (CANVAS_COHERENCE_DENOISE_LATENTS in graph.nodes) { + graph.edges.push({ + source: { node_id: IP_ADAPTER_COLLECT, field: 'collection' }, + destination: { + node_id: CANVAS_COHERENCE_DENOISE_LATENTS, + field: 'ip_adapter', + }, + }); + } + validIPAdapters.forEach((ipAdapter) => { if (!ipAdapter.model) { return; @@ -87,16 +97,6 @@ export const addIPAdapterToLinearGraph = ( field: 'item', }, }); - - if (CANVAS_COHERENCE_DENOISE_LATENTS in graph.nodes) { - graph.edges.push({ - source: { node_id: ipAdapterNode.id, field: 'ip_adapter' }, - destination: { - node_id: CANVAS_COHERENCE_DENOISE_LATENTS, - field: 'ip_adapter', - }, - }); - } }); } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts index 16dc5bbc71..9511475bb3 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts @@ -42,6 +42,16 @@ export const addT2IAdaptersToLinearGraph = ( }, }); + if (CANVAS_COHERENCE_DENOISE_LATENTS in graph.nodes) { + graph.edges.push({ + source: { node_id: T2I_ADAPTER_COLLECT, field: 'collection' }, + destination: { + node_id: CANVAS_COHERENCE_DENOISE_LATENTS, + field: 't2i_adapter', + }, + }); + } + validT2IAdapters.forEach((t2iAdapter) => { if (!t2iAdapter.model) { return; @@ -103,16 +113,6 @@ export const addT2IAdaptersToLinearGraph = ( field: 'item', }, }); - - if (CANVAS_COHERENCE_DENOISE_LATENTS in graph.nodes) { - graph.edges.push({ - source: { node_id: t2iAdapterNode.id, field: 't2i_adapter' }, - destination: { - node_id: CANVAS_COHERENCE_DENOISE_LATENTS, - field: 't2i_adapter', - }, - }); - } }); } }; From fdf02c33d06ea6f2d297a6064981b94df7148c4d Mon Sep 17 00:00:00 2001 From: d8ahazard Date: Tue, 17 Oct 2023 21:59:04 -0500 Subject: [PATCH 105/202] Catch generic model errors Prevent the app from dying on invalid models. --- invokeai/backend/model_management/model_manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 38a7361c85..9390c8ce54 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -1011,6 +1011,8 @@ class ModelManager(object): self.logger.warning(f"Not a valid model: {model_path}. {e}") except NotImplementedError as e: self.logger.warning(e) + except Exception as e: + self.logger.warning(f"Error loading model {model_path}. {e}") imported_models = self.scan_autoimport_directory() if (new_models_found or imported_models) and self.config_path: From a459786d735cc2b14e7631e9d955c544bd7ce6e3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 21:40:34 +1100 Subject: [PATCH 106/202] fix(nodes): enable number to string coercion --- invokeai/app/invocations/baseinvocation.py | 1 + 1 file changed, 1 insertion(+) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 8bd4a89f45..ba94e7c440 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -693,6 +693,7 @@ class BaseInvocation(ABC, BaseModel): validate_assignment=True, json_schema_extra=json_schema_extra, json_schema_serialization_defaults_required=True, + coerce_numbers_to_str=True, ) From 9e063711789336ab16a07e1873b753eee1b094c9 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 13:39:21 +1100 Subject: [PATCH 107/202] feat(api): serve app via route & add cache-control: no-store This should prevent `index.html` from *ever* being cached, so UIs will never be out of date. Minor organisation to accomodate this. 
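A minimal sketch of the caching approach described above, assuming a generic FastAPI app and a prebuilt dist/ directory (paths and names here are illustrative, not necessarily the project's actual layout): the UI entry point is served from an explicit route so a Cache-Control header can be attached, while hashed build assets remain on a static mount.

from pathlib import Path

from fastapi import FastAPI
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles

app = FastAPI()
dist_dir = Path("dist")  # assumed build output directory

# Serve index.html via a route so headers can be set; "no-store" tells
# browsers never to cache the UI entry point, so a new release is picked
# up on the next page load.
@app.get("/", include_in_schema=False)
def get_index() -> FileResponse:
    return FileResponse(dist_dir / "index.html", headers={"Cache-Control": "no-store"})

# Hashed assets change name on every build, so they can still be mounted statically.
app.mount("/assets", StaticFiles(directory=dist_dir / "assets"), name="assets")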
Deleting old unused files from the early days --- invokeai/app/api_app.py | 24 +- .../static/{dream_web => docs}/favicon.ico | Bin .../frontend/web/static/dream_web/index.css | 179 -------- .../frontend/web/static/dream_web/index.html | 187 -------- .../frontend/web/static/dream_web/index.js | 409 ------------------ .../frontend/web/static/dream_web/test.html | 246 ----------- .../web/static/legacy_web/favicon.ico | Bin 1150 -> 0 bytes .../frontend/web/static/legacy_web/index.css | 152 ------- .../frontend/web/static/legacy_web/index.html | 137 ------ .../frontend/web/static/legacy_web/index.js | 234 ---------- 10 files changed, 17 insertions(+), 1551 deletions(-) rename invokeai/frontend/web/static/{dream_web => docs}/favicon.ico (100%) delete mode 100644 invokeai/frontend/web/static/dream_web/index.css delete mode 100644 invokeai/frontend/web/static/dream_web/index.html delete mode 100644 invokeai/frontend/web/static/dream_web/index.js delete mode 100644 invokeai/frontend/web/static/dream_web/test.html delete mode 100644 invokeai/frontend/web/static/legacy_web/favicon.ico delete mode 100644 invokeai/frontend/web/static/legacy_web/index.css delete mode 100644 invokeai/frontend/web/static/legacy_web/index.html delete mode 100644 invokeai/frontend/web/static/legacy_web/index.js diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index e07b037dd1..f45541e63b 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -23,6 +23,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from fastapi_events.handlers.local import local_handler from fastapi_events.middleware import EventHandlerASGIMiddleware from pydantic.json_schema import models_json_schema + from fastapi.responses import FileResponse # noinspection PyUnresolvedReferences import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import) @@ -173,16 +174,13 @@ def custom_openapi(): app.openapi = custom_openapi # type: ignore [method-assign] # this is a valid assignment -# Override API doc favicons -app.mount("/static", StaticFiles(directory=Path(web_dir.__path__[0], "static/dream_web")), name="static") - @app.get("/docs", include_in_schema=False) def overridden_swagger(): return get_swagger_ui_html( openapi_url=app.openapi_url, title=app.title, - swagger_favicon_url="/static/favicon.ico", + swagger_favicon_url="/static/docs/favicon.ico", ) @@ -191,12 +189,24 @@ def overridden_redoc(): return get_redoc_html( openapi_url=app.openapi_url, title=app.title, - redoc_favicon_url="/static/favicon.ico", + redoc_favicon_url="/static/docs/favicon.ico", ) -# Must mount *after* the other routes else it borks em -app.mount("/", StaticFiles(directory=Path(web_dir.__path__[0], "dist"), html=True), name="ui") +web_root_path = Path(list(web_dir.__path__)[0]) + + +# Cannot add headers to StaticFiles, so we must serve index.html with a custom route +# Add cache-control: no-store header to prevent caching of index.html, which leads to broken UIs at release +@app.get("/", include_in_schema=False, name="ui_root") +def get_index() -> FileResponse: + return FileResponse(Path(web_root_path, "dist/index.html"), headers={"Cache-Control": "no-store"}) + + +# # Must mount *after* the other routes else it borks em +app.mount("/static", StaticFiles(directory=Path(web_root_path, "static/")), name="static") # docs favicon is in here +app.mount("/assets", StaticFiles(directory=Path(web_root_path, "dist/assets/")), name="assets") +app.mount("/locales", StaticFiles(directory=Path(web_root_path, 
"dist/locales/")), name="locales") def invoke_api(): diff --git a/invokeai/frontend/web/static/dream_web/favicon.ico b/invokeai/frontend/web/static/docs/favicon.ico similarity index 100% rename from invokeai/frontend/web/static/dream_web/favicon.ico rename to invokeai/frontend/web/static/docs/favicon.ico diff --git a/invokeai/frontend/web/static/dream_web/index.css b/invokeai/frontend/web/static/dream_web/index.css deleted file mode 100644 index 25a0994a3d..0000000000 --- a/invokeai/frontend/web/static/dream_web/index.css +++ /dev/null @@ -1,179 +0,0 @@ -:root { - --fields-dark:#DCDCDC; - --fields-light:#F5F5F5; -} - -* { - font-family: 'Arial'; - font-size: 100%; -} -body { - font-size: 1em; -} -textarea { - font-size: 0.95em; -} -header, form, #progress-section { - margin-left: auto; - margin-right: auto; - max-width: 1024px; - text-align: center; -} -fieldset { - border: none; - line-height: 2.2em; -} -fieldset > legend { - width: auto; - margin-left: 0; - margin-right: auto; - font-weight:bold; -} -select, input { - margin-right: 10px; - padding: 2px; -} -input:disabled { - cursor:auto; -} -input[type=submit] { - cursor: pointer; - background-color: #666; - color: white; -} -input[type=checkbox] { - cursor: pointer; - margin-right: 0px; - width: 20px; - height: 20px; - vertical-align: middle; -} -input#seed { - margin-right: 0px; -} -div { - padding: 10px 10px 10px 10px; -} -header { - margin-bottom: 16px; -} -header h1 { - margin-bottom: 0; - font-size: 2em; -} -#search-box { - display: flex; -} -#scaling-inprocess-message { - font-weight: bold; - font-style: italic; - display: none; -} -#prompt { - flex-grow: 1; - padding: 5px 10px 5px 10px; - border: 1px solid #999; - outline: none; -} -#submit { - padding: 5px 10px 5px 10px; - border: 1px solid #999; -} -#reset-all, #remove-image { - margin-top: 12px; - font-size: 0.8em; - background-color: pink; - border: 1px solid #999; - border-radius: 4px; -} -#results { - text-align: center; - margin: auto; - padding-top: 10px; -} -#results figure { - display: inline-block; - margin: 10px; -} -#results figcaption { - font-size: 0.8em; - padding: 3px; - color: #888; - cursor: pointer; -} -#results img { - border-radius: 5px; - object-fit: contain; - background-color: var(--fields-dark); -} -#fieldset-config { - line-height:2em; -} -input[type="number"] { - width: 60px; -} -#seed { - width: 150px; -} -button#reset-seed { - font-size: 1.7em; - background: #efefef; - border: 1px solid #999; - border-radius: 4px; - line-height: 0.8; - margin: 0 10px 0 0; - padding: 0 5px 3px; - vertical-align: middle; -} -label { - white-space: nowrap; -} -#progress-section { - display: none; -} -#progress-image { - width: 30vh; - height: 30vh; - object-fit: contain; - background-color: var(--fields-dark); -} -#cancel-button { - cursor: pointer; - color: red; -} -#txt2img { - background-color: var(--fields-dark); -} -#variations { - background-color: var(--fields-light); -} -#initimg { - background-color: var(--fields-dark); -} -#img2img { - background-color: var(--fields-light); -} -#initimg > :not(legend) { - background-color: var(--fields-light); - margin: .5em; -} - -#postprocess, #initimg { - display:flex; - flex-wrap:wrap; - padding: 0; - margin-top: 1em; - background-color: var(--fields-dark); -} -#postprocess > fieldset, #initimg > * { - flex-grow: 1; -} -#postprocess > fieldset { - background-color: var(--fields-dark); -} -#progress-section { - background-color: var(--fields-light); -} -#no-results-message:not(:only-child) { - display: none; -} diff --git 
a/invokeai/frontend/web/static/dream_web/index.html b/invokeai/frontend/web/static/dream_web/index.html deleted file mode 100644 index feb542adb2..0000000000 --- a/invokeai/frontend/web/static/dream_web/index.html +++ /dev/null @@ -1,187 +0,0 @@
[187 deleted lines: the legacy "Stable Diffusion Dream Server" HTML page (header with GitHub support link, generate form, progress section, results gallery); the markup was lost to extraction and only stray text fragments remained, so the body is omitted here]
- - - diff --git a/invokeai/frontend/web/static/dream_web/index.js b/invokeai/frontend/web/static/dream_web/index.js deleted file mode 100644 index 438232f0c7..0000000000 --- a/invokeai/frontend/web/static/dream_web/index.js +++ /dev/null @@ -1,409 +0,0 @@ -const socket = io(); - -var priorResultsLoadState = { - page: 0, - pages: 1, - per_page: 10, - total: 20, - offset: 0, // number of items generated since last load - loading: false, - initialized: false, -}; - -function loadPriorResults() { - // Fix next page by offset - let offsetPages = - priorResultsLoadState.offset / priorResultsLoadState.per_page; - priorResultsLoadState.page += offsetPages; - priorResultsLoadState.pages += offsetPages; - priorResultsLoadState.total += priorResultsLoadState.offset; - priorResultsLoadState.offset = 0; - - if (priorResultsLoadState.loading) { - return; - } - - if (priorResultsLoadState.page >= priorResultsLoadState.pages) { - return; // Nothing more to load - } - - // Load - priorResultsLoadState.loading = true; - let url = new URL('/api/images', document.baseURI); - url.searchParams.append( - 'page', - priorResultsLoadState.initialized - ? priorResultsLoadState.page + 1 - : priorResultsLoadState.page - ); - url.searchParams.append('per_page', priorResultsLoadState.per_page); - fetch(url.href, { - method: 'GET', - headers: new Headers({ 'content-type': 'application/json' }), - }) - .then((response) => response.json()) - .then((data) => { - priorResultsLoadState.page = data.page; - priorResultsLoadState.pages = data.pages; - priorResultsLoadState.per_page = data.per_page; - priorResultsLoadState.total = data.total; - - data.items.forEach(function (dreamId, index) { - let src = 'api/images/' + dreamId; - fetch('/api/images/' + dreamId + '/metadata', { - method: 'GET', - headers: new Headers({ 'content-type': 'application/json' }), - }) - .then((response) => response.json()) - .then((metadata) => { - let seed = metadata.seed || 0; // TODO: Parse old metadata - appendOutput(src, seed, metadata, true); - }); - }); - - // Load until page is full - if (!priorResultsLoadState.initialized) { - if (document.body.scrollHeight <= window.innerHeight) { - loadPriorResults(); - } - } - }) - .finally(() => { - priorResultsLoadState.loading = false; - priorResultsLoadState.initialized = true; - }); -} - -function resetForm() { - var form = document.getElementById('generate-form'); - form.querySelector('fieldset').removeAttribute('disabled'); -} - -function initProgress(totalSteps, showProgressImages) { - // TODO: Progress could theoretically come from multiple jobs at the same time (in the future) - let progressSectionEle = document.querySelector('#progress-section'); - progressSectionEle.style.display = 'initial'; - let progressEle = document.querySelector('#progress-bar'); - progressEle.setAttribute('max', totalSteps); - - let progressImageEle = document.querySelector('#progress-image'); - progressImageEle.src = BLANK_IMAGE_URL; - progressImageEle.style.display = showProgressImages ? 
'initial' : 'none'; -} - -function setProgress(step, totalSteps, src) { - let progressEle = document.querySelector('#progress-bar'); - progressEle.setAttribute('value', step); - - if (src) { - let progressImageEle = document.querySelector('#progress-image'); - progressImageEle.src = src; - } -} - -function resetProgress(hide = true) { - if (hide) { - let progressSectionEle = document.querySelector('#progress-section'); - progressSectionEle.style.display = 'none'; - } - let progressEle = document.querySelector('#progress-bar'); - progressEle.setAttribute('value', 0); -} - -function toBase64(file) { - return new Promise((resolve, reject) => { - const r = new FileReader(); - r.readAsDataURL(file); - r.onload = () => resolve(r.result); - r.onerror = (error) => reject(error); - }); -} - -function ondragdream(event) { - let dream = event.target.dataset.dream; - event.dataTransfer.setData('dream', dream); -} - -function seedClick(event) { - // Get element - var image = event.target.closest('figure').querySelector('img'); - var dream = JSON.parse(decodeURIComponent(image.dataset.dream)); - - let form = document.querySelector('#generate-form'); - for (const [k, v] of new FormData(form)) { - if (k == 'initimg') { - continue; - } - let formElem = form.querySelector(`*[name=${k}]`); - formElem.value = dream[k] !== undefined ? dream[k] : formElem.defaultValue; - } - - document.querySelector('#seed').value = dream.seed; - document.querySelector('#iterations').value = 1; // Reset to 1 iteration since we clicked a single image (not a full job) - - // NOTE: leaving this manual for the user for now - it was very confusing with this behavior - // document.querySelector("#with_variations").value = variations || ''; - // if (document.querySelector("#variation_amount").value <= 0) { - // document.querySelector("#variation_amount").value = 0.2; - // } - - saveFields(document.querySelector('#generate-form')); -} - -function appendOutput(src, seed, config, toEnd = false) { - let outputNode = document.createElement('figure'); - let altText = seed.toString() + ' | ' + config.prompt; - - // img needs width and height for lazy loading to work - // TODO: store the full config in a data attribute on the image? - const figureContents = ` - - ${altText} - -
${seed}
- `; - - outputNode.innerHTML = figureContents; - - if (toEnd) { - document.querySelector('#results').append(outputNode); - } else { - document.querySelector('#results').prepend(outputNode); - } - document.querySelector('#no-results-message')?.remove(); -} - -function saveFields(form) { - for (const [k, v] of new FormData(form)) { - if (typeof v !== 'object') { - // Don't save 'file' type - localStorage.setItem(k, v); - } - } -} - -function loadFields(form) { - for (const [k, v] of new FormData(form)) { - const item = localStorage.getItem(k); - if (item != null) { - form.querySelector(`*[name=${k}]`).value = item; - } - } -} - -function clearFields(form) { - localStorage.clear(); - let prompt = form.prompt.value; - form.reset(); - form.prompt.value = prompt; -} - -const BLANK_IMAGE_URL = - 'data:image/svg+xml,'; -async function generateSubmit(form) { - // Convert file data to base64 - // TODO: Should probably uplaod files with formdata or something, and store them in the backend? - let formData = Object.fromEntries(new FormData(form)); - if (!formData.enable_generate && !formData.enable_init_image) { - gen_label = document.querySelector('label[for=enable_generate]').innerHTML; - initimg_label = document.querySelector( - 'label[for=enable_init_image]' - ).innerHTML; - alert(`Error: one of "${gen_label}" or "${initimg_label}" must be set`); - } - - formData.initimg_name = formData.initimg.name; - formData.initimg = - formData.initimg.name !== '' ? await toBase64(formData.initimg) : null; - - // Evaluate all checkboxes - let checkboxes = form.querySelectorAll('input[type=checkbox]'); - checkboxes.forEach(function (checkbox) { - if (checkbox.checked) { - formData[checkbox.name] = 'true'; - } - }); - - let strength = formData.strength; - let totalSteps = formData.initimg - ? Math.floor(strength * formData.steps) - : formData.steps; - let showProgressImages = formData.progress_images; - - // Set enabling flags - - // Initialize the progress bar - initProgress(totalSteps, showProgressImages); - - // POST, use response to listen for events - fetch(form.action, { - method: form.method, - headers: new Headers({ 'content-type': 'application/json' }), - body: JSON.stringify(formData), - }) - .then((response) => response.json()) - .then((data) => { - var jobId = data.jobId; - socket.emit('join_room', { room: jobId }); - }); - - form.querySelector('fieldset').setAttribute('disabled', ''); -} - -function fieldSetEnableChecked(event) { - cb = event.target; - fields = cb.closest('fieldset'); - fields.disabled = !cb.checked; -} - -// Socket listeners -socket.on('job_started', (data) => {}); - -socket.on('dream_result', (data) => { - var jobId = data.jobId; - var dreamId = data.dreamId; - var dreamRequest = data.dreamRequest; - var src = 'api/images/' + dreamId; - - priorResultsLoadState.offset += 1; - appendOutput(src, dreamRequest.seed, dreamRequest); - - resetProgress(false); -}); - -socket.on('dream_progress', (data) => { - // TODO: it'd be nice if we could get a seed reported here, but the generator would need to be updated - var step = data.step; - var totalSteps = data.totalSteps; - var jobId = data.jobId; - var dreamId = data.dreamId; - - var progressType = data.progressType; - if (progressType === 'GENERATION') { - var src = data.hasProgressImage - ? 
'api/intermediates/' + dreamId + '/' + step - : null; - setProgress(step, totalSteps, src); - } else if (progressType === 'UPSCALING_STARTED') { - // step and totalSteps are used for upscale count on this message - document.getElementById('processing_cnt').textContent = step; - document.getElementById('processing_total').textContent = totalSteps; - document.getElementById('scaling-inprocess-message').style.display = - 'block'; - } else if (progressType == 'UPSCALING_DONE') { - document.getElementById('scaling-inprocess-message').style.display = 'none'; - } -}); - -socket.on('job_canceled', (data) => { - resetForm(); - resetProgress(); -}); - -socket.on('job_done', (data) => { - jobId = data.jobId; - socket.emit('leave_room', { room: jobId }); - - resetForm(); - resetProgress(); -}); - -window.onload = async () => { - document.querySelector('#prompt').addEventListener('keydown', (e) => { - if (e.key === 'Enter' && !e.shiftKey) { - const form = e.target.form; - generateSubmit(form); - } - }); - document.querySelector('#generate-form').addEventListener('submit', (e) => { - e.preventDefault(); - const form = e.target; - - generateSubmit(form); - }); - document.querySelector('#generate-form').addEventListener('change', (e) => { - saveFields(e.target.form); - }); - document.querySelector('#reset-seed').addEventListener('click', (e) => { - document.querySelector('#seed').value = 0; - saveFields(e.target.form); - }); - document.querySelector('#reset-all').addEventListener('click', (e) => { - clearFields(e.target.form); - }); - document.querySelector('#remove-image').addEventListener('click', (e) => { - initimg.value = null; - }); - loadFields(document.querySelector('#generate-form')); - - document.querySelector('#cancel-button').addEventListener('click', () => { - fetch('/api/cancel').catch((e) => { - console.error(e); - }); - }); - document.documentElement.addEventListener('keydown', (e) => { - if (e.key === 'Escape') - fetch('/api/cancel').catch((err) => { - console.error(err); - }); - }); - - if (!config.gfpgan_model_exists) { - document.querySelector('#gfpgan').style.display = 'none'; - } - - window.addEventListener('scroll', () => { - if (window.innerHeight + window.pageYOffset >= document.body.offsetHeight) { - loadPriorResults(); - } - }); - - // Enable/disable forms by checkboxes - document - .querySelectorAll('legend > input[type=checkbox]') - .forEach(function (cb) { - cb.addEventListener('change', fieldSetEnableChecked); - fieldSetEnableChecked({ target: cb }); - }); - - // Load some of the previous results - loadPriorResults(); - - // Image drop/upload WIP - /* - let drop = document.getElementById('dropper'); - function ondrop(event) { - let dreamData = event.dataTransfer.getData('dream'); - if (dreamData) { - var dream = JSON.parse(decodeURIComponent(dreamData)); - alert(dream.dreamId); - } - }; - - function ondragenter(event) { - event.preventDefault(); - }; - - function ondragover(event) { - event.preventDefault(); - }; - - function ondragleave(event) { - - } - - drop.addEventListener('drop', ondrop); - drop.addEventListener('dragenter', ondragenter); - drop.addEventListener('dragover', ondragover); - drop.addEventListener('dragleave', ondragleave); - */ -}; diff --git a/invokeai/frontend/web/static/dream_web/test.html b/invokeai/frontend/web/static/dream_web/test.html deleted file mode 100644 index cbb746a5a1..0000000000 --- a/invokeai/frontend/web/static/dream_web/test.html +++ /dev/null @@ -1,246 +0,0 @@ - - - InvokeAI Test - - - - - - - - - - - - - - - -
- -
diff --git a/invokeai/frontend/web/static/legacy_web/favicon.ico b/invokeai/frontend/web/static/legacy_web/favicon.ico
deleted file mode 100644
index 51eb844a6a4a9d4b13e17e38b0fc915e7e97d4b5..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
[The 1150-byte base85 literal for the deleted favicon, together with the header of the following hunk (the deletion of the legacy web index.html), was mangled in extraction. Of that page only stray text fragments remain: the "Stable Diffusion Dream Server" title, the GitHub support link, the "Basic options", "Image-to-image options" and "Post-processing options" legends, the "Postprocessing...1/3" status line, and the "No results..." message. The unrecoverable hunk bodies are omitted here.]
- - diff --git a/invokeai/frontend/web/static/legacy_web/index.js b/invokeai/frontend/web/static/legacy_web/index.js deleted file mode 100644 index a150f3f2e9..0000000000 --- a/invokeai/frontend/web/static/legacy_web/index.js +++ /dev/null @@ -1,234 +0,0 @@ -function toBase64(file) { - return new Promise((resolve, reject) => { - const r = new FileReader(); - r.readAsDataURL(file); - r.onload = () => resolve(r.result); - r.onerror = (error) => reject(error); - }); -} - -function appendOutput(src, seed, config) { - let outputNode = document.createElement('figure'); - - let variations = config.with_variations; - if (config.variation_amount > 0) { - variations = - (variations ? variations + ',' : '') + - seed + - ':' + - config.variation_amount; - } - let baseseed = - config.with_variations || config.variation_amount > 0 ? config.seed : seed; - let altText = - baseseed + ' | ' + (variations ? variations + ' | ' : '') + config.prompt; - - // img needs width and height for lazy loading to work - const figureContents = ` - - ${altText} - -
${seed}
- `; - - outputNode.innerHTML = figureContents; - let figcaption = outputNode.querySelector('figcaption'); - - // Reload image config - figcaption.addEventListener('click', () => { - let form = document.querySelector('#generate-form'); - for (const [k, v] of new FormData(form)) { - if (k == 'initimg') { - continue; - } - form.querySelector(`*[name=${k}]`).value = config[k]; - } - - document.querySelector('#seed').value = baseseed; - document.querySelector('#with_variations').value = variations || ''; - if (document.querySelector('#variation_amount').value <= 0) { - document.querySelector('#variation_amount').value = 0.2; - } - - saveFields(document.querySelector('#generate-form')); - }); - - document.querySelector('#results').prepend(outputNode); -} - -function saveFields(form) { - for (const [k, v] of new FormData(form)) { - if (typeof v !== 'object') { - // Don't save 'file' type - localStorage.setItem(k, v); - } - } -} - -function loadFields(form) { - for (const [k, v] of new FormData(form)) { - const item = localStorage.getItem(k); - if (item != null) { - form.querySelector(`*[name=${k}]`).value = item; - } - } -} - -function clearFields(form) { - localStorage.clear(); - let prompt = form.prompt.value; - form.reset(); - form.prompt.value = prompt; -} - -const BLANK_IMAGE_URL = - 'data:image/svg+xml,'; -async function generateSubmit(form) { - const prompt = document.querySelector('#prompt').value; - - // Convert file data to base64 - let formData = Object.fromEntries(new FormData(form)); - formData.initimg_name = formData.initimg.name; - formData.initimg = - formData.initimg.name !== '' ? await toBase64(formData.initimg) : null; - - let strength = formData.strength; - let totalSteps = formData.initimg - ? Math.floor(strength * formData.steps) - : formData.steps; - - let progressSectionEle = document.querySelector('#progress-section'); - progressSectionEle.style.display = 'initial'; - let progressEle = document.querySelector('#progress-bar'); - progressEle.setAttribute('max', totalSteps); - let progressImageEle = document.querySelector('#progress-image'); - progressImageEle.src = BLANK_IMAGE_URL; - - progressImageEle.style.display = {}.hasOwnProperty.call( - formData, - 'progress_images' - ) - ? 
'initial' - : 'none'; - - // Post as JSON, using Fetch streaming to get results - fetch(form.action, { - method: form.method, - body: JSON.stringify(formData), - }).then(async (response) => { - const reader = response.body.getReader(); - - let noOutputs = true; - while (true) { - let { value, done } = await reader.read(); - value = new TextDecoder().decode(value); - if (done) { - progressSectionEle.style.display = 'none'; - break; - } - - for (let event of value.split('\n').filter((e) => e !== '')) { - const data = JSON.parse(event); - - if (data.event === 'result') { - noOutputs = false; - appendOutput(data.url, data.seed, data.config); - progressEle.setAttribute('value', 0); - progressEle.setAttribute('max', totalSteps); - } else if (data.event === 'upscaling-started') { - document.getElementById('processing_cnt').textContent = - data.processed_file_cnt; - document.getElementById('scaling-inprocess-message').style.display = - 'block'; - } else if (data.event === 'upscaling-done') { - document.getElementById('scaling-inprocess-message').style.display = - 'none'; - } else if (data.event === 'step') { - progressEle.setAttribute('value', data.step); - if (data.url) { - progressImageEle.src = data.url; - } - } else if (data.event === 'canceled') { - // avoid alerting as if this were an error case - noOutputs = false; - } - } - } - - // Re-enable form, remove no-results-message - form.querySelector('fieldset').removeAttribute('disabled'); - document.querySelector('#prompt').value = prompt; - document.querySelector('progress').setAttribute('value', '0'); - - if (noOutputs) { - alert('Error occurred while generating.'); - } - }); - - // Disable form while generating - form.querySelector('fieldset').setAttribute('disabled', ''); - document.querySelector('#prompt').value = `Generating: "${prompt}"`; -} - -async function fetchRunLog() { - try { - let response = await fetch('/run_log.json'); - const data = await response.json(); - for (let item of data.run_log) { - appendOutput(item.url, item.seed, item); - } - } catch (e) { - console.error(e); - } -} - -window.onload = async () => { - document.querySelector('#prompt').addEventListener('keydown', (e) => { - if (e.key === 'Enter' && !e.shiftKey) { - const form = e.target.form; - generateSubmit(form); - } - }); - document.querySelector('#generate-form').addEventListener('submit', (e) => { - e.preventDefault(); - const form = e.target; - - generateSubmit(form); - }); - document.querySelector('#generate-form').addEventListener('change', (e) => { - saveFields(e.target.form); - }); - document.querySelector('#reset-seed').addEventListener('click', (e) => { - document.querySelector('#seed').value = -1; - saveFields(e.target.form); - }); - document.querySelector('#reset-all').addEventListener('click', (e) => { - clearFields(e.target.form); - }); - document.querySelector('#remove-image').addEventListener('click', (e) => { - initimg.value = null; - }); - loadFields(document.querySelector('#generate-form')); - - document.querySelector('#cancel-button').addEventListener('click', () => { - fetch('/cancel').catch((e) => { - console.error(e); - }); - }); - document.documentElement.addEventListener('keydown', (e) => { - if (e.key === 'Escape') - fetch('/cancel').catch((err) => { - console.error(err); - }); - }); - - if (!config.gfpgan_model_exists) { - document.querySelector('#gfpgan').style.display = 'none'; - } - await fetchRunLog(); -}; From ef14ba17131674319c56bc565bf5069ef9e78647 Mon Sep 17 00:00:00 2001 From: psychedelicious 
<4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 13:40:26 +1100 Subject: [PATCH 108/202] fix(api): fix uvicorn config loop arg We were providing the loop itself, not the kind of loop. This didn't appear to cause any issues whatsoever, but now it's correct. --- invokeai/app/api_app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index f45541e63b..cc55329389 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -245,7 +245,7 @@ def invoke_api(): app=app, host=app_config.host, port=port, - loop=loop, + loop="asyncio", log_level=app_config.log_level, ) server = uvicorn.Server(config) From e4c45012f4fb528cc14eacccee6424d60a9085b8 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 13:42:34 +1100 Subject: [PATCH 109/202] feat(api): add gzip middleware On our local installs this will be a very minor change. For those running on remote servers, load times should be slightly improved. It's a small change but I think correct. --- invokeai/app/api_app.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index cc55329389..5889c7e228 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -17,6 +17,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c import uvicorn from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware + from fastapi.middleware.gzip import GZipMiddleware from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html from fastapi.openapi.utils import get_openapi from fastapi.staticfiles import StaticFiles @@ -72,6 +73,8 @@ app.add_middleware( allow_headers=app_config.allow_headers, ) +app.add_middleware(GZipMiddleware, minimum_size=1000) + # Add startup event to load dependencies @app.on_event("startup") From da403ba04c41f85e007da6b19a088a2627406536 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 13:43:01 +1100 Subject: [PATCH 110/202] fix(api): flesh out types for `api_app.py` --- invokeai/app/api_app.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index 5889c7e228..6bdf358147 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -1,3 +1,5 @@ +from typing import Any +from fastapi.responses import HTMLResponse from .services.config import InvokeAIAppConfig # parse_args() must be called before any other imports. 
if it is not called first, consumers of the config @@ -13,7 +15,6 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from inspect import signature from pathlib import Path - import torch import uvicorn from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware @@ -26,6 +27,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from pydantic.json_schema import models_json_schema from fastapi.responses import FileResponse + # for PyCharm: # noinspection PyUnresolvedReferences import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import) import invokeai.frontend.web as web_dir @@ -36,16 +38,15 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from .api.routers import app_info, board_images, boards, images, models, session_queue, sessions, utilities from .api.sockets import SocketIO from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField + from torch.backends.mps import is_available as is_mps_available - if torch.backends.mps.is_available(): - # noinspection PyUnresolvedReferences + if is_mps_available(): import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import) app_config = InvokeAIAppConfig.get_config() app_config.parse_args() logger = InvokeAILogger.get_logger(config=app_config) - # fix for windows mimetypes registry entries being borked # see https://github.com/invoke-ai/InvokeAI/discussions/3684#discussioncomment-6391352 mimetypes.add_type("application/javascript", ".js") @@ -78,13 +79,13 @@ app.add_middleware(GZipMiddleware, minimum_size=1000) # Add startup event to load dependencies @app.on_event("startup") -async def startup_event(): +async def startup_event() -> None: ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger) # Shut down threads @app.on_event("shutdown") -async def shutdown_event(): +async def shutdown_event() -> None: ApiDependencies.shutdown() @@ -108,7 +109,7 @@ app.include_router(session_queue.session_queue_router, prefix="/api") # Build a custom OpenAPI to include all outputs # TODO: can outputs be included on metadata of invocation schemas somehow? 
-def custom_openapi(): +def custom_openapi() -> dict[str, Any]: if app.openapi_schema: return app.openapi_schema openapi_schema = get_openapi( @@ -179,18 +180,18 @@ app.openapi = custom_openapi # type: ignore [method-assign] # this is a valid a @app.get("/docs", include_in_schema=False) -def overridden_swagger(): +def overridden_swagger() -> HTMLResponse: return get_swagger_ui_html( - openapi_url=app.openapi_url, + openapi_url=app.openapi_url, # type: ignore [arg-type] # this is always a string title=app.title, swagger_favicon_url="/static/docs/favicon.ico", ) @app.get("/redoc", include_in_schema=False) -def overridden_redoc(): +def overridden_redoc() -> HTMLResponse: return get_redoc_html( - openapi_url=app.openapi_url, + openapi_url=app.openapi_url, # type: ignore [arg-type] # this is always a string title=app.title, redoc_favicon_url="/static/docs/favicon.ico", ) @@ -212,8 +213,8 @@ app.mount("/assets", StaticFiles(directory=Path(web_root_path, "dist/assets/")), app.mount("/locales", StaticFiles(directory=Path(web_root_path, "dist/locales/")), name="locales") -def invoke_api(): - def find_port(port: int): +def invoke_api() -> None: + def find_port(port: int) -> int: """Find a port not in use starting at given port""" # Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon! # https://github.com/WaylonWalker From 96e80c71fb80fb5039a4f1563ee251af9c7e0354 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 21:27:03 +1100 Subject: [PATCH 111/202] chore: lint --- invokeai/app/api_app.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index 6bdf358147..866a6665c8 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -1,5 +1,7 @@ from typing import Any + from fastapi.responses import HTMLResponse + from .services.config import InvokeAIAppConfig # parse_args() must be called before any other imports. 
if it is not called first, consumers of the config @@ -21,11 +23,12 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from fastapi.middleware.gzip import GZipMiddleware from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html from fastapi.openapi.utils import get_openapi + from fastapi.responses import FileResponse from fastapi.staticfiles import StaticFiles from fastapi_events.handlers.local import local_handler from fastapi_events.middleware import EventHandlerASGIMiddleware from pydantic.json_schema import models_json_schema - from fastapi.responses import FileResponse + from torch.backends.mps import is_available as is_mps_available # for PyCharm: # noinspection PyUnresolvedReferences @@ -38,7 +41,6 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from .api.routers import app_info, board_images, boards, images, models, session_queue, sessions, utilities from .api.sockets import SocketIO from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField - from torch.backends.mps import is_available as is_mps_available if is_mps_available(): import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import) From 677918df61d9d6221547f11bcf8318af91b9d95c Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Thu, 19 Oct 2023 14:38:31 +1100 Subject: [PATCH 112/202] Docs Update (python version & T2I (#4867) * Updated Control Adapter Docs * fixed typo * Update docs for 3.10 * Update diffusers language Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com> * Diffusers format Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com> * Current T2I Adapter usage Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com> * Update test-invoke-pip.yml --------- Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com> --- README.md | 2 +- docs/features/CONTROLNET.md | 66 ++++++++++++++++++++----------- docs/help/gettingStartedWithAI.md | 4 +- docs/nodes/communityNodes.md | 2 +- 4 files changed, 47 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 15c5747ea8..932bd79ae9 100644 --- a/README.md +++ b/README.md @@ -123,7 +123,7 @@ and go to http://localhost:9090. ### Command-Line Installation (for developers and users familiar with Terminals) -You must have Python 3.9 through 3.11 installed on your machine. Earlier or +You must have Python 3.10 through 3.11 installed on your machine. Earlier or later versions are not supported. Node.js also needs to be installed along with yarn (can be installed with the command `npm install -g yarn` if needed) diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md index d287e6cb19..f55194207c 100644 --- a/docs/features/CONTROLNET.md +++ b/docs/features/CONTROLNET.md @@ -17,9 +17,6 @@ image generation, providing you with a way to direct the network towards generating images that better fit your desired style or outcome. - -#### How it works - ControlNet works by analyzing an input image, pre-processing that image to identify relevant information that can be interpreted by each specific ControlNet model, and then inserting that control information @@ -27,35 +24,21 @@ into the generation process. This can be used to adjust the style, composition, or other aspects of the image to better achieve a specific result. 
- -#### Models +#### Installation InvokeAI provides access to a series of ControlNet models that provide -different effects or styles in your generated images. Currently -InvokeAI only supports "diffuser" style ControlNet models. These are -folders that contain the files `config.json` and/or -`diffusion_pytorch_model.safetensors` and -`diffusion_pytorch_model.fp16.safetensors`. The name of the folder is -the name of the model. +different effects or styles in your generated images. -***InvokeAI does not currently support checkpoint-format -ControlNets. These come in the form of a single file with the -extension `.safetensors`.*** +To install ControlNet Models: -Diffuser-style ControlNet models are available at HuggingFace -(http://huggingface.co) and accessed via their repo IDs (identifiers -in the format "author/modelname"). The easiest way to install them is +1. The easiest way to install them is to use the InvokeAI model installer application. Use the `invoke.sh`/`invoke.bat` launcher to select item [4] and then navigate to the CONTROLNETS section. Select the models you wish to install and press "APPLY CHANGES". You may also enter additional HuggingFace -repo_ids in the "Additional models" textbox: +repo_ids in the "Additional models" textbox. +2. Using the "Add Model" function of the model manager, enter the HuggingFace Repo ID of the ControlNet. The ID is in the format "author/repoName" -![Model Installer - -Controlnetl](../assets/installing-models/model-installer-controlnet.png){:width="640px"} - -Command-line users can launch the model installer using the command -`invokeai-model-install`. _Be aware that some ControlNet models require additional code functionality in order to work properly, so just installing a @@ -63,6 +46,17 @@ third-party ControlNet model may not have the desired effect._ Please read and follow the documentation for installing a third party model not currently included among InvokeAI's default list. +Currently InvokeAI **only** supports 🤗 Diffusers-format ControlNet models. These are +folders that contain the files `config.json` and/or +`diffusion_pytorch_model.safetensors` and +`diffusion_pytorch_model.fp16.safetensors`. The name of the folder is +the name of the model. + +🤗 Diffusers-format ControlNet models are available at HuggingFace +(http://huggingface.co) and accessed via their repo IDs (identifiers +in the format "author/modelname"). + +#### ControlNet Models The models currently supported include: **Canny**: @@ -133,6 +127,30 @@ Start/End - 0 represents the start of the generation, 1 represents the end. The Additionally, each ControlNet section can be expanded in order to manipulate settings for the image pre-processor that adjusts your uploaded image before using it in when you Invoke. +## T2I-Adapter +[T2I-Adapter](https://github.com/TencentARC/T2I-Adapter) is a tool similar to ControlNet that allows for control over the generation process by providing control information during the generation process. T2I-Adapter models tend to be smaller and more efficient than ControlNets. + +##### Installation +To install T2I-Adapter Models: + +1. The easiest way to install models is +to use the InvokeAI model installer application. Use the +`invoke.sh`/`invoke.bat` launcher to select item [5] and then navigate +to the T2I-Adapters section. Select the models you wish to install and +press "APPLY CHANGES". You may also enter additional HuggingFace +repo_ids in the "Additional models" textbox. +2. 
Using the "Add Model" function of the model manager, enter the HuggingFace Repo ID of the T2I-Adapter. The ID is in the format "author/repoName" + +#### Usage +Each T2I Adapter has two settings that are applied. + +Weight - Strength of the model applied to the generation for the section, defined by start/end. + +Start/End - 0 represents the start of the generation, 1 represents the end. The Start/end setting controls what steps during the generation process have the ControlNet applied. + +Additionally, each section can be expanded with the "Show Advanced" button in order to manipulate settings for the image pre-processor that adjusts your uploaded image before using it in during the generation process. + +**Note:** T2I-Adapter models and ControlNet models cannot currently be used together. ## IP-Adapter @@ -140,7 +158,7 @@ Additionally, each ControlNet section can be expanded in order to manipulate set ![IP-Adapter + T2I](https://github.com/tencent-ailab/IP-Adapter/raw/main/assets/demo/ip_adpter_plus_multi.jpg) -![IP-Adapter + IMG2IMG](https://github.com/tencent-ailab/IP-Adapter/blob/main/assets/demo/image-to-image.jpg) +![IP-Adapter + IMG2IMG](https://raw.githubusercontent.com/tencent-ailab/IP-Adapter/main/assets/demo/image-to-image.jpg) #### Installation There are several ways to install IP-Adapter models with an existing InvokeAI installation: diff --git a/docs/help/gettingStartedWithAI.md b/docs/help/gettingStartedWithAI.md index 1f22e1edba..617bd60401 100644 --- a/docs/help/gettingStartedWithAI.md +++ b/docs/help/gettingStartedWithAI.md @@ -57,7 +57,9 @@ Prompts provide the models directions on what to generate. As a general rule of Models are the magic that power InvokeAI. These files represent the output of training a machine on understanding massive amounts of images - providing them with the capability to generate new images using just a text description of what you’d like to see. (Like Stable Diffusion!) -Invoke offers a simple way to download several different models upon installation, but many more can be discovered online, including at ****. Each model can produce a unique style of output, based on the images it was trained on - Try out different models to see which best fits your creative vision! +Invoke offers a simple way to download several different models upon installation, but many more can be discovered online, including at https://models.invoke.ai + +Each model can produce a unique style of output, based on the images it was trained on - Try out different models to see which best fits your creative vision! - *Models that contain “inpainting” in the name are designed for use with the inpainting feature of the Unified Canvas* diff --git a/docs/nodes/communityNodes.md b/docs/nodes/communityNodes.md index d5a5d5654f..cec5d18df6 100644 --- a/docs/nodes/communityNodes.md +++ b/docs/nodes/communityNodes.md @@ -181,7 +181,7 @@ This includes 15 Nodes: **Output Example:** - + [Full mp4 of Example Output test.mp4](https://github.com/helix4u/load_video_frame/blob/main/test.mp4) -------------------------------- From 9195c8c95732eabc3b285bc9b3d2a021ad037223 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 18:28:36 +1100 Subject: [PATCH 113/202] feat: dedicated route to get intermediates count This fixes a weird issue where the list images method needed to handle `None` for its `limit` and `offset` arguments, in order to get a count of all intermediates. 
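
For reference, a minimal client-side sketch (not part of the patch) of how the two intermediates routes added here might be exercised; the base URL and the use of `requests` are assumptions for illustration, while the paths and bare-integer responses match the router changes below:

```python
# Sketch only: calls the new intermediates endpoints introduced in this patch.
# Assumes a local InvokeAI instance at the default address and the `requests`
# package; neither assumption is part of the patch itself.
import requests

BASE_URL = "http://localhost:9090/api/v1/images"  # assumed default local install


def get_intermediates_count() -> int:
    # GET /images/intermediates returns the count as a bare JSON number
    resp = requests.get(f"{BASE_URL}/intermediates", timeout=10)
    resp.raise_for_status()
    return resp.json()


def clear_intermediates() -> int:
    # DELETE /images/intermediates deletes all intermediates and returns how many were removed
    resp = requests.delete(f"{BASE_URL}/intermediates", timeout=60)
    resp.raise_for_status()
    return resp.json()


if __name__ == "__main__":
    print(f"{get_intermediates_count()} intermediate images before clearing")
```
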
--- invokeai/app/api/routers/images.py | 13 +++- .../image_records/image_records_base.py | 5 ++ .../image_records/image_records_sqlite.py | 25 +++++-- invokeai/app/services/images/images_base.py | 5 ++ .../app/services/images/images_default.py | 7 ++ .../SettingsClearIntermediates.tsx | 15 ++--- .../web/src/services/api/endpoints/images.ts | 16 ++--- .../frontend/web/src/services/api/schema.d.ts | 65 +++++++++++++------ 8 files changed, 106 insertions(+), 45 deletions(-) diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 43a72943ee..84d8e8eea4 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -87,7 +87,7 @@ async def delete_image( pass -@images_router.post("/clear-intermediates", operation_id="clear_intermediates") +@images_router.delete("/intermediates", operation_id="clear_intermediates") async def clear_intermediates() -> int: """Clears all intermediates""" @@ -99,6 +99,17 @@ async def clear_intermediates() -> int: pass +@images_router.get("/intermediates", operation_id="get_intermediates_count") +async def get_intermediates_count() -> int: + """Gets the count of intermediate images""" + + try: + return ApiDependencies.invoker.services.images.get_intermediates_count() + except Exception: + raise HTTPException(status_code=500, detail="Failed to get intermediates") + pass + + @images_router.patch( "/i/{image_name}", operation_id="update_image", diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py index 107ff85f9b..7e74b06e9e 100644 --- a/invokeai/app/services/image_records/image_records_base.py +++ b/invokeai/app/services/image_records/image_records_base.py @@ -61,6 +61,11 @@ class ImageRecordStorageBase(ABC): """Deletes all intermediate image records, returning a list of deleted image names.""" pass + @abstractmethod + def get_intermediates_count(self) -> int: + """Gets a count of all intermediate images.""" + pass + @abstractmethod def save( self, diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py index 9793236d9c..33bf373a7d 100644 --- a/invokeai/app/services/image_records/image_records_sqlite.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -297,11 +297,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): images_query += query_conditions + query_pagination + ";" # Add all the parameters images_params = query_params.copy() - - if limit is not None: - images_params.append(limit) - if offset is not None: - images_params.append(offset) + # Add the pagination parameters + images_params.extend([limit, offset]) # Build the list of images, deserializing each row self._cursor.execute(images_query, images_params) @@ -357,6 +354,24 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): finally: self._lock.release() + def get_intermediates_count(self) -> int: + try: + self._lock.acquire() + self._cursor.execute( + """--sql + SELECT COUNT(*) FROM images + WHERE is_intermediate = TRUE; + """ + ) + count = cast(int, self._cursor.fetchone()[0]) + self._conn.commit() + return count + except sqlite3.Error as e: + self._conn.rollback() + raise ImageRecordDeleteException from e + finally: + self._lock.release() + def delete_intermediates(self) -> list[str]: try: self._lock.acquire() diff --git a/invokeai/app/services/images/images_base.py b/invokeai/app/services/images/images_base.py index a611e9485d..ac7a4a2152 100644 --- 
a/invokeai/app/services/images/images_base.py +++ b/invokeai/app/services/images/images_base.py @@ -123,6 +123,11 @@ class ImageServiceABC(ABC): """Deletes all intermediate images.""" pass + @abstractmethod + def get_intermediates_count(self) -> int: + """Gets the number of intermediate images.""" + pass + @abstractmethod def delete_images_on_board(self, board_id: str): """Deletes all images on a board.""" diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py index d4e473b8e4..3c78c4f29a 100644 --- a/invokeai/app/services/images/images_default.py +++ b/invokeai/app/services/images/images_default.py @@ -284,3 +284,10 @@ class ImageService(ImageServiceABC): except Exception as e: self.__invoker.services.logger.error("Problem deleting image records and files") raise e + + def get_intermediates_count(self) -> int: + try: + return self.__invoker.services.image_records.get_intermediates_count() + except Exception as e: + self.__invoker.services.logger.error("Problem getting intermediates count") + raise e diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx index fbe5692431..c419fa716f 100644 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx +++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsClearIntermediates.tsx @@ -1,7 +1,7 @@ import { Heading, Text } from '@chakra-ui/react'; import { useAppDispatch } from 'app/store/storeHooks'; import { controlAdaptersReset } from 'features/controlAdapters/store/controlAdaptersSlice'; -import { useCallback, useEffect } from 'react'; +import { useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { useGetQueueStatusQuery } from 'services/api/endpoints/queue'; import IAIButton from '../../../../common/components/IAIButton'; @@ -17,8 +17,12 @@ export default function SettingsClearIntermediates() { const { t } = useTranslation(); const dispatch = useAppDispatch(); - const { data: intermediatesCount, refetch: updateIntermediatesCount } = - useGetIntermediatesCountQuery(); + const { data: intermediatesCount } = useGetIntermediatesCountQuery( + undefined, + { + refetchOnMountOrArgChange: true, + } + ); const [clearIntermediates, { isLoading: isLoadingClearIntermediates }] = useClearIntermediatesMutation(); @@ -55,11 +59,6 @@ export default function SettingsClearIntermediates() { }); }, [t, clearIntermediates, dispatch, hasPendingItems]); - useEffect(() => { - // update the count on mount - updateIntermediatesCount(); - }, [updateIntermediatesCount]); - return ( {t('settings.clearIntermediates')} diff --git a/invokeai/frontend/web/src/services/api/endpoints/images.ts b/invokeai/frontend/web/src/services/api/endpoints/images.ts index 99a5fc5f50..c8d42d17f6 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/images.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/images.ts @@ -100,14 +100,12 @@ export const imagesApi = api.injectEndpoints({ keepUnusedDataFor: 86400, }), getIntermediatesCount: build.query({ - query: () => ({ url: getListImagesUrl({ is_intermediate: true }) }), + query: () => ({ url: 'images/intermediates' }), providesTags: ['IntermediatesCount'], - transformResponse: (response: OffsetPaginatedResults_ImageDTO_) => { - // TODO: This is storing a primitive value in the cache. 
`immer` cannot track state changes, so - // attempts to use manual cache updates on this value will fail. This should be changed into an - // object. - return response.total; - }, + }), + clearIntermediates: build.mutation({ + query: () => ({ url: `images/intermediates`, method: 'DELETE' }), + invalidatesTags: ['IntermediatesCount'], }), getImageDTO: build.query({ query: (image_name) => ({ url: `images/i/${image_name}` }), @@ -185,10 +183,6 @@ export const imagesApi = api.injectEndpoints({ ], keepUnusedDataFor: 86400, // 24 hours }), - clearIntermediates: build.mutation({ - query: () => ({ url: `images/clear-intermediates`, method: 'POST' }), - invalidatesTags: ['IntermediatesCount'], - }), deleteImage: build.mutation({ query: ({ image_name }) => ({ url: `images/i/${image_name}`, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 6bc54f0e35..62f60c1dbc 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -109,12 +109,17 @@ export type paths = { */ patch: operations["update_image"]; }; - "/api/v1/images/clear-intermediates": { + "/api/v1/images/intermediates": { + /** + * Get Intermediates Count + * @description Gets the count of intermediate images + */ + get: operations["get_intermediates_count"]; /** * Clear Intermediates * @description Clears all intermediates */ - post: operations["clear_intermediates"]; + delete: operations["clear_intermediates"]; }; "/api/v1/images/i/{image_name}/metadata": { /** @@ -3002,7 +3007,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["IntegerInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ControlNetInvocation"] | 
components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ImageScaleInvocation"] | 
components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["InfillColorInvocation"]; + [key: string]: components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LatentsInvocation"] | 
components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlendLatentsInvocation"]; }; /** * Edges @@ -3039,7 +3044,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["String2Output"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | 
components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["StringPosNegOutput"]; + [key: string]: components["schemas"]["IntegerOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["String2Output"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ClipSkipInvocationOutput"]; }; /** * Errors @@ -3169,8 +3174,11 @@ export type components = { }; /** IPAdapterField */ IPAdapterField: { - /** @description The IP-Adapter image prompt. */ - image: components["schemas"]["ImageField"]; + /** + * Image + * @description The IP-Adapter image prompt(s). 
+ */ + image: components["schemas"]["ImageField"] | components["schemas"]["ImageField"][]; /** @description The IP-Adapter model to use. */ ip_adapter_model: components["schemas"]["IPAdapterModelField"]; /** @description The name of the CLIP image encoder model. */ @@ -3221,8 +3229,11 @@ export type components = { * @default true */ use_cache?: boolean | null; - /** @description The IP-Adapter image prompt. */ - image?: components["schemas"]["ImageField"]; + /** + * Image + * @description The IP-Adapter image prompt(s). + */ + image?: components["schemas"]["ImageField"] | components["schemas"]["ImageField"][]; /** * IP-Adapter Model * @description The IP-Adapter model. @@ -9105,12 +9116,6 @@ export type components = { /** Ui Order */ ui_order: number | null; }; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusion1ModelFormat * @description An enumeration. @@ -9118,17 +9123,17 @@ export type components = { */ StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** - * StableDiffusionOnnxModelFormat + * T2IAdapterModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; + T2IAdapterModelFormat: "diffusers"; /** - * StableDiffusion2ModelFormat + * ControlNetModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + ControlNetModelFormat: "checkpoint" | "diffusers"; /** * IPAdapterModelFormat * @description An enumeration. @@ -9142,11 +9147,17 @@ export type components = { */ CLIPVisionModelFormat: "diffusers"; /** - * T2IAdapterModelFormat + * StableDiffusionOnnxModelFormat * @description An enumeration. * @enum {string} */ - T2IAdapterModelFormat: "diffusers"; + StableDiffusionOnnxModelFormat: "olive" | "onnx"; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionXLModelFormat * @description An enumeration. @@ -9670,6 +9681,20 @@ export type operations = { }; }; }; + /** + * Get Intermediates Count + * @description Gets the count of intermediate images + */ + get_intermediates_count: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": number; + }; + }; + }; + }; /** * Clear Intermediates * @description Clears all intermediates From f102e380768031d288c51ec635813cefc8c143db Mon Sep 17 00:00:00 2001 From: Eugene Brodsky Date: Tue, 10 Oct 2023 23:03:25 -0400 Subject: [PATCH 114/202] feat(docker): update docker image, etc. 
to python3.11+ubuntu23.04 --- .gitignore | 12 +----------- docker/Dockerfile | 17 ++++++----------- docker/docker-entrypoint.sh | 2 +- 3 files changed, 8 insertions(+), 23 deletions(-) diff --git a/.gitignore b/.gitignore index 44a0864b5b..2b99d137b1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,5 @@ .idea/ -# ignore the Anaconda/Miniconda installer used while building Docker image -anaconda.sh - # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -136,12 +133,10 @@ celerybeat.pid # Environments .env -.venv +.venv* env/ venv/ ENV/ -env.bak/ -venv.bak/ # Spyder project settings .spyderproject @@ -186,11 +181,6 @@ cython_debug/ .scratch/ .vscode/ -# ignore environment.yml and requirements.txt -# these are links to the real files in environments-and-requirements -environment.yml -requirements.txt - # source installer files installer/*zip installer/install.bat diff --git a/docker/Dockerfile b/docker/Dockerfile index e158c681a4..73852ec66e 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -2,7 +2,7 @@ ## Builder stage -FROM library/ubuntu:22.04 AS builder +FROM library/ubuntu:23.04 AS builder ARG DEBIAN_FRONTEND=noninteractive RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache @@ -10,7 +10,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt update && apt-get install -y \ git \ - python3.10-venv \ + python3-venv \ python3-pip \ build-essential @@ -37,7 +37,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \ elif [ "$GPU_DRIVER" = "rocm" ]; then \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \ else \ - extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu118"; \ + extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \ fi &&\ pip install $extra_index_url_arg \ torch==$TORCH_VERSION \ @@ -70,7 +70,7 @@ RUN --mount=type=cache,target=/usr/lib/node_modules \ #### Runtime stage --------------------------------------- -FROM library/ubuntu:22.04 AS runtime +FROM library/ubuntu:23.04 AS runtime ARG DEBIAN_FRONTEND=noninteractive ENV PYTHONUNBUFFERED=1 @@ -85,6 +85,7 @@ RUN apt update && apt install -y --no-install-recommends \ iotop \ bzip2 \ gosu \ + magic-wormhole \ libglib2.0-0 \ libgl1-mesa-glx \ python3-venv \ @@ -94,10 +95,6 @@ RUN apt update && apt install -y --no-install-recommends \ libstdc++-10-dev &&\ apt-get clean && apt-get autoclean -# globally add magic-wormhole -# for ease of transferring data to and from the container -# when running in sandboxed cloud environments; e.g. Runpod etc. 
-RUN pip install magic-wormhole ENV INVOKEAI_SRC=/opt/invokeai ENV VIRTUAL_ENV=/opt/venv/invokeai @@ -120,9 +117,7 @@ WORKDIR ${INVOKEAI_SRC} RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc RUN python3 -c "from patchmatch import patch_match" -# Create unprivileged user and make the local dir -RUN useradd --create-home --shell /bin/bash -u 1000 --comment "container local user" invoke -RUN mkdir -p ${INVOKEAI_ROOT} && chown -R invoke:invoke ${INVOKEAI_ROOT} +RUN mkdir -p ${INVOKEAI_ROOT} && chown -R 1000:1000 ${INVOKEAI_ROOT} COPY docker/docker-entrypoint.sh ./ ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"] diff --git a/docker/docker-entrypoint.sh b/docker/docker-entrypoint.sh index 6d776feb0e..7a9e6921ce 100755 --- a/docker/docker-entrypoint.sh +++ b/docker/docker-entrypoint.sh @@ -19,7 +19,7 @@ set -e -o pipefail # Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS. USER_ID=${CONTAINER_UID:-1000} -USER=invoke +USER=ubuntu usermod -u ${USER_ID} ${USER} 1>/dev/null configure() { From 575c7bbfd8d3a36ca8043b033b13ceb810082e06 Mon Sep 17 00:00:00 2001 From: Eugene Brodsky Date: Thu, 19 Oct 2023 11:25:47 -0400 Subject: [PATCH 115/202] feat(docker): update docker documentation --- docker/.env.sample | 8 +- docker/README.md | 19 +- docker/build.sh | 4 +- docker/run.sh | 7 +- docs/installation/040_INSTALL_DOCKER.md | 258 ++++-------------------- 5 files changed, 62 insertions(+), 234 deletions(-) diff --git a/docker/.env.sample b/docker/.env.sample index 7e414ecd65..c0a56402fc 100644 --- a/docker/.env.sample +++ b/docker/.env.sample @@ -1,13 +1,15 @@ ## Make a copy of this file named `.env` and fill in the values below. -## Any environment variables supported by InvokeAI can be specified here. +## Any environment variables supported by InvokeAI can be specified here, +## in addition to the examples below. # INVOKEAI_ROOT is the path to a path on the local filesystem where InvokeAI will store data. # Outputs will also be stored here by default. # This **must** be an absolute path. INVOKEAI_ROOT= -HUGGINGFACE_TOKEN= +# Get this value from your HuggingFace account settings page. +# HUGGING_FACE_HUB_TOKEN= -## optional variables specific to the docker setup +## optional variables specific to the docker setup. # GPU_DRIVER=cuda # CONTAINER_UID=1000 \ No newline at end of file diff --git a/docker/README.md b/docker/README.md index 91f7fb8c51..4291ece25f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -5,7 +5,7 @@ All commands are to be run from the `docker` directory: `cd docker` #### Linux 1. Ensure builkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`) -2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-compose-on-ubuntu-22-04). +2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://docs.docker.com/compose/install/linux/#install-using-the-repository). - The deprecated `docker-compose` (hyphenated) CLI continues to work for now. 3. Ensure docker daemon is able to access the GPU. - You may need to install [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) @@ -20,7 +20,6 @@ This is done via Docker Desktop preferences ## Quickstart - 1. Make a copy of `env.sample` and name it `.env` (`cp env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. 
Set `INVOKEAI_ROOT` to an absolute path to: a. the desired location of the InvokeAI runtime directory, or b. an existing, v3.0.0 compatible runtime directory. @@ -42,20 +41,22 @@ The Docker daemon on the system must be already set up to use the GPU. In case o Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `docker compose up`, your custom values will be used. -You can also set these values in `docker compose.yml` directly, but `.env` will help avoid conflicts when code is updated. +You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated. -Example (most values are optional): +Example (values are optional, but setting `INVOKEAI_ROOT` is highly recommended): -``` +```bash INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai HUGGINGFACE_TOKEN=the_actual_token CONTAINER_UID=1000 GPU_DRIVER=cuda ``` +Any environment variables supported by InvokeAI can be set here - please see the [Configuration docs](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/) for further detail. + ## Even Moar Customizing! -See the `docker compose.yaml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples below. +See the `docker-compose.yml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples below. ### Reconfigure the runtime directory @@ -63,7 +64,7 @@ Can be used to download additional models from the supported model list In conjunction with `INVOKEAI_ROOT` can be also used to initialize a runtime directory -``` +```yaml command: - invokeai-configure - --yes @@ -71,7 +72,7 @@ command: Or install models: -``` +```yaml command: - invokeai-model-install -``` \ No newline at end of file +``` diff --git a/docker/build.sh b/docker/build.sh index db25439840..3b3875c15c 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -5,7 +5,7 @@ build_args="" [[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=[^$]/ {print "--build-arg " $0 " "}' .env) -echo "docker-compose build args:" +echo "docker compose build args:" echo $build_args -docker-compose build $build_args +docker compose build $build_args diff --git a/docker/run.sh b/docker/run.sh index bb764ec022..0306c4ddab 100755 --- a/docker/run.sh +++ b/docker/run.sh @@ -1,8 +1,11 @@ #!/usr/bin/env bash set -e +# This script is provided for backwards compatibility with the old docker setup. +# it doesn't do much aside from wrapping the usual docker compose CLI. + SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}") cd "$SCRIPTDIR" || exit 1 -docker-compose up --build -d -docker-compose logs -f +docker compose up --build -d +docker compose logs -f diff --git a/docs/installation/040_INSTALL_DOCKER.md b/docs/installation/040_INSTALL_DOCKER.md index fd75067cf1..a672f71ba8 100644 --- a/docs/installation/040_INSTALL_DOCKER.md +++ b/docs/installation/040_INSTALL_DOCKER.md @@ -4,30 +4,31 @@ title: Installing with Docker # :fontawesome-brands-docker: Docker -!!! warning "For most users" +!!! warning "macOS and AMD GPU Users" - We highly recommend to Install InvokeAI locally using [these instructions](INSTALLATION.md) + We highly recommend to Install InvokeAI locally using [these instructions](INSTALLATION.md), + because Docker containers can not access the GPU on macOS. -!!! tip "For developers" +!!! 
warning "AMD GPU Users" - For container-related development tasks or for enabling easy - deployment to other environments (on-premises or cloud), follow these - instructions. + Container support for AMD GPUs has been reported to work by the community, but has not received + extensive testing. Please make sure to set the `GPU_DRIVER=rocm` environment variable (see below), and + use the `build.sh` script to build the image for this to take effect at build time. - For general use, install locally to leverage your machine's GPU. +!!! tip "Linux and Windows Users" + + For optimal performance, configure your Docker daemon to access your machine's GPU. + Docker Desktop on Windows [includes GPU support](https://www.docker.com/blog/wsl-2-gpu-support-for-docker-desktop-on-nvidia-gpus/). + Linux users should install and configure the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) ## Why containers? -They provide a flexible, reliable way to build and deploy InvokeAI. You'll also -use a Docker volume to store the largest model files and image outputs as a -first step in decoupling storage and compute. Future enhancements can do this -for other assets. See [Processes](https://12factor.net/processes) under the -Twelve-Factor App methodology for details on why running applications in such a -stateless fashion is important. +They provide a flexible, reliable way to build and deploy InvokeAI. +See [Processes](https://12factor.net/processes) under the Twelve-Factor App +methodology for details on why running applications in such a stateless fashion is important. -You can specify the target platform when building the image and running the -container. You'll also need to specify the InvokeAI requirements file that -matches the container's OS and the architecture it will run on. +The container is configured for CUDA by default, but can be built to support AMD GPUs +by setting the `GPU_DRIVER=rocm` environment variable at Docker image build time. Developers on Apple silicon (M1/M2): You [can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224) @@ -36,6 +37,16 @@ development purposes it's fine. Once you're done with development tasks on your laptop you can build for the target platform and architecture and deploy to another environment with NVIDIA GPUs on-premises or in the cloud. +## TL;DR + +This assumes properly configured Docker on Linux or Windows/WSL2. Read on for detailed customization options. + + ```bash + # docker compose commands should be run from the `docker` directory + cd docker + docker compose up + ``` + ## Installation in a Linux container (desktop) ### Prerequisites @@ -58,222 +69,33 @@ a token and copy it, since you will need in for the next step. ### Setup -Set the fork you want to use and other variables. +Set up your environmnent variables. In the `docker` directory, make a copy of `env.sample` and name it `.env`. Make changes as necessary. -!!! tip +Any environment variables supported by InvokeAI can be set here - please see the [CONFIGURATION](../features/CONFIGURATION.md) for further detail. - I preffer to save my env vars - in the repository root in a `.env` (or `.envrc`) file to automatically re-apply - them when I come back. - -The build- and run- scripts contain default values for almost everything, -besides the [Hugging Face Token](https://huggingface.co/settings/tokens) you -created in the last step. 
- -Some Suggestions of variables you may want to change besides the Token: +At a minimum, you might want to set the `INVOKEAI_ROOT` environment variable +to point to the location where you wish to store your InvokeAI models, configuration, and outputs.
| Environment-Variable | Default value | Description | | ----------------------------------------------------- | ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `HUGGING_FACE_HUB_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models | -| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will used as the container repository/image name | -| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored | -| `ARCH` | arch of the build machine | Can be changed if you want to build the image for another arch | -| `CONTAINER_REGISTRY` | ghcr.io | Name of the Container Registry to use for the full tag | -| `CONTAINER_REPOSITORY` | `$(whoami)/${REPOSITORY_NAME}` | Name of the Container Repository | -| `CONTAINER_FLAVOR` | `cuda` | The flavor of the image to built, available options are `cuda`, `rocm` and `cpu`. If you choose `rocm` or `cpu`, the extra-index-url will be selected automatically, unless you set one yourself. | -| `CONTAINER_TAG` | `${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}` | The Container Repository / Tag which will be used | -| `INVOKE_DOCKERFILE` | `Dockerfile` | The Dockerfile which should be built, handy for development | -| `PIP_EXTRA_INDEX_URL` | | If you want to use a custom pip-extra-index-url | +| `INVOKEAI_ROOT` | `~/invokeai` | **Required** - the location of your InvokeAI root directory. It will be created if it does not exist. +| `HUGGING_FACE_HUB_TOKEN` | | InvokeAI will work without it, but some of the integrations with HuggingFace (like downloading from models from private repositories) may not work| +| `GPU_DRIVER` | `cuda` | Optionally change this to `rocm` to build the image for AMD GPUs. NOTE: Use the `build.sh` script to build the image for this to take effect.
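As a concrete illustration of the variables in the table above, a minimal `.env` might look like the sketch below. The values shown are placeholders, not defaults shipped with InvokeAI; substitute your own path and token.

```bash
# Required: absolute path to the InvokeAI runtime directory (created if it does not exist).
INVOKEAI_ROOT=/home/me/invokeai

# Optional: only needed for gated or private HuggingFace models.
HUGGING_FACE_HUB_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx

# Optional: set to "rocm" for AMD GPUs (rebuild the image with build.sh for this to take effect).
GPU_DRIVER=cuda
```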
#### Build the Image -I provided a build script, which is located next to the Dockerfile in -`docker/build.sh`. It can be executed from repository root like this: +Use the standard `docker compose build` command from within the `docker` directory. -```bash -./docker/build.sh -``` - -The build Script not only builds the container, but also creates the docker -volume if not existing yet. +If using an AMD GPU: + a: set the `GPU_DRIVER=rocm` environment variable in `docker-compose.yml` and continue using `docker compose build` as usual, or + b: set `GPU_DRIVER=rocm` in the `.env` file and use the `build.sh` script, provided for convenience #### Run the Container -After the build process is done, you can run the container via the provided -`docker/run.sh` script +Use the standard `docker compose up` command, and generally the `docker compose` [CLI](https://docs.docker.com/compose/reference/) as usual. -```bash -./docker/run.sh -``` - -When used without arguments, the container will start the webserver and provide -you the link to open it. But if you want to use some other parameters you can -also do so. - -!!! example "run script example" - - ```bash - ./docker/run.sh "banana sushi" -Ak_lms -S42 -s10 - ``` - - This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps. - - Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments) - ---- - -## Running the container on your GPU - -If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running -the container with an extra environment variable to enable GPU usage and have -the process run much faster: - -```bash -GPU_FLAGS=all ./docker/run.sh -``` - -This passes the `--gpus all` to docker and uses the GPU. - -If you don't have a GPU (or your host is not yet setup to use it) you will see a -message like this: - -`docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].` - -You can use the full set of GPU combinations documented here: - -https://docs.docker.com/config/containers/resource_constraints/#gpu - -For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to -choose a specific device identified by a UUID. - ---- - -!!! warning "Deprecated" - - From here on you will find the the previous Docker-Docs, which will still - provide some usefull informations. - -## Usage (time to have fun) - -### Startup - -If you're on a **Linux container** the `invoke` script is **automatically -started** and the output dir set to the Docker volume you created earlier. - -If you're **directly on macOS follow these startup instructions**. With the -Conda environment activated (`conda activate ldm`), run the interactive -interface that combines the functionality of the original scripts `txt2img` and -`img2img`: Use the more accurate but VRAM-intensive full precision math because -half-precision requires autocast and won't work. By default the images are saved -in `outputs/img-samples/`. - -```Shell -python3 scripts/invoke.py --full_precision -``` - -You'll get the script's prompt. You can see available options or quit. - -```Shell -invoke> -h -invoke> q -``` - -### Text to Image - -For quick (but bad) image results test with 5 steps (default 50) and 1 sample -image. This will let you know that everything is set up correctly. Then increase -steps to 100 or more for good (but slower) results. The prompt can be in quotes -or not. 
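To make the build-and-run flow concrete, here is a minimal sketch, assuming a populated `.env` already sits in the `docker` directory:

```bash
cd docker
docker compose build   # builds the default (CUDA) image; variables are read from .env
docker compose up      # starts InvokeAI; the web UI is served at http://localhost:9090
# For AMD GPUs: set GPU_DRIVER=rocm in .env, then build via the wrapper script instead:
# ./build.sh
```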
- -```Shell -invoke> The hulk fighting with sheldon cooper -s5 -n1 -invoke> "woman closeup highly detailed" -s 150 -# Reuse previous seed and apply face restoration -invoke> "woman closeup highly detailed" --steps 150 --seed -1 -G 0.75 -``` - -You'll need to experiment to see if face restoration is making it better or -worse for your specific prompt. - -If you're on a container the output is set to the Docker volume. You can copy it -wherever you want. You can download it from the Docker Desktop app, Volumes, -my-vol, data. Or you can copy it from your Mac terminal. Keep in mind -`docker cp` can't expand `*.png` so you'll need to specify the image file name. - -On your host Mac (you can use the name of any container that mounted the -volume): - -```Shell -docker cp dummy:/data/000001.928403745.png /Users//Pictures -``` - -### Image to Image - -You can also do text-guided image-to-image translation. For example, turning a -sketch into a detailed drawing. - -`strength` is a value between 0.0 and 1.0 that controls the amount of noise that -is added to the input image. Values that approach 1.0 allow for lots of -variations but will also produce images that are not semantically consistent -with the input. 0.0 preserves image exactly, 1.0 replaces it completely. - -Make sure your input image size dimensions are multiples of 64 e.g. 512x512. -Otherwise you'll get `Error: product of dimension sizes > 2**31'`. If you still -get the error -[try a different size](https://support.apple.com/guide/preview/resize-rotate-or-flip-an-image-prvw2015/mac#:~:text=image's%20file%20size-,In%20the%20Preview%20app%20on%20your%20Mac%2C%20open%20the%20file,is%20shown%20at%20the%20bottom.) -like 512x256. - -If you're on a Docker container, copy your input image into the Docker volume - -```Shell -docker cp /Users//Pictures/sketch-mountains-input.jpg dummy:/data/ -``` - -Try it out generating an image (or more). The `invoke` script needs absolute -paths to find the image so don't use `~`. - -If you're on your Mac - -```Shell -invoke> "A fantasy landscape, trending on artstation" -I /Users//Pictures/sketch-mountains-input.jpg --strength 0.75 --steps 100 -n4 -``` - -If you're on a Linux container on your Mac - -```Shell -invoke> "A fantasy landscape, trending on artstation" -I /data/sketch-mountains-input.jpg --strength 0.75 --steps 50 -n1 -``` - -### Web Interface - -You can use the `invoke` script with a graphical web interface. Start the web -server with: - -```Shell -python3 scripts/invoke.py --full_precision --web -``` - -If it's running on your Mac point your Mac web browser to - - -Press Control-C at the command line to stop the web server. - -### Notes - -Some text you can add at the end of the prompt to make it very pretty: - -```Shell -cinematic photo, highly detailed, cinematic lighting, ultra-detailed, ultrarealistic, photorealism, Octane Rendering, cyberpunk lights, Hyper Detail, 8K, HD, Unreal Engine, V-Ray, full hd, cyberpunk, abstract, 3d octane render + 4k UHD + immense detail + dramatic lighting + well lit + black, purple, blue, pink, cerulean, teal, metallic colours, + fine details, ultra photoreal, photographic, concept art, cinematic composition, rule of thirds, mysterious, eerie, photorealism, breathtaking detailed, painting art deco pattern, by hsiao, ron cheng, john james audubon, bizarre compositions, exquisite detail, extremely moody lighting, painted by greg rutkowski makoto shinkai takashi takeuchi studio ghibli, akihiko yoshida -``` - -The original scripts should work as well. 
- -```Shell -python3 scripts/orig_scripts/txt2img.py --help -python3 scripts/orig_scripts/txt2img.py --ddim_steps 100 --n_iter 1 --n_samples 1 --plms --prompt "new born baby kitten. Hyper Detail, Octane Rendering, Unreal Engine, V-Ray" -python3 scripts/orig_scripts/txt2img.py --ddim_steps 5 --n_iter 1 --n_samples 1 --plms --prompt "ocean" # or --klms -``` +Once the container starts up (and configures the InvokeAI root directory if this is a new installation), you can access InvokeAI at [http://localhost:9090](http://localhost:9090) From c2da74c5871e5992eed0fedc088d4b46e5731464 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 17:02:15 +1100 Subject: [PATCH 116/202] feat: add workflows table & service --- invokeai/app/api/dependencies.py | 3 + invokeai/app/api/routers/workflows.py | 20 +++ invokeai/app/api_app.py | 20 ++- invokeai/app/invocations/baseinvocation.py | 32 +--- invokeai/app/services/invocation_services.py | 4 + .../app/services/workflow_records/__init__.py | 0 .../workflow_records/workflow_records_base.py | 17 ++ .../workflow_records_common.py | 22 +++ .../workflow_records_sqlite.py | 148 ++++++++++++++++++ tests/nodes/test_graph_execution_state.py | 1 + tests/nodes/test_invoker.py | 1 + 11 files changed, 235 insertions(+), 33 deletions(-) create mode 100644 invokeai/app/api/routers/workflows.py create mode 100644 invokeai/app/services/workflow_records/__init__.py create mode 100644 invokeai/app/services/workflow_records/workflow_records_base.py create mode 100644 invokeai/app/services/workflow_records/workflow_records_common.py create mode 100644 invokeai/app/services/workflow_records/workflow_records_sqlite.py diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index c9a2f0a843..ae4882c0d0 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -30,6 +30,7 @@ from ..services.shared.default_graphs import create_system_graphs from ..services.shared.graph import GraphExecutionState, LibraryGraph from ..services.shared.sqlite import SqliteDatabase from ..services.urls.urls_default import LocalUrlService +from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage from .events import FastAPIEventService @@ -90,6 +91,7 @@ class ApiDependencies: session_processor = DefaultSessionProcessor() session_queue = SqliteSessionQueue(db=db) urls = LocalUrlService() + workflow_records = SqliteWorkflowRecordsStorage(db=db) services = InvocationServices( board_image_records=board_image_records, @@ -114,6 +116,7 @@ class ApiDependencies: session_processor=session_processor, session_queue=session_queue, urls=urls, + workflow_records=workflow_records, ) create_system_graphs(services.graph_library) diff --git a/invokeai/app/api/routers/workflows.py b/invokeai/app/api/routers/workflows.py new file mode 100644 index 0000000000..814123fc81 --- /dev/null +++ b/invokeai/app/api/routers/workflows.py @@ -0,0 +1,20 @@ +from fastapi import APIRouter, Body, Path + +from invokeai.app.api.dependencies import ApiDependencies +from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField + +workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"]) + + +@workflows_router.get( + "/i/{workflow_id}", + operation_id="get_workflow", + responses={ + 200: {"model": WorkflowField}, + }, +) +async def get_workflow( + workflow_id: str = Path(description="The workflow to get"), +) -> WorkflowField: + """Gets a 
workflow""" + return ApiDependencies.invoker.services.workflow_records.get(workflow_id) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index 866a6665c8..e04cf564ab 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -38,7 +38,17 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from ..backend.util.logging import InvokeAILogger from .api.dependencies import ApiDependencies - from .api.routers import app_info, board_images, boards, images, models, session_queue, sessions, utilities + from .api.routers import ( + app_info, + board_images, + boards, + images, + models, + sessions, + session_queue, + utilities, + workflows, + ) from .api.sockets import SocketIO from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField @@ -95,18 +105,13 @@ async def shutdown_event() -> None: app.include_router(sessions.session_router, prefix="/api") app.include_router(utilities.utilities_router, prefix="/api") - app.include_router(models.models_router, prefix="/api") - app.include_router(images.images_router, prefix="/api") - app.include_router(boards.boards_router, prefix="/api") - app.include_router(board_images.board_images_router, prefix="/api") - app.include_router(app_info.app_router, prefix="/api") - app.include_router(session_queue.session_queue_router, prefix="/api") +app.include_router(workflows.workflows_router, prefix="/api") # Build a custom OpenAPI to include all outputs @@ -166,7 +171,6 @@ def custom_openapi() -> dict[str, Any]: # print(f"Config with name {name} already defined") continue - # "BaseModelType":{"title":"BaseModelType","description":"An enumeration.","enum":["sd-1","sd-2"],"type":"string"} openapi_schema["components"]["schemas"][name] = dict( title=name, description="An enumeration.", diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index ba94e7c440..f2e5f33e6e 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -2,7 +2,6 @@ from __future__ import annotations -import json import re from abc import ABC, abstractmethod from enum import Enum @@ -11,12 +10,13 @@ from types import UnionType from typing import TYPE_CHECKING, Any, Callable, ClassVar, Iterable, Literal, Optional, Type, TypeVar, Union import semver -from pydantic import BaseModel, ConfigDict, Field, create_model, field_validator +from pydantic import BaseModel, ConfigDict, Field, RootModel, TypeAdapter, create_model from pydantic.fields import _Unset from pydantic_core import PydanticUndefined from invokeai.app.services.config.config_default import InvokeAIAppConfig from invokeai.app.util.misc import uuid_string +from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField if TYPE_CHECKING: from ..services.invocation_services import InvocationServices @@ -60,7 +60,7 @@ class FieldDescriptions: denoised_latents = "Denoised latents tensor" latents = "Latents tensor" strength = "Strength of denoising (proportional to steps)" - core_metadata = "Optional core metadata to be written to image" + workflow = "Optional workflow to be saved with the image" interp_mode = "Interpolation mode" torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)" fp32 = "Whether or not to use full float32 precision" @@ -665,27 +665,7 @@ class BaseInvocation(ABC, BaseModel): description="Whether or not this is an intermediate invocation.", 
json_schema_extra=dict(ui_type=UIType.IsIntermediate), ) - workflow: Optional[str] = Field( - default=None, - description="The workflow to save with the image", - json_schema_extra=dict(ui_type=UIType.WorkflowField), - ) - use_cache: Optional[bool] = Field( - default=True, - description="Whether or not to use the cache", - ) - - @field_validator("workflow", mode="before") - @classmethod - def validate_workflow_is_json(cls, v): - """We don't have a workflow schema in the backend, so we just check that it's valid JSON""" - if v is None: - return None - try: - json.loads(v) - except json.decoder.JSONDecodeError: - raise ValueError("Workflow must be valid JSON") - return v + use_cache: bool = InputField(default=True, description="Whether or not to use the cache") UIConfig: ClassVar[Type[UIConfigBase]] @@ -824,4 +804,6 @@ def invocation_output( return wrapper -GenericBaseModel = TypeVar("GenericBaseModel", bound=BaseModel) +class WithWorkflow(BaseModel): + workflow: Optional[WorkflowField] = InputField(default=None, description=FieldDescriptions.workflow) + diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py index ba53ea50cf..94db75d810 100644 --- a/invokeai/app/services/invocation_services.py +++ b/invokeai/app/services/invocation_services.py @@ -27,6 +27,7 @@ if TYPE_CHECKING: from .session_queue.session_queue_base import SessionQueueBase from .shared.graph import GraphExecutionState, LibraryGraph from .urls.urls_base import UrlServiceBase + from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase class InvocationServices: @@ -55,6 +56,7 @@ class InvocationServices: invocation_cache: "InvocationCacheBase" names: "NameServiceBase" urls: "UrlServiceBase" + workflow_records: "WorkflowRecordsStorageBase" def __init__( self, @@ -80,6 +82,7 @@ class InvocationServices: invocation_cache: "InvocationCacheBase", names: "NameServiceBase", urls: "UrlServiceBase", + workflow_records: "WorkflowRecordsStorageBase", ): self.board_images = board_images self.board_image_records = board_image_records @@ -103,3 +106,4 @@ class InvocationServices: self.invocation_cache = invocation_cache self.names = names self.urls = urls + self.workflow_records = workflow_records diff --git a/invokeai/app/services/workflow_records/__init__.py b/invokeai/app/services/workflow_records/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/workflow_records/workflow_records_base.py b/invokeai/app/services/workflow_records/workflow_records_base.py new file mode 100644 index 0000000000..97f7cfe3c0 --- /dev/null +++ b/invokeai/app/services/workflow_records/workflow_records_base.py @@ -0,0 +1,17 @@ +from abc import ABC, abstractmethod + +from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField + + +class WorkflowRecordsStorageBase(ABC): + """Base class for workflow storage services.""" + + @abstractmethod + def get(self, workflow_id: str) -> WorkflowField: + """Get workflow by id.""" + pass + + @abstractmethod + def create(self, workflow: WorkflowField) -> WorkflowField: + """Creates a workflow.""" + pass diff --git a/invokeai/app/services/workflow_records/workflow_records_common.py b/invokeai/app/services/workflow_records/workflow_records_common.py new file mode 100644 index 0000000000..d548656dab --- /dev/null +++ b/invokeai/app/services/workflow_records/workflow_records_common.py @@ -0,0 +1,22 @@ +from typing import Any + +from pydantic import Field, RootModel, TypeAdapter + + +class 
WorkflowNotFoundError(Exception): + """Raised when a workflow is not found""" + + +class WorkflowField(RootModel): + """ + Pydantic model for workflows with custom root of type dict[str, Any]. + Workflows are stored without a strict schema. + """ + + root: dict[str, Any] = Field(description="Workflow dict") + + def model_dump(self, *args, **kwargs) -> dict[str, Any]: + return super().model_dump(*args, **kwargs)["root"] + + +type_adapter_WorkflowField = TypeAdapter(WorkflowField) diff --git a/invokeai/app/services/workflow_records/workflow_records_sqlite.py b/invokeai/app/services/workflow_records/workflow_records_sqlite.py new file mode 100644 index 0000000000..2b284ac03f --- /dev/null +++ b/invokeai/app/services/workflow_records/workflow_records_sqlite.py @@ -0,0 +1,148 @@ +import sqlite3 +import threading + +from invokeai.app.services.invoker import Invoker +from invokeai.app.services.shared.sqlite import SqliteDatabase +from invokeai.app.services.workflow_records.workflow_records_base import WorkflowRecordsStorageBase +from invokeai.app.services.workflow_records.workflow_records_common import ( + WorkflowField, + WorkflowNotFoundError, + type_adapter_WorkflowField, +) +from invokeai.app.util.misc import uuid_string + + +class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase): + _invoker: Invoker + _conn: sqlite3.Connection + _cursor: sqlite3.Cursor + _lock: threading.RLock + + def __init__(self, db: SqliteDatabase) -> None: + super().__init__() + self._lock = db.lock + self._conn = db.conn + self._cursor = self._conn.cursor() + self._create_tables() + + def start(self, invoker: Invoker) -> None: + self._invoker = invoker + + def get(self, workflow_id: str) -> WorkflowField: + try: + self._lock.acquire() + self._cursor.execute( + """--sql + SELECT workflow + FROM workflows + WHERE workflow_id = ?; + """, + (workflow_id,), + ) + row = self._cursor.fetchone() + if row is None: + raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found") + return type_adapter_WorkflowField.validate_json(row[0]) + except Exception: + self._conn.rollback() + raise + finally: + self._lock.release() + + def create(self, workflow: WorkflowField) -> WorkflowField: + try: + # workflows do not have ids until they are saved + workflow_id = uuid_string() + workflow.root["id"] = workflow_id + self._lock.acquire() + self._cursor.execute( + """--sql + INSERT INTO workflows(workflow) + VALUES (?); + """, + (workflow.json(),), + ) + self._conn.commit() + except Exception: + self._conn.rollback() + raise + finally: + self._lock.release() + return self.get(workflow_id) + + def _create_tables(self) -> None: + try: + self._lock.acquire() + self._cursor.execute( + """--sql + CREATE TABLE IF NOT EXISTS workflows ( + workflow TEXT NOT NULL, + workflow_id TEXT GENERATED ALWAYS AS (json_extract(workflow, '$.id')) VIRTUAL NOT NULL UNIQUE, -- gets implicit index + created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')), + updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) -- updated via trigger + ); + """ + ) + + self._cursor.execute( + """--sql + CREATE TRIGGER IF NOT EXISTS tg_workflows_updated_at + AFTER UPDATE + ON workflows FOR EACH ROW + BEGIN + UPDATE workflows + SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW') + WHERE workflow_id = old.workflow_id; + END; + """ + ) + + self._conn.commit() + except Exception: + self._conn.rollback() + raise + finally: + self._lock.release() + + # def update(self, workflow_id: str, workflow: Workflow) -> Workflow: + # 
"""Updates a workflow record.""" + # try: + # workflow_id = workflow.get("id", None) + # if type(workflow_id) is not str: + # raise WorkflowNotFoundError(f"Workflow does not have a valid id, got {workflow_id}") + # self._lock.acquire() + # self._cursor.execute( + # """--sql + # UPDATE workflows + # SET workflow = ? + # WHERE workflow_id = ? + # """, + # (workflow, workflow_id), + # ) + # self._conn.commit() + # except Exception: + # self._conn.rollback() + # raise + # finally: + # self._lock.release() + # return self.get(workflow_id) + + # def delete(self, workflow_id: str) -> Workflow: + # """Updates a workflow record.""" + # workflow = self.get(workflow_id) + # try: + # self._lock.acquire() + # self._cursor.execute( + # """--sql + # DELETE FROM workflows + # WHERE workflow_id = ? + # """, + # (workflow_id,), + # ) + # self._conn.commit() + # except Exception: + # self._conn.rollback() + # raise + # finally: + # self._lock.release() + # return workflow diff --git a/tests/nodes/test_graph_execution_state.py b/tests/nodes/test_graph_execution_state.py index 27b8a58bea..e2d435e621 100644 --- a/tests/nodes/test_graph_execution_state.py +++ b/tests/nodes/test_graph_execution_state.py @@ -75,6 +75,7 @@ def mock_services() -> InvocationServices: session_processor=None, # type: ignore session_queue=None, # type: ignore urls=None, # type: ignore + workflow_records=None, # type: ignore ) diff --git a/tests/nodes/test_invoker.py b/tests/nodes/test_invoker.py index 105f7417cd..9774f07fdd 100644 --- a/tests/nodes/test_invoker.py +++ b/tests/nodes/test_invoker.py @@ -80,6 +80,7 @@ def mock_services() -> InvocationServices: session_processor=None, # type: ignore session_queue=None, # type: ignore urls=None, # type: ignore + workflow_records=None, # type: ignore ) From f0db4d36e459a802abe9843ce619cc9e9bd3e3e0 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 17:23:10 +1100 Subject: [PATCH 117/202] feat: metadata refactor - Refactor how metadata is handled to support a user-defined metadata in graphs - Update workflow embed handling - Update UI to work with these changes - Update tests to support metadata/workflow changes --- invokeai/app/api/routers/images.py | 33 +- invokeai/app/api/routers/workflows.py | 2 +- invokeai/app/api_app.py | 2 +- invokeai/app/invocations/baseinvocation.py | 36 +- .../controlnet_image_processors.py | 7 +- invokeai/app/invocations/cv.py | 4 +- invokeai/app/invocations/facetools.py | 8 +- invokeai/app/invocations/image.py | 223 +-- invokeai/app/invocations/infill.py | 21 +- invokeai/app/invocations/latent.py | 12 +- invokeai/app/invocations/metadata.py | 174 +- invokeai/app/invocations/onnx.py | 14 +- invokeai/app/invocations/primitives.py | 4 +- invokeai/app/invocations/upscale.py | 5 +- .../services/image_files/image_files_base.py | 7 +- .../services/image_files/image_files_disk.py | 11 +- .../image_records/image_records_base.py | 6 +- .../image_records/image_records_common.py | 8 + .../image_records/image_records_sqlite.py | 35 +- invokeai/app/services/images/images_base.py | 9 +- invokeai/app/services/images/images_common.py | 2 - .../app/services/images/images_default.py | 35 +- invokeai/app/services/shared/graph.py | 32 +- .../CurrentImage/CurrentImageButtons.tsx | 41 +- .../SingleSelectionMenuItems.tsx | 42 +- .../ImageMetadataViewer.tsx | 28 +- .../Invocation/EmbedWorkflowCheckbox.tsx | 6 +- .../nodes/Invocation/InvocationNodeFooter.tsx | 4 +- .../features/nodes/hooks/useWithWorkflow.ts | 31 + 
.../util/validateSourceAndTargetTypes.ts | 5 +- .../web/src/features/nodes/types/constants.ts | 38 + .../web/src/features/nodes/types/types.ts | 120 +- .../nodes/util/fieldTemplateBuilders.ts | 144 +- .../features/nodes/util/fieldValueBuilders.ts | 6 + .../addControlNetToLinearGraph.ts | 25 +- .../nodes/util/graphBuilders/addHrfToGraph.ts | 42 +- .../addIPAdapterToLinearGraph.ts | 33 +- .../util/graphBuilders/addLoRAsToGraph.ts | 53 +- .../util/graphBuilders/addSDXLLoRAstoGraph.ts | 63 +- .../graphBuilders/addSDXLRefinerToGraph.ts | 27 +- .../util/graphBuilders/addSaveImageNode.ts | 27 +- .../graphBuilders/addSeamlessToLinearGraph.ts | 12 + .../addT2IAdapterToLinearGraph.ts | 23 +- .../nodes/util/graphBuilders/addVAEToGraph.ts | 10 +- .../graphBuilders/addWatermarkerToGraph.ts | 25 +- .../graphBuilders/buildAdHocUpscaleGraph.ts | 9 +- .../buildCanvasImageToImageGraph.ts | 14 +- .../buildCanvasSDXLImageToImageGraph.ts | 23 +- .../buildCanvasSDXLTextToImageGraph.ts | 23 +- .../buildCanvasTextToImageGraph.ts | 23 +- .../graphBuilders/buildLinearBatchConfig.ts | 75 +- .../buildLinearImageToImageGraph.ts | 23 +- .../buildLinearSDXLImageToImageGraph.ts | 25 +- .../buildLinearSDXLTextToImageGraph.ts | 23 +- .../buildLinearTextToImageGraph.ts | 30 +- .../util/graphBuilders/buildNodesGraph.ts | 5 +- .../nodes/util/graphBuilders/constants.ts | 8 + .../nodes/util/graphBuilders/metadata.ts | 58 + .../src/features/nodes/util/parseSchema.ts | 44 +- .../web/src/services/api/endpoints/images.ts | 18 +- .../src/services/api/endpoints/workflows.ts | 31 + .../frontend/web/src/services/api/index.ts | 1 + .../frontend/web/src/services/api/schema.d.ts | 1489 +++++++---------- .../frontend/web/src/services/api/types.ts | 17 +- tests/nodes/test_node_graph.py | 148 +- tests/nodes/test_nodes.py | 23 + 66 files changed, 1807 insertions(+), 1798 deletions(-) create mode 100644 invokeai/frontend/web/src/features/nodes/hooks/useWithWorkflow.ts create mode 100644 invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts create mode 100644 invokeai/frontend/web/src/services/api/endpoints/workflows.ts diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 84d8e8eea4..f462437700 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -5,12 +5,13 @@ from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadF from fastapi.responses import FileResponse from fastapi.routing import APIRouter from PIL import Image -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, ValidationError -from invokeai.app.invocations.metadata import ImageMetadata +from invokeai.app.invocations.baseinvocation import MetadataField, type_adapter_MetadataField from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults +from invokeai.app.services.workflow_records.workflow_records_common import type_adapter_WorkflowField from ..dependencies import ApiDependencies @@ -45,8 +46,10 @@ async def upload_image( if not file.content_type or not file.content_type.startswith("image"): raise HTTPException(status_code=415, detail="Not an image") - contents = await file.read() + metadata = None + workflow = None + contents = await file.read() try: pil_image = Image.open(io.BytesIO(contents)) if crop_visible: @@ -56,6 +59,24 @@ 
async def upload_image( # Error opening the image raise HTTPException(status_code=415, detail="Failed to read image") + # attempt to parse metadata from image + metadata_raw = pil_image.info.get("invokeai_metadata", None) + if metadata_raw: + try: + metadata = type_adapter_MetadataField.validate_json(metadata_raw) + except ValidationError: + ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image") + pass + + # attempt to parse workflow from image + workflow_raw = pil_image.info.get("invokeai_workflow", None) + if workflow_raw is not None: + try: + workflow = type_adapter_WorkflowField.validate_json(workflow_raw) + except ValidationError: + ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image") + pass + try: image_dto = ApiDependencies.invoker.services.images.create( image=pil_image, @@ -63,6 +84,8 @@ async def upload_image( image_category=image_category, session_id=session_id, board_id=board_id, + metadata=metadata, + workflow=workflow, is_intermediate=is_intermediate, ) @@ -146,11 +169,11 @@ async def get_image_dto( @images_router.get( "/i/{image_name}/metadata", operation_id="get_image_metadata", - response_model=ImageMetadata, + response_model=Optional[MetadataField], ) async def get_image_metadata( image_name: str = Path(description="The name of image to get"), -) -> ImageMetadata: +) -> Optional[MetadataField]: """Gets an image's metadata""" try: diff --git a/invokeai/app/api/routers/workflows.py b/invokeai/app/api/routers/workflows.py index 814123fc81..57a33fe73f 100644 --- a/invokeai/app/api/routers/workflows.py +++ b/invokeai/app/api/routers/workflows.py @@ -1,4 +1,4 @@ -from fastapi import APIRouter, Body, Path +from fastapi import APIRouter, Path from invokeai.app.api.dependencies import ApiDependencies from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index e04cf564ab..51aa14c75b 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -44,8 +44,8 @@ if True: # hack to make flake8 happy with imports coming after setting up the c boards, images, models, - sessions, session_queue, + sessions, utilities, workflows, ) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index f2e5f33e6e..39df4971a6 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -15,8 +15,8 @@ from pydantic.fields import _Unset from pydantic_core import PydanticUndefined from invokeai.app.services.config.config_default import InvokeAIAppConfig -from invokeai.app.util.misc import uuid_string from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField +from invokeai.app.util.misc import uuid_string if TYPE_CHECKING: from ..services.invocation_services import InvocationServices @@ -60,6 +60,11 @@ class FieldDescriptions: denoised_latents = "Denoised latents tensor" latents = "Latents tensor" strength = "Strength of denoising (proportional to steps)" + metadata = "Optional metadata to be saved with the image" + metadata_collection = "Collection of Metadata" + metadata_item_polymorphic = "A single metadata item or collection of metadata items" + metadata_item_label = "Label for this metadata item" + metadata_item_value = "The value for this metadata item (may be any type)" workflow = "Optional workflow to be saved with the image" interp_mode = "Interpolation mode" torch_antialias = "Whether or not to 
apply antialiasing (bilinear or bicubic only)" @@ -167,8 +172,12 @@ class UIType(str, Enum): Scheduler = "Scheduler" WorkflowField = "WorkflowField" IsIntermediate = "IsIntermediate" - MetadataField = "MetadataField" BoardField = "BoardField" + Any = "Any" + MetadataItem = "MetadataItem" + MetadataItemCollection = "MetadataItemCollection" + MetadataItemPolymorphic = "MetadataItemPolymorphic" + MetadataDict = "MetadataDict" # endregion @@ -807,3 +816,26 @@ def invocation_output( class WithWorkflow(BaseModel): workflow: Optional[WorkflowField] = InputField(default=None, description=FieldDescriptions.workflow) + +class MetadataItemField(BaseModel): + label: str = Field(description=FieldDescriptions.metadata_item_label) + value: Any = Field(description=FieldDescriptions.metadata_item_value) + + +class MetadataField(RootModel): + """ + Pydantic model for metadata with custom root of type dict[str, Any]. + Metadata is stored without a strict schema. + """ + + root: dict[str, Any] = Field(description="A dictionary of metadata, shape of which is arbitrary") + + def model_dump(self, *args, **kwargs) -> dict[str, Any]: + return super().model_dump(*args, **kwargs)["root"] + + +type_adapter_MetadataField = TypeAdapter(MetadataField) + + +class WithMetadata(BaseModel): + metadata: Optional[MetadataField] = InputField(default=None, description=FieldDescriptions.metadata) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 200c37d851..7c76b70e7f 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -38,6 +38,8 @@ from .baseinvocation import ( InputField, InvocationContext, OutputField, + WithMetadata, + WithWorkflow, invocation, invocation_output, ) @@ -127,12 +129,12 @@ class ControlNetInvocation(BaseInvocation): # This invocation exists for other invocations to subclass it - do not register with @invocation! 
-class ImageProcessorInvocation(BaseInvocation): +class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithWorkflow): """Base class for invocations that preprocess images for ControlNet""" image: ImageField = InputField(description="The image to process") - def run_processor(self, image): + def run_processor(self, image: Image.Image) -> Image.Image: # superclass just passes through image without processing return image @@ -150,6 +152,7 @@ class ImageProcessorInvocation(BaseInvocation): session_id=context.graph_execution_state_id, node_id=self.id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) diff --git a/invokeai/app/invocations/cv.py b/invokeai/app/invocations/cv.py index 3b85955d74..e5cfd327c1 100644 --- a/invokeai/app/invocations/cv.py +++ b/invokeai/app/invocations/cv.py @@ -8,11 +8,11 @@ from PIL import Image, ImageOps from invokeai.app.invocations.primitives import ImageField, ImageOutput from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin -from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation +from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation @invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.0.0") -class CvInpaintInvocation(BaseInvocation): +class CvInpaintInvocation(BaseInvocation, WithMetadata, WithWorkflow): """Simple inpaint using opencv.""" image: ImageField = InputField(description="The image to inpaint") diff --git a/invokeai/app/invocations/facetools.py b/invokeai/app/invocations/facetools.py index 40e15e9476..0bb24ef69d 100644 --- a/invokeai/app/invocations/facetools.py +++ b/invokeai/app/invocations/facetools.py @@ -16,6 +16,8 @@ from invokeai.app.invocations.baseinvocation import ( InputField, InvocationContext, OutputField, + WithMetadata, + WithWorkflow, invocation, invocation_output, ) @@ -437,7 +439,7 @@ def get_faces_list( @invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.0.2") -class FaceOffInvocation(BaseInvocation): +class FaceOffInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Bound, extract, and mask a face from an image using MediaPipe detection""" image: ImageField = InputField(description="Image for face detection") @@ -531,7 +533,7 @@ class FaceOffInvocation(BaseInvocation): @invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.0.2") -class FaceMaskInvocation(BaseInvocation): +class FaceMaskInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Face mask creation using mediapipe face detection""" image: ImageField = InputField(description="Image to face detect") @@ -650,7 +652,7 @@ class FaceMaskInvocation(BaseInvocation): @invocation( "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.0.2" ) -class FaceIdentifierInvocation(BaseInvocation): +class FaceIdentifierInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Outputs an image with detected face IDs printed on each face. 
For use with other FaceTools.""" image: ImageField = InputField(description="Image to face detect") diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 3a4f4eadac..9a4e9da954 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -7,13 +7,21 @@ import cv2 import numpy from PIL import Image, ImageChops, ImageFilter, ImageOps -from invokeai.app.invocations.metadata import CoreMetadata from invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark from invokeai.backend.image_util.safety_checker import SafetyChecker -from .baseinvocation import BaseInvocation, FieldDescriptions, Input, InputField, InvocationContext, invocation +from .baseinvocation import ( + BaseInvocation, + FieldDescriptions, + Input, + InputField, + InvocationContext, + WithMetadata, + WithWorkflow, + invocation, +) @invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0") @@ -36,14 +44,8 @@ class ShowImageInvocation(BaseInvocation): ) -@invocation( - "blank_image", - title="Blank Image", - tags=["image"], - category="image", - version="1.0.0", -) -class BlankImageInvocation(BaseInvocation): +@invocation("blank_image", title="Blank Image", tags=["image"], category="image", version="1.0.0") +class BlankImageInvocation(BaseInvocation, WithMetadata, WithWorkflow): """Creates a blank image and forwards it to the pipeline""" width: int = InputField(default=512, description="The width of the image") @@ -61,6 +63,7 @@ class BlankImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -71,14 +74,8 @@ class BlankImageInvocation(BaseInvocation): ) -@invocation( - "img_crop", - title="Crop Image", - tags=["image", "crop"], - category="image", - version="1.0.0", -) -class ImageCropInvocation(BaseInvocation): +@invocation("img_crop", title="Crop Image", tags=["image", "crop"], category="image", version="1.0.0") +class ImageCropInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Crops an image to a specified box. 
The box can be outside of the image.""" image: ImageField = InputField(description="The image to crop") @@ -100,6 +97,7 @@ class ImageCropInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -110,14 +108,8 @@ class ImageCropInvocation(BaseInvocation): ) -@invocation( - "img_paste", - title="Paste Image", - tags=["image", "paste"], - category="image", - version="1.0.1", -) -class ImagePasteInvocation(BaseInvocation): +@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.0.1") +class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Pastes an image into another image.""" base_image: ImageField = InputField(description="The base image") @@ -159,6 +151,7 @@ class ImagePasteInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -169,14 +162,8 @@ class ImagePasteInvocation(BaseInvocation): ) -@invocation( - "tomask", - title="Mask from Alpha", - tags=["image", "mask"], - category="image", - version="1.0.0", -) -class MaskFromAlphaInvocation(BaseInvocation): +@invocation("tomask", title="Mask from Alpha", tags=["image", "mask"], category="image", version="1.0.0") +class MaskFromAlphaInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Extracts the alpha channel of an image as a mask.""" image: ImageField = InputField(description="The image to create the mask from") @@ -196,6 +183,7 @@ class MaskFromAlphaInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -206,14 +194,8 @@ class MaskFromAlphaInvocation(BaseInvocation): ) -@invocation( - "img_mul", - title="Multiply Images", - tags=["image", "multiply"], - category="image", - version="1.0.0", -) -class ImageMultiplyInvocation(BaseInvocation): +@invocation("img_mul", title="Multiply Images", tags=["image", "multiply"], category="image", version="1.0.0") +class ImageMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Multiplies two images together using `PIL.ImageChops.multiply()`.""" image1: ImageField = InputField(description="The first image to multiply") @@ -232,6 +214,7 @@ class ImageMultiplyInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -245,14 +228,8 @@ class ImageMultiplyInvocation(BaseInvocation): IMAGE_CHANNELS = Literal["A", "R", "G", "B"] -@invocation( - "img_chan", - title="Extract Image Channel", - tags=["image", "channel"], - category="image", - version="1.0.0", -) -class ImageChannelInvocation(BaseInvocation): +@invocation("img_chan", title="Extract Image Channel", tags=["image", "channel"], category="image", version="1.0.0") +class ImageChannelInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Gets a channel from an image.""" image: ImageField = InputField(description="The image to get the channel from") @@ -270,6 +247,7 @@ class ImageChannelInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -283,14 +261,8 @@ class ImageChannelInvocation(BaseInvocation): IMAGE_MODES = Literal["L", "RGB", "RGBA", 
"CMYK", "YCbCr", "LAB", "HSV", "I", "F"] -@invocation( - "img_conv", - title="Convert Image Mode", - tags=["image", "convert"], - category="image", - version="1.0.0", -) -class ImageConvertInvocation(BaseInvocation): +@invocation("img_conv", title="Convert Image Mode", tags=["image", "convert"], category="image", version="1.0.0") +class ImageConvertInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Converts an image to a different mode.""" image: ImageField = InputField(description="The image to convert") @@ -308,6 +280,7 @@ class ImageConvertInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -318,14 +291,8 @@ class ImageConvertInvocation(BaseInvocation): ) -@invocation( - "img_blur", - title="Blur Image", - tags=["image", "blur"], - category="image", - version="1.0.0", -) -class ImageBlurInvocation(BaseInvocation): +@invocation("img_blur", title="Blur Image", tags=["image", "blur"], category="image", version="1.0.0") +class ImageBlurInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Blurs an image""" image: ImageField = InputField(description="The image to blur") @@ -348,6 +315,7 @@ class ImageBlurInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -378,23 +346,14 @@ PIL_RESAMPLING_MAP = { } -@invocation( - "img_resize", - title="Resize Image", - tags=["image", "resize"], - category="image", - version="1.0.0", -) -class ImageResizeInvocation(BaseInvocation): +@invocation("img_resize", title="Resize Image", tags=["image", "resize"], category="image", version="1.0.0") +class ImageResizeInvocation(BaseInvocation, WithMetadata, WithWorkflow): """Resizes an image to specific dimensions""" image: ImageField = InputField(description="The image to resize") width: int = InputField(default=512, gt=0, description="The width to resize to (px)") height: int = InputField(default=512, gt=0, description="The height to resize to (px)") resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode") - metadata: Optional[CoreMetadata] = InputField( - default=None, description=FieldDescriptions.core_metadata, ui_hidden=True - ) def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get_pil_image(self.image.image_name) @@ -413,7 +372,7 @@ class ImageResizeInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.model_dump() if self.metadata else None, + metadata=self.metadata, workflow=self.workflow, ) @@ -424,14 +383,8 @@ class ImageResizeInvocation(BaseInvocation): ) -@invocation( - "img_scale", - title="Scale Image", - tags=["image", "scale"], - category="image", - version="1.0.0", -) -class ImageScaleInvocation(BaseInvocation): +@invocation("img_scale", title="Scale Image", tags=["image", "scale"], category="image", version="1.0.0") +class ImageScaleInvocation(BaseInvocation, WithMetadata, WithWorkflow): """Scales an image by a factor""" image: ImageField = InputField(description="The image to scale") @@ -461,6 +414,7 @@ class ImageScaleInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -471,14 +425,8 @@ class 
ImageScaleInvocation(BaseInvocation): ) -@invocation( - "img_lerp", - title="Lerp Image", - tags=["image", "lerp"], - category="image", - version="1.0.0", -) -class ImageLerpInvocation(BaseInvocation): +@invocation("img_lerp", title="Lerp Image", tags=["image", "lerp"], category="image", version="1.0.0") +class ImageLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Linear interpolation of all pixels of an image""" image: ImageField = InputField(description="The image to lerp") @@ -500,6 +448,7 @@ class ImageLerpInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -510,14 +459,8 @@ class ImageLerpInvocation(BaseInvocation): ) -@invocation( - "img_ilerp", - title="Inverse Lerp Image", - tags=["image", "ilerp"], - category="image", - version="1.0.0", -) -class ImageInverseLerpInvocation(BaseInvocation): +@invocation("img_ilerp", title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", version="1.0.0") +class ImageInverseLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Inverse linear interpolation of all pixels of an image""" image: ImageField = InputField(description="The image to lerp") @@ -539,6 +482,7 @@ class ImageInverseLerpInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -549,20 +493,11 @@ class ImageInverseLerpInvocation(BaseInvocation): ) -@invocation( - "img_nsfw", - title="Blur NSFW Image", - tags=["image", "nsfw"], - category="image", - version="1.0.0", -) -class ImageNSFWBlurInvocation(BaseInvocation): +@invocation("img_nsfw", title="Blur NSFW Image", tags=["image", "nsfw"], category="image", version="1.0.0") +class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithWorkflow): """Add blur to NSFW-flagged images""" image: ImageField = InputField(description="The image to check") - metadata: Optional[CoreMetadata] = InputField( - default=None, description=FieldDescriptions.core_metadata, ui_hidden=True - ) def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get_pil_image(self.image.image_name) @@ -583,7 +518,7 @@ class ImageNSFWBlurInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.model_dump() if self.metadata else None, + metadata=self.metadata, workflow=self.workflow, ) @@ -607,14 +542,11 @@ class ImageNSFWBlurInvocation(BaseInvocation): category="image", version="1.0.0", ) -class ImageWatermarkInvocation(BaseInvocation): +class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithWorkflow): """Add an invisible watermark to an image""" image: ImageField = InputField(description="The image to check") text: str = InputField(default="InvokeAI", description="Watermark text") - metadata: Optional[CoreMetadata] = InputField( - default=None, description=FieldDescriptions.core_metadata, ui_hidden=True - ) def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get_pil_image(self.image.image_name) @@ -626,7 +558,7 @@ class ImageWatermarkInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.model_dump() if self.metadata else None, + metadata=self.metadata, workflow=self.workflow, ) @@ -637,14 +569,8 @@ 
class ImageWatermarkInvocation(BaseInvocation): ) -@invocation( - "mask_edge", - title="Mask Edge", - tags=["image", "mask", "inpaint"], - category="image", - version="1.0.0", -) -class MaskEdgeInvocation(BaseInvocation): +@invocation("mask_edge", title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", version="1.0.0") +class MaskEdgeInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Applies an edge mask to an image""" image: ImageField = InputField(description="The image to apply the mask to") @@ -678,6 +604,7 @@ class MaskEdgeInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -695,7 +622,7 @@ class MaskEdgeInvocation(BaseInvocation): category="image", version="1.0.0", ) -class MaskCombineInvocation(BaseInvocation): +class MaskCombineInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.""" mask1: ImageField = InputField(description="The first mask to combine") @@ -714,6 +641,7 @@ class MaskCombineInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -724,14 +652,8 @@ class MaskCombineInvocation(BaseInvocation): ) -@invocation( - "color_correct", - title="Color Correct", - tags=["image", "color"], - category="image", - version="1.0.0", -) -class ColorCorrectInvocation(BaseInvocation): +@invocation("color_correct", title="Color Correct", tags=["image", "color"], category="image", version="1.0.0") +class ColorCorrectInvocation(BaseInvocation, WithWorkflow, WithMetadata): """ Shifts the colors of a target image to match the reference image, optionally using a mask to only color-correct certain regions of the target image. 
@@ -830,6 +752,7 @@ class ColorCorrectInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -840,14 +763,8 @@ class ColorCorrectInvocation(BaseInvocation): ) -@invocation( - "img_hue_adjust", - title="Adjust Image Hue", - tags=["image", "hue"], - category="image", - version="1.0.0", -) -class ImageHueAdjustmentInvocation(BaseInvocation): +@invocation("img_hue_adjust", title="Adjust Image Hue", tags=["image", "hue"], category="image", version="1.0.0") +class ImageHueAdjustmentInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Adjusts the Hue of an image.""" image: ImageField = InputField(description="The image to adjust") @@ -875,6 +792,7 @@ class ImageHueAdjustmentInvocation(BaseInvocation): node_id=self.id, is_intermediate=self.is_intermediate, session_id=context.graph_execution_state_id, + metadata=self.metadata, workflow=self.workflow, ) @@ -950,7 +868,7 @@ CHANNEL_FORMATS = { category="image", version="1.0.0", ) -class ImageChannelOffsetInvocation(BaseInvocation): +class ImageChannelOffsetInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Add or subtract a value from a specific color channel of an image.""" image: ImageField = InputField(description="The image to adjust") @@ -984,6 +902,7 @@ class ImageChannelOffsetInvocation(BaseInvocation): node_id=self.id, is_intermediate=self.is_intermediate, session_id=context.graph_execution_state_id, + metadata=self.metadata, workflow=self.workflow, ) @@ -1020,7 +939,7 @@ class ImageChannelOffsetInvocation(BaseInvocation): category="image", version="1.0.0", ) -class ImageChannelMultiplyInvocation(BaseInvocation): +class ImageChannelMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Scale a specific color channel of an image.""" image: ImageField = InputField(description="The image to adjust") @@ -1060,6 +979,7 @@ class ImageChannelMultiplyInvocation(BaseInvocation): is_intermediate=self.is_intermediate, session_id=context.graph_execution_state_id, workflow=self.workflow, + metadata=self.metadata, ) return ImageOutput( @@ -1079,16 +999,11 @@ class ImageChannelMultiplyInvocation(BaseInvocation): version="1.0.1", use_cache=False, ) -class SaveImageInvocation(BaseInvocation): +class SaveImageInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Saves an image. 
Unlike an image primitive, this invocation stores a copy of the image.""" image: ImageField = InputField(description=FieldDescriptions.image) - board: Optional[BoardField] = InputField(default=None, description=FieldDescriptions.board, input=Input.Direct) - metadata: Optional[CoreMetadata] = InputField( - default=None, - description=FieldDescriptions.core_metadata, - ui_hidden=True, - ) + board: BoardField = InputField(default=None, description=FieldDescriptions.board, input=Input.Direct) def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get_pil_image(self.image.image_name) @@ -1101,7 +1016,7 @@ class SaveImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.model_dump() if self.metadata else None, + metadata=self.metadata, workflow=self.workflow, ) diff --git a/invokeai/app/invocations/infill.py b/invokeai/app/invocations/infill.py index d8384290f3..b100fe7c4e 100644 --- a/invokeai/app/invocations/infill.py +++ b/invokeai/app/invocations/infill.py @@ -13,7 +13,7 @@ from invokeai.backend.image_util.cv2_inpaint import cv2_inpaint from invokeai.backend.image_util.lama import LaMA from invokeai.backend.image_util.patchmatch import PatchMatch -from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation +from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation from .image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES @@ -119,7 +119,7 @@ def tile_fill_missing(im: Image.Image, tile_size: int = 16, seed: Optional[int] @invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0") -class InfillColorInvocation(BaseInvocation): +class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Infills transparent areas of an image with a solid color""" image: ImageField = InputField(description="The image to infill") @@ -143,6 +143,7 @@ class InfillColorInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -154,7 +155,7 @@ class InfillColorInvocation(BaseInvocation): @invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0") -class InfillTileInvocation(BaseInvocation): +class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Infills transparent areas of an image with tiles of the image""" image: ImageField = InputField(description="The image to infill") @@ -179,6 +180,7 @@ class InfillTileInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) @@ -192,7 +194,7 @@ class InfillTileInvocation(BaseInvocation): @invocation( "infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0" ) -class InfillPatchMatchInvocation(BaseInvocation): +class InfillPatchMatchInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Infills transparent areas of an image using the PatchMatch algorithm""" image: ImageField = InputField(description="The image to infill") @@ -232,6 +234,7 @@ class InfillPatchMatchInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, 
workflow=self.workflow, ) @@ -243,7 +246,7 @@ class InfillPatchMatchInvocation(BaseInvocation): @invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0") -class LaMaInfillInvocation(BaseInvocation): +class LaMaInfillInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Infills transparent areas of an image using the LaMa model""" image: ImageField = InputField(description="The image to infill") @@ -260,6 +263,8 @@ class LaMaInfillInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, + workflow=self.workflow, ) return ImageOutput( @@ -269,8 +274,8 @@ class LaMaInfillInvocation(BaseInvocation): ) -@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0") -class CV2InfillInvocation(BaseInvocation): +@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint") +class CV2InfillInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Infills transparent areas of an image using OpenCV Inpainting""" image: ImageField = InputField(description="The image to infill") @@ -287,6 +292,8 @@ class CV2InfillInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, + workflow=self.workflow, ) return ImageOutput( diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index c28c87395d..a537972c0b 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -23,7 +23,6 @@ from pydantic import field_validator from torchvision.transforms.functional import resize as tv_resize from invokeai.app.invocations.ip_adapter import IPAdapterField -from invokeai.app.invocations.metadata import CoreMetadata from invokeai.app.invocations.primitives import ( DenoiseMaskField, DenoiseMaskOutput, @@ -64,6 +63,8 @@ from .baseinvocation import ( InvocationContext, OutputField, UIType, + WithMetadata, + WithWorkflow, invocation, invocation_output, ) @@ -792,7 +793,7 @@ class DenoiseLatentsInvocation(BaseInvocation): category="latents", version="1.0.0", ) -class LatentsToImageInvocation(BaseInvocation): +class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow): """Generates an image from latents.""" latents: LatentsField = InputField( @@ -805,11 +806,6 @@ class LatentsToImageInvocation(BaseInvocation): ) tiled: bool = InputField(default=False, description=FieldDescriptions.tiled) fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32) - metadata: Optional[CoreMetadata] = InputField( - default=None, - description=FieldDescriptions.core_metadata, - ui_hidden=True, - ) @torch.no_grad() def invoke(self, context: InvocationContext) -> ImageOutput: @@ -878,7 +874,7 @@ class LatentsToImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.model_dump() if self.metadata else None, + metadata=self.metadata, workflow=self.workflow, ) diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 4d76926aaa..205dbef814 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -1,13 +1,17 @@ -from typing import Optional +from typing import Any, Literal, Optional, Union -from pydantic import Field +from pydantic import 
BaseModel, ConfigDict, Field from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, + FieldDescriptions, InputField, InvocationContext, + MetadataField, + MetadataItemField, OutputField, + UIType, invocation, invocation_output, ) @@ -16,116 +20,100 @@ from invokeai.app.invocations.ip_adapter import IPAdapterModelField from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField from invokeai.app.invocations.primitives import ImageField from invokeai.app.invocations.t2i_adapter import T2IAdapterField -from invokeai.app.util.model_exclude_null import BaseModelExcludeNull from ...version import __version__ -class LoRAMetadataField(BaseModelExcludeNull): - """LoRA metadata for an image generated in InvokeAI.""" +class LoRAMetadataField(BaseModel): + """LoRA Metadata Field""" - lora: LoRAModelField = Field(description="The LoRA model") - weight: float = Field(description="The weight of the LoRA model") + lora: LoRAModelField = Field(description=FieldDescriptions.lora_model) + weight: float = Field(description=FieldDescriptions.lora_weight) -class IPAdapterMetadataField(BaseModelExcludeNull): +class IPAdapterMetadataField(BaseModel): + """IP Adapter Field, minus the CLIP Vision Encoder model""" + image: ImageField = Field(description="The IP-Adapter image prompt.") - ip_adapter_model: IPAdapterModelField = Field(description="The IP-Adapter model to use.") - weight: float = Field(description="The weight of the IP-Adapter model") + ip_adapter_model: IPAdapterModelField = Field( + description="The IP-Adapter model.", + ) + weight: Union[float, list[float]] = Field( + default=1, + ge=0, + description="The weight given to the IP-Adapter", + ) begin_step_percent: float = Field( - default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)" + default=0, ge=-1, le=2, description="When the IP-Adapter is first applied (% of total steps)" ) end_step_percent: float = Field( default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)" ) -class CoreMetadata(BaseModelExcludeNull): - """Core generation metadata for an image generated in InvokeAI.""" +@invocation_output("metadata_item_output") +class MetadataItemOutput(BaseInvocationOutput): + """Metadata Item Output""" - app_version: str = Field(default=__version__, description="The version of InvokeAI used to generate this image") - generation_mode: Optional[str] = Field( - default=None, - description="The generation mode that output this image", - ) - created_by: Optional[str] = Field(default=None, description="The name of the creator of the image") - positive_prompt: Optional[str] = Field(default=None, description="The positive prompt parameter") - negative_prompt: Optional[str] = Field(default=None, description="The negative prompt parameter") - width: Optional[int] = Field(default=None, description="The width parameter") - height: Optional[int] = Field(default=None, description="The height parameter") - seed: Optional[int] = Field(default=None, description="The seed used for noise generation") - rand_device: Optional[str] = Field(default=None, description="The device used for random number generation") - cfg_scale: Optional[float] = Field(default=None, description="The classifier-free guidance scale parameter") - steps: Optional[int] = Field(default=None, description="The number of steps used for inference") - scheduler: Optional[str] = Field(default=None, description="The scheduler used for inference") - clip_skip: Optional[int] 
= Field( - default=None, - description="The number of skipped CLIP layers", - ) - model: Optional[MainModelField] = Field(default=None, description="The main model used for inference") - controlnets: Optional[list[ControlField]] = Field(default=None, description="The ControlNets used for inference") - ipAdapters: Optional[list[IPAdapterMetadataField]] = Field( - default=None, description="The IP Adapters used for inference" - ) - t2iAdapters: Optional[list[T2IAdapterField]] = Field(default=None, description="The IP Adapters used for inference") - loras: Optional[list[LoRAMetadataField]] = Field(default=None, description="The LoRAs used for inference") - vae: Optional[VAEModelField] = Field( - default=None, - description="The VAE used for decoding, if the main model's default was not used", + item: MetadataItemField = OutputField(description="Metadata Item") + + +@invocation("metadata_item", title="Metadata Item", tags=["metadata"], category="metadata", version="1.0.0") +class MetadataItemInvocation(BaseInvocation): + """Used to create an arbitrary metadata item. Provide "label" and make a connection to "value" to store that data as the value.""" + + label: str = InputField(description=FieldDescriptions.metadata_item_label) + value: Any = InputField(description=FieldDescriptions.metadata_item_value, ui_type=UIType.Any) + + def invoke(self, context: InvocationContext) -> MetadataItemOutput: + return MetadataItemOutput(item=MetadataItemField(label=self.label, value=self.value)) + + +@invocation_output("metadata_output") +class MetadataOutput(BaseInvocationOutput): + metadata: MetadataField = OutputField(description="Metadata Dict") + + +@invocation("metadata", title="Metadata", tags=["metadata"], category="metadata", version="1.0.0") +class MetadataInvocation(BaseInvocation): + """Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict.""" + + items: Union[list[MetadataItemField], MetadataItemField] = InputField( + description=FieldDescriptions.metadata_item_polymorphic ) - # Latents-to-Latents - strength: Optional[float] = Field( - default=None, - description="The strength used for latents-to-latents", - ) - init_image: Optional[str] = Field(default=None, description="The name of the initial image") + def invoke(self, context: InvocationContext) -> MetadataOutput: + if isinstance(self.items, MetadataItemField): + # single metadata item + data = {self.items.label: self.items.value} + else: + # collection of metadata items + data = {item.label: item.value for item in self.items} - # SDXL - positive_style_prompt: Optional[str] = Field(default=None, description="The positive style prompt parameter") - negative_style_prompt: Optional[str] = Field(default=None, description="The negative style prompt parameter") - - # SDXL Refiner - refiner_model: Optional[MainModelField] = Field(default=None, description="The SDXL Refiner model used") - refiner_cfg_scale: Optional[float] = Field( - default=None, - description="The classifier-free guidance scale parameter used for the refiner", - ) - refiner_steps: Optional[int] = Field(default=None, description="The number of steps used for the refiner") - refiner_scheduler: Optional[str] = Field(default=None, description="The scheduler used for the refiner") - refiner_positive_aesthetic_score: Optional[float] = Field( - default=None, description="The aesthetic score used for the refiner" - ) - refiner_negative_aesthetic_score: Optional[float] = Field( - default=None, description="The aesthetic score used for the refiner" - ) - 
refiner_start: Optional[float] = Field(default=None, description="The start value used for refiner denoising") + # add app version + data.update({"app_version": __version__}) + return MetadataOutput(metadata=MetadataField.model_validate(data)) -class ImageMetadata(BaseModelExcludeNull): - """An image's generation metadata""" +@invocation("merge_metadata", title="Metadata Merge", tags=["metadata"], category="metadata", version="1.0.0") +class MergeMetadataInvocation(BaseInvocation): + """Merges a collection of MetadataDicts into a single MetadataDict.""" - metadata: Optional[dict] = Field( - default=None, - description="The image's core metadata, if it was created in the Linear or Canvas UI", - ) - graph: Optional[dict] = Field(default=None, description="The graph that created the image") + collection: list[MetadataField] = InputField(description=FieldDescriptions.metadata_collection) + + def invoke(self, context: InvocationContext) -> MetadataOutput: + data = {} + for item in self.collection: + data.update(item.model_dump()) + + return MetadataOutput(metadata=MetadataField.model_validate(data)) -@invocation_output("metadata_accumulator_output") -class MetadataAccumulatorOutput(BaseInvocationOutput): - """The output of the MetadataAccumulator node""" +@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.0.0") +class CoreMetadataInvocation(BaseInvocation): + """Collects core generation metadata into a MetadataField""" - metadata: CoreMetadata = OutputField(description="The core metadata for the image") - - -@invocation( - "metadata_accumulator", title="Metadata Accumulator", tags=["metadata"], category="metadata", version="1.0.0" -) -class MetadataAccumulatorInvocation(BaseInvocation): - """Outputs a Core Metadata Object""" - - generation_mode: Optional[str] = InputField( + generation_mode: Optional[Literal["txt2img", "img2img", "inpaint", "outpaint"]] = InputField( default=None, description="The generation mode that output this image", ) @@ -138,6 +126,8 @@ class MetadataAccumulatorInvocation(BaseInvocation): cfg_scale: Optional[float] = InputField(default=None, description="The classifier-free guidance scale parameter") steps: Optional[int] = InputField(default=None, description="The number of steps used for inference") scheduler: Optional[str] = InputField(default=None, description="The scheduler used for inference") + seamless_x: Optional[bool] = InputField(default=None, description="Whether seamless tiling was used on the X axis") + seamless_y: Optional[bool] = InputField(default=None, description="Whether seamless tiling was used on the Y axis") clip_skip: Optional[int] = InputField( default=None, description="The number of skipped CLIP layers", @@ -220,7 +210,13 @@ class MetadataAccumulatorInvocation(BaseInvocation): description="The start value used for refiner denoising", ) - def invoke(self, context: InvocationContext) -> MetadataAccumulatorOutput: + def invoke(self, context: InvocationContext) -> MetadataOutput: """Collects and outputs a CoreMetadata object""" - return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.model_dump())) + return MetadataOutput( + metadata=MetadataField.model_validate( + self.model_dump(exclude_none=True, exclude={"id", "type", "is_intermediate", "use_cache"}) + ) + ) + + model_config = ConfigDict(extra="allow") diff --git a/invokeai/app/invocations/onnx.py b/invokeai/app/invocations/onnx.py index 3f4f688cf4..140505f736 100644 --- a/invokeai/app/invocations/onnx.py +++ b/invokeai/app/invocations/onnx.py @@
-4,7 +4,7 @@ import inspect import re # from contextlib import ExitStack -from typing import List, Literal, Optional, Union +from typing import List, Literal, Union import numpy as np import torch @@ -12,7 +12,6 @@ from diffusers.image_processor import VaeImageProcessor from pydantic import BaseModel, ConfigDict, Field, field_validator from tqdm import tqdm -from invokeai.app.invocations.metadata import CoreMetadata from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.app.util.step_callback import stable_diffusion_step_callback @@ -31,6 +30,8 @@ from .baseinvocation import ( OutputField, UIComponent, UIType, + WithMetadata, + WithWorkflow, invocation, invocation_output, ) @@ -327,7 +328,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation): category="image", version="1.0.0", ) -class ONNXLatentsToImageInvocation(BaseInvocation): +class ONNXLatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow): """Generates an image from latents.""" latents: LatentsField = InputField( @@ -338,11 +339,6 @@ class ONNXLatentsToImageInvocation(BaseInvocation): description=FieldDescriptions.vae, input=Input.Connection, ) - metadata: Optional[CoreMetadata] = InputField( - default=None, - description=FieldDescriptions.core_metadata, - ui_hidden=True, - ) # tiled: bool = InputField(default=False, description="Decode latents by overlaping tiles(less memory consumption)") def invoke(self, context: InvocationContext) -> ImageOutput: @@ -381,7 +377,7 @@ class ONNXLatentsToImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.model_dump() if self.metadata else None, + metadata=self.metadata, workflow=self.workflow, ) diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index c314edfd15..88ede88cde 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -251,7 +251,9 @@ class ImageCollectionOutput(BaseInvocationOutput): @invocation("image", title="Image Primitive", tags=["primitives", "image"], category="primitives", version="1.0.0") -class ImageInvocation(BaseInvocation): +class ImageInvocation( + BaseInvocation, +): """An image primitive value""" image: ImageField = InputField(description="The image to load") diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index d30bb71d95..1167914aca 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -14,7 +14,7 @@ from invokeai.app.invocations.primitives import ImageField, ImageOutput from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.backend.util.devices import choose_torch_device -from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation +from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation # TODO: Populate this from disk? # TODO: Use model manager to load? 
@@ -30,7 +30,7 @@ if choose_torch_device() == torch.device("mps"): @invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.1.0") -class ESRGANInvocation(BaseInvocation): +class ESRGANInvocation(BaseInvocation, WithWorkflow, WithMetadata): """Upscales an image using RealESRGAN.""" image: ImageField = InputField(description="The input image") @@ -123,6 +123,7 @@ class ESRGANInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata, workflow=self.workflow, ) diff --git a/invokeai/app/services/image_files/image_files_base.py b/invokeai/app/services/image_files/image_files_base.py index 5dde7b05d6..3f6e797225 100644 --- a/invokeai/app/services/image_files/image_files_base.py +++ b/invokeai/app/services/image_files/image_files_base.py @@ -4,6 +4,9 @@ from typing import Optional from PIL.Image import Image as PILImageType +from invokeai.app.invocations.metadata import MetadataField +from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField + class ImageFileStorageBase(ABC): """Low-level service responsible for storing and retrieving image files.""" @@ -30,8 +33,8 @@ class ImageFileStorageBase(ABC): self, image: PILImageType, image_name: str, - metadata: Optional[dict] = None, - workflow: Optional[str] = None, + metadata: Optional[MetadataField] = None, + workflow: Optional[WorkflowField] = None, thumbnail_size: int = 256, ) -> None: """Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image name, thumbnail name, and created timestamp.""" diff --git a/invokeai/app/services/image_files/image_files_disk.py b/invokeai/app/services/image_files/image_files_disk.py index 9111a71605..57c05562d5 100644 --- a/invokeai/app/services/image_files/image_files_disk.py +++ b/invokeai/app/services/image_files/image_files_disk.py @@ -1,5 +1,4 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team -import json from pathlib import Path from queue import Queue from typing import Dict, Optional, Union @@ -8,7 +7,9 @@ from PIL import Image, PngImagePlugin from PIL.Image import Image as PILImageType from send2trash import send2trash +from invokeai.app.invocations.metadata import MetadataField from invokeai.app.services.invoker import Invoker +from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail from .image_files_base import ImageFileStorageBase @@ -55,8 +56,8 @@ class DiskImageFileStorage(ImageFileStorageBase): self, image: PILImageType, image_name: str, - metadata: Optional[dict] = None, - workflow: Optional[str] = None, + metadata: Optional[MetadataField] = None, + workflow: Optional[WorkflowField] = None, thumbnail_size: int = 256, ) -> None: try: @@ -67,9 +68,9 @@ class DiskImageFileStorage(ImageFileStorageBase): if metadata is not None or workflow is not None: if metadata is not None: - pnginfo.add_text("invokeai_metadata", json.dumps(metadata)) + pnginfo.add_text("invokeai_metadata", metadata.model_dump_json()) if workflow is not None: - pnginfo.add_text("invokeai_workflow", workflow) + pnginfo.add_text("invokeai_workflow", workflow.model_dump_json()) else: # For uploaded images, we want to retain metadata. PIL strips it on save; manually add it back # TODO: retain non-invokeai metadata on save... 
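Context for the DiskImageFileStorage hunk above: metadata and workflow are no longer passed around as pre-serialized strings; the service now calls model_dump_json() on the structured MetadataField / WorkflowField and embeds the JSON in the PNG text chunks "invokeai_metadata" and "invokeai_workflow". A minimal standalone Pillow sketch of that round trip follows (illustration only, not part of this patch; the file name and sample payloads are invented):

import json

from PIL import Image
from PIL.PngImagePlugin import PngInfo

# Write: roughly what DiskImageFileStorage.save() does with the new fields.
pnginfo = PngInfo()
pnginfo.add_text("invokeai_metadata", json.dumps({"seed": 123}))  # metadata.model_dump_json() in the service
pnginfo.add_text("invokeai_workflow", json.dumps({"nodes": [], "edges": []}))  # workflow.model_dump_json()
Image.new("RGB", (64, 64)).save("example.png", pnginfo=pnginfo)

# Read back: PNG text chunks are exposed via the image's text mapping.
with Image.open("example.png") as im:
    metadata = json.loads(im.text["invokeai_metadata"])
    workflow = json.loads(im.text["invokeai_workflow"])

print(metadata["seed"], len(workflow["nodes"]))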
diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py index 7e74b06e9e..cd1db81857 100644 --- a/invokeai/app/services/image_records/image_records_base.py +++ b/invokeai/app/services/image_records/image_records_base.py @@ -2,6 +2,7 @@ from abc import ABC, abstractmethod from datetime import datetime from typing import Optional +from invokeai.app.invocations.metadata import MetadataField from invokeai.app.services.shared.pagination import OffsetPaginatedResults from .image_records_common import ImageCategory, ImageRecord, ImageRecordChanges, ResourceOrigin @@ -18,7 +19,7 @@ class ImageRecordStorageBase(ABC): pass @abstractmethod - def get_metadata(self, image_name: str) -> Optional[dict]: + def get_metadata(self, image_name: str) -> Optional[MetadataField]: """Gets an image's metadata'.""" pass @@ -78,7 +79,8 @@ class ImageRecordStorageBase(ABC): starred: Optional[bool] = False, session_id: Optional[str] = None, node_id: Optional[str] = None, - metadata: Optional[dict] = None, + metadata: Optional[MetadataField] = None, + workflow_id: Optional[str] = None, ) -> datetime: """Saves an image record.""" pass diff --git a/invokeai/app/services/image_records/image_records_common.py b/invokeai/app/services/image_records/image_records_common.py index 5a6e5652c9..6576fb9647 100644 --- a/invokeai/app/services/image_records/image_records_common.py +++ b/invokeai/app/services/image_records/image_records_common.py @@ -100,6 +100,7 @@ IMAGE_DTO_COLS = ", ".join( "width", "height", "session_id", + "workflow_id", "node_id", "is_intermediate", "created_at", @@ -140,6 +141,11 @@ class ImageRecord(BaseModelExcludeNull): description="The session ID that generated this image, if it is a generated image.", ) """The session ID that generated this image, if it is a generated image.""" + workflow_id: Optional[str] = Field( + default=None, + description="The workflow that generated this image.", + ) + """The workflow that generated this image.""" node_id: Optional[str] = Field( default=None, description="The node ID that generated this image, if it is a generated image.", @@ -184,6 +190,7 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord: width = image_dict.get("width", 0) height = image_dict.get("height", 0) session_id = image_dict.get("session_id", None) + workflow_id = image_dict.get("workflow_id", None) node_id = image_dict.get("node_id", None) created_at = image_dict.get("created_at", get_iso_timestamp()) updated_at = image_dict.get("updated_at", get_iso_timestamp()) @@ -198,6 +205,7 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord: width=width, height=height, session_id=session_id, + workflow_id=workflow_id, node_id=node_id, created_at=created_at, updated_at=updated_at, diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py index 33bf373a7d..7b60ec3d5b 100644 --- a/invokeai/app/services/image_records/image_records_sqlite.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -1,9 +1,9 @@ -import json import sqlite3 import threading from datetime import datetime from typing import Optional, Union, cast +from invokeai.app.invocations.baseinvocation import MetadataField, type_adapter_MetadataField from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.shared.sqlite import SqliteDatabase @@ -76,6 +76,16 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): """ ) 
+ if "workflow_id" not in columns: + self._cursor.execute( + """--sql + ALTER TABLE images + ADD COLUMN workflow_id TEXT; + -- TODO: This requires a migration: + -- FOREIGN KEY (workflow_id) REFERENCES workflows (workflow_id) ON DELETE SET NULL; + """ + ) + # Create the `images` table indices. self._cursor.execute( """--sql @@ -141,22 +151,26 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): return deserialize_image_record(dict(result)) - def get_metadata(self, image_name: str) -> Optional[dict]: + def get_metadata(self, image_name: str) -> Optional[MetadataField]: try: self._lock.acquire() self._cursor.execute( """--sql - SELECT images.metadata FROM images + SELECT metadata FROM images WHERE image_name = ?; """, (image_name,), ) result = cast(Optional[sqlite3.Row], self._cursor.fetchone()) - if not result or not result[0]: - return None - return json.loads(result[0]) + + if not result: + raise ImageRecordNotFoundException + + as_dict = dict(result) + metadata_raw = cast(Optional[str], as_dict.get("metadata", None)) + return type_adapter_MetadataField.validate_json(metadata_raw) if metadata_raw is not None else None except sqlite3.Error as e: self._conn.rollback() raise ImageRecordNotFoundException from e @@ -408,10 +422,11 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): starred: Optional[bool] = False, session_id: Optional[str] = None, node_id: Optional[str] = None, - metadata: Optional[dict] = None, + metadata: Optional[MetadataField] = None, + workflow_id: Optional[str] = None, ) -> datetime: try: - metadata_json = None if metadata is None else json.dumps(metadata) + metadata_json = metadata.model_dump_json() if metadata is not None else None self._lock.acquire() self._cursor.execute( """--sql @@ -424,10 +439,11 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): node_id, session_id, metadata, + workflow_id, is_intermediate, starred ) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?); + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); """, ( image_name, @@ -438,6 +454,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): node_id, session_id, metadata_json, + workflow_id, is_intermediate, starred, ), diff --git a/invokeai/app/services/images/images_base.py b/invokeai/app/services/images/images_base.py index ac7a4a2152..ebb40424bc 100644 --- a/invokeai/app/services/images/images_base.py +++ b/invokeai/app/services/images/images_base.py @@ -3,7 +3,7 @@ from typing import Callable, Optional from PIL.Image import Image as PILImageType -from invokeai.app.invocations.metadata import ImageMetadata +from invokeai.app.invocations.metadata import MetadataField from invokeai.app.services.image_records.image_records_common import ( ImageCategory, ImageRecord, @@ -12,6 +12,7 @@ from invokeai.app.services.image_records.image_records_common import ( ) from invokeai.app.services.images.images_common import ImageDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults +from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField class ImageServiceABC(ABC): @@ -50,8 +51,8 @@ class ImageServiceABC(ABC): session_id: Optional[str] = None, board_id: Optional[str] = None, is_intermediate: Optional[bool] = False, - metadata: Optional[dict] = None, - workflow: Optional[str] = None, + metadata: Optional[MetadataField] = None, + workflow: Optional[WorkflowField] = None, ) -> ImageDTO: """Creates an image, storing the file and its metadata.""" pass @@ -81,7 +82,7 @@ class ImageServiceABC(ABC): pass @abstractmethod - def get_metadata(self, 
image_name: str) -> ImageMetadata: + def get_metadata(self, image_name: str) -> Optional[MetadataField]: """Gets an image's metadata.""" pass diff --git a/invokeai/app/services/images/images_common.py b/invokeai/app/services/images/images_common.py index 325cecdd26..0464244b94 100644 --- a/invokeai/app/services/images/images_common.py +++ b/invokeai/app/services/images/images_common.py @@ -25,8 +25,6 @@ class ImageDTO(ImageRecord, ImageUrlsDTO): ) """The id of the board the image belongs to, if one exists.""" - pass - def image_record_to_dto( image_record: ImageRecord, diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py index 3c78c4f29a..e466e809b1 100644 --- a/invokeai/app/services/images/images_default.py +++ b/invokeai/app/services/images/images_default.py @@ -2,10 +2,10 @@ from typing import Optional from PIL.Image import Image as PILImageType -from invokeai.app.invocations.metadata import ImageMetadata +from invokeai.app.invocations.metadata import MetadataField from invokeai.app.services.invoker import Invoker from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from invokeai.app.util.metadata import get_metadata_graph_from_raw_session +from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField from ..image_files.image_files_common import ( ImageFileDeleteException, @@ -42,8 +42,8 @@ class ImageService(ImageServiceABC): session_id: Optional[str] = None, board_id: Optional[str] = None, is_intermediate: Optional[bool] = False, - metadata: Optional[dict] = None, - workflow: Optional[str] = None, + metadata: Optional[MetadataField] = None, + workflow: Optional[WorkflowField] = None, ) -> ImageDTO: if image_origin not in ResourceOrigin: raise InvalidOriginException @@ -56,6 +56,12 @@ class ImageService(ImageServiceABC): (width, height) = image.size try: + if workflow is not None: + created_workflow = self.__invoker.services.workflow_records.create(workflow) + workflow_id = created_workflow.model_dump()["id"] + else: + workflow_id = None + # TODO: Consider using a transaction here to ensure consistency between storage and database self.__invoker.services.image_records.save( # Non-nullable fields @@ -69,6 +75,7 @@ class ImageService(ImageServiceABC): # Nullable fields node_id=node_id, metadata=metadata, + workflow_id=workflow_id, session_id=session_id, ) if board_id is not None: @@ -146,25 +153,9 @@ class ImageService(ImageServiceABC): self.__invoker.services.logger.error("Problem getting image DTO") raise e - def get_metadata(self, image_name: str) -> ImageMetadata: + def get_metadata(self, image_name: str) -> Optional[MetadataField]: try: - image_record = self.__invoker.services.image_records.get(image_name) - metadata = self.__invoker.services.image_records.get_metadata(image_name) - - if not image_record.session_id: - return ImageMetadata(metadata=metadata) - - session_raw = self.__invoker.services.graph_execution_manager.get_raw(image_record.session_id) - graph = None - - if session_raw: - try: - graph = get_metadata_graph_from_raw_session(session_raw) - except Exception as e: - self.__invoker.services.logger.warn(f"Failed to parse session graph: {e}") - graph = None - - return ImageMetadata(graph=graph, metadata=metadata) + return self.__invoker.services.image_records.get_metadata(image_name) except ImageRecordNotFoundException: self.__invoker.services.logger.error("Image record not found") raise diff --git a/invokeai/app/services/shared/graph.py 
b/invokeai/app/services/shared/graph.py index 8f974f7c6b..0f703db749 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -439,6 +439,14 @@ class Graph(BaseModel): except Exception as e: raise UnknownGraphValidationError(f"Problem validating graph {e}") from e + def _is_destination_field_Any(self, edge: Edge) -> bool: + """Checks if the destination field for an edge is of type typing.Any""" + return get_input_field(self.get_node(edge.destination.node_id), edge.destination.field) == Any + + def _is_destination_field_list_of_Any(self, edge: Edge) -> bool: + """Checks if the destination field for an edge is of type typing.Any""" + return get_input_field(self.get_node(edge.destination.node_id), edge.destination.field) == list[Any] + def _validate_edge(self, edge: Edge): """Validates that a new edge doesn't create a cycle in the graph""" @@ -491,8 +499,19 @@ class Graph(BaseModel): f"Collector output type does not match collector input type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}" ) - # Validate if collector output type matches input type (if this edge results in both being set) - if isinstance(from_node, CollectInvocation) and edge.source.field == "collection": + # Validate that we are not connecting collector to iterator (currently unsupported) + if isinstance(from_node, CollectInvocation) and isinstance(to_node, IterateInvocation): + raise InvalidEdgeError( + f"Cannot connect collector to iterator: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}" + ) + + # Validate if collector output type matches input type (if this edge results in both being set) - skip if the destination field is not Any or list[Any] + if ( + isinstance(from_node, CollectInvocation) + and edge.source.field == "collection" + and not self._is_destination_field_list_of_Any(edge) + and not self._is_destination_field_Any(edge) + ): if not self._is_collector_connection_valid(edge.source.node_id, new_output=edge.destination): raise InvalidEdgeError( f"Collector input type does not match collector output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}" @@ -725,16 +744,15 @@ class Graph(BaseModel): # Get the input root type input_root_type = next(t[0] for t in type_degrees if t[1] == 0) # type: ignore - # Verify that all outputs are lists - # if not all((get_origin(f) == list for f in output_fields)): - # return False - # Verify that all outputs are lists if not all(is_list_or_contains_list(f) for f in output_fields): return False # Verify that all outputs match the input type (are a base class or the same class) - if not all((issubclass(input_root_type, get_args(f)[0]) for f in output_fields)): + if not all( + is_union_subtype(input_root_type, get_args(f)[0]) or issubclass(input_root_type, get_args(f)[0]) + for f in output_fields + ): return False return True diff --git a/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx index 57f06a0cea..4c0aa5e0e8 100644 --- a/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx @@ -27,7 +27,7 @@ import { setShouldShowImageDetails, setShouldShowProgressInViewer, } from 'features/ui/store/uiSlice'; -import { memo, useCallback, 
useMemo } from 'react'; +import { memo, useCallback } from 'react'; import { useHotkeys } from 'react-hotkeys-hook'; import { useTranslation } from 'react-i18next'; import { @@ -40,11 +40,13 @@ import { import { FaCircleNodes, FaEllipsis } from 'react-icons/fa6'; import { useGetImageDTOQuery, - useGetImageMetadataFromFileQuery, + useGetImageMetadataQuery, } from 'services/api/endpoints/images'; import { menuListMotionProps } from 'theme/components/menu'; +import { useDebounce } from 'use-debounce'; import { sentImageToImg2Img } from '../../store/actions'; import SingleSelectionMenuItems from '../ImageContextMenu/SingleSelectionMenuItems'; +import { useGetWorkflowQuery } from 'services/api/endpoints/workflows'; const currentImageButtonsSelector = createSelector( [stateSelector, activeTabNameSelector], @@ -89,7 +91,6 @@ const CurrentImageButtons = () => { shouldShowImageDetails, lastSelectedImage, shouldShowProgressInViewer, - shouldFetchMetadataFromApi, } = useAppSelector(currentImageButtonsSelector); const isUpscalingEnabled = useFeatureStatus('upscaling').isFeatureEnabled; @@ -104,23 +105,17 @@ const CurrentImageButtons = () => { lastSelectedImage?.image_name ?? skipToken ); - const getMetadataArg = useMemo(() => { - if (lastSelectedImage) { - return { image: lastSelectedImage, shouldFetchMetadataFromApi }; - } else { - return skipToken; - } - }, [lastSelectedImage, shouldFetchMetadataFromApi]); + const [debouncedImageName] = useDebounce(lastSelectedImage?.image_name, 300); + const [debouncedWorkflowId] = useDebounce( + lastSelectedImage?.workflow_id, + 300 + ); - const { metadata, workflow, isLoading } = useGetImageMetadataFromFileQuery( - getMetadataArg, - { - selectFromResult: (res) => ({ - isLoading: res.isFetching, - metadata: res?.currentData?.metadata, - workflow: res?.currentData?.workflow, - }), - } + const { data: metadata, isLoading: isLoadingMetadata } = + useGetImageMetadataQuery(debouncedImageName ?? skipToken); + + const { data: workflow, isLoading: isLoadingWorkflow } = useGetWorkflowQuery( + debouncedWorkflowId ?? 
skipToken ); const handleLoadWorkflow = useCallback(() => { @@ -257,7 +252,7 @@ const CurrentImageButtons = () => { } tooltip={`${t('nodes.loadWorkflow')} (W)`} aria-label={`${t('nodes.loadWorkflow')} (W)`} @@ -265,7 +260,7 @@ const CurrentImageButtons = () => { onClick={handleLoadWorkflow} /> } tooltip={`${t('parameters.usePrompt')} (P)`} aria-label={`${t('parameters.usePrompt')} (P)`} @@ -273,7 +268,7 @@ const CurrentImageButtons = () => { onClick={handleUsePrompt} /> } tooltip={`${t('parameters.useSeed')} (S)`} aria-label={`${t('parameters.useSeed')} (S)`} @@ -281,7 +276,7 @@ const CurrentImageButtons = () => { onClick={handleUseSeed} /> } tooltip={`${t('parameters.useAll')} (A)`} aria-label={`${t('parameters.useAll')} (A)`} diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx index 35a4e9f18c..38de235e38 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx @@ -1,8 +1,9 @@ import { Flex, MenuItem, Spinner } from '@chakra-ui/react'; import { useStore } from '@nanostores/react'; +import { skipToken } from '@reduxjs/toolkit/dist/query'; import { useAppToaster } from 'app/components/Toaster'; import { $customStarUI } from 'app/store/nanostores/customStarUI'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { useAppDispatch } from 'app/store/storeHooks'; import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice'; import { imagesToChangeSelected, @@ -32,12 +33,13 @@ import { import { FaCircleNodes } from 'react-icons/fa6'; import { MdStar, MdStarBorder } from 'react-icons/md'; import { - useGetImageMetadataFromFileQuery, + useGetImageMetadataQuery, useStarImagesMutation, useUnstarImagesMutation, } from 'services/api/endpoints/images'; +import { useGetWorkflowQuery } from 'services/api/endpoints/workflows'; import { ImageDTO } from 'services/api/types'; -import { configSelector } from '../../../system/store/configSelectors'; +import { useDebounce } from 'use-debounce'; import { sentImageToCanvas, sentImageToImg2Img } from '../../store/actions'; type SingleSelectionMenuItemsProps = { @@ -53,18 +55,16 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { const toaster = useAppToaster(); const isCanvasEnabled = useFeatureStatus('unifiedCanvas').isFeatureEnabled; - const { shouldFetchMetadataFromApi } = useAppSelector(configSelector); const customStarUi = useStore($customStarUI); - const { metadata, workflow, isLoading } = useGetImageMetadataFromFileQuery( - { image: imageDTO, shouldFetchMetadataFromApi }, - { - selectFromResult: (res) => ({ - isLoading: res.isFetching, - metadata: res?.currentData?.metadata, - workflow: res?.currentData?.workflow, - }), - } + const [debouncedImageName] = useDebounce(imageDTO?.image_name, 300); + const [debouncedWorkflowId] = useDebounce(imageDTO?.workflow_id, 300); + + const { data: metadata, isLoading: isLoadingMetadata } = + useGetImageMetadataQuery(debouncedImageName ?? skipToken); + + const { data: workflow, isLoading: isLoadingWorkflow } = useGetWorkflowQuery( + debouncedWorkflowId ?? 
skipToken ); const [starImages] = useStarImagesMutation(); @@ -181,17 +181,17 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { {t('parameters.downloadImage')} : } + icon={isLoadingWorkflow ? : } onClickCapture={handleLoadWorkflow} - isDisabled={isLoading || !workflow} + isDisabled={isLoadingWorkflow || !workflow} > {t('nodes.loadWorkflow')} : } + icon={isLoadingMetadata ? : } onClickCapture={handleRecallPrompt} isDisabled={ - isLoading || + isLoadingMetadata || (metadata?.positive_prompt === undefined && metadata?.negative_prompt === undefined) } @@ -199,16 +199,16 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { {t('parameters.usePrompt')} : } + icon={isLoadingMetadata ? : } onClickCapture={handleRecallSeed} - isDisabled={isLoading || metadata?.seed === undefined} + isDisabled={isLoadingMetadata || metadata?.seed === undefined} > {t('parameters.useSeed')} : } + icon={isLoadingMetadata ? : } onClickCapture={handleUseAllParameters} - isDisabled={isLoading || !metadata} + isDisabled={isLoadingMetadata || !metadata} > {t('parameters.useAll')} diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx index e9cb3ffcaf..f6820b9d20 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx @@ -9,16 +9,17 @@ import { Tabs, Text, } from '@chakra-ui/react'; +import { skipToken } from '@reduxjs/toolkit/dist/query'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; +import ScrollableContent from 'features/nodes/components/sidePanel/ScrollableContent'; import { memo } from 'react'; -import { useGetImageMetadataFromFileQuery } from 'services/api/endpoints/images'; +import { useTranslation } from 'react-i18next'; +import { useGetImageMetadataQuery } from 'services/api/endpoints/images'; +import { useGetWorkflowQuery } from 'services/api/endpoints/workflows'; import { ImageDTO } from 'services/api/types'; +import { useDebounce } from 'use-debounce'; import DataViewer from './DataViewer'; import ImageMetadataActions from './ImageMetadataActions'; -import { useAppSelector } from '../../../../app/store/storeHooks'; -import { configSelector } from '../../../system/store/configSelectors'; -import { useTranslation } from 'react-i18next'; -import ScrollableContent from 'features/nodes/components/sidePanel/ScrollableContent'; type ImageMetadataViewerProps = { image: ImageDTO; @@ -32,16 +33,15 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => { // }); const { t } = useTranslation(); - const { shouldFetchMetadataFromApi } = useAppSelector(configSelector); + const [debouncedImageName] = useDebounce(image.image_name, 300); + const [debouncedWorkflowId] = useDebounce(image.workflow_id, 300); - const { metadata, workflow } = useGetImageMetadataFromFileQuery( - { image, shouldFetchMetadataFromApi }, - { - selectFromResult: (res) => ({ - metadata: res?.currentData?.metadata, - workflow: res?.currentData?.workflow, - }), - } + const { data: metadata } = useGetImageMetadataQuery( + debouncedImageName ?? skipToken + ); + + const { data: workflow } = useGetWorkflowQuery( + debouncedWorkflowId ?? 
skipToken ); return ( diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/EmbedWorkflowCheckbox.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/EmbedWorkflowCheckbox.tsx index 447dfcbd97..3c06b9f9da 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/EmbedWorkflowCheckbox.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/EmbedWorkflowCheckbox.tsx @@ -1,13 +1,13 @@ import { Checkbox, Flex, FormControl, FormLabel } from '@chakra-ui/react'; import { useAppDispatch } from 'app/store/storeHooks'; import { useEmbedWorkflow } from 'features/nodes/hooks/useEmbedWorkflow'; -import { useHasImageOutput } from 'features/nodes/hooks/useHasImageOutput'; +import { useWithWorkflow } from 'features/nodes/hooks/useWithWorkflow'; import { nodeEmbedWorkflowChanged } from 'features/nodes/store/nodesSlice'; import { ChangeEvent, memo, useCallback } from 'react'; const EmbedWorkflowCheckbox = ({ nodeId }: { nodeId: string }) => { const dispatch = useAppDispatch(); - const hasImageOutput = useHasImageOutput(nodeId); + const withWorkflow = useWithWorkflow(nodeId); const embedWorkflow = useEmbedWorkflow(nodeId); const handleChange = useCallback( (e: ChangeEvent) => { @@ -21,7 +21,7 @@ const EmbedWorkflowCheckbox = ({ nodeId }: { nodeId: string }) => { [dispatch, nodeId] ); - if (!hasImageOutput) { + if (!withWorkflow) { return null; } diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx index ec5085221e..1424c6b837 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx @@ -1,11 +1,11 @@ import { Flex } from '@chakra-ui/react'; +import { useHasImageOutput } from 'features/nodes/hooks/useHasImageOutput'; import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/types/constants'; import { memo } from 'react'; +import { useFeatureStatus } from '../../../../../system/hooks/useFeatureStatus'; import EmbedWorkflowCheckbox from './EmbedWorkflowCheckbox'; import SaveToGalleryCheckbox from './SaveToGalleryCheckbox'; import UseCacheCheckbox from './UseCacheCheckbox'; -import { useHasImageOutput } from 'features/nodes/hooks/useHasImageOutput'; -import { useFeatureStatus } from '../../../../../system/hooks/useFeatureStatus'; type Props = { nodeId: string; diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useWithWorkflow.ts b/invokeai/frontend/web/src/features/nodes/hooks/useWithWorkflow.ts new file mode 100644 index 0000000000..3c83e01731 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/hooks/useWithWorkflow.ts @@ -0,0 +1,31 @@ +import { createSelector } from '@reduxjs/toolkit'; +import { stateSelector } from 'app/store/store'; +import { useAppSelector } from 'app/store/storeHooks'; +import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; +import { useMemo } from 'react'; +import { isInvocationNode } from '../types/types'; + +export const useWithWorkflow = (nodeId: string) => { + const selector = useMemo( + () => + createSelector( + stateSelector, + ({ nodes }) => { + const node = nodes.nodes.find((node) => node.id === nodeId); + if (!isInvocationNode(node)) { + return false; + } + const nodeTemplate = nodes.nodeTemplates[node?.data.type 
?? '']; + if (!nodeTemplate) { + return false; + } + return nodeTemplate.withWorkflow; + }, + defaultSelectorOptions + ), + [nodeId] + ); + + const withWorkflow = useAppSelector(selector); + return withWorkflow; +}; diff --git a/invokeai/frontend/web/src/features/nodes/store/util/validateSourceAndTargetTypes.ts b/invokeai/frontend/web/src/features/nodes/store/util/validateSourceAndTargetTypes.ts index 8c2bef34fe..2f47e47a78 100644 --- a/invokeai/frontend/web/src/features/nodes/store/util/validateSourceAndTargetTypes.ts +++ b/invokeai/frontend/web/src/features/nodes/store/util/validateSourceAndTargetTypes.ts @@ -69,6 +69,8 @@ export const validateSourceAndTargetTypes = ( (sourceType === 'integer' || sourceType === 'float') && targetType === 'string'; + const isTargetAnyType = targetType === 'Any'; + return ( isCollectionItemToNonCollection || isNonCollectionToCollectionItem || @@ -76,6 +78,7 @@ export const validateSourceAndTargetTypes = ( isGenericCollectionToAnyCollectionOrPolymorphic || isCollectionToGenericCollection || isIntToFloat || - isIntOrFloatToString + isIntOrFloatToString || + isTargetAnyType ); }; diff --git a/invokeai/frontend/web/src/features/nodes/types/constants.ts b/invokeai/frontend/web/src/features/nodes/types/constants.ts index 076f71cc02..c6eec736da 100644 --- a/invokeai/frontend/web/src/features/nodes/types/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/types/constants.ts @@ -33,6 +33,8 @@ export const COLLECTION_TYPES: FieldType[] = [ 'ColorCollection', 'T2IAdapterCollection', 'IPAdapterCollection', + 'MetadataItemCollection', + 'MetadataCollection', ]; export const POLYMORPHIC_TYPES: FieldType[] = [ @@ -47,6 +49,7 @@ export const POLYMORPHIC_TYPES: FieldType[] = [ 'ColorPolymorphic', 'T2IAdapterPolymorphic', 'IPAdapterPolymorphic', + 'MetadataItemPolymorphic', ]; export const MODEL_TYPES: FieldType[] = [ @@ -78,6 +81,8 @@ export const COLLECTION_MAP: FieldTypeMapWithNumber = { ColorField: 'ColorCollection', T2IAdapterField: 'T2IAdapterCollection', IPAdapterField: 'IPAdapterCollection', + MetadataItemField: 'MetadataItemCollection', + MetadataField: 'MetadataCollection', }; export const isCollectionItemType = ( itemType: string | undefined @@ -97,6 +102,7 @@ export const SINGLE_TO_POLYMORPHIC_MAP: FieldTypeMapWithNumber = { ColorField: 'ColorPolymorphic', T2IAdapterField: 'T2IAdapterPolymorphic', IPAdapterField: 'IPAdapterPolymorphic', + MetadataItemField: 'MetadataItemPolymorphic', }; export const POLYMORPHIC_TO_SINGLE_MAP: FieldTypeMap = { @@ -111,6 +117,7 @@ export const POLYMORPHIC_TO_SINGLE_MAP: FieldTypeMap = { ColorPolymorphic: 'ColorField', T2IAdapterPolymorphic: 'T2IAdapterField', IPAdapterPolymorphic: 'IPAdapterField', + MetadataItemPolymorphic: 'MetadataItemField', }; export const TYPES_WITH_INPUT_COMPONENTS: FieldType[] = [ @@ -144,6 +151,37 @@ export const isPolymorphicItemType = ( Boolean(itemType && itemType in SINGLE_TO_POLYMORPHIC_MAP); export const FIELDS: Record = { + Any: { + color: 'gray.500', + description: 'Any field type is accepted.', + title: 'Any', + }, + MetadataField: { + color: 'gray.500', + description: 'A metadata dict.', + title: 'Metadata Dict', + }, + MetadataCollection: { + color: 'gray.500', + description: 'A collection of metadata dicts.', + title: 'Metadata Dict Collection', + }, + MetadataItemField: { + color: 'gray.500', + description: 'A metadata item.', + title: 'Metadata Item', + }, + MetadataItemCollection: { + color: 'gray.500', + description: 'Any field type is accepted.', + title: 'Metadata Item 
Collection', + }, + MetadataItemPolymorphic: { + color: 'gray.500', + description: + 'MetadataItem or MetadataItemCollection field types are accepted.', + title: 'Metadata Item Polymorphic', + }, boolean: { color: 'green.500', description: t('nodes.booleanDescription'), diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index 87c716bb81..ba1ca05c4d 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -54,6 +54,10 @@ export type InvocationTemplate = { * The type of this node's output */ outputType: string; // TODO: generate a union of output types + /** + * Whether or not this invocation supports workflows + */ + withWorkflow: boolean; /** * The invocation's version. */ @@ -72,6 +76,7 @@ export type FieldUIConfig = { // TODO: Get this from the OpenAPI schema? may be tricky... export const zFieldType = z.enum([ + 'Any', 'BoardField', 'boolean', 'BooleanCollection', @@ -109,6 +114,11 @@ export const zFieldType = z.enum([ 'LatentsPolymorphic', 'LoRAModelField', 'MainModelField', + 'MetadataField', + 'MetadataCollection', + 'MetadataItemField', + 'MetadataItemCollection', + 'MetadataItemPolymorphic', 'ONNXModelField', 'Scheduler', 'SDXLMainModelField', @@ -685,6 +695,57 @@ export type CollectionItemInputFieldValue = z.infer< typeof zCollectionItemInputFieldValue >; +export const zMetadataItemField = z.object({ + label: z.string(), + value: z.any(), +}); +export type MetadataItemField = z.infer; + +export const zMetadataItemInputFieldValue = zInputFieldValueBase.extend({ + type: z.literal('MetadataItemField'), + value: zMetadataItemField.optional(), +}); +export type MetadataItemInputFieldValue = z.infer< + typeof zMetadataItemInputFieldValue +>; + +export const zMetadataItemCollectionInputFieldValue = + zInputFieldValueBase.extend({ + type: z.literal('MetadataItemCollection'), + value: z.array(zMetadataItemField).optional(), + }); +export type MetadataItemCollectionInputFieldValue = z.infer< + typeof zMetadataItemCollectionInputFieldValue +>; + +export const zMetadataItemPolymorphicInputFieldValue = + zInputFieldValueBase.extend({ + type: z.literal('MetadataItemPolymorphic'), + value: z + .union([zMetadataItemField, z.array(zMetadataItemField)]) + .optional(), + }); +export type MetadataItemPolymorphicInputFieldValue = z.infer< + typeof zMetadataItemPolymorphicInputFieldValue +>; + +export const zMetadataField = z.record(z.any()); +export type MetadataField = z.infer; + +export const zMetadataInputFieldValue = zInputFieldValueBase.extend({ + type: z.literal('MetadataField'), + value: zMetadataField.optional(), +}); +export type MetadataInputFieldValue = z.infer; + +export const zMetadataCollectionInputFieldValue = zInputFieldValueBase.extend({ + type: z.literal('MetadataCollection'), + value: z.array(zMetadataField).optional(), +}); +export type MetadataCollectionInputFieldValue = z.infer< + typeof zMetadataCollectionInputFieldValue +>; + export const zColorField = z.object({ r: z.number().int().min(0).max(255), g: z.number().int().min(0).max(255), @@ -723,7 +784,13 @@ export type SchedulerInputFieldValue = z.infer< typeof zSchedulerInputFieldValue >; +export const zAnyInputFieldValue = zInputFieldValueBase.extend({ + type: z.literal('Any'), + value: z.any().optional(), +}); + export const zInputFieldValue = z.discriminatedUnion('type', [ + zAnyInputFieldValue, zBoardInputFieldValue, zBooleanCollectionInputFieldValue, 
zBooleanInputFieldValue, @@ -774,6 +841,11 @@ export const zInputFieldValue = z.discriminatedUnion('type', [ zUNetInputFieldValue, zVaeInputFieldValue, zVaeModelInputFieldValue, + zMetadataItemInputFieldValue, + zMetadataItemCollectionInputFieldValue, + zMetadataItemPolymorphicInputFieldValue, + zMetadataInputFieldValue, + zMetadataCollectionInputFieldValue, ]); export type InputFieldValue = z.infer; @@ -786,6 +858,11 @@ export type InputFieldTemplateBase = { fieldKind: 'input'; } & _InputField; +export type AnyInputFieldTemplate = InputFieldTemplateBase & { + type: 'Any'; + default: undefined; +}; + export type IntegerInputFieldTemplate = InputFieldTemplateBase & { type: 'integer'; default: number; @@ -939,6 +1016,11 @@ export type UNetInputFieldTemplate = InputFieldTemplateBase & { type: 'UNetField'; }; +export type MetadataItemFieldTemplate = InputFieldTemplateBase & { + default: undefined; + type: 'MetadataItemField'; +}; + export type ClipInputFieldTemplate = InputFieldTemplateBase & { default: undefined; type: 'ClipField'; @@ -1087,6 +1169,34 @@ export type WorkflowInputFieldTemplate = InputFieldTemplateBase & { type: 'WorkflowField'; }; +export type MetadataItemInputFieldTemplate = InputFieldTemplateBase & { + default: undefined; + type: 'MetadataItemField'; +}; + +export type MetadataItemCollectionInputFieldTemplate = + InputFieldTemplateBase & { + default: undefined; + type: 'MetadataItemCollection'; + }; + +export type MetadataItemPolymorphicInputFieldTemplate = Omit< + MetadataItemInputFieldTemplate, + 'type' +> & { + type: 'MetadataItemPolymorphic'; +}; + +export type MetadataInputFieldTemplate = InputFieldTemplateBase & { + default: undefined; + type: 'MetadataField'; +}; + +export type MetadataCollectionInputFieldTemplate = InputFieldTemplateBase & { + default: undefined; + type: 'MetadataCollection'; +}; + /** * An input field template is generated on each page load from the OpenAPI schema. * @@ -1094,6 +1204,7 @@ export type WorkflowInputFieldTemplate = InputFieldTemplateBase & { * maximum length, pattern to match, etc). 
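Note: the metadata value schemas added above are ordinary zod schemas, so their behaviour can be checked in isolation. The snippet below is only an illustration: the two schemas are re-declared locally rather than imported from types.ts, and the labels and values are made up.

import { z } from 'zod';

// Local copies of the schemas added to types.ts above, for illustration only.
const zMetadataItemField = z.object({ label: z.string(), value: z.any() });
const zMetadataField = z.record(z.any());

// A metadata item is a labelled value of arbitrary shape...
const item = zMetadataItemField.parse({ label: 'cfg_scale', value: 7.5 });

// ...and a metadata dict is a free-form, string-keyed record.
const metadata = zMetadataField.parse({ seed: 123, positive_prompt: 'a cat' });

// Shapes that do not match are rejected rather than silently accepted.
const result = zMetadataItemField.safeParse({ value: 7.5 }); // missing `label`

console.log(item.label, Object.keys(metadata), result.success);
// -> cfg_scale [ 'seed', 'positive_prompt' ] false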
*/ export type InputFieldTemplate = + | AnyInputFieldTemplate | BoardInputFieldTemplate | BooleanCollectionInputFieldTemplate | BooleanPolymorphicInputFieldTemplate @@ -1143,7 +1254,12 @@ export type InputFieldTemplate = | T2IAdapterPolymorphicInputFieldTemplate | UNetInputFieldTemplate | VaeInputFieldTemplate - | VaeModelInputFieldTemplate; + | VaeModelInputFieldTemplate + | MetadataItemInputFieldTemplate + | MetadataItemCollectionInputFieldTemplate + | MetadataInputFieldTemplate + | MetadataItemPolymorphicInputFieldTemplate + | MetadataCollectionInputFieldTemplate; export const isInputFieldValue = ( field?: InputFieldValue | OutputFieldValue @@ -1264,7 +1380,7 @@ export const isInvocationFieldSchema = ( export type InvocationEdgeExtra = { type: 'default' | 'collapsed' }; -const zLoRAMetadataItem = z.object({ +export const zLoRAMetadataItem = z.object({ lora: zLoRAModelField.deepPartial(), weight: z.number(), }); diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts index 3fd44207c0..92e44e9ab2 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts @@ -7,6 +7,7 @@ import { startCase, } from 'lodash-es'; import { OpenAPIV3_1 } from 'openapi-types'; +import { ControlField } from 'services/api/types'; import { COLLECTION_MAP, POLYMORPHIC_TYPES, @@ -15,36 +16,70 @@ import { isPolymorphicItemType, } from '../types/constants'; import { + AnyInputFieldTemplate, + BoardInputFieldTemplate, BooleanCollectionInputFieldTemplate, BooleanInputFieldTemplate, + BooleanPolymorphicInputFieldTemplate, ClipInputFieldTemplate, CollectionInputFieldTemplate, CollectionItemInputFieldTemplate, + ColorCollectionInputFieldTemplate, ColorInputFieldTemplate, + ColorPolymorphicInputFieldTemplate, + ConditioningCollectionInputFieldTemplate, + ConditioningField, ConditioningInputFieldTemplate, + ConditioningPolymorphicInputFieldTemplate, + ControlCollectionInputFieldTemplate, ControlInputFieldTemplate, ControlNetModelInputFieldTemplate, + ControlPolymorphicInputFieldTemplate, DenoiseMaskInputFieldTemplate, EnumInputFieldTemplate, FieldType, FloatCollectionInputFieldTemplate, - FloatPolymorphicInputFieldTemplate, FloatInputFieldTemplate, + FloatPolymorphicInputFieldTemplate, + IPAdapterCollectionInputFieldTemplate, + IPAdapterField, + IPAdapterInputFieldTemplate, + IPAdapterModelInputFieldTemplate, + IPAdapterPolymorphicInputFieldTemplate, ImageCollectionInputFieldTemplate, + ImageField, ImageInputFieldTemplate, + ImagePolymorphicInputFieldTemplate, + InputFieldTemplate, InputFieldTemplateBase, IntegerCollectionInputFieldTemplate, IntegerInputFieldTemplate, + IntegerPolymorphicInputFieldTemplate, InvocationFieldSchema, InvocationSchemaObject, + LatentsCollectionInputFieldTemplate, + LatentsField, LatentsInputFieldTemplate, + LatentsPolymorphicInputFieldTemplate, LoRAModelInputFieldTemplate, MainModelInputFieldTemplate, + MetadataCollectionInputFieldTemplate, + MetadataInputFieldTemplate, + MetadataItemCollectionInputFieldTemplate, + MetadataItemInputFieldTemplate, + MetadataItemPolymorphicInputFieldTemplate, + OpenAPIV3_1SchemaOrRef, SDXLMainModelInputFieldTemplate, SDXLRefinerModelInputFieldTemplate, SchedulerInputFieldTemplate, StringCollectionInputFieldTemplate, StringInputFieldTemplate, + StringPolymorphicInputFieldTemplate, + T2IAdapterCollectionInputFieldTemplate, + T2IAdapterField, + 
T2IAdapterInputFieldTemplate, + T2IAdapterModelInputFieldTemplate, + T2IAdapterPolymorphicInputFieldTemplate, UNetInputFieldTemplate, VaeInputFieldTemplate, VaeModelInputFieldTemplate, @@ -52,36 +87,7 @@ import { isNonArraySchemaObject, isRefObject, isSchemaObject, - ControlPolymorphicInputFieldTemplate, - ColorPolymorphicInputFieldTemplate, - ColorCollectionInputFieldTemplate, - IntegerPolymorphicInputFieldTemplate, - StringPolymorphicInputFieldTemplate, - BooleanPolymorphicInputFieldTemplate, - ImagePolymorphicInputFieldTemplate, - LatentsPolymorphicInputFieldTemplate, - LatentsCollectionInputFieldTemplate, - ConditioningPolymorphicInputFieldTemplate, - ConditioningCollectionInputFieldTemplate, - ControlCollectionInputFieldTemplate, - ImageField, - LatentsField, - ConditioningField, - IPAdapterField, - IPAdapterInputFieldTemplate, - IPAdapterModelInputFieldTemplate, - IPAdapterPolymorphicInputFieldTemplate, - IPAdapterCollectionInputFieldTemplate, - T2IAdapterField, - T2IAdapterInputFieldTemplate, - T2IAdapterModelInputFieldTemplate, - T2IAdapterPolymorphicInputFieldTemplate, - T2IAdapterCollectionInputFieldTemplate, - BoardInputFieldTemplate, - InputFieldTemplate, - OpenAPIV3_1SchemaOrRef, } from '../types/types'; -import { ControlField } from 'services/api/types'; export type BaseFieldProperties = 'name' | 'title' | 'description'; @@ -851,6 +857,78 @@ const buildCollectionItemInputFieldTemplate = ({ return template; }; +const buildAnyInputFieldTemplate = ({ + baseField, +}: BuildInputFieldArg): AnyInputFieldTemplate => { + const template: AnyInputFieldTemplate = { + ...baseField, + type: 'Any', + default: undefined, + }; + + return template; +}; + +const buildMetadataItemInputFieldTemplate = ({ + baseField, +}: BuildInputFieldArg): MetadataItemInputFieldTemplate => { + const template: MetadataItemInputFieldTemplate = { + ...baseField, + type: 'MetadataItemField', + default: undefined, + }; + + return template; +}; + +const buildMetadataItemCollectionInputFieldTemplate = ({ + baseField, +}: BuildInputFieldArg): MetadataItemCollectionInputFieldTemplate => { + const template: MetadataItemCollectionInputFieldTemplate = { + ...baseField, + type: 'MetadataItemCollection', + default: undefined, + }; + + return template; +}; + +const buildMetadataItemPolymorphicInputFieldTemplate = ({ + baseField, +}: BuildInputFieldArg): MetadataItemPolymorphicInputFieldTemplate => { + const template: MetadataItemPolymorphicInputFieldTemplate = { + ...baseField, + type: 'MetadataItemPolymorphic', + default: undefined, + }; + + return template; +}; + +const buildMetadataDictInputFieldTemplate = ({ + baseField, +}: BuildInputFieldArg): MetadataInputFieldTemplate => { + const template: MetadataInputFieldTemplate = { + ...baseField, + type: 'MetadataField', + default: undefined, + }; + + return template; +}; + +const buildMetadataCollectionInputFieldTemplate = ({ + baseField, +}: BuildInputFieldArg): MetadataCollectionInputFieldTemplate => { + const template: MetadataCollectionInputFieldTemplate = { + ...baseField, + type: 'MetadataCollection', + default: undefined, + }; + + return template; +}; + const buildColorInputFieldTemplate = ({ schemaObject, baseField, @@ -1012,6 +1090,7 @@ const TEMPLATE_BUILDER_MAP: { [key in FieldType]?: (arg: BuildInputFieldArg) => InputFieldTemplate; } = { BoardField: buildBoardInputFieldTemplate, + Any: buildAnyInputFieldTemplate, boolean: buildBooleanInputFieldTemplate, BooleanCollection: buildBooleanCollectionInputFieldTemplate, BooleanPolymorphic: 
buildBooleanPolymorphicInputFieldTemplate, @@ -1047,6 +1126,11 @@ const TEMPLATE_BUILDER_MAP: { LatentsField: buildLatentsInputFieldTemplate, LatentsPolymorphic: buildLatentsPolymorphicInputFieldTemplate, LoRAModelField: buildLoRAModelInputFieldTemplate, + MetadataItemField: buildMetadataItemInputFieldTemplate, + MetadataItemCollection: buildMetadataItemCollectionInputFieldTemplate, + MetadataItemPolymorphic: buildMetadataItemPolymorphicInputFieldTemplate, + MetadataField: buildMetadataDictInputFieldTemplate, + MetadataCollection: buildMetadataCollectionInputFieldTemplate, MainModelField: buildMainModelInputFieldTemplate, Scheduler: buildSchedulerInputFieldTemplate, SDXLMainModelField: buildSDXLMainModelInputFieldTemplate, diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts index 97f520379a..ca2513649d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts @@ -3,6 +3,7 @@ import { FieldType, InputFieldTemplate, InputFieldValue } from '../types/types'; const FIELD_VALUE_FALLBACK_MAP: { [key in FieldType]: InputFieldValue['value']; } = { + Any: undefined, enum: '', BoardField: undefined, boolean: false, @@ -38,6 +39,11 @@ const FIELD_VALUE_FALLBACK_MAP: { LatentsCollection: [], LatentsField: undefined, LatentsPolymorphic: undefined, + MetadataItemField: undefined, + MetadataItemCollection: [], + MetadataItemPolymorphic: undefined, + MetadataField: undefined, + MetadataCollection: [], LoRAModelField: undefined, MainModelField: undefined, ONNXModelField: undefined, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addControlNetToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addControlNetToLinearGraph.ts index 37bd82d4f8..60d4e36dca 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addControlNetToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addControlNetToLinearGraph.ts @@ -5,14 +5,14 @@ import { CollectInvocation, ControlField, ControlNetInvocation, - MetadataAccumulatorInvocation, + CoreMetadataInvocation, } from 'services/api/types'; import { NonNullableGraph } from '../../types/types'; import { CANVAS_COHERENCE_DENOISE_LATENTS, CONTROL_NET_COLLECT, - METADATA_ACCUMULATOR, } from './constants'; +import { upsertMetadata } from './metadata'; export const addControlNetToLinearGraph = ( state: RootState, @@ -23,9 +23,11 @@ export const addControlNetToLinearGraph = ( (ca) => ca.model?.base_model === state.generation.model?.base_model ); - const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as - | MetadataAccumulatorInvocation - | undefined; + // const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as + // | MetadataAccumulatorInvocation + // | undefined; + + const controlNetMetadata: CoreMetadataInvocation['controlnets'] = []; if (validControlNets.length) { // Even though denoise_latents' control input is polymorphic, keep it simple and always use a collect @@ -99,15 +101,9 @@ export const addControlNetToLinearGraph = ( graph.nodes[controlNetNode.id] = controlNetNode as ControlNetInvocation; - if (metadataAccumulator?.controlnets) { - // metadata accumulator only needs a control field - not the whole node - // extract what we need and add to the accumulator - const controlField = omit(controlNetNode, [ - 'id', - 'type', - ]) as ControlField; - 
metadataAccumulator.controlnets.push(controlField); - } + controlNetMetadata.push( + omit(controlNetNode, ['id', 'type', 'is_intermediate']) as ControlField + ); graph.edges.push({ source: { node_id: controlNetNode.id, field: 'control' }, @@ -117,5 +113,6 @@ export const addControlNetToLinearGraph = ( }, }); }); + upsertMetadata(graph, { controlnets: controlNetMetadata }); } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addHrfToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addHrfToGraph.ts index 4b4a8a8a03..8c23ae667e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addHrfToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addHrfToGraph.ts @@ -1,25 +1,25 @@ +import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { DenoiseLatentsInvocation, - ResizeLatentsInvocation, - NoiseInvocation, - LatentsToImageInvocation, Edge, + LatentsToImageInvocation, + NoiseInvocation, + ResizeLatentsInvocation, } from 'services/api/types'; import { - LATENTS_TO_IMAGE, DENOISE_LATENTS, - NOISE, - MAIN_MODEL_LOADER, - METADATA_ACCUMULATOR, - LATENTS_TO_IMAGE_HRF, DENOISE_LATENTS_HRF, - RESCALE_LATENTS, + LATENTS_TO_IMAGE, + LATENTS_TO_IMAGE_HRF, + MAIN_MODEL_LOADER, + NOISE, NOISE_HRF, + RESCALE_LATENTS, VAE_LOADER, } from './constants'; -import { logger } from 'app/logging/logger'; +import { upsertMetadata } from './metadata'; // Copy certain connections from previous DENOISE_LATENTS to new DENOISE_LATENTS_HRF. function copyConnectionsToDenoiseLatentsHrf(graph: NonNullableGraph): void { @@ -71,10 +71,8 @@ export const addHrfToGraph = ( } const log = logger('txt2img'); - const { vae } = state.generation; + const { vae, hrfWidth, hrfHeight, hrfStrength } = state.generation; const isAutoVae = !vae; - const hrfWidth = state.generation.hrfWidth; - const hrfHeight = state.generation.hrfHeight; // Pre-existing (original) graph nodes. const originalDenoiseLatentsNode = graph.nodes[DENOISE_LATENTS] as @@ -116,7 +114,7 @@ export const addHrfToGraph = ( cfg_scale: originalDenoiseLatentsNode?.cfg_scale, scheduler: originalDenoiseLatentsNode?.scheduler, steps: originalDenoiseLatentsNode?.steps, - denoising_start: 1 - state.generation.hrfStrength, + denoising_start: 1 - hrfStrength, denoising_end: 1, }; @@ -221,16 +219,6 @@ export const addHrfToGraph = ( field: 'latents', }, }, - { - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: LATENTS_TO_IMAGE_HRF, - field: 'metadata', - }, - }, { source: { node_id: isAutoVae ? 
MAIN_MODEL_LOADER : VAE_LOADER, @@ -243,5 +231,11 @@ export const addHrfToGraph = ( } ); + upsertMetadata(graph, { + hrf_height: hrfHeight, + hrf_width: hrfWidth, + hrf_strength: hrfStrength, + }); + copyConnectionsToDenoiseLatentsHrf(graph); }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addIPAdapterToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addIPAdapterToLinearGraph.ts index 19bf7d8338..93c6cdb284 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addIPAdapterToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addIPAdapterToLinearGraph.ts @@ -1,16 +1,18 @@ import { RootState } from 'app/store/store'; import { selectValidIPAdapters } from 'features/controlAdapters/store/controlAdaptersSlice'; +import { omit } from 'lodash-es'; import { CollectInvocation, + CoreMetadataInvocation, IPAdapterInvocation, - MetadataAccumulatorInvocation, + IPAdapterMetadataField, } from 'services/api/types'; import { NonNullableGraph } from '../../types/types'; import { CANVAS_COHERENCE_DENOISE_LATENTS, IP_ADAPTER_COLLECT, - METADATA_ACCUMULATOR, } from './constants'; +import { upsertMetadata } from './metadata'; export const addIPAdapterToLinearGraph = ( state: RootState, @@ -21,10 +23,6 @@ export const addIPAdapterToLinearGraph = ( (ca) => ca.model?.base_model === state.generation.model?.base_model ); - const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as - | MetadataAccumulatorInvocation - | undefined; - if (validIPAdapters.length) { // Even though denoise_latents' control input is polymorphic, keep it simple and always use a collect const ipAdapterCollectNode: CollectInvocation = { @@ -50,6 +48,7 @@ export const addIPAdapterToLinearGraph = ( }, }); } + const ipAdapterMetdata: CoreMetadataInvocation['ipAdapters'] = []; validIPAdapters.forEach((ipAdapter) => { if (!ipAdapter.model) { @@ -76,19 +75,13 @@ export const addIPAdapterToLinearGraph = ( graph.nodes[ipAdapterNode.id] = ipAdapterNode as IPAdapterInvocation; - if (metadataAccumulator?.ipAdapters) { - const ipAdapterField = { - image: { - image_name: ipAdapter.controlImage, - }, - weight, - ip_adapter_model: model, - begin_step_percent: beginStepPct, - end_step_percent: endStepPct, - }; - - metadataAccumulator.ipAdapters.push(ipAdapterField); - } + ipAdapterMetdata.push( + omit(ipAdapterNode, [ + 'id', + 'type', + 'is_intermediate', + ]) as IPAdapterMetadataField + ); graph.edges.push({ source: { node_id: ipAdapterNode.id, field: 'ip_adapter' }, @@ -98,5 +91,7 @@ export const addIPAdapterToLinearGraph = ( }, }); }); + + upsertMetadata(graph, { ipAdapters: ipAdapterMetdata }); } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addLoRAsToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addLoRAsToGraph.ts index e199a78a20..66c2bd0444 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addLoRAsToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addLoRAsToGraph.ts @@ -2,20 +2,20 @@ import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { forEach, size } from 'lodash-es'; import { + CoreMetadataInvocation, LoraLoaderInvocation, - MetadataAccumulatorInvocation, } from 'services/api/types'; import { + CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_INPAINT_GRAPH, CANVAS_OUTPAINT_GRAPH, - CANVAS_COHERENCE_DENOISE_LATENTS, CLIP_SKIP, LORA_LOADER, MAIN_MODEL_LOADER, - METADATA_ACCUMULATOR, 
NEGATIVE_CONDITIONING, POSITIVE_CONDITIONING, } from './constants'; +import { upsertMetadata } from './metadata'; export const addLoRAsToGraph = ( state: RootState, @@ -33,29 +33,29 @@ export const addLoRAsToGraph = ( const { loras } = state.lora; const loraCount = size(loras); - const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as - | MetadataAccumulatorInvocation - | undefined; - if (loraCount > 0) { - // Remove modelLoaderNodeId unet connection to feed it to LoRAs - graph.edges = graph.edges.filter( - (e) => - !( - e.source.node_id === modelLoaderNodeId && - ['unet'].includes(e.source.field) - ) - ); - // Remove CLIP_SKIP connections to conditionings to feed it through LoRAs - graph.edges = graph.edges.filter( - (e) => - !(e.source.node_id === CLIP_SKIP && ['clip'].includes(e.source.field)) - ); + if (loraCount === 0) { + return; } + // Remove modelLoaderNodeId unet connection to feed it to LoRAs + graph.edges = graph.edges.filter( + (e) => + !( + e.source.node_id === modelLoaderNodeId && + ['unet'].includes(e.source.field) + ) + ); + // Remove CLIP_SKIP connections to conditionings to feed it through LoRAs + graph.edges = graph.edges.filter( + (e) => + !(e.source.node_id === CLIP_SKIP && ['clip'].includes(e.source.field)) + ); + // we need to remember the last lora so we can chain from it let lastLoraNodeId = ''; let currentLoraIndex = 0; + const loraMetadata: CoreMetadataInvocation['loras'] = []; forEach(loras, (lora) => { const { model_name, base_model, weight } = lora; @@ -69,13 +69,10 @@ export const addLoRAsToGraph = ( weight, }; - // add the lora to the metadata accumulator - if (metadataAccumulator?.loras) { - metadataAccumulator.loras.push({ - lora: { model_name, base_model }, - weight, - }); - } + loraMetadata.push({ + lora: { model_name, base_model }, + weight, + }); // add to graph graph.nodes[currentLoraNodeId] = loraLoaderNode; @@ -182,4 +179,6 @@ export const addLoRAsToGraph = ( lastLoraNodeId = currentLoraNodeId; currentLoraIndex += 1; }); + + upsertMetadata(graph, { loras: loraMetadata }); }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLLoRAstoGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLLoRAstoGraph.ts index cb052984d4..04841f0def 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLLoRAstoGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLLoRAstoGraph.ts @@ -1,14 +1,14 @@ import { RootState } from 'app/store/store'; -import { NonNullableGraph } from 'features/nodes/types/types'; -import { forEach, size } from 'lodash-es'; import { - MetadataAccumulatorInvocation, - SDXLLoraLoaderInvocation, -} from 'services/api/types'; + LoRAMetadataItem, + NonNullableGraph, + zLoRAMetadataItem, +} from 'features/nodes/types/types'; +import { forEach, size } from 'lodash-es'; +import { SDXLLoraLoaderInvocation } from 'services/api/types'; import { CANVAS_COHERENCE_DENOISE_LATENTS, LORA_LOADER, - METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, POSITIVE_CONDITIONING, SDXL_CANVAS_INPAINT_GRAPH, @@ -17,6 +17,7 @@ import { SDXL_REFINER_INPAINT_CREATE_MASK, SEAMLESS, } from './constants'; +import { upsertMetadata } from './metadata'; export const addSDXLLoRAsToGraph = ( state: RootState, @@ -34,9 +35,12 @@ export const addSDXLLoRAsToGraph = ( const { loras } = state.lora; const loraCount = size(loras); - const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as - | MetadataAccumulatorInvocation - | undefined; + + if (loraCount === 0) { + return; + } 
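Note: from here on the graph builders no longer reach into a METADATA_ACCUMULATOR node; each one collects its own fields and hands them to upsertMetadata from the new './metadata' module. That module is introduced elsewhere in this series, so the sketch below is only a guess at the behaviour these call sites rely on: the merge-into-an-existing-node semantics are inferred from usage, and the internals are not the actual implementation.

import { NonNullableGraph } from 'features/nodes/types/types';
import { CoreMetadataInvocation } from 'services/api/types';
import { METADATA } from './constants';

// Merge partial metadata into the graph's single metadata node, if present.
// Graphs that never seeded a metadata node are silently skipped.
export const upsertMetadata = (
  graph: NonNullableGraph,
  metadata: Partial<CoreMetadataInvocation>
): void => {
  const metadataNode = graph.nodes[METADATA] as
    | CoreMetadataInvocation
    | undefined;

  if (!metadataNode) {
    return;
  }

  Object.assign(metadataNode, metadata);
};

Under that reading, upsertMetadata(graph, { loras: loraMetadata }) at the end of this hunk simply overwrites the loras field on the metadata node with the list assembled above.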
+ + const loraMetadata: LoRAMetadataItem[] = []; // Handle Seamless Plugs const unetLoaderId = modelLoaderNodeId; @@ -47,22 +51,17 @@ export const addSDXLLoRAsToGraph = ( clipLoaderId = SDXL_MODEL_LOADER; } - if (loraCount > 0) { - // Remove modelLoaderNodeId unet/clip/clip2 connections to feed it to LoRAs - graph.edges = graph.edges.filter( - (e) => - !( - e.source.node_id === unetLoaderId && ['unet'].includes(e.source.field) - ) && - !( - e.source.node_id === clipLoaderId && ['clip'].includes(e.source.field) - ) && - !( - e.source.node_id === clipLoaderId && - ['clip2'].includes(e.source.field) - ) - ); - } + // Remove modelLoaderNodeId unet/clip/clip2 connections to feed it to LoRAs + graph.edges = graph.edges.filter( + (e) => + !( + e.source.node_id === unetLoaderId && ['unet'].includes(e.source.field) + ) && + !( + e.source.node_id === clipLoaderId && ['clip'].includes(e.source.field) + ) && + !(e.source.node_id === clipLoaderId && ['clip2'].includes(e.source.field)) + ); // we need to remember the last lora so we can chain from it let lastLoraNodeId = ''; @@ -80,16 +79,12 @@ export const addSDXLLoRAsToGraph = ( weight, }; - // add the lora to the metadata accumulator - if (metadataAccumulator) { - if (!metadataAccumulator.loras) { - metadataAccumulator.loras = []; - } - metadataAccumulator.loras.push({ + loraMetadata.push( + zLoRAMetadataItem.parse({ lora: { model_name, base_model }, weight, - }); - } + }) + ); // add to graph graph.nodes[currentLoraNodeId] = loraLoaderNode; @@ -242,4 +237,6 @@ export const addSDXLLoRAsToGraph = ( lastLoraNodeId = currentLoraNodeId; currentLoraIndex += 1; }); + + upsertMetadata(graph, { loras: loraMetadata }); }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index a6ee6a091d..136263f63e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -2,7 +2,6 @@ import { RootState } from 'app/store/store'; import { CreateDenoiseMaskInvocation, ImageDTO, - MetadataAccumulatorInvocation, SeamlessModeInvocation, } from 'services/api/types'; import { NonNullableGraph } from '../../types/types'; @@ -12,7 +11,6 @@ import { LATENTS_TO_IMAGE, MASK_COMBINE, MASK_RESIZE_UP, - METADATA_ACCUMULATOR, SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, SDXL_CANVAS_INPAINT_GRAPH, SDXL_CANVAS_OUTPAINT_GRAPH, @@ -26,6 +24,7 @@ import { SDXL_REFINER_SEAMLESS, } from './constants'; import { buildSDXLStylePrompts } from './helpers/craftSDXLStylePrompt'; +import { upsertMetadata } from './metadata'; export const addSDXLRefinerToGraph = ( state: RootState, @@ -58,21 +57,15 @@ export const addSDXLRefinerToGraph = ( return; } - const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as - | MetadataAccumulatorInvocation - | undefined; - - if (metadataAccumulator) { - metadataAccumulator.refiner_model = refinerModel; - metadataAccumulator.refiner_positive_aesthetic_score = - refinerPositiveAestheticScore; - metadataAccumulator.refiner_negative_aesthetic_score = - refinerNegativeAestheticScore; - metadataAccumulator.refiner_cfg_scale = refinerCFGScale; - metadataAccumulator.refiner_scheduler = refinerScheduler; - metadataAccumulator.refiner_start = refinerStart; - metadataAccumulator.refiner_steps = refinerSteps; - } + upsertMetadata(graph, { + refiner_model: refinerModel, + refiner_positive_aesthetic_score: 
refinerPositiveAestheticScore, + refiner_negative_aesthetic_score: refinerNegativeAestheticScore, + refiner_cfg_scale: refinerCFGScale, + refiner_scheduler: refinerScheduler, + refiner_start: refinerStart, + refiner_steps: refinerSteps, + }); const modelLoaderId = modelLoaderNodeId ? modelLoaderNodeId diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSaveImageNode.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSaveImageNode.ts index d5a6addf8a..79aace8f62 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSaveImageNode.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSaveImageNode.ts @@ -1,19 +1,15 @@ +import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; +import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; +import { SaveImageInvocation } from 'services/api/types'; import { CANVAS_OUTPUT, LATENTS_TO_IMAGE, LATENTS_TO_IMAGE_HRF, - METADATA_ACCUMULATOR, NSFW_CHECKER, SAVE_IMAGE, WATERMARKER, } from './constants'; -import { - MetadataAccumulatorInvocation, - SaveImageInvocation, -} from 'services/api/types'; -import { RootState } from 'app/store/store'; -import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; /** * Set the `use_cache` field on the linear/canvas graph's final image output node to False. @@ -37,23 +33,6 @@ export const addSaveImageNode = ( graph.nodes[SAVE_IMAGE] = saveImageNode; - const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as - | MetadataAccumulatorInvocation - | undefined; - - if (metadataAccumulator) { - graph.edges.push({ - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: SAVE_IMAGE, - field: 'metadata', - }, - }); - } - const destination = { node_id: SAVE_IMAGE, field: 'image', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSeamlessToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSeamlessToLinearGraph.ts index bdbaacd384..ba341a8a3d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSeamlessToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSeamlessToLinearGraph.ts @@ -1,6 +1,7 @@ import { RootState } from 'app/store/store'; import { SeamlessModeInvocation } from 'services/api/types'; import { NonNullableGraph } from '../../types/types'; +import { upsertMetadata } from './metadata'; import { CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_INPAINT_GRAPH, @@ -31,6 +32,17 @@ export const addSeamlessToLinearGraph = ( seamless_y: seamlessYAxis, } as SeamlessModeInvocation; + if (seamlessXAxis) { + upsertMetadata(graph, { + seamless_x: seamlessXAxis, + }); + } + if (seamlessYAxis) { + upsertMetadata(graph, { + seamless_y: seamlessYAxis, + }); + } + let denoisingNodeId = DENOISE_LATENTS; if ( diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts index 9511475bb3..71c2aaeede 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addT2IAdapterToLinearGraph.ts @@ -3,15 +3,15 @@ import { selectValidT2IAdapters } from 'features/controlAdapters/store/controlAd import { omit } from 'lodash-es'; import { CollectInvocation, - MetadataAccumulatorInvocation, + 
CoreMetadataInvocation, T2IAdapterInvocation, } from 'services/api/types'; import { NonNullableGraph, T2IAdapterField } from '../../types/types'; import { CANVAS_COHERENCE_DENOISE_LATENTS, - METADATA_ACCUMULATOR, T2I_ADAPTER_COLLECT, } from './constants'; +import { upsertMetadata } from './metadata'; export const addT2IAdaptersToLinearGraph = ( state: RootState, @@ -22,10 +22,6 @@ export const addT2IAdaptersToLinearGraph = ( (ca) => ca.model?.base_model === state.generation.model?.base_model ); - const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as - | MetadataAccumulatorInvocation - | undefined; - if (validT2IAdapters.length) { // Even though denoise_latents' control input is polymorphic, keep it simple and always use a collect const t2iAdapterCollectNode: CollectInvocation = { @@ -51,6 +47,7 @@ export const addT2IAdaptersToLinearGraph = ( }, }); } + const t2iAdapterMetdata: CoreMetadataInvocation['t2iAdapters'] = []; validT2IAdapters.forEach((t2iAdapter) => { if (!t2iAdapter.model) { @@ -96,15 +93,13 @@ export const addT2IAdaptersToLinearGraph = ( graph.nodes[t2iAdapterNode.id] = t2iAdapterNode as T2IAdapterInvocation; - if (metadataAccumulator?.t2iAdapters) { - // metadata accumulator only needs a control field - not the whole node - // extract what we need and add to the accumulator - const t2iAdapterField = omit(t2iAdapterNode, [ + t2iAdapterMetdata.push( + omit(t2iAdapterNode, [ 'id', 'type', - ]) as T2IAdapterField; - metadataAccumulator.t2iAdapters.push(t2iAdapterField); - } + 'is_intermediate', + ]) as T2IAdapterField + ); graph.edges.push({ source: { node_id: t2iAdapterNode.id, field: 't2i_adapter' }, @@ -114,5 +109,7 @@ export const addT2IAdaptersToLinearGraph = ( }, }); }); + + upsertMetadata(graph, { t2iAdapters: t2iAdapterMetdata }); } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts index 696c8afff2..f049a89e36 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts @@ -1,6 +1,5 @@ import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; -import { MetadataAccumulatorInvocation } from 'services/api/types'; import { CANVAS_COHERENCE_INPAINT_CREATE_MASK, CANVAS_IMAGE_TO_IMAGE_GRAPH, @@ -14,7 +13,6 @@ import { INPAINT_IMAGE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, - METADATA_ACCUMULATOR, ONNX_MODEL_LOADER, SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, SDXL_CANVAS_INPAINT_GRAPH, @@ -26,6 +24,7 @@ import { TEXT_TO_IMAGE_GRAPH, VAE_LOADER, } from './constants'; +import { upsertMetadata } from './metadata'; export const addVAEToGraph = ( state: RootState, @@ -41,9 +40,6 @@ export const addVAEToGraph = ( ); const isAutoVae = !vae; - const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as - | MetadataAccumulatorInvocation - | undefined; if (!isAutoVae) { graph.nodes[VAE_LOADER] = { @@ -181,7 +177,7 @@ export const addVAEToGraph = ( } } - if (vae && metadataAccumulator) { - metadataAccumulator.vae = vae; + if (vae) { + upsertMetadata(graph, { vae }); } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addWatermarkerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addWatermarkerToGraph.ts index 4e515906b6..c43437e4fc 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addWatermarkerToGraph.ts +++ 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addWatermarkerToGraph.ts @@ -5,14 +5,8 @@ import { ImageNSFWBlurInvocation, ImageWatermarkInvocation, LatentsToImageInvocation, - MetadataAccumulatorInvocation, } from 'services/api/types'; -import { - LATENTS_TO_IMAGE, - METADATA_ACCUMULATOR, - NSFW_CHECKER, - WATERMARKER, -} from './constants'; +import { LATENTS_TO_IMAGE, NSFW_CHECKER, WATERMARKER } from './constants'; export const addWatermarkerToGraph = ( state: RootState, @@ -32,10 +26,6 @@ export const addWatermarkerToGraph = ( | ImageNSFWBlurInvocation | undefined; - const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as - | MetadataAccumulatorInvocation - | undefined; - if (!nodeToAddTo) { // something has gone terribly awry return; @@ -80,17 +70,4 @@ export const addWatermarkerToGraph = ( }, }); } - - if (metadataAccumulator) { - graph.edges.push({ - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: WATERMARKER, - field: 'metadata', - }, - }); - } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildAdHocUpscaleGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildAdHocUpscaleGraph.ts index c612e88598..46e415a886 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildAdHocUpscaleGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildAdHocUpscaleGraph.ts @@ -1,12 +1,13 @@ +import { BoardId } from 'features/gallery/store/types'; import { NonNullableGraph } from 'features/nodes/types/types'; import { ESRGANModelName } from 'features/parameters/store/postprocessingSlice'; import { - Graph, ESRGANInvocation, + Graph, SaveImageInvocation, } from 'services/api/types'; import { REALESRGAN as ESRGAN, SAVE_IMAGE } from './constants'; -import { BoardId } from 'features/gallery/store/types'; +import { addCoreMetadataNode } from './metadata'; type Arg = { image_name: string; @@ -55,5 +56,9 @@ export const buildAdHocUpscaleGraph = ({ ], }; + addCoreMetadataNode(graph, { + esrgan_model: esrganModelName, + }); + return graph; }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts index 59bdf669e6..9d957c3a4a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts @@ -20,12 +20,12 @@ import { IMG2IMG_RESIZE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, - METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, SEAMLESS, } from './constants'; +import { addCoreMetadataNode } from './metadata'; /** * Builds the Canvas tab's Image to Image graph. @@ -308,10 +308,7 @@ export const buildCanvasImageToImageGraph = ( }); } - // add metadata accumulator, which is only mostly populated - some fields are added later - graph.nodes[METADATA_ACCUMULATOR] = { - id: METADATA_ACCUMULATOR, - type: 'metadata_accumulator', + addCoreMetadataNode(graph, { generation_mode: 'img2img', cfg_scale, width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, @@ -325,15 +322,10 @@ export const buildCanvasImageToImageGraph = ( steps, rand_device: use_cpu ? 
'cpu' : 'cuda', scheduler, - vae: undefined, // option; set in addVAEToGraph - controlnets: [], // populated in addControlNetToLinearGraph - loras: [], // populated in addLoRAsToGraph - ipAdapters: [], // populated in addIPAdapterToLinearGraph - t2iAdapters: [], clip_skip: clipSkip, strength, init_image: initialImage.image_name, - }; + }); // Add Seamless To Graph if (seamlessXAxis || seamlessYAxis) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index b9c0c9eff3..c1ecde5395 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -16,7 +16,6 @@ import { IMAGE_TO_LATENTS, IMG2IMG_RESIZE, LATENTS_TO_IMAGE, - METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -28,6 +27,7 @@ import { } from './constants'; import { buildSDXLStylePrompts } from './helpers/craftSDXLStylePrompt'; import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph'; +import { addCoreMetadataNode } from './metadata'; /** * Builds the Canvas tab's Image to Image graph. @@ -319,10 +319,7 @@ export const buildCanvasSDXLImageToImageGraph = ( }); } - // add metadata accumulator, which is only mostly populated - some fields are added later - graph.nodes[METADATA_ACCUMULATOR] = { - id: METADATA_ACCUMULATOR, - type: 'metadata_accumulator', + addCoreMetadataNode(graph, { generation_mode: 'img2img', cfg_scale, width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, @@ -336,24 +333,8 @@ export const buildCanvasSDXLImageToImageGraph = ( steps, rand_device: use_cpu ? 'cpu' : 'cuda', scheduler, - vae: undefined, // option; set in addVAEToGraph - controlnets: [], // populated in addControlNetToLinearGraph - loras: [], // populated in addLoRAsToGraph - ipAdapters: [], // populated in addIPAdapterToLinearGraph - t2iAdapters: [], strength, init_image: initialImage.image_name, - }; - - graph.edges.push({ - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: CANVAS_OUTPUT, - field: 'metadata', - }, }); // Add Seamless To Graph diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index df636669dc..e43891eba4 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -18,7 +18,6 @@ import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CANVAS_OUTPUT, LATENTS_TO_IMAGE, - METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, ONNX_MODEL_LOADER, @@ -30,6 +29,7 @@ import { SEAMLESS, } from './constants'; import { buildSDXLStylePrompts } from './helpers/craftSDXLStylePrompt'; +import { addCoreMetadataNode } from './metadata'; /** * Builds the Canvas tab's Text to Image graph. 
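Note: the build*Graph functions above now call addCoreMetadataNode instead of assembling a metadata_accumulator node and wiring it to the output by hand. addCoreMetadataNode also lives in the new './metadata' module, so the sketch below is an inference from the call sites rather than the real implementation: the 'core_metadata' node type is assumed from the CoreMetadataInvocation type used elsewhere in this patch, and the edge into SAVE_IMAGE is assumed because the explicit metadata edges are deleted from addSaveImageNode and the canvas graphs.

import { NonNullableGraph } from 'features/nodes/types/types';
import { CoreMetadataInvocation } from 'services/api/types';
import { METADATA, SAVE_IMAGE } from './constants';

// Seed the graph with its single metadata node and route it into the save
// node, so individual graph builders no longer add metadata edges themselves.
export const addCoreMetadataNode = (
  graph: NonNullableGraph,
  metadata: Partial<CoreMetadataInvocation>
): void => {
  graph.nodes[METADATA] = {
    id: METADATA,
    type: 'core_metadata', // assumed node type
    ...metadata,
  };

  graph.edges.push({
    source: { node_id: METADATA, field: 'metadata' },
    destination: { node_id: SAVE_IMAGE, field: 'metadata' },
  });
};

// Drop a single field again, e.g. before prepareLinearUIBatch re-adds `seed`
// or `positive_prompt` as per-batch data further below.
export const removeMetadata = (
  graph: NonNullableGraph,
  key: keyof CoreMetadataInvocation
): void => {
  const metadataNode = graph.nodes[METADATA] as
    | Partial<CoreMetadataInvocation>
    | undefined;

  if (!metadataNode) {
    return;
  }

  delete metadataNode[key];
};

With this shape, upsertMetadata and removeMetadata mutate graph.nodes[METADATA] in place, which matches the batch builder below switching its node_path entries from METADATA_ACCUMULATOR to METADATA.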
@@ -301,10 +301,7 @@ export const buildCanvasSDXLTextToImageGraph = ( }); } - // add metadata accumulator, which is only mostly populated - some fields are added later - graph.nodes[METADATA_ACCUMULATOR] = { - id: METADATA_ACCUMULATOR, - type: 'metadata_accumulator', + addCoreMetadataNode(graph, { generation_mode: 'txt2img', cfg_scale, width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, @@ -318,22 +315,6 @@ export const buildCanvasSDXLTextToImageGraph = ( steps, rand_device: use_cpu ? 'cpu' : 'cuda', scheduler, - vae: undefined, // option; set in addVAEToGraph - controlnets: [], // populated in addControlNetToLinearGraph - loras: [], // populated in addLoRAsToGraph - ipAdapters: [], // populated in addIPAdapterToLinearGraph - t2iAdapters: [], - }; - - graph.edges.push({ - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: CANVAS_OUTPUT, - field: 'metadata', - }, }); // Add Seamless To Graph diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts index 38f11f14ac..6e48c14086 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts @@ -21,13 +21,13 @@ import { DENOISE_LATENTS, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, - METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, ONNX_MODEL_LOADER, POSITIVE_CONDITIONING, SEAMLESS, } from './constants'; +import { addCoreMetadataNode } from './metadata'; /** * Builds the Canvas tab's Text to Image graph. @@ -289,10 +289,7 @@ export const buildCanvasTextToImageGraph = ( }); } - // add metadata accumulator, which is only mostly populated - some fields are added later - graph.nodes[METADATA_ACCUMULATOR] = { - id: METADATA_ACCUMULATOR, - type: 'metadata_accumulator', + addCoreMetadataNode(graph, { generation_mode: 'txt2img', cfg_scale, width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, @@ -306,23 +303,7 @@ export const buildCanvasTextToImageGraph = ( steps, rand_device: use_cpu ? 
'cpu' : 'cuda', scheduler, - vae: undefined, // option; set in addVAEToGraph - controlnets: [], // populated in addControlNetToLinearGraph - loras: [], // populated in addLoRAsToGraph - ipAdapters: [], // populated in addIPAdapterToLinearGraph - t2iAdapters: [], clip_skip: clipSkip, - }; - - graph.edges.push({ - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: CANVAS_OUTPUT, - field: 'metadata', - }, }); // Add Seamless To Graph diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts index 9c25ee3b8f..313826452c 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts @@ -7,10 +7,12 @@ import { components } from 'services/api/schema'; import { Batch, BatchConfig } from 'services/api/types'; import { CANVAS_COHERENCE_NOISE, + METADATA, METADATA_ACCUMULATOR, NOISE, POSITIVE_CONDITIONING, } from './constants'; +import { removeMetadata } from './metadata'; export const prepareLinearUIBatch = ( state: RootState, @@ -24,7 +26,6 @@ export const prepareLinearUIBatch = ( const data: Batch['data'] = []; if (prompts.length === 1) { - unset(graph.nodes[METADATA_ACCUMULATOR], 'seed'); const seeds = generateSeeds({ count: iterations, start: shouldRandomizeSeed ? undefined : seed, @@ -40,13 +41,13 @@ export const prepareLinearUIBatch = ( }); } - if (graph.nodes[METADATA_ACCUMULATOR]) { - zipped.push({ - node_path: METADATA_ACCUMULATOR, - field_name: 'seed', - items: seeds, - }); - } + // add to metadata + removeMetadata(graph, 'seed'); + zipped.push({ + node_path: METADATA, + field_name: 'seed', + items: seeds, + }); if (graph.nodes[CANVAS_COHERENCE_NOISE]) { zipped.push({ @@ -77,13 +78,13 @@ export const prepareLinearUIBatch = ( }); } - if (graph.nodes[METADATA_ACCUMULATOR]) { - firstBatchDatumList.push({ - node_path: METADATA_ACCUMULATOR, - field_name: 'seed', - items: seeds, - }); - } + // add to metadata + removeMetadata(graph, 'seed'); + firstBatchDatumList.push({ + node_path: METADATA, + field_name: 'seed', + items: seeds, + }); if (graph.nodes[CANVAS_COHERENCE_NOISE]) { firstBatchDatumList.push({ @@ -106,13 +107,15 @@ export const prepareLinearUIBatch = ( items: seeds, }); } - if (graph.nodes[METADATA_ACCUMULATOR]) { - secondBatchDatumList.push({ - node_path: METADATA_ACCUMULATOR, - field_name: 'seed', - items: seeds, - }); - } + + // add to metadata + removeMetadata(graph, 'seed'); + secondBatchDatumList.push({ + node_path: METADATA, + field_name: 'seed', + items: seeds, + }); + if (graph.nodes[CANVAS_COHERENCE_NOISE]) { secondBatchDatumList.push({ node_path: CANVAS_COHERENCE_NOISE, @@ -137,13 +140,13 @@ export const prepareLinearUIBatch = ( }); } - if (graph.nodes[METADATA_ACCUMULATOR]) { - firstBatchDatumList.push({ - node_path: METADATA_ACCUMULATOR, - field_name: 'positive_prompt', - items: extendedPrompts, - }); - } + // add to metadata + removeMetadata(graph, 'positive_prompt'); + firstBatchDatumList.push({ + node_path: METADATA, + field_name: 'positive_prompt', + items: extendedPrompts, + }); if (shouldConcatSDXLStylePrompt && model?.base_model === 'sdxl') { unset(graph.nodes[METADATA_ACCUMULATOR], 'positive_style_prompt'); @@ -160,13 +163,13 @@ export const prepareLinearUIBatch = ( }); } - if (graph.nodes[METADATA_ACCUMULATOR]) { - firstBatchDatumList.push({ - node_path: 
METADATA_ACCUMULATOR, - field_name: 'positive_style_prompt', - items: stylePrompts, - }); - } + // add to metadata + removeMetadata(graph, 'positive_style_prompt'); + firstBatchDatumList.push({ + node_path: METADATA, + field_name: 'positive_style_prompt', + items: extendedPrompts, + }); } data.push(firstBatchDatumList); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts index 0eeba988f2..3b13c746c9 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts @@ -21,13 +21,13 @@ import { IMAGE_TO_LATENTS, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, - METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, RESIZE, SEAMLESS, } from './constants'; +import { addCoreMetadataNode } from './metadata'; /** * Builds the Image to Image tab graph. @@ -311,10 +311,7 @@ export const buildLinearImageToImageGraph = ( }); } - // add metadata accumulator, which is only mostly populated - some fields are added later - graph.nodes[METADATA_ACCUMULATOR] = { - id: METADATA_ACCUMULATOR, - type: 'metadata_accumulator', + addCoreMetadataNode(graph, { generation_mode: 'img2img', cfg_scale, height, @@ -326,25 +323,9 @@ export const buildLinearImageToImageGraph = ( steps, rand_device: use_cpu ? 'cpu' : 'cuda', scheduler, - vae: undefined, // option; set in addVAEToGraph - controlnets: [], // populated in addControlNetToLinearGraph - loras: [], // populated in addLoRAsToGraph - ipAdapters: [], // populated in addIPAdapterToLinearGraph - t2iAdapters: [], // populated in addT2IAdapterToLinearGraph clip_skip: clipSkip, strength, init_image: initialImage.imageName, - }; - - graph.edges.push({ - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'metadata', - }, }); // Add Seamless To Graph diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts index f818768fb5..54f8e05d21 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts @@ -18,7 +18,6 @@ import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { IMAGE_TO_LATENTS, LATENTS_TO_IMAGE, - METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -30,6 +29,7 @@ import { SEAMLESS, } from './constants'; import { buildSDXLStylePrompts } from './helpers/craftSDXLStylePrompt'; +import { addCoreMetadataNode } from './metadata'; /** * Builds the Image to Image tab graph. @@ -331,10 +331,7 @@ export const buildLinearSDXLImageToImageGraph = ( }); } - // add metadata accumulator, which is only mostly populated - some fields are added later - graph.nodes[METADATA_ACCUMULATOR] = { - id: METADATA_ACCUMULATOR, - type: 'metadata_accumulator', + addCoreMetadataNode(graph, { generation_mode: 'sdxl_img2img', cfg_scale, height, @@ -346,26 +343,10 @@ export const buildLinearSDXLImageToImageGraph = ( steps, rand_device: use_cpu ? 
'cpu' : 'cuda', scheduler, - vae: undefined, - controlnets: [], - loras: [], - ipAdapters: [], - t2iAdapters: [], - strength: strength, + strength, init_image: initialImage.imageName, positive_style_prompt: positiveStylePrompt, negative_style_prompt: negativeStylePrompt, - }; - - graph.edges.push({ - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'metadata', - }, }); // Add Seamless To Graph diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts index 4cb90678c3..37fbbf7f43 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts @@ -11,9 +11,9 @@ import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; +import { addCoreMetadataNode } from './metadata'; import { LATENTS_TO_IMAGE, - METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -225,10 +225,7 @@ export const buildLinearSDXLTextToImageGraph = ( ], }; - // add metadata accumulator, which is only mostly populated - some fields are added later - graph.nodes[METADATA_ACCUMULATOR] = { - id: METADATA_ACCUMULATOR, - type: 'metadata_accumulator', + addCoreMetadataNode(graph, { generation_mode: 'sdxl_txt2img', cfg_scale, height, @@ -240,24 +237,8 @@ export const buildLinearSDXLTextToImageGraph = ( steps, rand_device: use_cpu ? 
'cpu' : 'cuda', scheduler, - vae: undefined, - controlnets: [], - loras: [], - ipAdapters: [], - t2iAdapters: [], positive_style_prompt: positiveStylePrompt, negative_style_prompt: negativeStylePrompt, - }; - - graph.edges.push({ - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'metadata', - }, }); // Add Seamless To Graph diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts index e692e12fa4..8e0143f180 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts @@ -15,12 +15,12 @@ import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; +import { addCoreMetadataNode } from './metadata'; import { CLIP_SKIP, DENOISE_LATENTS, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, - METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, ONNX_MODEL_LOADER, @@ -48,10 +48,6 @@ export const buildLinearTextToImageGraph = ( seamlessXAxis, seamlessYAxis, seed, - hrfWidth, - hrfHeight, - hrfStrength, - hrfEnabled: hrfEnabled, } = state.generation; const use_cpu = shouldUseCpuNoise; @@ -238,10 +234,7 @@ export const buildLinearTextToImageGraph = ( ], }; - // add metadata accumulator, which is only mostly populated - some fields are added later - graph.nodes[METADATA_ACCUMULATOR] = { - id: METADATA_ACCUMULATOR, - type: 'metadata_accumulator', + addCoreMetadataNode(graph, { generation_mode: 'txt2img', cfg_scale, height, @@ -253,26 +246,7 @@ export const buildLinearTextToImageGraph = ( steps, rand_device: use_cpu ? 'cpu' : 'cuda', scheduler, - vae: undefined, // option; set in addVAEToGraph - controlnets: [], // populated in addControlNetToLinearGraph - loras: [], // populated in addLoRAsToGraph - ipAdapters: [], // populated in addIPAdapterToLinearGraph - t2iAdapters: [], // populated in addT2IAdapterToLinearGraph clip_skip: clipSkip, - hrf_width: hrfEnabled ? hrfWidth : undefined, - hrf_height: hrfEnabled ? hrfHeight : undefined, - hrf_strength: hrfEnabled ? 
hrfStrength : undefined, - }; - - graph.edges.push({ - source: { - node_id: METADATA_ACCUMULATOR, - field: 'metadata', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'metadata', - }, }); // Add Seamless To Graph diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildNodesGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildNodesGraph.ts index 7be06ac110..4437e14f66 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildNodesGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildNodesGraph.ts @@ -35,7 +35,7 @@ export const buildNodesGraph = (nodesState: NodesState): Graph => { const { nodes, edges } = nodesState; const filteredNodes = nodes.filter(isInvocationNode); - const workflowJSON = JSON.stringify(buildWorkflow(nodesState)); + // const workflowJSON = JSON.stringify(buildWorkflow(nodesState)); // Reduce the node editor nodes into invocation graph nodes const parsedNodes = filteredNodes.reduce>( @@ -68,7 +68,8 @@ export const buildNodesGraph = (nodesState: NodesState): Graph => { if (embedWorkflow) { // add the workflow to the node - Object.assign(graphNode, { workflow: workflowJSON }); + // Object.assign(graphNode, { workflow: workflowJSON }); + Object.assign(graphNode, { workflow: buildWorkflow(nodesState) }); } // Add it to the nodes object diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index 7d547d09e6..e0dc52063b 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -56,7 +56,15 @@ export const IP_ADAPTER = 'ip_adapter'; export const DYNAMIC_PROMPT = 'dynamic_prompt'; export const IMAGE_COLLECTION = 'image_collection'; export const IMAGE_COLLECTION_ITERATE = 'image_collection_iterate'; +export const METADATA = 'core_metadata'; +export const BATCH_METADATA = 'batch_metadata'; +export const BATCH_METADATA_COLLECT = 'batch_metadata_collect'; +export const BATCH_SEED = 'batch_seed'; +export const BATCH_PROMPT = 'batch_prompt'; +export const BATCH_STYLE_PROMPT = 'batch_style_prompt'; +export const METADATA_COLLECT = 'metadata_collect'; export const METADATA_ACCUMULATOR = 'metadata_accumulator'; +export const MERGE_METADATA = 'merge_metadata'; export const REALESRGAN = 'esrgan'; export const DIVIDE = 'divide'; export const SCALE = 'scale_image'; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts new file mode 100644 index 0000000000..547c45addf --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts @@ -0,0 +1,58 @@ +import { NonNullableGraph } from 'features/nodes/types/types'; +import { CoreMetadataInvocation } from 'services/api/types'; +import { JsonObject } from 'type-fest'; +import { METADATA, SAVE_IMAGE } from './constants'; + +export const addCoreMetadataNode = ( + graph: NonNullableGraph, + metadata: Partial | JsonObject +): void => { + graph.nodes[METADATA] = { + id: METADATA, + type: 'core_metadata', + ...metadata, + }; + + graph.edges.push({ + source: { + node_id: METADATA, + field: 'metadata', + }, + destination: { + node_id: SAVE_IMAGE, + field: 'metadata', + }, + }); + + return; +}; + +export const upsertMetadata = ( + graph: NonNullableGraph, + metadata: Partial | JsonObject +): void => { + const 
metadataNode = graph.nodes[METADATA] as + | CoreMetadataInvocation + | undefined; + + if (!metadataNode) { + return; + } + + Object.assign(metadataNode, metadata); +}; + +export const removeMetadata = ( + graph: NonNullableGraph, + key: keyof CoreMetadataInvocation +): void => { + const metadataNode = graph.nodes[METADATA] as + | CoreMetadataInvocation + | undefined; + + if (!metadataNode) { + return; + } + + delete metadataNode[key]; +}; diff --git a/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts b/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts index 93cd75dd75..7c6f4e638f 100644 --- a/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts +++ b/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts @@ -4,7 +4,6 @@ import { reduce, startCase } from 'lodash-es'; import { OpenAPIV3_1 } from 'openapi-types'; import { AnyInvocationType } from 'services/events/types'; import { - FieldType, InputFieldTemplate, InvocationSchemaObject, InvocationTemplate, @@ -16,18 +15,11 @@ import { } from '../types/types'; import { buildInputFieldTemplate, getFieldType } from './fieldTemplateBuilders'; -const RESERVED_INPUT_FIELD_NAMES = ['id', 'type', 'metadata', 'use_cache']; +const RESERVED_INPUT_FIELD_NAMES = ['id', 'type', 'use_cache']; const RESERVED_OUTPUT_FIELD_NAMES = ['type']; -const RESERVED_FIELD_TYPES = [ - 'WorkflowField', - 'MetadataField', - 'IsIntermediate', -]; +const RESERVED_FIELD_TYPES = ['IsIntermediate']; -const invocationDenylist: AnyInvocationType[] = [ - 'graph', - 'metadata_accumulator', -]; +const invocationDenylist: AnyInvocationType[] = ['graph']; const isReservedInputField = (nodeType: string, fieldName: string) => { if (RESERVED_INPUT_FIELD_NAMES.includes(fieldName)) { @@ -42,7 +34,7 @@ const isReservedInputField = (nodeType: string, fieldName: string) => { return false; }; -const isReservedFieldType = (fieldType: FieldType) => { +const isReservedFieldType = (fieldType: string) => { if (RESERVED_FIELD_TYPES.includes(fieldType)) { return true; } @@ -86,6 +78,7 @@ export const parseSchema = ( const tags = schema.tags ?? []; const description = schema.description ?? ''; const version = schema.version; + let withWorkflow = false; const inputs = reduce( schema.properties, @@ -112,7 +105,7 @@ export const parseSchema = ( const fieldType = property.ui_type ?? 
getFieldType(property); - if (!isFieldType(fieldType)) { + if (!fieldType) { logger('nodes').warn( { node: type, @@ -120,11 +113,16 @@ export const parseSchema = ( fieldType, field: parseify(property), }, - 'Skipping unknown input field type' + 'Missing input field type' ); return inputsAccumulator; } + if (fieldType === 'WorkflowField') { + withWorkflow = true; + return inputsAccumulator; + } + if (isReservedFieldType(fieldType)) { logger('nodes').trace( { @@ -133,7 +131,20 @@ export const parseSchema = ( fieldType, field: parseify(property), }, - 'Skipping reserved field type' + `Skipping reserved input field type: ${fieldType}` + ); + return inputsAccumulator; + } + + if (!isFieldType(fieldType)) { + logger('nodes').warn( + { + node: type, + fieldName: propertyName, + fieldType, + field: parseify(property), + }, + `Skipping unknown input field type: ${fieldType}` ); return inputsAccumulator; } @@ -146,7 +157,7 @@ export const parseSchema = ( ); if (!field) { - logger('nodes').debug( + logger('nodes').warn( { node: type, fieldName: propertyName, @@ -248,6 +259,7 @@ export const parseSchema = ( inputs, outputs, useCache, + withWorkflow, }; Object.assign(invocationsAccumulator, { [type]: invocation }); diff --git a/invokeai/frontend/web/src/services/api/endpoints/images.ts b/invokeai/frontend/web/src/services/api/endpoints/images.ts index c8d42d17f6..36c00ee1c9 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/images.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/images.ts @@ -1,6 +1,7 @@ import { EntityState, Update } from '@reduxjs/toolkit'; import { fetchBaseQuery } from '@reduxjs/toolkit/dist/query'; import { PatchCollection } from '@reduxjs/toolkit/dist/query/core/buildThunks'; +import { logger } from 'app/logging/logger'; import { ASSETS_CATEGORIES, BoardId, @@ -8,6 +9,7 @@ import { IMAGE_LIMIT, } from 'features/gallery/store/types'; import { + CoreMetadata, ImageMetadataAndWorkflow, zCoreMetadata, } from 'features/nodes/types/types'; @@ -23,7 +25,6 @@ import { ListImagesArgs, OffsetPaginatedResults_ImageDTO_, PostUploadAction, - UnsafeImageMetadata, } from '../types'; import { getCategories, @@ -114,11 +115,24 @@ export const imagesApi = api.injectEndpoints({ ], keepUnusedDataFor: 86400, // 24 hours }), - getImageMetadata: build.query({ + getImageMetadata: build.query({ query: (image_name) => ({ url: `images/i/${image_name}/metadata` }), providesTags: (result, error, image_name) => [ { type: 'ImageMetadata', id: image_name }, ], + transformResponse: ( + response: paths['/api/v1/images/i/{image_name}/metadata']['get']['responses']['200']['content']['application/json'] + ) => { + if (response) { + const result = zCoreMetadata.safeParse(response); + if (result.success) { + return result.data; + } else { + logger('images').warn('Problem parsing metadata'); + } + } + return; + }, keepUnusedDataFor: 86400, // 24 hours }), getImageMetadataFromFile: build.query< diff --git a/invokeai/frontend/web/src/services/api/endpoints/workflows.ts b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts new file mode 100644 index 0000000000..4c69d2e286 --- /dev/null +++ b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts @@ -0,0 +1,31 @@ +import { logger } from 'app/logging/logger'; +import { Workflow, zWorkflow } from 'features/nodes/types/types'; +import { api } from '..'; +import { paths } from '../schema'; + +export const workflowsApi = api.injectEndpoints({ + endpoints: (build) => ({ + getWorkflow: build.query({ + query: (workflow_id) => 
`workflows/i/${workflow_id}`, + keepUnusedDataFor: 86400, // 24 hours + providesTags: (result, error, workflow_id) => [ + { type: 'Workflow', id: workflow_id }, + ], + transformResponse: ( + response: paths['/api/v1/workflows/i/{workflow_id}']['get']['responses']['200']['content']['application/json'] + ) => { + if (response) { + const result = zWorkflow.safeParse(response); + if (result.success) { + return result.data; + } else { + logger('images').warn('Problem parsing metadata'); + } + } + return; + }, + }), + }), +}); + +export const { useGetWorkflowQuery } = workflowsApi; diff --git a/invokeai/frontend/web/src/services/api/index.ts b/invokeai/frontend/web/src/services/api/index.ts index f423b2b0ed..b7595b3d52 100644 --- a/invokeai/frontend/web/src/services/api/index.ts +++ b/invokeai/frontend/web/src/services/api/index.ts @@ -37,6 +37,7 @@ export const tagTypes = [ 'ControlNetModel', 'LoRAModel', 'SDXLRefinerModel', + 'Workflow', ] as const; export type ApiTagDescription = TagDescription<(typeof tagTypes)[number]>; export const LIST_TAG = 'LIST'; diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 62f60c1dbc..932891c862 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -378,6 +378,13 @@ export type paths = { */ put: operations["cancel_queue_item"]; }; + "/api/v1/workflows/i/{workflow_id}": { + /** + * Get Workflow + * @description Gets a workflow + */ + get: operations["get_workflow"]; + }; }; export type webhooks = Record; @@ -413,17 +420,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * A * @description The first number @@ -572,6 +574,10 @@ export type components = { * @description Creates a blank image and forwards it to the pipeline */ BlankImageInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -583,17 +589,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Width * @description The width of the image @@ -646,17 +647,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Latents tensor */ latents_a?: components["schemas"]["LatentsField"]; /** @description Latents tensor */ @@ -899,17 +895,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Collection * @description The collection of boolean values @@ -955,17 +946,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Value * @description The boolean value @@ -1033,6 +1019,10 @@ export type components = { * @description Infills transparent areas of an image using OpenCV Inpainting */ CV2InfillInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -1044,17 +1034,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** @@ -1080,6 +1065,10 @@ export type components = { * @description Canny edge detection for ControlNet */ CannyImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -1091,17 +1080,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -1167,17 +1151,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count @@ -1229,17 +1208,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Collection Item * @description The item to collect (all inputs must be of the same type) @@ -1294,6 +1268,10 @@ export type components = { * using a mask to only color-correct certain regions of the target image. */ ColorCorrectInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -1305,17 +1283,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to color-correct */ image?: components["schemas"]["ImageField"]; /** @description Reference image for color-correction */ @@ -1377,17 +1350,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * @description The color value * @default { @@ -1410,6 +1378,10 @@ export type components = { * @description Generates a color map from the provided image */ ColorMapImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -1421,17 +1393,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -1477,17 +1444,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -1522,17 +1484,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Collection * @description The collection of conditioning tensors @@ -1589,17 +1546,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Conditioning tensor */ conditioning?: components["schemas"]["ConditioningField"]; /** @@ -1628,6 +1580,10 @@ export type components = { * @description Applies content shuffle processing to image */ ContentShuffleImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -1639,17 +1595,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -1744,17 +1695,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The control image */ image?: components["schemas"]["ImageField"]; /** @description ControlNet model to load */ @@ -1872,26 +1818,33 @@ export type components = { type: "control_output"; }; /** - * CoreMetadata - * @description Core generation metadata for an image generated in InvokeAI. 
+ * Core Metadata + * @description Collects core generation metadata into a MetadataField */ - CoreMetadata: { + CoreMetadataInvocation: { /** - * App Version - * @description The version of InvokeAI used to generate this image - * @default 3.3.0 + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - app_version?: string; + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean | null; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; /** * Generation Mode * @description The generation mode that output this image + * @enum {string} */ - generation_mode?: string | null; - /** - * Created By - * @description The name of the creator of the image - */ - created_by?: string | null; + generation_mode?: "txt2img" | "img2img" | "inpaint" | "outpaint"; /** * Positive Prompt * @description The positive prompt parameter @@ -1937,6 +1890,16 @@ export type components = { * @description The scheduler used for inference */ scheduler?: string | null; + /** + * Seamless X + * @description Whether seamless tiling was used on the X axis + */ + seamless_x?: boolean | null; + /** + * Seamless Y + * @description Whether seamless tiling was used on the Y axis + */ + seamless_y?: boolean | null; /** * Clip Skip * @description The number of skipped CLIP layers @@ -1964,8 +1927,6 @@ export type components = { * @description The LoRAs used for inference */ loras?: components["schemas"]["LoRAMetadataField"][] | null; - /** @description The VAE used for decoding, if the main model's default was not used */ - vae?: components["schemas"]["VAEModelField"] | null; /** * Strength * @description The strength used for latents-to-latents @@ -1976,6 +1937,23 @@ export type components = { * @description The name of the initial image */ init_image?: string | null; + /** @description The VAE used for decoding, if the main model's default was not used */ + vae?: components["schemas"]["VAEModelField"] | null; + /** + * Hrf Width + * @description The high resolution fix height and width multipler. + */ + hrf_width?: number | null; + /** + * Hrf Height + * @description The high resolution fix height and width multipler. + */ + hrf_height?: number | null; + /** + * Hrf Strength + * @description The high resolution fix img2img strength used in the upscale pass. + */ + hrf_strength?: number | null; /** * Positive Style Prompt * @description The positive style prompt parameter @@ -2018,6 +1996,13 @@ export type components = { * @description The start value used for refiner denoising */ refiner_start?: number | null; + /** + * type + * @default core_metadata + * @constant + */ + type: "core_metadata"; + [key: string]: unknown; }; /** * Create Denoise Mask @@ -2035,17 +2020,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description VAE */ vae?: components["schemas"]["VaeField"]; /** @description Image which will be masked */ @@ -2094,6 +2074,10 @@ export type components = { * @description Simple inpaint using opencv. 
*/ CvInpaintInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -2105,17 +2089,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to inpaint */ image?: components["schemas"]["ImageField"]; /** @description The mask to use when inpainting */ @@ -2166,17 +2145,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Positive conditioning tensor */ positive_conditioning?: components["schemas"]["ConditioningField"]; /** @description Negative conditioning tensor */ @@ -2288,17 +2262,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * A * @description The first number @@ -2334,17 +2303,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Prompt * @description The prompt to parse with dynamicprompts @@ -2381,6 +2345,10 @@ export type components = { * @description Upscales an image using RealESRGAN. */ ESRGANInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -2392,17 +2360,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The input image */ image?: components["schemas"]["ImageField"]; /** @@ -2475,6 +2438,10 @@ export type components = { * @description Outputs an image with detected face IDs printed on each face. For use with other FaceTools. 
*/ FaceIdentifierInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -2486,17 +2453,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Image to face detect */ image?: components["schemas"]["ImageField"]; /** @@ -2523,6 +2485,10 @@ export type components = { * @description Face mask creation using mediapipe face detection */ FaceMaskInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -2534,17 +2500,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Image to face detect */ image?: components["schemas"]["ImageField"]; /** @@ -2621,6 +2582,10 @@ export type components = { * @description Bound, extract, and mask a face from an image using MediaPipe detection */ FaceOffInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -2632,17 +2597,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Image for face detection */ image?: components["schemas"]["ImageField"]; /** @@ -2740,17 +2700,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Collection * @description The collection of float values @@ -2796,17 +2751,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Value * @description The float value @@ -2836,17 +2786,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Start * @description The first value of the range @@ -2888,17 +2833,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Operation * @description The operation to perform @@ -2958,17 +2898,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Value * @description The value to round @@ -3007,7 +2942,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | 
components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | 
components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlendLatentsInvocation"]; + [key: string]: components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | 
components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | 
components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["GraphInvocation"]; }; /** * Edges @@ -3044,7 +2979,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["IntegerOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["String2Output"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ClipSkipInvocationOutput"]; + [key: string]: components["schemas"]["FaceOffOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["String2Output"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["NoiseOutput"] | 
components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["LatentsOutput"]; }; /** * Errors @@ -3084,17 +3019,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The graph to run */ graph?: components["schemas"]["Graph"]; /** @@ -3123,6 +3053,10 @@ export type components = { * @description Applies HED edge detection to image */ HedImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3134,17 +3068,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -3218,17 +3147,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Image * @description The IP-Adapter image prompt(s). @@ -3264,17 +3188,21 @@ export type components = { */ type: "ip_adapter"; }; - /** IPAdapterMetadataField */ + /** + * IPAdapterMetadataField + * @description IP Adapter Field, minus the CLIP Vision Encoder model + */ IPAdapterMetadataField: { /** @description The IP-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** @description The IP-Adapter model to use. */ + /** @description The IP-Adapter model. 
*/ ip_adapter_model: components["schemas"]["IPAdapterModelField"]; /** * Weight - * @description The weight of the IP-Adapter model + * @description The weight given to the IP-Adapter + * @default 1 */ - weight: number; + weight?: number | number[]; /** * Begin Step Percent * @description When the IP-Adapter is first applied (% of total steps) @@ -3339,6 +3267,10 @@ export type components = { * @description Blurs an image */ ImageBlurInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3350,17 +3282,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to blur */ image?: components["schemas"]["ImageField"]; /** @@ -3400,6 +3327,10 @@ export type components = { * @description Gets a channel from an image. */ ImageChannelInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3411,17 +3342,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to get the channel from */ image?: components["schemas"]["ImageField"]; /** @@ -3443,6 +3369,10 @@ export type components = { * @description Scale a specific color channel of an image. */ ImageChannelMultiplyInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3454,17 +3384,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** @@ -3497,6 +3422,10 @@ export type components = { * @description Add or subtract a value from a specific color channel of an image. 
*/ ImageChannelOffsetInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3508,17 +3437,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** @@ -3556,17 +3480,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Collection * @description The collection of image values @@ -3601,6 +3520,10 @@ export type components = { * @description Converts an image to a different mode. */ ImageConvertInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3612,17 +3535,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to convert */ image?: components["schemas"]["ImageField"]; /** @@ -3644,6 +3562,10 @@ export type components = { * @description Crops an image to a specified box. The box can be outside of the image. */ ImageCropInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3655,17 +3577,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to crop */ image?: components["schemas"]["ImageField"]; /** @@ -3758,6 +3675,11 @@ export type components = { * @description The session ID that generated this image, if it is a generated image. */ session_id?: string | null; + /** + * Workflow Id + * @description The workflow that generated this image. + */ + workflow_id?: string | null; /** * Node Id * @description The node ID that generated this image, if it is a generated image. 
@@ -3790,6 +3712,10 @@ export type components = { * @description Adjusts the Hue of an image. */ ImageHueAdjustmentInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3801,17 +3727,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** @@ -3832,6 +3753,10 @@ export type components = { * @description Inverse linear interpolation of all pixels of an image */ ImageInverseLerpInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3843,17 +3768,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to lerp */ image?: components["schemas"]["ImageField"]; /** @@ -3891,17 +3811,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to load */ image?: components["schemas"]["ImageField"]; /** @@ -3916,6 +3831,10 @@ export type components = { * @description Linear interpolation of all pixels of an image */ ImageLerpInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -3927,17 +3846,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to lerp */ image?: components["schemas"]["ImageField"]; /** @@ -3959,27 +3873,15 @@ export type components = { */ type: "img_lerp"; }; - /** - * ImageMetadata - * @description An image's generation metadata - */ - ImageMetadata: { - /** - * Metadata - * @description The image's core metadata, if it was created in the Linear or Canvas UI - */ - metadata?: Record | null; - /** - * Graph - * @description The graph that created the image - */ - graph?: Record | null; - }; /** * Multiply Images * @description Multiplies two images together using `PIL.ImageChops.multiply()`. */ ImageMultiplyInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -3991,17 +3893,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The first image to multiply */ image1?: components["schemas"]["ImageField"]; /** @description The second image to multiply */ @@ -4018,6 +3915,10 @@ export type components = { * @description Add blur to NSFW-flagged images */ ImageNSFWBlurInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -4029,21 +3930,14 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to check */ image?: components["schemas"]["ImageField"]; - /** @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"] | null; /** * type * @default img_nsfw @@ -4080,6 +3974,10 @@ export type components = { * @description Pastes an image into another image. */ ImagePasteInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -4091,17 +3989,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The base image */ base_image?: components["schemas"]["ImageField"]; /** @description The image to paste */ @@ -4168,6 +4061,10 @@ export type components = { * @description Resizes an image to specific dimensions */ ImageResizeInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -4179,17 +4076,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to resize */ image?: components["schemas"]["ImageField"]; /** @@ -4211,8 +4103,6 @@ export type components = { * @enum {string} */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; - /** @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"] | null; /** * type * @default img_resize @@ -4225,6 +4115,10 @@ export type components = { * @description Scales an image by a factor */ ImageScaleInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -4236,17 +4130,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to scale */ image?: components["schemas"]["ImageField"]; /** @@ -4285,17 +4174,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to encode */ image?: components["schemas"]["ImageField"]; /** @description VAE */ @@ -4345,6 +4229,10 @@ export type components = { * @description Add an invisible watermark to an image */ ImageWatermarkInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. 
Must be unique among all instances of invocations. @@ -4356,17 +4244,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to check */ image?: components["schemas"]["ImageField"]; /** @@ -4375,8 +4258,6 @@ export type components = { * @default InvokeAI */ text?: string; - /** @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"] | null; /** * type * @default img_watermark @@ -4405,6 +4286,10 @@ export type components = { * @description Infills transparent areas of an image with a solid color */ InfillColorInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -4416,17 +4301,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** @@ -4451,6 +4331,10 @@ export type components = { * @description Infills transparent areas of an image using the PatchMatch algorithm */ InfillPatchMatchInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -4462,17 +4346,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** @@ -4500,6 +4379,10 @@ export type components = { * @description Infills transparent areas of an image with tiles of the image */ InfillTileInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -4511,17 +4394,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** @@ -4558,17 +4436,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Collection * @description The collection of integer values @@ -4614,17 +4487,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Value * @description The integer value @@ -4654,17 +4522,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Operation * @description The operation to perform @@ -4752,17 +4615,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Collection * @description The list of items to iterate over @@ -4803,6 +4661,10 @@ export type components = { * @description Infills transparent areas of an image using the LaMa model */ LaMaInfillInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -4814,17 +4676,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** @@ -4850,17 +4707,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Collection * @description The collection of latents tensors @@ -4922,17 +4774,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The latents tensor */ latents?: components["schemas"]["LatentsField"]; /** @@ -4971,6 +4818,10 @@ export type components = { * @description Generates an image from latents. */ LatentsToImageInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -4982,17 +4833,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Latents tensor */ latents?: components["schemas"]["LatentsField"]; /** @description VAE */ @@ -5009,8 +4855,6 @@ export type components = { * @default false */ fp32?: boolean; - /** @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"] | null; /** * type * @default l2i @@ -5023,6 +4867,10 @@ export type components = { * @description Applies leres processing to image */ LeresImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -5034,17 +4882,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -5089,6 +4932,10 @@ export type components = { * @description Applies line art anime processing to image */ LineartAnimeImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -5100,17 +4947,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -5137,6 +4979,10 @@ export type components = { * @description Applies line art processing to image */ LineartImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -5148,17 +4994,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -5188,14 +5029,14 @@ export type components = { }; /** * LoRAMetadataField - * @description LoRA metadata for an image generated in InvokeAI. 
+ * @description LoRA Metadata Field */ LoRAMetadataField: { - /** @description The LoRA model */ + /** @description LoRA model to load */ lora: components["schemas"]["LoRAModelField"]; /** * Weight - * @description The weight of the LoRA model + * @description The weight at which the LoRA is applied to each model */ weight: number; }; @@ -5275,17 +5116,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * LoRA * @description LoRA model to load @@ -5367,17 +5203,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Main model (UNet, VAE, CLIP) to load */ model: components["schemas"]["MainModelField"]; /** @@ -5392,6 +5223,10 @@ export type components = { * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`. */ MaskCombineInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -5403,17 +5238,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The first mask to combine */ mask1?: components["schemas"]["ImageField"]; /** @description The second image to combine */ @@ -5430,6 +5260,10 @@ export type components = { * @description Applies an edge mask to an image */ MaskEdgeInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -5441,17 +5275,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to apply the mask to */ image?: components["schemas"]["ImageField"]; /** @@ -5486,6 +5315,10 @@ export type components = { * @description Extracts the alpha channel of an image as a mask. 
*/ MaskFromAlphaInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -5497,17 +5330,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to create the mask from */ image?: components["schemas"]["ImageField"]; /** @@ -5528,6 +5356,10 @@ export type components = { * @description Applies mediapipe face processing to image */ MediapipeFaceProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -5539,17 +5371,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -5576,6 +5403,40 @@ export type components = { * @enum {string} */ MergeInterpolationMethod: "weighted_sum" | "sigmoid" | "inv_sigmoid" | "add_difference"; + /** + * Metadata Merge + * @description Merged a collection of MetadataDict into a single MetadataDict. + */ + MergeMetadataInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean | null; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Collection + * @description Collection of Metadata + */ + collection?: components["schemas"]["MetadataField"][]; + /** + * type + * @default merge_metadata + * @constant + */ + type: "merge_metadata"; + }; /** MergeModelsBody */ MergeModelsBody: { /** @@ -5609,10 +5470,16 @@ export type components = { merge_dest_directory?: string | null; }; /** - * Metadata Accumulator - * @description Outputs a Core Metadata Object + * MetadataField + * @description Pydantic model for metadata with custom root of type dict[str, Any]. + * Metadata is stored without a strict schema. */ - MetadataAccumulatorInvocation: { + MetadataField: Record; + /** + * Metadata + * @description Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict. + */ + MetadataInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -5625,188 +5492,109 @@ export type components = { */ is_intermediate?: boolean | null; /** - * Workflow - * @description The workflow to save with the image + * Use Cache + * @description Whether or not to use the cache + * @default true */ - workflow?: string | null; + use_cache?: boolean; + /** + * Items + * @description A single metadata item or collection of metadata items + */ + items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"]; + /** + * type + * @default metadata + * @constant + */ + type: "metadata"; + }; + /** MetadataItemField */ + MetadataItemField: { + /** + * Label + * @description Label for this metadata item + */ + label: string; + /** + * Value + * @description The value for this metadata item (may be any type) + */ + value: unknown; + }; + /** + * Metadata Item + * @description Used to create an arbitrary metadata item. Provide "label" and make a connection to "value" to store that data as the value. + */ + MetadataItemInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** - * Generation Mode - * @description The generation mode that output this image + * Label + * @description Label for this metadata item */ - generation_mode?: string | null; + label?: string; /** - * Positive Prompt - * @description The positive prompt parameter + * Value + * @description The value for this metadata item (may be any type) */ - positive_prompt?: string | null; - /** - * Negative Prompt - * @description The negative prompt parameter - */ - negative_prompt?: string | null; - /** - * Width - * @description The width parameter - */ - width?: number | null; - /** - * Height - * @description The height parameter - */ - height?: number | null; - /** - * Seed - * @description The seed used for noise generation - */ - seed?: number | null; - /** - * Rand Device - * @description The device used for random number generation - */ - rand_device?: string | null; - /** - * Cfg Scale - * @description The classifier-free guidance scale parameter - */ - cfg_scale?: number | null; - /** - * Steps - * @description The number of steps used for inference - */ - steps?: number | null; - /** - * Scheduler - * @description The scheduler used for inference - */ - scheduler?: string | null; - /** - * Clip Skip - * @description The number of skipped CLIP layers - */ - clip_skip?: number | null; - /** @description The main model used for inference */ - model?: components["schemas"]["MainModelField"] | null; - /** - * Controlnets - * @description The ControlNets used for inference - */ - controlnets?: components["schemas"]["ControlField"][] | null; - /** - * Ipadapters - * @description The IP Adapters used for inference - */ - ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; - /** - * T2Iadapters - * @description The IP Adapters used for inference - */ - t2iAdapters?: components["schemas"]["T2IAdapterField"][] | null; - /** - * Loras - * @description The LoRAs used for inference - */ - loras?: components["schemas"]["LoRAMetadataField"][] | null; - /** - * Strength - * @description The strength used for latents-to-latents - */ - strength?: number | null; - /** 
- * Init Image - * @description The name of the initial image - */ - init_image?: string | null; - /** @description The VAE used for decoding, if the main model's default was not used */ - vae?: components["schemas"]["VAEModelField"] | null; - /** - * Hrf Width - * @description The high resolution fix height and width multipler. - */ - hrf_width?: number | null; - /** - * Hrf Height - * @description The high resolution fix height and width multipler. - */ - hrf_height?: number | null; - /** - * Hrf Strength - * @description The high resolution fix img2img strength used in the upscale pass. - */ - hrf_strength?: number | null; - /** - * Positive Style Prompt - * @description The positive style prompt parameter - */ - positive_style_prompt?: string | null; - /** - * Negative Style Prompt - * @description The negative style prompt parameter - */ - negative_style_prompt?: string | null; - /** @description The SDXL Refiner model used */ - refiner_model?: components["schemas"]["MainModelField"] | null; - /** - * Refiner Cfg Scale - * @description The classifier-free guidance scale parameter used for the refiner - */ - refiner_cfg_scale?: number | null; - /** - * Refiner Steps - * @description The number of steps used for the refiner - */ - refiner_steps?: number | null; - /** - * Refiner Scheduler - * @description The scheduler used for the refiner - */ - refiner_scheduler?: string | null; - /** - * Refiner Positive Aesthetic Score - * @description The aesthetic score used for the refiner - */ - refiner_positive_aesthetic_score?: number | null; - /** - * Refiner Negative Aesthetic Score - * @description The aesthetic score used for the refiner - */ - refiner_negative_aesthetic_score?: number | null; - /** - * Refiner Start - * @description The start value used for refiner denoising - */ - refiner_start?: number | null; + value?: unknown; /** * type - * @default metadata_accumulator + * @default metadata_item * @constant */ - type: "metadata_accumulator"; + type: "metadata_item"; }; /** - * MetadataAccumulatorOutput - * @description The output of the MetadataAccumulator node + * MetadataItemOutput + * @description Metadata Item Output */ - MetadataAccumulatorOutput: { - /** @description The core metadata for the image */ - metadata: components["schemas"]["CoreMetadata"]; + MetadataItemOutput: { + /** @description Metadata Item */ + item: components["schemas"]["MetadataItemField"]; /** * type - * @default metadata_accumulator_output + * @default metadata_item_output * @constant */ - type: "metadata_accumulator_output"; + type: "metadata_item_output"; + }; + /** MetadataOutput */ + MetadataOutput: { + /** @description Metadata Dict */ + metadata: components["schemas"]["MetadataField"]; + /** + * type + * @default metadata_output + * @constant + */ + type: "metadata_output"; }; /** * Midas Depth Processor * @description Applies Midas depth processing to image */ MidasDepthImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
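
[Illustrative aside, not part of the generated schema diff: a minimal TypeScript sketch of how the new metadata node types above might be instantiated on the client. The node ids and values are invented; only the field names and the "metadata"/"merge_metadata" type constants shown in the schemas above are relied on, and typing is kept loose because MetadataField is an open-ended dict.]

    // Hypothetical sketch: composing the new metadata nodes client-side.
    // Only field names and `type` constants from the schemas above are assumed real;
    // ids and values are invented for illustration.

    // A single label/value pair (MetadataItemField); `value` may be of any type.
    const cfgItem = { label: "cfg_scale", value: 7.5 };

    // A "metadata" node collects one item or a list of items into a MetadataField dict.
    const metadataNode = {
      id: "metadata_1",
      type: "metadata" as const,
      items: [cfgItem, { label: "scheduler", value: "euler" }],
    };

    // A "merge_metadata" node merges several MetadataField dicts produced elsewhere.
    const mergeMetadataNode = {
      id: "merge_metadata_1",
      type: "merge_metadata" as const,
      collection: [{ seed: 123 }, { steps: 30 }],
    };
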
@@ -5818,17 +5606,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -5855,6 +5638,10 @@ export type components = { * @description Applies MLSD processing to image */ MlsdImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -5866,17 +5653,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -5987,17 +5769,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * A * @description The first number @@ -6051,17 +5828,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Seed * @description Seed for random number generation @@ -6121,6 +5893,10 @@ export type components = { * @description Applies NormalBae processing to image */ NormalbaeImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -6132,17 +5908,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -6169,6 +5940,10 @@ export type components = { * @description Generates an image from latents. 
*/ ONNXLatentsToImageInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -6180,23 +5955,16 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Denoised latents tensor */ latents?: components["schemas"]["LatentsField"]; /** @description VAE */ vae?: components["schemas"]["VaeField"]; - /** @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"] | null; /** * type * @default l2i_onnx @@ -6249,17 +6017,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Prompt * @description Raw prompt text (no parsing) @@ -6340,17 +6103,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Positive conditioning tensor */ positive_conditioning?: components["schemas"]["ConditioningField"]; /** @description Negative conditioning tensor */ @@ -6474,17 +6232,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description ONNX Main model (UNet, VAE, CLIP) to load */ model: components["schemas"]["OnnxModelField"]; /** @@ -6499,6 +6252,10 @@ export type components = { * @description Applies Openpose processing to image */ OpenposeImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -6510,17 +6267,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -6553,6 +6305,10 @@ export type components = { * @description Applies PIDI processing to image */ PidiImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -6564,17 +6320,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -6624,17 +6375,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * File Path * @description Path to prompt text file @@ -6696,17 +6442,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Low * @description The inclusive low value @@ -6748,17 +6489,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Low * @description The inclusive low value @@ -6794,17 +6530,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Low * @description The inclusive low value @@ -6851,17 +6582,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Start * @description The start of the range @@ -6903,17 +6629,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * 
@description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Start * @description The start of the range @@ -6963,17 +6684,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Latents tensor */ latents?: components["schemas"]["LatentsField"]; /** @@ -7032,17 +6748,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Value * @description The float value @@ -7078,17 +6789,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -7164,17 +6870,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * LoRA * @description LoRA model to load @@ -7251,17 +6952,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load */ model: components["schemas"]["MainModelField"]; /** @@ -7319,17 +7015,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Style * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -7387,17 +7078,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load */ model: components["schemas"]["MainModelField"]; /** @@ -7439,6 +7125,10 @@ export type components = { * @description Saves an image. Unlike an image primitive, this invocation stores a copy of the image. 
*/ SaveImageInvocation: { + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -7450,23 +7140,16 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @description The board to save the image to */ - board?: components["schemas"]["BoardField"] | null; - /** @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"] | null; + board?: components["schemas"]["BoardField"]; /** * type * @default save_image @@ -7490,17 +7173,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description Latents tensor */ latents?: components["schemas"]["LatentsField"]; /** @@ -7544,17 +7222,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Scheduler * @description Scheduler to use during inference @@ -7605,17 +7278,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * UNet * @description UNet (scheduler, LoRAs) @@ -7672,6 +7340,10 @@ export type components = { * @description Applies segment anything processing to image */ SegmentAnythingProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
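
[Continuing the same illustrative aside: with the per-invocation `metadata`/`workflow` fields replacing the removed CoreMetadata plumbing, a "save_image" node can carry both directly. This is a sketch under assumptions: the `image_name` value, workflow contents, and ImageField shape are invented for illustration; only the field names and the "save_image" type constant come from the SaveImageInvocation schema above.]

    // Hypothetical sketch: a save_image node carrying metadata and a workflow.
    const saveImageNode = {
      id: "save_image_1",
      type: "save_image" as const,
      image: { image_name: "generated_0001.png" }, // ImageField shape assumed
      metadata: { seed: 123, cfg_scale: 7.5 },      // open-ended MetadataField dict
      workflow: { nodes: [], edges: [] },           // open-ended WorkflowField dict (contents assumed)
      use_cache: true,
    };
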
@@ -7683,17 +7355,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -7927,17 +7594,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to show */ image?: components["schemas"]["ImageField"]; /** @@ -8119,17 +7781,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Easing * @description The easing function to use @@ -8234,17 +7891,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Collection * @description The collection of string values @@ -8290,17 +7942,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * Value * @description The string value @@ -8330,17 +7977,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * String Left * @description String Left @@ -8376,17 +8018,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * String Left * @description String Left @@ -8467,17 +8104,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * String * @description String to work on @@ -8525,17 +8157,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | 
null; + use_cache?: boolean; /** * String * @description String to split @@ -8571,17 +8198,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * String * @description String to split @@ -8616,17 +8238,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * A * @description The first number @@ -8694,17 +8311,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The IP-Adapter image prompt. */ image?: components["schemas"]["ImageField"]; /** @@ -8814,6 +8426,10 @@ export type components = { * @description Tile resampler processor */ TileResamplerProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -8825,17 +8441,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -8920,17 +8531,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** * VAE * @description VAE model to load @@ -8992,11 +8598,21 @@ export type components = { /** Error Type */ type: string; }; + /** + * WorkflowField + * @description Pydantic model for workflows with custom root of type dict[str, Any]. + * Workflows are stored without a strict schema. + */ + WorkflowField: Record; /** * Zoe (Depth) Processor * @description Applies Zoe depth processing to image */ ZoeDepthImageProcessorInvocation: { + /** @description Optional workflow to be saved with the image */ + workflow?: components["schemas"]["WorkflowField"] | null; + /** @description Optional metadata to be saved with the image */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -9008,17 +8624,12 @@ export type components = { * @default false */ is_intermediate?: boolean | null; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean | null; + use_cache?: boolean; /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** @@ -9079,7 +8690,7 @@ export type components = { * If a field should be provided a data type that does not exactly match the python type of the field, use this to provide the type that should be used instead. See the node development docs for detail on adding a new field type, which involves client-side changes. * @enum {string} */ - UIType: "boolean" | "ColorField" | "ConditioningField" | "ControlField" | "float" | "ImageField" | "integer" | "LatentsField" | "string" | "BooleanCollection" | "ColorCollection" | "ConditioningCollection" | "ControlCollection" | "FloatCollection" | "ImageCollection" | "IntegerCollection" | "LatentsCollection" | "StringCollection" | "BooleanPolymorphic" | "ColorPolymorphic" | "ConditioningPolymorphic" | "ControlPolymorphic" | "FloatPolymorphic" | "ImagePolymorphic" | "IntegerPolymorphic" | "LatentsPolymorphic" | "StringPolymorphic" | "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VaeModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "UNetField" | "VaeField" | "ClipField" | "Collection" | "CollectionItem" | "enum" | "Scheduler" | "WorkflowField" | "IsIntermediate" | "MetadataField" | "BoardField"; + UIType: "boolean" | "ColorField" | "ConditioningField" | "ControlField" | "float" | "ImageField" | "integer" | "LatentsField" | "string" | "BooleanCollection" | "ColorCollection" | "ConditioningCollection" | "ControlCollection" | "FloatCollection" | "ImageCollection" | "IntegerCollection" | "LatentsCollection" | "StringCollection" | "BooleanPolymorphic" | "ColorPolymorphic" | "ConditioningPolymorphic" | "ControlPolymorphic" | "FloatPolymorphic" | "ImagePolymorphic" | "IntegerPolymorphic" | "LatentsPolymorphic" | "StringPolymorphic" | "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VaeModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "UNetField" | "VaeField" | "ClipField" | "Collection" | "CollectionItem" | "enum" | "Scheduler" | "WorkflowField" | "IsIntermediate" | "BoardField" | "Any" | "MetadataItem" | "MetadataItemCollection" | "MetadataItemPolymorphic" | "MetadataDict"; /** * _InputField * @description *DO NOT USE* @@ -9116,24 +8727,18 @@ export type components = { /** Ui Order */ ui_order: number | null; }; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * T2IAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - T2IAdapterModelFormat: "diffusers"; /** * ControlNetModelFormat * @description An enumeration. * @enum {string} */ ControlNetModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; /** * IPAdapterModelFormat * @description An enumeration. @@ -9146,24 +8751,30 @@ export type components = { * @enum {string} */ CLIPVisionModelFormat: "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionOnnxModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionOnnxModelFormat: "olive" | "onnx"; - /** - * StableDiffusion2ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + /** + * T2IAdapterModelFormat + * @description An enumeration. + * @enum {string} + */ + T2IAdapterModelFormat: "diffusers"; }; responses: never; parameters: never; @@ -9724,7 +9335,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ImageMetadata"]; + "application/json": components["schemas"]["MetadataField"] | null; }; }; /** @description Validation Error */ @@ -10701,4 +10312,30 @@ export type operations = { }; }; }; + /** + * Get Workflow + * @description Gets a workflow + */ + get_workflow: { + parameters: { + path: { + /** @description The workflow to get */ + workflow_id: string; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["WorkflowField"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; }; diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 63617a4eb5..085ea65327 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -27,14 +27,6 @@ export type BatchConfig = export type EnqueueBatchResult = components['schemas']['EnqueueBatchResult']; -/** - * This is an unsafe type; the object inside is not guaranteed to be valid. 
- */ -export type UnsafeImageMetadata = { - metadata: s['CoreMetadata']; - graph: NonNullable; -}; - export type _InputField = s['_InputField']; export type _OutputField = s['_OutputField']; @@ -50,7 +42,6 @@ export type ImageChanges = s['ImageRecordChanges']; export type ImageCategory = s['ImageCategory']; export type ResourceOrigin = s['ResourceOrigin']; export type ImageField = s['ImageField']; -export type ImageMetadata = s['ImageMetadata']; export type OffsetPaginatedResults_BoardDTO_ = s['OffsetPaginatedResults_BoardDTO_']; export type OffsetPaginatedResults_ImageDTO_ = @@ -145,13 +136,19 @@ export type ImageCollectionInvocation = s['ImageCollectionInvocation']; export type MainModelLoaderInvocation = s['MainModelLoaderInvocation']; export type OnnxModelLoaderInvocation = s['OnnxModelLoaderInvocation']; export type LoraLoaderInvocation = s['LoraLoaderInvocation']; -export type MetadataAccumulatorInvocation = s['MetadataAccumulatorInvocation']; export type ESRGANInvocation = s['ESRGANInvocation']; export type DivideInvocation = s['DivideInvocation']; export type ImageNSFWBlurInvocation = s['ImageNSFWBlurInvocation']; export type ImageWatermarkInvocation = s['ImageWatermarkInvocation']; export type SeamlessModeInvocation = s['SeamlessModeInvocation']; export type SaveImageInvocation = s['SaveImageInvocation']; +export type MetadataInvocation = s['MetadataInvocation']; +export type CoreMetadataInvocation = s['CoreMetadataInvocation']; +export type MetadataItemInvocation = s['MetadataItemInvocation']; +export type MergeMetadataInvocation = s['MergeMetadataInvocation']; +export type IPAdapterMetadataField = s['IPAdapterMetadataField']; +export type T2IAdapterField = s['T2IAdapterField']; +export type LoRAMetadataField = s['LoRAMetadataField']; // ControlNet Nodes export type ControlNetInvocation = s['ControlNetInvocation']; diff --git a/tests/nodes/test_node_graph.py b/tests/nodes/test_node_graph.py index 3c965895f9..d1ece0336a 100644 --- a/tests/nodes/test_node_graph.py +++ b/tests/nodes/test_node_graph.py @@ -10,7 +10,12 @@ from invokeai.app.invocations.baseinvocation import ( ) from invokeai.app.invocations.image import ShowImageInvocation from invokeai.app.invocations.math import AddInvocation, SubtractInvocation -from invokeai.app.invocations.primitives import FloatInvocation, IntegerInvocation +from invokeai.app.invocations.primitives import ( + FloatCollectionInvocation, + FloatInvocation, + IntegerInvocation, + StringInvocation, +) from invokeai.app.invocations.upscale import ESRGANInvocation from invokeai.app.services.shared.default_graphs import create_text_to_image from invokeai.app.services.shared.graph import ( @@ -27,8 +32,11 @@ from invokeai.app.services.shared.graph import ( ) from .test_nodes import ( + AnyTypeTestInvocation, ImageToImageTestInvocation, ListPassThroughInvocation, + PolymorphicStringTestInvocation, + PromptCollectionTestInvocation, PromptTestInvocation, TextToImageTestInvocation, ) @@ -692,6 +700,144 @@ def test_ints_do_not_accept_floats(): g.add_edge(e) +def test_polymorphic_accepts_single(): + g = Graph() + n1 = StringInvocation(id="1", value="banana") + n2 = PolymorphicStringTestInvocation(id="2") + g.add_node(n1) + g.add_node(n2) + e1 = create_edge(n1.id, "value", n2.id, "value") + # Not throwing on this line is sufficient + g.add_edge(e1) + + +def test_polymorphic_accepts_collection_of_same_base_type(): + g = Graph() + n1 = PromptCollectionTestInvocation(id="1", collection=["banana", "sundae"]) + n2 = PolymorphicStringTestInvocation(id="2") + 
g.add_node(n1) + g.add_node(n2) + e1 = create_edge(n1.id, "collection", n2.id, "value") + # Not throwing on this line is sufficient + g.add_edge(e1) + + +def test_polymorphic_does_not_accept_collection_of_different_base_type(): + g = Graph() + n1 = FloatCollectionInvocation(id="1", collection=[1.0, 2.0, 3.0]) + n2 = PolymorphicStringTestInvocation(id="2") + g.add_node(n1) + g.add_node(n2) + e1 = create_edge(n1.id, "collection", n2.id, "value") + with pytest.raises(InvalidEdgeError): + g.add_edge(e1) + + +def test_polymorphic_does_not_accept_generic_collection(): + g = Graph() + n1 = IntegerInvocation(id="1", value=1) + n2 = IntegerInvocation(id="2", value=2) + n3 = CollectInvocation(id="3") + n4 = PolymorphicStringTestInvocation(id="4") + g.add_node(n1) + g.add_node(n2) + g.add_node(n3) + g.add_node(n4) + e1 = create_edge(n1.id, "value", n3.id, "item") + e2 = create_edge(n2.id, "value", n3.id, "item") + e3 = create_edge(n3.id, "collection", n4.id, "value") + g.add_edge(e1) + g.add_edge(e2) + with pytest.raises(InvalidEdgeError): + g.add_edge(e3) + + +def test_any_accepts_integer(): + g = Graph() + n1 = IntegerInvocation(id="1", value=1) + n2 = AnyTypeTestInvocation(id="2") + g.add_node(n1) + g.add_node(n2) + e = create_edge(n1.id, "value", n2.id, "value") + # Not throwing on this line is sufficient + g.add_edge(e) + + +def test_any_accepts_string(): + g = Graph() + n1 = StringInvocation(id="1", value="banana sundae") + n2 = AnyTypeTestInvocation(id="2") + g.add_node(n1) + g.add_node(n2) + e = create_edge(n1.id, "value", n2.id, "value") + # Not throwing on this line is sufficient + g.add_edge(e) + + +def test_any_accepts_generic_collection(): + g = Graph() + n1 = IntegerInvocation(id="1", value=1) + n2 = IntegerInvocation(id="2", value=2) + n3 = CollectInvocation(id="3") + n4 = AnyTypeTestInvocation(id="4") + g.add_node(n1) + g.add_node(n2) + g.add_node(n3) + g.add_node(n4) + e1 = create_edge(n1.id, "value", n3.id, "item") + e2 = create_edge(n2.id, "value", n3.id, "item") + e3 = create_edge(n3.id, "collection", n4.id, "value") + g.add_edge(e1) + g.add_edge(e2) + # Not throwing on this line is sufficient + g.add_edge(e3) + + +def test_any_accepts_prompt_collection(): + g = Graph() + n1 = PromptCollectionTestInvocation(id="1", collection=["banana", "sundae"]) + n2 = AnyTypeTestInvocation(id="2") + g.add_node(n1) + g.add_node(n2) + e = create_edge(n1.id, "collection", n2.id, "value") + # Not throwing on this line is sufficient + g.add_edge(e) + + +def test_any_accepts_any(): + g = Graph() + n1 = AnyTypeTestInvocation(id="1") + n2 = AnyTypeTestInvocation(id="2") + g.add_node(n1) + g.add_node(n2) + e = create_edge(n1.id, "value", n2.id, "value") + # Not throwing on this line is sufficient + g.add_edge(e) + + +def test_iterate_accepts_collection(): + """We need to update the validation for Collect -> Iterate to traverse to the Iterate + node's output and compare that against the item type of the Collect node's collection. 
Until + then, Collect nodes may not output into Iterate nodes.""" + g = Graph() + n1 = IntegerInvocation(id="1", value=1) + n2 = IntegerInvocation(id="2", value=2) + n3 = CollectInvocation(id="3") + n4 = IterateInvocation(id="4") + g.add_node(n1) + g.add_node(n2) + g.add_node(n3) + g.add_node(n4) + e1 = create_edge(n1.id, "value", n3.id, "item") + e2 = create_edge(n2.id, "value", n3.id, "item") + e3 = create_edge(n3.id, "collection", n4.id, "collection") + g.add_edge(e1) + g.add_edge(e2) + # Once we fix the validation logic as described, this should should not raise an error + with pytest.raises(InvalidEdgeError, match="Cannot connect collector to iterator"): + g.add_edge(e3) + + def test_graph_can_generate_schema(): # Not throwing on this line is sufficient # NOTE: if this test fails, it's PROBABLY because a new invocation type is breaking schema generation diff --git a/tests/nodes/test_nodes.py b/tests/nodes/test_nodes.py index 471c72a005..7807a56879 100644 --- a/tests/nodes/test_nodes.py +++ b/tests/nodes/test_nodes.py @@ -81,6 +81,29 @@ class PromptCollectionTestInvocation(BaseInvocation): return PromptCollectionTestInvocationOutput(collection=self.collection.copy()) +@invocation_output("test_any_output") +class AnyTypeTestInvocationOutput(BaseInvocationOutput): + value: Any = Field() + + +@invocation("test_any") +class AnyTypeTestInvocation(BaseInvocation): + value: Any = Field(default=None) + + def invoke(self, context: InvocationContext) -> AnyTypeTestInvocationOutput: + return AnyTypeTestInvocationOutput(value=self.value) + + +@invocation("test_polymorphic") +class PolymorphicStringTestInvocation(BaseInvocation): + value: Union[str, list[str]] = Field(default="") + + def invoke(self, context: InvocationContext) -> PromptCollectionTestInvocationOutput: + if isinstance(self.value, str): + return PromptCollectionTestInvocationOutput(collection=[self.value]) + return PromptCollectionTestInvocationOutput(collection=self.value) + + # Importing these must happen after test invocations are defined or they won't register from invokeai.app.services.events.events_base import EventServiceBase # noqa: E402 from invokeai.app.services.shared.graph import Edge, EdgeConnection # noqa: E402 From 5a163f02a66a75b373fe86bbf8b5f92c0de71ee3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:08:01 +1100 Subject: [PATCH 118/202] fix(nodes): fix metadata/workflow serialization --- invokeai/app/invocations/baseinvocation.py | 3 --- .../app/services/workflow_records/workflow_records_common.py | 3 --- 2 files changed, 6 deletions(-) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 39df4971a6..162b22b28d 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -830,9 +830,6 @@ class MetadataField(RootModel): root: dict[str, Any] = Field(description="A dictionary of metadata, shape of which is arbitrary") - def model_dump(self, *args, **kwargs) -> dict[str, Any]: - return super().model_dump(*args, **kwargs)["root"] - type_adapter_MetadataField = TypeAdapter(MetadataField) diff --git a/invokeai/app/services/workflow_records/workflow_records_common.py b/invokeai/app/services/workflow_records/workflow_records_common.py index d548656dab..32046328bb 100644 --- a/invokeai/app/services/workflow_records/workflow_records_common.py +++ b/invokeai/app/services/workflow_records/workflow_records_common.py @@ -15,8 +15,5 @@ class 
WorkflowField(RootModel): root: dict[str, Any] = Field(description="Workflow dict") - def model_dump(self, *args, **kwargs) -> dict[str, Any]: - return super().model_dump(*args, **kwargs)["root"] - type_adapter_WorkflowField = TypeAdapter(WorkflowField) From 3c4f43314ccd9e552e6637ccdbe1f84b86ed52ce Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:42:02 +1100 Subject: [PATCH 119/202] feat: move workflow/metadata models to `baseinvocation.py` needed to prevent circular imports --- invokeai/app/api/routers/images.py | 7 +++++-- invokeai/app/api/routers/workflows.py | 2 +- invokeai/app/invocations/baseinvocation.py | 20 ++++++++++++------- invokeai/app/invocations/metadata.py | 6 +++++- .../services/image_files/image_files_base.py | 3 +-- .../services/image_files/image_files_disk.py | 3 +-- invokeai/app/services/images/images_base.py | 3 +-- .../app/services/images/images_default.py | 3 +-- .../workflow_records/workflow_records_base.py | 2 +- .../workflow_records_common.py | 17 ---------------- .../workflow_records_sqlite.py | 7 ++----- 11 files changed, 31 insertions(+), 42 deletions(-) diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index f462437700..625fb3c43b 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -7,11 +7,14 @@ from fastapi.routing import APIRouter from PIL import Image from pydantic import BaseModel, Field, ValidationError -from invokeai.app.invocations.baseinvocation import MetadataField, type_adapter_MetadataField +from invokeai.app.invocations.baseinvocation import ( + MetadataField, + type_adapter_MetadataField, + type_adapter_WorkflowField, +) from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from invokeai.app.services.workflow_records.workflow_records_common import type_adapter_WorkflowField from ..dependencies import ApiDependencies diff --git a/invokeai/app/api/routers/workflows.py b/invokeai/app/api/routers/workflows.py index 57a33fe73f..36de31fb51 100644 --- a/invokeai/app/api/routers/workflows.py +++ b/invokeai/app/api/routers/workflows.py @@ -1,7 +1,7 @@ from fastapi import APIRouter, Path from invokeai.app.api.dependencies import ApiDependencies -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField +from invokeai.app.invocations.baseinvocation import WorkflowField workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"]) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 162b22b28d..50ce8de7d3 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -15,7 +15,6 @@ from pydantic.fields import _Unset from pydantic_core import PydanticUndefined from invokeai.app.services.config.config_default import InvokeAIAppConfig -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField from invokeai.app.util.misc import uuid_string if TYPE_CHECKING: @@ -813,22 +812,29 @@ def invocation_output( return wrapper +class WorkflowField(RootModel): + """ + Pydantic model for workflows with custom root of type dict[str, Any]. + Workflows are stored without a strict schema. 
+ """ + + root: dict[str, Any] = Field(description="The workflow") + + +type_adapter_WorkflowField = TypeAdapter(WorkflowField) + + class WithWorkflow(BaseModel): workflow: Optional[WorkflowField] = InputField(default=None, description=FieldDescriptions.workflow) -class MetadataItemField(BaseModel): - label: str = Field(description=FieldDescriptions.metadata_item_label) - value: Any = Field(description=FieldDescriptions.metadata_item_value) - - class MetadataField(RootModel): """ Pydantic model for metadata with custom root of type dict[str, Any]. Metadata is stored without a strict schema. """ - root: dict[str, Any] = Field(description="A dictionary of metadata, shape of which is arbitrary") + root: dict[str, Any] = Field(description="The metadata") type_adapter_MetadataField = TypeAdapter(MetadataField) diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 205dbef814..98f5f0e830 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -9,7 +9,6 @@ from invokeai.app.invocations.baseinvocation import ( InputField, InvocationContext, MetadataField, - MetadataItemField, OutputField, UIType, invocation, @@ -24,6 +23,11 @@ from invokeai.app.invocations.t2i_adapter import T2IAdapterField from ...version import __version__ +class MetadataItemField(BaseModel): + label: str = Field(description=FieldDescriptions.metadata_item_label) + value: Any = Field(description=FieldDescriptions.metadata_item_value) + + class LoRAMetadataField(BaseModel): """LoRA Metadata Field""" diff --git a/invokeai/app/services/image_files/image_files_base.py b/invokeai/app/services/image_files/image_files_base.py index 3f6e797225..91e18f30fc 100644 --- a/invokeai/app/services/image_files/image_files_base.py +++ b/invokeai/app/services/image_files/image_files_base.py @@ -4,8 +4,7 @@ from typing import Optional from PIL.Image import Image as PILImageType -from invokeai.app.invocations.metadata import MetadataField -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField +from invokeai.app.invocations.baseinvocation import MetadataField, WorkflowField class ImageFileStorageBase(ABC): diff --git a/invokeai/app/services/image_files/image_files_disk.py b/invokeai/app/services/image_files/image_files_disk.py index 57c05562d5..e8a733d619 100644 --- a/invokeai/app/services/image_files/image_files_disk.py +++ b/invokeai/app/services/image_files/image_files_disk.py @@ -7,9 +7,8 @@ from PIL import Image, PngImagePlugin from PIL.Image import Image as PILImageType from send2trash import send2trash -from invokeai.app.invocations.metadata import MetadataField +from invokeai.app.invocations.baseinvocation import MetadataField, WorkflowField from invokeai.app.services.invoker import Invoker -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail from .image_files_base import ImageFileStorageBase diff --git a/invokeai/app/services/images/images_base.py b/invokeai/app/services/images/images_base.py index ebb40424bc..50a3a5fb82 100644 --- a/invokeai/app/services/images/images_base.py +++ b/invokeai/app/services/images/images_base.py @@ -3,7 +3,7 @@ from typing import Callable, Optional from PIL.Image import Image as PILImageType -from invokeai.app.invocations.metadata import MetadataField +from invokeai.app.invocations.baseinvocation import MetadataField, WorkflowField from 
invokeai.app.services.image_records.image_records_common import ( ImageCategory, ImageRecord, @@ -12,7 +12,6 @@ from invokeai.app.services.image_records.image_records_common import ( ) from invokeai.app.services.images.images_common import ImageDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField class ImageServiceABC(ABC): diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py index e466e809b1..a0d59470fc 100644 --- a/invokeai/app/services/images/images_default.py +++ b/invokeai/app/services/images/images_default.py @@ -2,10 +2,9 @@ from typing import Optional from PIL.Image import Image as PILImageType -from invokeai.app.invocations.metadata import MetadataField +from invokeai.app.invocations.baseinvocation import MetadataField, WorkflowField from invokeai.app.services.invoker import Invoker from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField from ..image_files.image_files_common import ( ImageFileDeleteException, diff --git a/invokeai/app/services/workflow_records/workflow_records_base.py b/invokeai/app/services/workflow_records/workflow_records_base.py index 97f7cfe3c0..d5a4b25ce4 100644 --- a/invokeai/app/services/workflow_records/workflow_records_base.py +++ b/invokeai/app/services/workflow_records/workflow_records_base.py @@ -1,6 +1,6 @@ from abc import ABC, abstractmethod -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowField +from invokeai.app.invocations.baseinvocation import WorkflowField class WorkflowRecordsStorageBase(ABC): diff --git a/invokeai/app/services/workflow_records/workflow_records_common.py b/invokeai/app/services/workflow_records/workflow_records_common.py index 32046328bb..3a2b13f565 100644 --- a/invokeai/app/services/workflow_records/workflow_records_common.py +++ b/invokeai/app/services/workflow_records/workflow_records_common.py @@ -1,19 +1,2 @@ -from typing import Any - -from pydantic import Field, RootModel, TypeAdapter - - class WorkflowNotFoundError(Exception): """Raised when a workflow is not found""" - - -class WorkflowField(RootModel): - """ - Pydantic model for workflows with custom root of type dict[str, Any]. - Workflows are stored without a strict schema. 
- """ - - root: dict[str, Any] = Field(description="Workflow dict") - - -type_adapter_WorkflowField = TypeAdapter(WorkflowField) diff --git a/invokeai/app/services/workflow_records/workflow_records_sqlite.py b/invokeai/app/services/workflow_records/workflow_records_sqlite.py index 2b284ac03f..e3c11cfa4b 100644 --- a/invokeai/app/services/workflow_records/workflow_records_sqlite.py +++ b/invokeai/app/services/workflow_records/workflow_records_sqlite.py @@ -1,14 +1,11 @@ import sqlite3 import threading +from invokeai.app.invocations.baseinvocation import WorkflowField, type_adapter_WorkflowField from invokeai.app.services.invoker import Invoker from invokeai.app.services.shared.sqlite import SqliteDatabase from invokeai.app.services.workflow_records.workflow_records_base import WorkflowRecordsStorageBase -from invokeai.app.services.workflow_records.workflow_records_common import ( - WorkflowField, - WorkflowNotFoundError, - type_adapter_WorkflowField, -) +from invokeai.app.services.workflow_records.workflow_records_common import WorkflowNotFoundError from invokeai.app.util.misc import uuid_string From 4012388f0aca3836714cbd6bba02f4ccda61b5ce Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:46:37 +1100 Subject: [PATCH 120/202] feat: use `ModelValidator` naming convention for pydantic type adapters This is the naming convention in the docs and is also clear. --- invokeai/app/api/routers/images.py | 8 ++++---- invokeai/app/api/routers/models.py | 20 +++++++++---------- invokeai/app/invocations/baseinvocation.py | 4 ++-- .../image_records/image_records_sqlite.py | 4 ++-- .../item_storage/item_storage_sqlite.py | 10 +++++----- .../session_queue/session_queue_common.py | 8 ++++---- .../workflow_records_sqlite.py | 4 ++-- tests/nodes/test_node_graph.py | 4 ++-- tests/nodes/test_session_queue.py | 10 +++++----- 9 files changed, 36 insertions(+), 36 deletions(-) diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 625fb3c43b..c27ec1e0d9 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -9,8 +9,8 @@ from pydantic import BaseModel, Field, ValidationError from invokeai.app.invocations.baseinvocation import ( MetadataField, - type_adapter_MetadataField, - type_adapter_WorkflowField, + MetadataFieldValidator, + WorkflowFieldValidator, ) from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO @@ -66,7 +66,7 @@ async def upload_image( metadata_raw = pil_image.info.get("invokeai_metadata", None) if metadata_raw: try: - metadata = type_adapter_MetadataField.validate_json(metadata_raw) + metadata = MetadataFieldValidator.validate_json(metadata_raw) except ValidationError: ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image") pass @@ -75,7 +75,7 @@ async def upload_image( workflow_raw = pil_image.info.get("invokeai_workflow", None) if workflow_raw is not None: try: - workflow = type_adapter_WorkflowField.validate_json(workflow_raw) + workflow = WorkflowFieldValidator.validate_json(workflow_raw) except ValidationError: ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image") pass diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py index 018f3af02b..afa7d8df82 100644 --- a/invokeai/app/api/routers/models.py +++ 
b/invokeai/app/api/routers/models.py @@ -23,13 +23,13 @@ from ..dependencies import ApiDependencies models_router = APIRouter(prefix="/v1/models", tags=["models"]) UpdateModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] -update_models_response_adapter = TypeAdapter(UpdateModelResponse) +UpdateModelResponseValidator = TypeAdapter(UpdateModelResponse) ImportModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] -import_models_response_adapter = TypeAdapter(ImportModelResponse) +ImportModelResponseValidator = TypeAdapter(ImportModelResponse) ConvertModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] -convert_models_response_adapter = TypeAdapter(ConvertModelResponse) +ConvertModelResponseValidator = TypeAdapter(ConvertModelResponse) MergeModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] ImportModelAttributes = Union[tuple(OPENAPI_MODEL_CONFIGS)] @@ -41,7 +41,7 @@ class ModelsList(BaseModel): model_config = ConfigDict(use_enum_values=True) -models_list_adapter = TypeAdapter(ModelsList) +ModelsListValidator = TypeAdapter(ModelsList) @models_router.get( @@ -60,7 +60,7 @@ async def list_models( models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type)) else: models_raw = ApiDependencies.invoker.services.model_manager.list_models(None, model_type) - models = models_list_adapter.validate_python({"models": models_raw}) + models = ModelsListValidator.validate_python({"models": models_raw}) return models @@ -131,7 +131,7 @@ async def update_model( base_model=base_model, model_type=model_type, ) - model_response = update_models_response_adapter.validate_python(model_raw) + model_response = UpdateModelResponseValidator.validate_python(model_raw) except ModelNotFoundException as e: raise HTTPException(status_code=404, detail=str(e)) except ValueError as e: @@ -186,7 +186,7 @@ async def import_model( model_raw = ApiDependencies.invoker.services.model_manager.list_model( model_name=info.name, base_model=info.base_model, model_type=info.model_type ) - return import_models_response_adapter.validate_python(model_raw) + return ImportModelResponseValidator.validate_python(model_raw) except ModelNotFoundException as e: logger.error(str(e)) @@ -231,7 +231,7 @@ async def add_model( base_model=info.base_model, model_type=info.model_type, ) - return import_models_response_adapter.validate_python(model_raw) + return ImportModelResponseValidator.validate_python(model_raw) except ModelNotFoundException as e: logger.error(str(e)) raise HTTPException(status_code=404, detail=str(e)) @@ -302,7 +302,7 @@ async def convert_model( model_raw = ApiDependencies.invoker.services.model_manager.list_model( model_name, base_model=base_model, model_type=model_type ) - response = convert_models_response_adapter.validate_python(model_raw) + response = ConvertModelResponseValidator.validate_python(model_raw) except ModelNotFoundException as e: raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found: {str(e)}") except ValueError as e: @@ -417,7 +417,7 @@ async def merge_models( base_model=base_model, model_type=ModelType.Main, ) - response = convert_models_response_adapter.validate_python(model_raw) + response = ConvertModelResponseValidator.validate_python(model_raw) except ModelNotFoundException: raise HTTPException( status_code=404, diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 50ce8de7d3..5f1ff0395f 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ 
-821,7 +821,7 @@ class WorkflowField(RootModel): root: dict[str, Any] = Field(description="The workflow") -type_adapter_WorkflowField = TypeAdapter(WorkflowField) +WorkflowFieldValidator = TypeAdapter(WorkflowField) class WithWorkflow(BaseModel): @@ -837,7 +837,7 @@ class MetadataField(RootModel): root: dict[str, Any] = Field(description="The metadata") -type_adapter_MetadataField = TypeAdapter(MetadataField) +MetadataFieldValidator = TypeAdapter(MetadataField) class WithMetadata(BaseModel): diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py index 7b60ec3d5b..dcabe55829 100644 --- a/invokeai/app/services/image_records/image_records_sqlite.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -3,7 +3,7 @@ import threading from datetime import datetime from typing import Optional, Union, cast -from invokeai.app.invocations.baseinvocation import MetadataField, type_adapter_MetadataField +from invokeai.app.invocations.baseinvocation import MetadataField, MetadataFieldValidator from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.shared.sqlite import SqliteDatabase @@ -170,7 +170,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): as_dict = dict(result) metadata_raw = cast(Optional[str], as_dict.get("metadata", None)) - return type_adapter_MetadataField.validate_json(metadata_raw) if metadata_raw is not None else None + return MetadataFieldValidator.validate_json(metadata_raw) if metadata_raw is not None else None except sqlite3.Error as e: self._conn.rollback() raise ImageRecordNotFoundException from e diff --git a/invokeai/app/services/item_storage/item_storage_sqlite.py b/invokeai/app/services/item_storage/item_storage_sqlite.py index 1bb9429130..d0249ebfa6 100644 --- a/invokeai/app/services/item_storage/item_storage_sqlite.py +++ b/invokeai/app/services/item_storage/item_storage_sqlite.py @@ -18,7 +18,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): _cursor: sqlite3.Cursor _id_field: str _lock: threading.RLock - _adapter: Optional[TypeAdapter[T]] + _validator: Optional[TypeAdapter[T]] def __init__(self, db: SqliteDatabase, table_name: str, id_field: str = "id"): super().__init__() @@ -28,7 +28,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): self._table_name = table_name self._id_field = id_field # TODO: validate that T has this field self._cursor = self._conn.cursor() - self._adapter: Optional[TypeAdapter[T]] = None + self._validator: Optional[TypeAdapter[T]] = None self._create_table() @@ -47,14 +47,14 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): self._lock.release() def _parse_item(self, item: str) -> T: - if self._adapter is None: + if self._validator is None: """ We don't get access to `__orig_class__` in `__init__()`, and we need this before start(), so we can create it when it is first needed instead. 
__orig_class__ is technically an implementation detail of the typing module, not a supported API """ - self._adapter = TypeAdapter(get_args(self.__orig_class__)[0]) # type: ignore [attr-defined] - return self._adapter.validate_json(item) + self._validator = TypeAdapter(get_args(self.__orig_class__)[0]) # type: ignore [attr-defined] + return self._validator.validate_json(item) def set(self, item: T): try: diff --git a/invokeai/app/services/session_queue/session_queue_common.py b/invokeai/app/services/session_queue/session_queue_common.py index cbf2154b66..69e6a3ab87 100644 --- a/invokeai/app/services/session_queue/session_queue_common.py +++ b/invokeai/app/services/session_queue/session_queue_common.py @@ -147,20 +147,20 @@ DEFAULT_QUEUE_ID = "default" QUEUE_ITEM_STATUS = Literal["pending", "in_progress", "completed", "failed", "canceled"] -adapter_NodeFieldValue = TypeAdapter(list[NodeFieldValue]) +NodeFieldValueValidator = TypeAdapter(list[NodeFieldValue]) def get_field_values(queue_item_dict: dict) -> Optional[list[NodeFieldValue]]: field_values_raw = queue_item_dict.get("field_values", None) - return adapter_NodeFieldValue.validate_json(field_values_raw) if field_values_raw is not None else None + return NodeFieldValueValidator.validate_json(field_values_raw) if field_values_raw is not None else None -adapter_GraphExecutionState = TypeAdapter(GraphExecutionState) +GraphExecutionStateValidator = TypeAdapter(GraphExecutionState) def get_session(queue_item_dict: dict) -> GraphExecutionState: session_raw = queue_item_dict.get("session", "{}") - session = adapter_GraphExecutionState.validate_json(session_raw, strict=False) + session = GraphExecutionStateValidator.validate_json(session_raw, strict=False) return session diff --git a/invokeai/app/services/workflow_records/workflow_records_sqlite.py b/invokeai/app/services/workflow_records/workflow_records_sqlite.py index e3c11cfa4b..2d9e1f26e8 100644 --- a/invokeai/app/services/workflow_records/workflow_records_sqlite.py +++ b/invokeai/app/services/workflow_records/workflow_records_sqlite.py @@ -1,7 +1,7 @@ import sqlite3 import threading -from invokeai.app.invocations.baseinvocation import WorkflowField, type_adapter_WorkflowField +from invokeai.app.invocations.baseinvocation import WorkflowField, WorkflowFieldValidator from invokeai.app.services.invoker import Invoker from invokeai.app.services.shared.sqlite import SqliteDatabase from invokeai.app.services.workflow_records.workflow_records_base import WorkflowRecordsStorageBase @@ -39,7 +39,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase): row = self._cursor.fetchone() if row is None: raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found") - return type_adapter_WorkflowField.validate_json(row[0]) + return WorkflowFieldValidator.validate_json(row[0]) except Exception: self._conn.rollback() raise diff --git a/tests/nodes/test_node_graph.py b/tests/nodes/test_node_graph.py index d1ece0336a..e2a50e61e5 100644 --- a/tests/nodes/test_node_graph.py +++ b/tests/nodes/test_node_graph.py @@ -615,8 +615,8 @@ def test_graph_can_deserialize(): g.add_edge(e) json = g.model_dump_json() - adapter_graph = TypeAdapter(Graph) - g2 = adapter_graph.validate_json(json) + GraphValidator = TypeAdapter(Graph) + g2 = GraphValidator.validate_json(json) assert g2 is not None assert g2.nodes["1"] is not None diff --git a/tests/nodes/test_session_queue.py b/tests/nodes/test_session_queue.py index 731316068c..cdab5729f8 100644 --- a/tests/nodes/test_session_queue.py +++ 
b/tests/nodes/test_session_queue.py @@ -150,9 +150,9 @@ def test_prepare_values_to_insert(batch_data_collection, batch_graph): values = prepare_values_to_insert(queue_id="default", batch=b, priority=0, max_new_queue_items=1000) assert len(values) == 8 - session_adapter = TypeAdapter(GraphExecutionState) + GraphExecutionStateValidator = TypeAdapter(GraphExecutionState) # graph should be serialized - ges = session_adapter.validate_json(values[0].session) + ges = GraphExecutionStateValidator.validate_json(values[0].session) # graph values should be populated assert ges.graph.get_node("1").prompt == "Banana sushi" @@ -161,16 +161,16 @@ def test_prepare_values_to_insert(batch_data_collection, batch_graph): assert ges.graph.get_node("4").prompt == "Nissan" # session ids should match deserialized graph - assert [v.session_id for v in values] == [session_adapter.validate_json(v.session).id for v in values] + assert [v.session_id for v in values] == [GraphExecutionStateValidator.validate_json(v.session).id for v in values] # should unique session ids sids = [v.session_id for v in values] assert len(sids) == len(set(sids)) - nfv_list_adapter = TypeAdapter(list[NodeFieldValue]) + NodeFieldValueValidator = TypeAdapter(list[NodeFieldValue]) # should have 3 node field values assert type(values[0].field_values) is str - assert len(nfv_list_adapter.validate_json(values[0].field_values)) == 3 + assert len(NodeFieldValueValidator.validate_json(values[0].field_values)) == 3 # should have batch id and priority assert all(v.batch_id == b.batch_id for v in values) From 8910e912c791e7c08b330204f4db5b64f1d07354 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:48:47 +1100 Subject: [PATCH 121/202] chore(ui): regen types --- .../frontend/web/src/services/api/schema.d.ts | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 932891c862..6092e822d6 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -2942,7 +2942,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | 
components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | 
components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["GraphInvocation"]; + [key: string]: components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["ConditioningInvocation"] | 
components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ImageCropInvocation"] | 
components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["InfillColorInvocation"]; }; /** * Edges @@ -2979,7 +2979,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["FaceOffOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["String2Output"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["LatentsOutput"]; + [key: string]: components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["String2Output"] | components["schemas"]["ImageOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["NoiseOutput"] | 
components["schemas"]["IterateInvocationOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FaceMaskOutput"]; }; /** * Errors @@ -8728,23 +8728,11 @@ export type components = { ui_order: number | null; }; /** - * ControlNetModelFormat + * StableDiffusionOnnxModelFormat * @description An enumeration. * @enum {string} */ - ControlNetModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusion2ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * IPAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - IPAdapterModelFormat: "invokeai"; + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * CLIPVisionModelFormat * @description An enumeration. @@ -8752,17 +8740,23 @@ export type components = { */ CLIPVisionModelFormat: "diffusers"; /** - * StableDiffusion1ModelFormat + * ControlNetModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; + ControlNetModelFormat: "checkpoint" | "diffusers"; /** - * StableDiffusionOnnxModelFormat + * IPAdapterModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; + IPAdapterModelFormat: "invokeai"; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionXLModelFormat * @description An enumeration. @@ -8775,6 +8769,12 @@ export type components = { * @enum {string} */ T2IAdapterModelFormat: "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; From bbae4045c9f5e643d3ee11611991a141c682ea96 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 23:22:30 +1100 Subject: [PATCH 122/202] fix(nodes): `GraphInvocation` should use `InputField` --- invokeai/app/services/shared/graph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py index 0f703db749..e9a4c73d4e 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -193,7 +193,7 @@ class GraphInvocation(BaseInvocation): """Execute a graph""" # TODO: figure out how to create a default here - graph: "Graph" = Field(description="The graph to run", default=None) + graph: Optional["Graph"] = InputField(description="The graph to run", default=None) def invoke(self, context: InvocationContext) -> GraphInvocationOutput: """Invoke with provided services and return outputs.""" From 7b6e2bc37fd1381a944768d19bb50b0cb27ee1ad Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 23:23:17 +1100 Subject: [PATCH 123/202] feat(nodes): add field name validation Protect against using reserved field names --- invokeai/app/invocations/baseinvocation.py | 109 ++++++++++++++++++--- 1 file changed, 93 insertions(+), 16 deletions(-) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 5f1ff0395f..25589510a6 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -2,6 +2,7 @@ from __future__ import annotations +import inspect import re from abc import ABC, abstractmethod from enum import Enum @@ -11,7 +12,7 @@ from typing import TYPE_CHECKING, Any, Callable, ClassVar, Iterable, Literal, Op import semver from pydantic import BaseModel, ConfigDict, Field, RootModel, TypeAdapter, create_model -from pydantic.fields import _Unset +from pydantic.fields import FieldInfo, _Unset from pydantic_core import PydanticUndefined from invokeai.app.services.config.config_default import InvokeAIAppConfig @@ -25,6 +26,10 @@ class InvalidVersionError(ValueError): pass +class InvalidFieldError(TypeError): + pass + + class FieldDescriptions: denoising_start = "When to start denoising, expressed a percentage of total steps" denoising_end = "When to stop denoising, expressed a percentage of total steps" @@ -302,6 +307,7 @@ def InputField( ui_order=ui_order, item_default=item_default, ui_choice_labels=ui_choice_labels, + _field_kind="input", ) field_args = dict( @@ -444,6 +450,7 @@ def OutputField( ui_type=ui_type, ui_hidden=ui_hidden, ui_order=ui_order, + _field_kind="output", ), ) @@ -527,6 +534,7 @@ class BaseInvocationOutput(BaseModel): schema["required"].extend(["type"]) model_config = ConfigDict( + protected_namespaces=(), validate_assignment=True, json_schema_serialization_defaults_required=True, json_schema_extra=json_schema_extra, @@ -549,9 +557,6 @@ class MissingInputException(Exception): class BaseInvocation(ABC, BaseModel): """ - A node to process inputs and produce outputs. - May use dependency injection in __init__ to receive providers. - All invocations must use the `@invocation` decorator to provide their unique type. 
""" @@ -667,17 +672,21 @@ class BaseInvocation(ABC, BaseModel): id: str = Field( default_factory=uuid_string, description="The id of this instance of an invocation. Must be unique among all instances of invocations.", + json_schema_extra=dict(_field_kind="internal"), ) - is_intermediate: Optional[bool] = Field( + is_intermediate: bool = Field( default=False, description="Whether or not this is an intermediate invocation.", - json_schema_extra=dict(ui_type=UIType.IsIntermediate), + json_schema_extra=dict(ui_type=UIType.IsIntermediate, _field_kind="internal"), + ) + use_cache: bool = Field( + default=True, description="Whether or not to use the cache", json_schema_extra=dict(_field_kind="internal") ) - use_cache: bool = InputField(default=True, description="Whether or not to use the cache") UIConfig: ClassVar[Type[UIConfigBase]] model_config = ConfigDict( + protected_namespaces=(), validate_assignment=True, json_schema_extra=json_schema_extra, json_schema_serialization_defaults_required=True, @@ -688,6 +697,70 @@ class BaseInvocation(ABC, BaseModel): TBaseInvocation = TypeVar("TBaseInvocation", bound=BaseInvocation) +RESERVED_INPUT_FIELD_NAMES = { + "id", + "is_intermediate", + "use_cache", + "type", + "workflow", + "metadata", +} + +RESERVED_OUTPUT_FIELD_NAMES = {"type"} + + +class _Model(BaseModel): + pass + + +# Get all pydantic model attrs, methods, etc +RESERVED_PYDANTIC_FIELD_NAMES = set(map(lambda m: m[0], inspect.getmembers(_Model()))) + +print(RESERVED_PYDANTIC_FIELD_NAMES) + + +def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None: + """ + Validates the fields of an invocation or invocation output: + - must not override any pydantic reserved fields + - must be created via `InputField`, `OutputField`, or be an internal field defined in this file + """ + for name, field in model_fields.items(): + if name in RESERVED_PYDANTIC_FIELD_NAMES: + raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved by pydantic)') + + field_kind = ( + # _field_kind is defined via InputField(), OutputField() or by one of the internal fields defined in this file + field.json_schema_extra.get("_field_kind", None) + if field.json_schema_extra + else None + ) + + # must have a field_kind + if field_kind is None or field_kind not in {"input", "output", "internal"}: + raise InvalidFieldError( + f'Invalid field definition for "{name}" on "{model_type}" (maybe it\'s not an InputField or OutputField?)' + ) + + if field_kind == "input" and name in RESERVED_INPUT_FIELD_NAMES: + raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved input field name)') + + if field_kind == "output" and name in RESERVED_OUTPUT_FIELD_NAMES: + raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved output field name)') + + # internal fields *must* be in the reserved list + if ( + field_kind == "internal" + and name not in RESERVED_INPUT_FIELD_NAMES + and name not in RESERVED_OUTPUT_FIELD_NAMES + ): + raise InvalidFieldError( + f'Invalid field name "{name}" on "{model_type}" (internal field without reserved name)' + ) + + return None + + def invocation( invocation_type: str, title: Optional[str] = None, @@ -697,7 +770,7 @@ def invocation( use_cache: Optional[bool] = True, ) -> Callable[[Type[TBaseInvocation]], Type[TBaseInvocation]]: """ - Adds metadata to an invocation. + Registers an invocation. :param str invocation_type: The type of the invocation. Must be unique among all invocations. 
:param Optional[str] title: Adds a title to the invocation. Use if the auto-generated title isn't quite right. Defaults to None. @@ -716,6 +789,8 @@ def invocation( if invocation_type in BaseInvocation.get_invocation_types(): raise ValueError(f'Invocation type "{invocation_type}" already exists') + validate_fields(cls.model_fields, invocation_type) + # Add OpenAPI schema extras uiconf_name = cls.__qualname__ + ".UIConfig" if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name: @@ -746,8 +821,7 @@ def invocation( invocation_type_annotation = Literal[invocation_type] # type: ignore invocation_type_field = Field( - title="type", - default=invocation_type, + title="type", default=invocation_type, json_schema_extra=dict(_field_kind="internal") ) docstring = cls.__doc__ @@ -788,13 +862,12 @@ def invocation_output( if output_type in BaseInvocationOutput.get_output_types(): raise ValueError(f'Invocation type "{output_type}" already exists') + validate_fields(cls.model_fields, output_type) + # Add the output type to the model. output_type_annotation = Literal[output_type] # type: ignore - output_type_field = Field( - title="type", - default=output_type, - ) + output_type_field = Field(title="type", default=output_type, json_schema_extra=dict(_field_kind="internal")) docstring = cls.__doc__ cls = create_model( @@ -825,7 +898,9 @@ WorkflowFieldValidator = TypeAdapter(WorkflowField) class WithWorkflow(BaseModel): - workflow: Optional[WorkflowField] = InputField(default=None, description=FieldDescriptions.workflow) + workflow: Optional[WorkflowField] = Field( + default=None, description=FieldDescriptions.workflow, json_schema_extra=dict(_field_kind="internal") + ) class MetadataField(RootModel): @@ -841,4 +916,6 @@ MetadataFieldValidator = TypeAdapter(MetadataField) class WithMetadata(BaseModel): - metadata: Optional[MetadataField] = InputField(default=None, description=FieldDescriptions.metadata) + metadata: Optional[MetadataField] = Field( + default=None, description=FieldDescriptions.metadata, json_schema_extra=dict(_field_kind="internal") + ) From e3e8d8af0233ef9d2bdfc9fef889c864836fc2ff Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 15:38:12 +1100 Subject: [PATCH 124/202] fix(ui): fix log message --- invokeai/frontend/web/src/services/api/endpoints/workflows.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/services/api/endpoints/workflows.ts b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts index 4c69d2e286..7ddd9c5606 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/workflows.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts @@ -19,7 +19,7 @@ export const workflowsApi = api.injectEndpoints({ if (result.success) { return result.data; } else { - logger('images').warn('Problem parsing metadata'); + logger('images').warn('Problem parsing workflow'); } } return; From d32caf7cb1205475ffec054640da5beca959c9c2 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 15:38:27 +1100 Subject: [PATCH 125/202] fix(ui): remove references to metadata accumulator --- .../nodes/util/graphBuilders/buildLinearBatchConfig.ts | 5 +---- .../web/src/features/nodes/util/graphBuilders/constants.ts | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts index 313826452c..8bf9a2785a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts @@ -2,13 +2,12 @@ import { NUMPY_RAND_MAX } from 'app/constants'; import { RootState } from 'app/store/store'; import { generateSeeds } from 'common/util/generateSeeds'; import { NonNullableGraph } from 'features/nodes/types/types'; -import { range, unset } from 'lodash-es'; +import { range } from 'lodash-es'; import { components } from 'services/api/schema'; import { Batch, BatchConfig } from 'services/api/types'; import { CANVAS_COHERENCE_NOISE, METADATA, - METADATA_ACCUMULATOR, NOISE, POSITIVE_CONDITIONING, } from './constants'; @@ -149,8 +148,6 @@ export const prepareLinearUIBatch = ( }); if (shouldConcatSDXLStylePrompt && model?.base_model === 'sdxl') { - unset(graph.nodes[METADATA_ACCUMULATOR], 'positive_style_prompt'); - const stylePrompts = extendedPrompts.map((p) => [p, positiveStylePrompt].join(' ') ); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index e0dc52063b..51dc94769f 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -63,7 +63,6 @@ export const BATCH_SEED = 'batch_seed'; export const BATCH_PROMPT = 'batch_prompt'; export const BATCH_STYLE_PROMPT = 'batch_style_prompt'; export const METADATA_COLLECT = 'metadata_collect'; -export const METADATA_ACCUMULATOR = 'metadata_accumulator'; export const MERGE_METADATA = 'merge_metadata'; export const REALESRGAN = 'esrgan'; export const DIVIDE = 'divide'; From 86c3acf18499db50839b85399b5e8cdfb96b3b7a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 18:14:22 +1100 Subject: [PATCH 126/202] fix(nodes): revert optional graph --- invokeai/app/services/shared/graph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py index e9a4c73d4e..b84d456071 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -193,7 +193,7 @@ class GraphInvocation(BaseInvocation): """Execute a graph""" # TODO: figure out how to create a default here - graph: Optional["Graph"] = InputField(description="The graph to run", default=None) + graph: "Graph" = InputField(description="The graph to run", default=None) def invoke(self, context: InvocationContext) -> GraphInvocationOutput: """Invoke with provided services and return outputs.""" From 6d776bad7e8ff21d608651228c8bda27b238352f Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 18:15:01 +1100 Subject: [PATCH 127/202] fix(nodes): remove errant print --- invokeai/app/invocations/baseinvocation.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 25589510a6..945df4bd83 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -716,8 +716,6 @@ class _Model(BaseModel): # Get all pydantic model attrs, methods, etc RESERVED_PYDANTIC_FIELD_NAMES = set(map(lambda m: m[0], 
inspect.getmembers(_Model()))) -print(RESERVED_PYDANTIC_FIELD_NAMES) - def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None: """ From 0cda7943fa1fd2f215e5238b753a817752a8761b Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 18:16:36 +1100 Subject: [PATCH 128/202] feat(api): add workflow_images junction table similar to boards, images and workflows may be associated via junction table --- invokeai/app/api/dependencies.py | 3 + invokeai/app/api/routers/images.py | 3 + .../image_records/image_records_base.py | 1 - .../image_records/image_records_common.py | 8 -- .../image_records/image_records_sqlite.py | 15 +-- invokeai/app/services/images/images_common.py | 7 + .../app/services/images/images_default.py | 34 +++-- invokeai/app/services/invocation_services.py | 4 + .../workflow_image_records/__init__.py | 0 .../workflow_image_records_base.py | 23 ++++ .../workflow_image_records_sqlite.py | 123 ++++++++++++++++++ 11 files changed, 189 insertions(+), 32 deletions(-) create mode 100644 invokeai/app/services/workflow_image_records/__init__.py create mode 100644 invokeai/app/services/workflow_image_records/workflow_image_records_base.py create mode 100644 invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index ae4882c0d0..4746eeae3f 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -1,6 +1,7 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) from logging import Logger +from invokeai.app.services.workflow_image_records.workflow_image_records_sqlite import SqliteWorkflowImageRecordsStorage from invokeai.backend.util.logging import InvokeAILogger from invokeai.version.invokeai_version import __version__ @@ -91,6 +92,7 @@ class ApiDependencies: session_processor = DefaultSessionProcessor() session_queue = SqliteSessionQueue(db=db) urls = LocalUrlService() + workflow_image_records = SqliteWorkflowImageRecordsStorage(db=db) workflow_records = SqliteWorkflowRecordsStorage(db=db) services = InvocationServices( @@ -116,6 +118,7 @@ class ApiDependencies: session_processor=session_processor, session_queue=session_queue, urls=urls, + workflow_image_records=workflow_image_records, workflow_records=workflow_records, ) diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index c27ec1e0d9..429eaef37c 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -1,4 +1,5 @@ import io +import traceback from typing import Optional from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadFile @@ -60,6 +61,7 @@ async def upload_image( pil_image = pil_image.crop(bbox) except Exception: # Error opening the image + ApiDependencies.invoker.services.logger.error(traceback.format_exc()) raise HTTPException(status_code=415, detail="Failed to read image") # attempt to parse metadata from image @@ -97,6 +99,7 @@ async def upload_image( return image_dto except Exception: + ApiDependencies.invoker.services.logger.error(traceback.format_exc()) raise HTTPException(status_code=500, detail="Failed to create image") diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py index cd1db81857..655e4b4fb8 100644 --- a/invokeai/app/services/image_records/image_records_base.py +++ 
b/invokeai/app/services/image_records/image_records_base.py @@ -80,7 +80,6 @@ class ImageRecordStorageBase(ABC): session_id: Optional[str] = None, node_id: Optional[str] = None, metadata: Optional[MetadataField] = None, - workflow_id: Optional[str] = None, ) -> datetime: """Saves an image record.""" pass diff --git a/invokeai/app/services/image_records/image_records_common.py b/invokeai/app/services/image_records/image_records_common.py index 6576fb9647..5a6e5652c9 100644 --- a/invokeai/app/services/image_records/image_records_common.py +++ b/invokeai/app/services/image_records/image_records_common.py @@ -100,7 +100,6 @@ IMAGE_DTO_COLS = ", ".join( "width", "height", "session_id", - "workflow_id", "node_id", "is_intermediate", "created_at", @@ -141,11 +140,6 @@ class ImageRecord(BaseModelExcludeNull): description="The session ID that generated this image, if it is a generated image.", ) """The session ID that generated this image, if it is a generated image.""" - workflow_id: Optional[str] = Field( - default=None, - description="The workflow that generated this image.", - ) - """The workflow that generated this image.""" node_id: Optional[str] = Field( default=None, description="The node ID that generated this image, if it is a generated image.", @@ -190,7 +184,6 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord: width = image_dict.get("width", 0) height = image_dict.get("height", 0) session_id = image_dict.get("session_id", None) - workflow_id = image_dict.get("workflow_id", None) node_id = image_dict.get("node_id", None) created_at = image_dict.get("created_at", get_iso_timestamp()) updated_at = image_dict.get("updated_at", get_iso_timestamp()) @@ -205,7 +198,6 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord: width=width, height=height, session_id=session_id, - workflow_id=workflow_id, node_id=node_id, created_at=created_at, updated_at=updated_at, diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py index dcabe55829..239917b728 100644 --- a/invokeai/app/services/image_records/image_records_sqlite.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -76,16 +76,6 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): """ ) - if "workflow_id" not in columns: - self._cursor.execute( - """--sql - ALTER TABLE images - ADD COLUMN workflow_id TEXT; - -- TODO: This requires a migration: - -- FOREIGN KEY (workflow_id) REFERENCES workflows (workflow_id) ON DELETE SET NULL; - """ - ) - # Create the `images` table indices. 
self._cursor.execute( """--sql @@ -423,7 +413,6 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): session_id: Optional[str] = None, node_id: Optional[str] = None, metadata: Optional[MetadataField] = None, - workflow_id: Optional[str] = None, ) -> datetime: try: metadata_json = metadata.model_dump_json() if metadata is not None else None @@ -439,11 +428,10 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): node_id, session_id, metadata, - workflow_id, is_intermediate, starred ) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?); """, ( image_name, @@ -454,7 +442,6 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): node_id, session_id, metadata_json, - workflow_id, is_intermediate, starred, ), diff --git a/invokeai/app/services/images/images_common.py b/invokeai/app/services/images/images_common.py index 0464244b94..198c26c3a2 100644 --- a/invokeai/app/services/images/images_common.py +++ b/invokeai/app/services/images/images_common.py @@ -24,6 +24,11 @@ class ImageDTO(ImageRecord, ImageUrlsDTO): default=None, description="The id of the board the image belongs to, if one exists." ) """The id of the board the image belongs to, if one exists.""" + workflow_id: Optional[str] = Field( + default=None, + description="The workflow that generated this image.", + ) + """The workflow that generated this image.""" def image_record_to_dto( @@ -31,6 +36,7 @@ def image_record_to_dto( image_url: str, thumbnail_url: str, board_id: Optional[str], + workflow_id: Optional[str], ) -> ImageDTO: """Converts an image record to an image DTO.""" return ImageDTO( @@ -38,4 +44,5 @@ def image_record_to_dto( image_url=image_url, thumbnail_url=thumbnail_url, board_id=board_id, + workflow_id=workflow_id, ) diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py index a0d59470fc..8eb768a1b9 100644 --- a/invokeai/app/services/images/images_default.py +++ b/invokeai/app/services/images/images_default.py @@ -74,11 +74,12 @@ class ImageService(ImageServiceABC): # Nullable fields node_id=node_id, metadata=metadata, - workflow_id=workflow_id, session_id=session_id, ) if board_id is not None: self.__invoker.services.board_image_records.add_image_to_board(board_id=board_id, image_name=image_name) + if workflow_id is not None: + self.__invoker.services.workflow_image_records.create(workflow_id=workflow_id, image_name=image_name) self.__invoker.services.image_files.save( image_name=image_name, image=image, metadata=metadata, workflow=workflow ) @@ -138,10 +139,11 @@ class ImageService(ImageServiceABC): image_record = self.__invoker.services.image_records.get(image_name) image_dto = image_record_to_dto( - image_record, - self.__invoker.services.urls.get_image_url(image_name), - self.__invoker.services.urls.get_image_url(image_name, True), - self.__invoker.services.board_image_records.get_board_for_image(image_name), + image_record=image_record, + image_url=self.__invoker.services.urls.get_image_url(image_name), + thumbnail_url=self.__invoker.services.urls.get_image_url(image_name, True), + board_id=self.__invoker.services.board_image_records.get_board_for_image(image_name), + workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(image_name), ) return image_dto @@ -162,6 +164,19 @@ class ImageService(ImageServiceABC): self.__invoker.services.logger.error("Problem getting image DTO") raise e + def get_workflow(self, image_name: str) -> Optional[WorkflowField]: + try: + workflow_id = 
self.__invoker.services.workflow_image_records.get_workflow_for_image(image_name) + if workflow_id is None: + return None + return self.__invoker.services.workflow_records.get(workflow_id) + except ImageRecordNotFoundException: + self.__invoker.services.logger.error("Image record not found") + raise + except Exception as e: + self.__invoker.services.logger.error("Problem getting image DTO") + raise e + def get_path(self, image_name: str, thumbnail: bool = False) -> str: try: return str(self.__invoker.services.image_files.get_path(image_name, thumbnail)) @@ -205,10 +220,11 @@ class ImageService(ImageServiceABC): image_dtos = list( map( lambda r: image_record_to_dto( - r, - self.__invoker.services.urls.get_image_url(r.image_name), - self.__invoker.services.urls.get_image_url(r.image_name, True), - self.__invoker.services.board_image_records.get_board_for_image(r.image_name), + image_record=r, + image_url=self.__invoker.services.urls.get_image_url(r.image_name), + thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True), + board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name), + workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name), ), results.items, ) diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py index 94db75d810..804b1b6884 100644 --- a/invokeai/app/services/invocation_services.py +++ b/invokeai/app/services/invocation_services.py @@ -28,6 +28,7 @@ if TYPE_CHECKING: from .shared.graph import GraphExecutionState, LibraryGraph from .urls.urls_base import UrlServiceBase from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase + from .workflow_image_records.workflow_image_records_base import WorkflowImageRecordsStorageBase class InvocationServices: @@ -56,6 +57,7 @@ class InvocationServices: invocation_cache: "InvocationCacheBase" names: "NameServiceBase" urls: "UrlServiceBase" + workflow_image_records: "WorkflowImageRecordsStorageBase" workflow_records: "WorkflowRecordsStorageBase" def __init__( @@ -82,6 +84,7 @@ class InvocationServices: invocation_cache: "InvocationCacheBase", names: "NameServiceBase", urls: "UrlServiceBase", + workflow_image_records: "WorkflowImageRecordsStorageBase", workflow_records: "WorkflowRecordsStorageBase", ): self.board_images = board_images @@ -106,4 +109,5 @@ class InvocationServices: self.invocation_cache = invocation_cache self.names = names self.urls = urls + self.workflow_image_records = workflow_image_records self.workflow_records = workflow_records diff --git a/invokeai/app/services/workflow_image_records/__init__.py b/invokeai/app/services/workflow_image_records/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/workflow_image_records/workflow_image_records_base.py b/invokeai/app/services/workflow_image_records/workflow_image_records_base.py new file mode 100644 index 0000000000..d99a2ba106 --- /dev/null +++ b/invokeai/app/services/workflow_image_records/workflow_image_records_base.py @@ -0,0 +1,23 @@ +from abc import ABC, abstractmethod +from typing import Optional + + +class WorkflowImageRecordsStorageBase(ABC): + """Abstract base class for the one-to-many workflow-image relationship record storage.""" + + @abstractmethod + def create( + self, + workflow_id: str, + image_name: str, + ) -> None: + """Creates a workflow-image record.""" + pass + + @abstractmethod + def get_workflow_for_image( + self, + image_name: str, + ) -> 
Optional[str]: + """Gets an image's workflow id, if it has one.""" + pass diff --git a/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py b/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py new file mode 100644 index 0000000000..1a5de672bc --- /dev/null +++ b/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py @@ -0,0 +1,123 @@ +import sqlite3 +import threading +from typing import Optional, cast +from invokeai.app.services.shared.sqlite import SqliteDatabase + +from invokeai.app.services.workflow_image_records.workflow_image_records_base import WorkflowImageRecordsStorageBase + + +class SqliteWorkflowImageRecordsStorage(WorkflowImageRecordsStorageBase): + """SQLite implementation of WorkflowImageRecordsStorageBase.""" + + _conn: sqlite3.Connection + _cursor: sqlite3.Cursor + _lock: threading.RLock + + def __init__(self, db: SqliteDatabase) -> None: + super().__init__() + self._lock = db.lock + self._conn = db.conn + self._cursor = self._conn.cursor() + + try: + self._lock.acquire() + self._create_tables() + self._conn.commit() + finally: + self._lock.release() + + def _create_tables(self) -> None: + # Create the `workflow_images` junction table. + self._cursor.execute( + """--sql + CREATE TABLE IF NOT EXISTS workflow_images ( + workflow_id TEXT NOT NULL, + image_name TEXT NOT NULL, + created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')), + -- updated via trigger + updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')), + -- Soft delete, currently unused + deleted_at DATETIME, + -- enforce one-to-many relationship between workflows and images using PK + -- (we can extend this to many-to-many later) + PRIMARY KEY (image_name), + FOREIGN KEY (workflow_id) REFERENCES workflows (workflow_id) ON DELETE CASCADE, + FOREIGN KEY (image_name) REFERENCES images (image_name) ON DELETE CASCADE + ); + """ + ) + + # Add index for workflow id + self._cursor.execute( + """--sql + CREATE INDEX IF NOT EXISTS idx_workflow_images_workflow_id ON workflow_images (workflow_id); + """ + ) + + # Add index for workflow id, sorted by created_at + self._cursor.execute( + """--sql + CREATE INDEX IF NOT EXISTS idx_workflow_images_workflow_id_created_at ON workflow_images (workflow_id, created_at); + """ + ) + + # Add trigger for `updated_at`. + self._cursor.execute( + """--sql + CREATE TRIGGER IF NOT EXISTS tg_workflow_images_updated_at + AFTER UPDATE + ON workflow_images FOR EACH ROW + BEGIN + UPDATE workflow_images SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW') + WHERE workflow_id = old.workflow_id AND image_name = old.image_name; + END; + """ + ) + + def create( + self, + workflow_id: str, + image_name: str, + ) -> None: + """Creates a workflow-image record.""" + try: + self._lock.acquire() + self._cursor.execute( + """--sql + INSERT INTO workflow_images (workflow_id, image_name) + VALUES (?, ?) 
+ ON CONFLICT (image_name) DO UPDATE SET workflow_id = ?; + """, + (workflow_id, image_name, workflow_id), + ) + self._conn.commit() + except sqlite3.Error as e: + self._conn.rollback() + raise e + finally: + self._lock.release() + + def get_workflow_for_image( + self, + image_name: str, + ) -> Optional[str]: + """Gets an image's workflow id, if it has one.""" + try: + self._lock.acquire() + self._cursor.execute( + """--sql + SELECT workflow_id + FROM workflow_images + WHERE image_name = ?; + """, + (image_name,), + ) + result = self._cursor.fetchone() + if result is None: + return None + return cast(str, result[0]) + except sqlite3.Error as e: + self._conn.rollback() + raise e + finally: + self._lock.release() From 23fa2e560aaa0625936faea8c073623f29f0dc2e Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 18:27:29 +1100 Subject: [PATCH 129/202] fix: fix tests --- tests/nodes/test_graph_execution_state.py | 1 + tests/nodes/test_invoker.py | 1 + tests/nodes/test_nodes.py | 31 ++++++++++++----------- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/tests/nodes/test_graph_execution_state.py b/tests/nodes/test_graph_execution_state.py index e2d435e621..171cdfdb6f 100644 --- a/tests/nodes/test_graph_execution_state.py +++ b/tests/nodes/test_graph_execution_state.py @@ -76,6 +76,7 @@ def mock_services() -> InvocationServices: session_queue=None, # type: ignore urls=None, # type: ignore workflow_records=None, # type: ignore + workflow_image_records=None, # type: ignore ) diff --git a/tests/nodes/test_invoker.py b/tests/nodes/test_invoker.py index 9774f07fdd..25b02955b0 100644 --- a/tests/nodes/test_invoker.py +++ b/tests/nodes/test_invoker.py @@ -81,6 +81,7 @@ def mock_services() -> InvocationServices: session_queue=None, # type: ignore urls=None, # type: ignore workflow_records=None, # type: ignore + workflow_image_records=None, # type: ignore ) diff --git a/tests/nodes/test_nodes.py b/tests/nodes/test_nodes.py index 7807a56879..1d7f2e4194 100644 --- a/tests/nodes/test_nodes.py +++ b/tests/nodes/test_nodes.py @@ -1,11 +1,12 @@ from typing import Any, Callable, Union -from pydantic import Field from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, + InputField, InvocationContext, + OutputField, invocation, invocation_output, ) @@ -15,12 +16,12 @@ from invokeai.app.invocations.image import ImageField # Define test invocations before importing anything that uses invocations @invocation_output("test_list_output") class ListPassThroughInvocationOutput(BaseInvocationOutput): - collection: list[ImageField] = Field(default_factory=list) + collection: list[ImageField] = OutputField(default_factory=list) @invocation("test_list") class ListPassThroughInvocation(BaseInvocation): - collection: list[ImageField] = Field(default_factory=list) + collection: list[ImageField] = InputField(default_factory=list) def invoke(self, context: InvocationContext) -> ListPassThroughInvocationOutput: return ListPassThroughInvocationOutput(collection=self.collection) @@ -28,12 +29,12 @@ class ListPassThroughInvocation(BaseInvocation): @invocation_output("test_prompt_output") class PromptTestInvocationOutput(BaseInvocationOutput): - prompt: str = Field(default="") + prompt: str = OutputField(default="") @invocation("test_prompt") class PromptTestInvocation(BaseInvocation): - prompt: str = Field(default="") + prompt: str = InputField(default="") def invoke(self, context: InvocationContext) -> 
PromptTestInvocationOutput: return PromptTestInvocationOutput(prompt=self.prompt) @@ -47,13 +48,13 @@ class ErrorInvocation(BaseInvocation): @invocation_output("test_image_output") class ImageTestInvocationOutput(BaseInvocationOutput): - image: ImageField = Field() + image: ImageField = OutputField() @invocation("test_text_to_image") class TextToImageTestInvocation(BaseInvocation): - prompt: str = Field(default="") - prompt2: str = Field(default="") + prompt: str = InputField(default="") + prompt2: str = InputField(default="") def invoke(self, context: InvocationContext) -> ImageTestInvocationOutput: return ImageTestInvocationOutput(image=ImageField(image_name=self.id)) @@ -61,8 +62,8 @@ class TextToImageTestInvocation(BaseInvocation): @invocation("test_image_to_image") class ImageToImageTestInvocation(BaseInvocation): - prompt: str = Field(default="") - image: Union[ImageField, None] = Field(default=None) + prompt: str = InputField(default="") + image: Union[ImageField, None] = InputField(default=None) def invoke(self, context: InvocationContext) -> ImageTestInvocationOutput: return ImageTestInvocationOutput(image=ImageField(image_name=self.id)) @@ -70,12 +71,12 @@ class ImageToImageTestInvocation(BaseInvocation): @invocation_output("test_prompt_collection_output") class PromptCollectionTestInvocationOutput(BaseInvocationOutput): - collection: list[str] = Field(default_factory=list) + collection: list[str] = OutputField(default_factory=list) @invocation("test_prompt_collection") class PromptCollectionTestInvocation(BaseInvocation): - collection: list[str] = Field() + collection: list[str] = InputField() def invoke(self, context: InvocationContext) -> PromptCollectionTestInvocationOutput: return PromptCollectionTestInvocationOutput(collection=self.collection.copy()) @@ -83,12 +84,12 @@ class PromptCollectionTestInvocation(BaseInvocation): @invocation_output("test_any_output") class AnyTypeTestInvocationOutput(BaseInvocationOutput): - value: Any = Field() + value: Any = OutputField() @invocation("test_any") class AnyTypeTestInvocation(BaseInvocation): - value: Any = Field(default=None) + value: Any = InputField(default=None) def invoke(self, context: InvocationContext) -> AnyTypeTestInvocationOutput: return AnyTypeTestInvocationOutput(value=self.value) @@ -96,7 +97,7 @@ class AnyTypeTestInvocation(BaseInvocation): @invocation("test_polymorphic") class PolymorphicStringTestInvocation(BaseInvocation): - value: Union[str, list[str]] = Field(default="") + value: Union[str, list[str]] = InputField(default="") def invoke(self, context: InvocationContext) -> PromptCollectionTestInvocationOutput: if isinstance(self.value, str): From 2faed653d75547ac40b955971b6ca481b82586d7 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 20:10:17 +1100 Subject: [PATCH 130/202] fix(api): deduplicate metadata/workflow extraction logic --- invokeai/app/api/routers/images.py | 2 +- .../services/image_files/image_files_disk.py | 18 ++++-------------- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 429eaef37c..a57414e17f 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -60,10 +60,10 @@ async def upload_image( bbox = pil_image.getbbox() pil_image = pil_image.crop(bbox) except Exception: - # Error opening the image ApiDependencies.invoker.services.logger.error(traceback.format_exc()) raise 
HTTPException(status_code=415, detail="Failed to read image") + # TODO: retain non-invokeai metadata on upload? # attempt to parse metadata from image metadata_raw = pil_image.info.get("invokeai_metadata", None) if metadata_raw: diff --git a/invokeai/app/services/image_files/image_files_disk.py b/invokeai/app/services/image_files/image_files_disk.py index e8a733d619..91c1e14789 100644 --- a/invokeai/app/services/image_files/image_files_disk.py +++ b/invokeai/app/services/image_files/image_files_disk.py @@ -65,20 +65,10 @@ class DiskImageFileStorage(ImageFileStorageBase): pnginfo = PngImagePlugin.PngInfo() - if metadata is not None or workflow is not None: - if metadata is not None: - pnginfo.add_text("invokeai_metadata", metadata.model_dump_json()) - if workflow is not None: - pnginfo.add_text("invokeai_workflow", workflow.model_dump_json()) - else: - # For uploaded images, we want to retain metadata. PIL strips it on save; manually add it back - # TODO: retain non-invokeai metadata on save... - original_metadata = image.info.get("invokeai_metadata", None) - if original_metadata is not None: - pnginfo.add_text("invokeai_metadata", original_metadata) - original_workflow = image.info.get("invokeai_workflow", None) - if original_workflow is not None: - pnginfo.add_text("invokeai_workflow", original_workflow) + if metadata is not None: + pnginfo.add_text("invokeai_metadata", metadata.model_dump_json()) + if workflow is not None: + pnginfo.add_text("invokeai_workflow", workflow.model_dump_json()) image.save( image_path, From f04462973b091f02ef15046cecfaf266de7ab0bb Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 21:23:09 +1100 Subject: [PATCH 131/202] feat(ui): create debounced metadata/workflow query hooks Also added config options for metadata and workflow debounce times (`metadataFetchDebounce` & `workflowFetchDebounce`). Falls back to 0 if not provided. In OSS, because we have no major latency concerns, the debounce is 0. But in other environments, it may be desirable to set this to something like 300ms. 
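As a rough illustration (not part of this patch), a host app might override the new config fields roughly like this — `metadataFetchDebounce` / `workflowFetchDebounce` are the `AppConfig` fields added below, and 300ms is only an example value:

    // hypothetical host-app config override; a sketch, not shipped code
    const configOverrides: Partial<AppConfig> = {
      metadataFetchDebounce: 300, // debounce metadata fetches by ~300ms
      workflowFetchDebounce: 300, // debounce workflow fetches by ~300ms
    };

With no override, both hooks fall back to a debounce of 0.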
--- .../frontend/web/src/app/types/invokeai.ts | 2 ++ .../CurrentImage/CurrentImageButtons.tsx | 22 ++++++------------- .../SingleSelectionMenuItems.tsx | 19 ++++++---------- .../ImageMetadataViewer.tsx | 18 ++++----------- .../src/services/api/endpoints/workflows.ts | 1 - .../api/hooks/useDebouncedMetadata.ts | 21 ++++++++++++++++++ .../api/hooks/useDebouncedWorkflow.ts | 21 ++++++++++++++++++ 7 files changed, 62 insertions(+), 42 deletions(-) create mode 100644 invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts create mode 100644 invokeai/frontend/web/src/services/api/hooks/useDebouncedWorkflow.ts diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts index 39e4ffd27a..0fe7a36052 100644 --- a/invokeai/frontend/web/src/app/types/invokeai.ts +++ b/invokeai/frontend/web/src/app/types/invokeai.ts @@ -59,6 +59,8 @@ export type AppConfig = { nodesAllowlist: string[] | undefined; nodesDenylist: string[] | undefined; maxUpscalePixels?: number; + metadataFetchDebounce?: number; + workflowFetchDebounce?: number; sd: { defaultModel?: string; disabledControlNetModels: string[]; diff --git a/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx index 4c0aa5e0e8..36a251952c 100644 --- a/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx @@ -38,15 +38,12 @@ import { FaSeedling, } from 'react-icons/fa'; import { FaCircleNodes, FaEllipsis } from 'react-icons/fa6'; -import { - useGetImageDTOQuery, - useGetImageMetadataQuery, -} from 'services/api/endpoints/images'; +import { useGetImageDTOQuery } from 'services/api/endpoints/images'; +import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata'; +import { useDebouncedWorkflow } from 'services/api/hooks/useDebouncedWorkflow'; import { menuListMotionProps } from 'theme/components/menu'; -import { useDebounce } from 'use-debounce'; import { sentImageToImg2Img } from '../../store/actions'; import SingleSelectionMenuItems from '../ImageContextMenu/SingleSelectionMenuItems'; -import { useGetWorkflowQuery } from 'services/api/endpoints/workflows'; const currentImageButtonsSelector = createSelector( [stateSelector, activeTabNameSelector], @@ -105,17 +102,12 @@ const CurrentImageButtons = () => { lastSelectedImage?.image_name ?? skipToken ); - const [debouncedImageName] = useDebounce(lastSelectedImage?.image_name, 300); - const [debouncedWorkflowId] = useDebounce( - lastSelectedImage?.workflow_id, - 300 + const { metadata, isLoading: isLoadingMetadata } = useDebouncedMetadata( + lastSelectedImage?.image_name ); - const { data: metadata, isLoading: isLoadingMetadata } = - useGetImageMetadataQuery(debouncedImageName ?? skipToken); - - const { data: workflow, isLoading: isLoadingWorkflow } = useGetWorkflowQuery( - debouncedWorkflowId ?? 
skipToken + const { workflow, isLoading: isLoadingWorkflow } = useDebouncedWorkflow( + lastSelectedImage?.workflow_id ); const handleLoadWorkflow = useCallback(() => { diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx index 38de235e38..ed12eb5ff4 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx @@ -1,6 +1,5 @@ import { Flex, MenuItem, Spinner } from '@chakra-ui/react'; import { useStore } from '@nanostores/react'; -import { skipToken } from '@reduxjs/toolkit/dist/query'; import { useAppToaster } from 'app/components/Toaster'; import { $customStarUI } from 'app/store/nanostores/customStarUI'; import { useAppDispatch } from 'app/store/storeHooks'; @@ -33,13 +32,12 @@ import { import { FaCircleNodes } from 'react-icons/fa6'; import { MdStar, MdStarBorder } from 'react-icons/md'; import { - useGetImageMetadataQuery, useStarImagesMutation, useUnstarImagesMutation, } from 'services/api/endpoints/images'; -import { useGetWorkflowQuery } from 'services/api/endpoints/workflows'; +import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata'; +import { useDebouncedWorkflow } from 'services/api/hooks/useDebouncedWorkflow'; import { ImageDTO } from 'services/api/types'; -import { useDebounce } from 'use-debounce'; import { sentImageToCanvas, sentImageToImg2Img } from '../../store/actions'; type SingleSelectionMenuItemsProps = { @@ -57,14 +55,11 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { const isCanvasEnabled = useFeatureStatus('unifiedCanvas').isFeatureEnabled; const customStarUi = useStore($customStarUI); - const [debouncedImageName] = useDebounce(imageDTO?.image_name, 300); - const [debouncedWorkflowId] = useDebounce(imageDTO?.workflow_id, 300); - - const { data: metadata, isLoading: isLoadingMetadata } = - useGetImageMetadataQuery(debouncedImageName ?? skipToken); - - const { data: workflow, isLoading: isLoadingWorkflow } = useGetWorkflowQuery( - debouncedWorkflowId ?? 
skipToken + const { metadata, isLoading: isLoadingMetadata } = useDebouncedMetadata( + imageDTO?.image_name + ); + const { workflow, isLoading: isLoadingWorkflow } = useDebouncedWorkflow( + imageDTO?.workflow_id ); const [starImages] = useStarImagesMutation(); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx index f6820b9d20..29637e4a3c 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx @@ -9,15 +9,13 @@ import { Tabs, Text, } from '@chakra-ui/react'; -import { skipToken } from '@reduxjs/toolkit/dist/query'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; import ScrollableContent from 'features/nodes/components/sidePanel/ScrollableContent'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; -import { useGetImageMetadataQuery } from 'services/api/endpoints/images'; -import { useGetWorkflowQuery } from 'services/api/endpoints/workflows'; +import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata'; +import { useDebouncedWorkflow } from 'services/api/hooks/useDebouncedWorkflow'; import { ImageDTO } from 'services/api/types'; -import { useDebounce } from 'use-debounce'; import DataViewer from './DataViewer'; import ImageMetadataActions from './ImageMetadataActions'; @@ -33,16 +31,8 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => { // }); const { t } = useTranslation(); - const [debouncedImageName] = useDebounce(image.image_name, 300); - const [debouncedWorkflowId] = useDebounce(image.workflow_id, 300); - - const { data: metadata } = useGetImageMetadataQuery( - debouncedImageName ?? skipToken - ); - - const { data: workflow } = useGetWorkflowQuery( - debouncedWorkflowId ?? skipToken - ); + const { metadata } = useDebouncedMetadata(image.image_name); + const { workflow } = useDebouncedWorkflow(image.workflow_id); return ( ({ getWorkflow: build.query({ query: (workflow_id) => `workflows/i/${workflow_id}`, - keepUnusedDataFor: 86400, // 24 hours providesTags: (result, error, workflow_id) => [ { type: 'Workflow', id: workflow_id }, ], diff --git a/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts b/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts new file mode 100644 index 0000000000..023b3c140c --- /dev/null +++ b/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts @@ -0,0 +1,21 @@ +import { skipToken } from '@reduxjs/toolkit/query'; +import { useDebounce } from 'use-debounce'; +import { useGetImageMetadataQuery } from '../endpoints/images'; +import { useAppSelector } from 'app/store/storeHooks'; + +export const useDebouncedMetadata = (imageName?: string | null) => { + const metadataFetchDebounce = useAppSelector( + (state) => state.config.metadataFetchDebounce + ); + + const [debouncedImageName] = useDebounce( + imageName, + metadataFetchDebounce ?? 0 + ); + + const { data: metadata, isLoading } = useGetImageMetadataQuery( + debouncedImageName ?? 
skipToken + ); + + return { metadata, isLoading }; +}; diff --git a/invokeai/frontend/web/src/services/api/hooks/useDebouncedWorkflow.ts b/invokeai/frontend/web/src/services/api/hooks/useDebouncedWorkflow.ts new file mode 100644 index 0000000000..2731597b2c --- /dev/null +++ b/invokeai/frontend/web/src/services/api/hooks/useDebouncedWorkflow.ts @@ -0,0 +1,21 @@ +import { skipToken } from '@reduxjs/toolkit/query'; +import { useAppSelector } from 'app/store/storeHooks'; +import { useDebounce } from 'use-debounce'; +import { useGetWorkflowQuery } from '../endpoints/workflows'; + +export const useDebouncedWorkflow = (workflowId?: string | null) => { + const workflowFetchDebounce = useAppSelector( + (state) => state.config.workflowFetchDebounce + ); + + const [debouncedWorkflowID] = useDebounce( + workflowId, + workflowFetchDebounce ?? 0 + ); + + const { data: workflow, isLoading } = useGetWorkflowQuery( + debouncedWorkflowID ?? skipToken + ); + + return { workflow, isLoading }; +}; From 91049799430b67cb3c86d42a5e33b4dc586bc357 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 21:24:07 +1100 Subject: [PATCH 132/202] chore(ui): regen types --- .../frontend/web/src/services/api/schema.d.ts | 298 +++++++++--------- 1 file changed, 149 insertions(+), 149 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 6092e822d6..5541fa20e9 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -419,7 +419,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -588,7 +588,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -646,7 +646,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -894,7 +894,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -945,7 +945,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1033,7 +1033,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1079,7 +1079,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1150,7 +1150,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1207,7 +1207,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1282,7 +1282,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1349,7 +1349,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1392,7 +1392,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1443,7 +1443,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1483,7 +1483,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1545,7 +1545,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1594,7 +1594,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1694,7 +1694,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -1832,7 +1832,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2019,7 +2019,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2088,7 +2088,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2144,7 +2144,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2261,7 +2261,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2302,11 +2302,11 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** @@ -2359,7 +2359,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2452,7 +2452,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2499,7 +2499,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2596,7 +2596,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2699,7 +2699,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2750,7 +2750,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2785,7 +2785,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2832,7 +2832,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2897,7 +2897,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -2942,7 +2942,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["RangeInvocation"] | 
components["schemas"]["FloatInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["InfillColorInvocation"]; + [key: string]: components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | 
components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | 
components["schemas"]["StringInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ImageMultiplyInvocation"]; }; /** * Edges @@ -2979,7 +2979,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["String2Output"] | components["schemas"]["ImageOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["ConditioningCollectionOutput"] | 
components["schemas"]["LoraLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FaceMaskOutput"]; + [key: string]: components["schemas"]["FaceOffOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["String2Output"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["FaceMaskOutput"]; }; /** * Errors @@ -3018,7 +3018,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3067,7 +3067,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3146,7 +3146,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3281,7 +3281,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3341,7 +3341,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3383,7 +3383,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3436,7 +3436,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3479,7 +3479,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3534,7 +3534,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3576,7 +3576,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3675,11 +3675,6 @@ export type components = { * @description The session ID that generated this image, if it is a generated image. */ session_id?: string | null; - /** - * Workflow Id - * @description The workflow that generated this image. - */ - workflow_id?: string | null; /** * Node Id * @description The node ID that generated this image, if it is a generated image. @@ -3695,6 +3690,11 @@ export type components = { * @description The id of the board the image belongs to, if one exists. */ board_id?: string | null; + /** + * Workflow Id + * @description The workflow that generated this image. + */ + workflow_id?: string | null; }; /** * ImageField @@ -3726,7 +3726,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3767,7 +3767,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3810,7 +3810,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3845,7 +3845,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3892,7 +3892,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3929,7 +3929,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -3988,7 +3988,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4075,7 +4075,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4129,7 +4129,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4173,7 +4173,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4243,7 +4243,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4300,7 +4300,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4345,7 +4345,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4393,7 +4393,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4435,7 +4435,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4486,7 +4486,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4521,7 +4521,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4614,7 +4614,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4675,7 +4675,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4706,7 +4706,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4773,7 +4773,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4832,7 +4832,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4881,7 +4881,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4946,7 +4946,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -4993,7 +4993,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5115,7 +5115,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5202,7 +5202,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5237,7 +5237,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5274,7 +5274,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5329,7 +5329,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5370,7 +5370,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5418,7 +5418,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5490,7 +5490,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5537,7 +5537,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5605,7 +5605,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5652,7 +5652,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5768,7 +5768,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5827,7 +5827,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5907,7 +5907,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -5954,7 +5954,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6016,7 +6016,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6102,7 +6102,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6231,7 +6231,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6266,7 +6266,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6319,7 +6319,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6374,7 +6374,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6441,11 +6441,11 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** @@ -6488,11 +6488,11 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** @@ -6529,11 +6529,11 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** @@ -6581,7 +6581,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6628,7 +6628,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6683,7 +6683,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6747,7 +6747,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6788,7 +6788,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6869,7 +6869,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -6951,7 +6951,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7014,7 +7014,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7077,7 +7077,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7139,11 +7139,11 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** @description The image to process */ @@ -7172,7 +7172,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7221,7 +7221,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7277,7 +7277,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7354,7 +7354,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7593,7 +7593,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7780,7 +7780,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7890,7 +7890,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7941,7 +7941,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -7976,7 +7976,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8017,7 +8017,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8103,7 +8103,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8156,7 +8156,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8197,7 +8197,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8237,7 +8237,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8310,7 +8310,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8440,7 +8440,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8530,7 +8530,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8623,7 +8623,7 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean | null; + is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache @@ -8728,11 +8728,11 @@ export type components = { ui_order: number | null; }; /** - * StableDiffusionOnnxModelFormat + * StableDiffusion2ModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; /** * CLIPVisionModelFormat * @description An enumeration. @@ -8740,11 +8740,23 @@ export type components = { */ CLIPVisionModelFormat: "diffusers"; /** - * ControlNetModelFormat + * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ - ControlNetModelFormat: "checkpoint" | "diffusers"; + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * IPAdapterModelFormat * @description An enumeration. @@ -8752,29 +8764,17 @@ export type components = { */ IPAdapterModelFormat: "invokeai"; /** - * StableDiffusion2ModelFormat + * ControlNetModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. 
- * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + ControlNetModelFormat: "checkpoint" | "diffusers"; /** * T2IAdapterModelFormat * @description An enumeration. * @enum {string} */ T2IAdapterModelFormat: "diffusers"; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; From b5940039f3051c3b9ed0474b6428ac1981e06893 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 21:24:21 +1100 Subject: [PATCH 133/202] chore: lint --- invokeai/app/api/dependencies.py | 2 +- invokeai/app/api/routers/images.py | 6 +----- invokeai/app/services/invocation_services.py | 2 +- .../workflow_image_records/workflow_image_records_sqlite.py | 2 +- tests/nodes/test_nodes.py | 1 - 5 files changed, 4 insertions(+), 9 deletions(-) diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 4746eeae3f..e7c8fa7fae 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -1,8 +1,8 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) from logging import Logger -from invokeai.app.services.workflow_image_records.workflow_image_records_sqlite import SqliteWorkflowImageRecordsStorage +from invokeai.app.services.workflow_image_records.workflow_image_records_sqlite import SqliteWorkflowImageRecordsStorage from invokeai.backend.util.logging import InvokeAILogger from invokeai.version.invokeai_version import __version__ diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index a57414e17f..e8c8c693b3 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -8,11 +8,7 @@ from fastapi.routing import APIRouter from PIL import Image from pydantic import BaseModel, Field, ValidationError -from invokeai.app.invocations.baseinvocation import ( - MetadataField, - MetadataFieldValidator, - WorkflowFieldValidator, -) +from invokeai.app.invocations.baseinvocation import MetadataField, MetadataFieldValidator, WorkflowFieldValidator from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py index 804b1b6884..d405201f4e 100644 --- a/invokeai/app/services/invocation_services.py +++ b/invokeai/app/services/invocation_services.py @@ -27,8 +27,8 @@ if TYPE_CHECKING: from .session_queue.session_queue_base import SessionQueueBase from .shared.graph import GraphExecutionState, LibraryGraph from .urls.urls_base import UrlServiceBase - from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase from .workflow_image_records.workflow_image_records_base import WorkflowImageRecordsStorageBase + from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase class InvocationServices: diff --git a/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py b/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py index 1a5de672bc..912d80cbf6 100644 --- a/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py +++ 
b/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py @@ -1,8 +1,8 @@ import sqlite3 import threading from typing import Optional, cast -from invokeai.app.services.shared.sqlite import SqliteDatabase +from invokeai.app.services.shared.sqlite import SqliteDatabase from invokeai.app.services.workflow_image_records.workflow_image_records_base import WorkflowImageRecordsStorageBase diff --git a/tests/nodes/test_nodes.py b/tests/nodes/test_nodes.py index 1d7f2e4194..51b33dd4c7 100644 --- a/tests/nodes/test_nodes.py +++ b/tests/nodes/test_nodes.py @@ -1,6 +1,5 @@ from typing import Any, Callable, Union - from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, From 16dacb5f430072a2b86feb7983d8055264fda82a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 21:36:29 +1100 Subject: [PATCH 134/202] fix(nodes): remove constraints on ip adapter metadata fields --- invokeai/app/invocations/metadata.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 98f5f0e830..1ed399873b 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -43,16 +43,10 @@ class IPAdapterMetadataField(BaseModel): description="The IP-Adapter model.", ) weight: Union[float, list[float]] = Field( - default=1, - ge=0, description="The weight given to the IP-Adapter", ) - begin_step_percent: float = Field( - default=0, ge=-1, le=2, description="When the IP-Adapter is first applied (% of total steps)" - ) - end_step_percent: float = Field( - default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)" - ) + begin_step_percent: float = Field(description="When the IP-Adapter is first applied (% of total steps)") + end_step_percent: float = Field(description="When the IP-Adapter is last applied (% of total steps)") @invocation_output("metadata_item_output") From 52fbd1b222866ba151a7c6b4044d17e063e2fd59 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 22:18:06 +1100 Subject: [PATCH 135/202] fix(ui): remove errant comment --- .../src/features/nodes/util/graphBuilders/buildNodesGraph.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildNodesGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildNodesGraph.ts index 4437e14f66..eb782f456a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildNodesGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildNodesGraph.ts @@ -35,7 +35,6 @@ export const buildNodesGraph = (nodesState: NodesState): Graph => { const { nodes, edges } = nodesState; const filteredNodes = nodes.filter(isInvocationNode); - // const workflowJSON = JSON.stringify(buildWorkflow(nodesState)); // Reduce the node editor nodes into invocation graph nodes const parsedNodes = filteredNodes.reduce>( @@ -68,7 +67,6 @@ export const buildNodesGraph = (nodesState: NodesState): Graph => { if (embedWorkflow) { // add the workflow to the node - // Object.assign(graphNode, { workflow: workflowJSON }); Object.assign(graphNode, { workflow: buildWorkflow(nodesState) }); } From 301a8fef92d15d485be5b20b9c8d4a6b65729624 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 
22:33:17 +1100 Subject: [PATCH 136/202] fix(ui): fix batch metadata logic when graph has no metadata On canvas, images have no metadata yet, so this needs to be handled --- .../graphBuilders/buildLinearBatchConfig.ts | 74 +++++++++++-------- .../nodes/util/graphBuilders/metadata.ts | 8 ++ 2 files changed, 50 insertions(+), 32 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts index 8bf9a2785a..59f8d4123f 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearBatchConfig.ts @@ -11,7 +11,7 @@ import { NOISE, POSITIVE_CONDITIONING, } from './constants'; -import { removeMetadata } from './metadata'; +import { getHasMetadata, removeMetadata } from './metadata'; export const prepareLinearUIBatch = ( state: RootState, @@ -40,13 +40,15 @@ export const prepareLinearUIBatch = ( }); } - // add to metadata - removeMetadata(graph, 'seed'); - zipped.push({ - node_path: METADATA, - field_name: 'seed', - items: seeds, - }); + if (getHasMetadata(graph)) { + // add to metadata + removeMetadata(graph, 'seed'); + zipped.push({ + node_path: METADATA, + field_name: 'seed', + items: seeds, + }); + } if (graph.nodes[CANVAS_COHERENCE_NOISE]) { zipped.push({ @@ -78,12 +80,14 @@ export const prepareLinearUIBatch = ( } // add to metadata - removeMetadata(graph, 'seed'); - firstBatchDatumList.push({ - node_path: METADATA, - field_name: 'seed', - items: seeds, - }); + if (getHasMetadata(graph)) { + removeMetadata(graph, 'seed'); + firstBatchDatumList.push({ + node_path: METADATA, + field_name: 'seed', + items: seeds, + }); + } if (graph.nodes[CANVAS_COHERENCE_NOISE]) { firstBatchDatumList.push({ @@ -108,12 +112,14 @@ export const prepareLinearUIBatch = ( } // add to metadata - removeMetadata(graph, 'seed'); - secondBatchDatumList.push({ - node_path: METADATA, - field_name: 'seed', - items: seeds, - }); + if (getHasMetadata(graph)) { + removeMetadata(graph, 'seed'); + secondBatchDatumList.push({ + node_path: METADATA, + field_name: 'seed', + items: seeds, + }); + } if (graph.nodes[CANVAS_COHERENCE_NOISE]) { secondBatchDatumList.push({ @@ -140,12 +146,14 @@ export const prepareLinearUIBatch = ( } // add to metadata - removeMetadata(graph, 'positive_prompt'); - firstBatchDatumList.push({ - node_path: METADATA, - field_name: 'positive_prompt', - items: extendedPrompts, - }); + if (getHasMetadata(graph)) { + removeMetadata(graph, 'positive_prompt'); + firstBatchDatumList.push({ + node_path: METADATA, + field_name: 'positive_prompt', + items: extendedPrompts, + }); + } if (shouldConcatSDXLStylePrompt && model?.base_model === 'sdxl') { const stylePrompts = extendedPrompts.map((p) => @@ -161,12 +169,14 @@ export const prepareLinearUIBatch = ( } // add to metadata - removeMetadata(graph, 'positive_style_prompt'); - firstBatchDatumList.push({ - node_path: METADATA, - field_name: 'positive_style_prompt', - items: extendedPrompts, - }); + if (getHasMetadata(graph)) { + removeMetadata(graph, 'positive_style_prompt'); + firstBatchDatumList.push({ + node_path: METADATA, + field_name: 'positive_style_prompt', + items: extendedPrompts, + }); + } } data.push(firstBatchDatumList); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts index 547c45addf..5cc397ce68 100644 
--- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts @@ -56,3 +56,11 @@ export const removeMetadata = ( delete metadataNode[key]; }; + +export const getHasMetadata = (graph: NonNullableGraph): boolean => { + const metadataNode = graph.nodes[METADATA] as + | CoreMetadataInvocation + | undefined; + + return Boolean(metadataNode); +}; From 2f4f83280b7240a1aeeddc09a3395b32c19185f3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Thu, 19 Oct 2023 07:37:28 +1100 Subject: [PATCH 137/202] fix(db): remove extraneous conflict handling in workflow image records --- .../workflow_image_records/workflow_image_records_sqlite.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py b/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py index 912d80cbf6..ec7a73f1d5 100644 --- a/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py +++ b/invokeai/app/services/workflow_image_records/workflow_image_records_sqlite.py @@ -85,10 +85,9 @@ class SqliteWorkflowImageRecordsStorage(WorkflowImageRecordsStorageBase): self._cursor.execute( """--sql INSERT INTO workflow_images (workflow_id, image_name) - VALUES (?, ?) - ON CONFLICT (image_name) DO UPDATE SET workflow_id = ?; + VALUES (?, ?); """, - (workflow_id, image_name, workflow_id), + (workflow_id, image_name), ) self._conn.commit() except sqlite3.Error as e: From c071262c20b7912e6d262a54097806e16ad226f9 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Thu, 19 Oct 2023 08:16:28 +1100 Subject: [PATCH 138/202] fix(ui): remove getMetadataFromFile query & util This will all be handled by python going forward --- .../getMetadataAndWorkflowFromImageBlob.ts | 45 ------------ .../web/src/services/api/endpoints/images.ts | 72 +------------------ 2 files changed, 1 insertion(+), 116 deletions(-) delete mode 100644 invokeai/frontend/web/src/features/nodes/util/getMetadataAndWorkflowFromImageBlob.ts diff --git a/invokeai/frontend/web/src/features/nodes/util/getMetadataAndWorkflowFromImageBlob.ts b/invokeai/frontend/web/src/features/nodes/util/getMetadataAndWorkflowFromImageBlob.ts deleted file mode 100644 index b46a701757..0000000000 --- a/invokeai/frontend/web/src/features/nodes/util/getMetadataAndWorkflowFromImageBlob.ts +++ /dev/null @@ -1,45 +0,0 @@ -import * as png from '@stevebel/png'; -import { logger } from 'app/logging/logger'; -import { parseify } from 'common/util/serialize'; -import { - ImageMetadataAndWorkflow, - zCoreMetadata, - zWorkflow, -} from 'features/nodes/types/types'; -import { get } from 'lodash-es'; - -export const getMetadataAndWorkflowFromImageBlob = async ( - image: Blob -): Promise => { - const data: ImageMetadataAndWorkflow = {}; - const buffer = await image.arrayBuffer(); - const text = png.decode(buffer).text; - - const rawMetadata = get(text, 'invokeai_metadata'); - if (rawMetadata) { - const metadataResult = zCoreMetadata.safeParse(JSON.parse(rawMetadata)); - if (metadataResult.success) { - data.metadata = metadataResult.data; - } else { - logger('system').error( - { error: parseify(metadataResult.error) }, - 'Problem reading metadata from image' - ); - } - } - - const rawWorkflow = get(text, 'invokeai_workflow'); - if (rawWorkflow) { - const workflowResult = 
zWorkflow.safeParse(JSON.parse(rawWorkflow)); - if (workflowResult.success) { - data.workflow = workflowResult.data; - } else { - logger('system').error( - { error: parseify(workflowResult.error) }, - 'Problem reading workflow from image' - ); - } - } - - return data; -}; diff --git a/invokeai/frontend/web/src/services/api/endpoints/images.ts b/invokeai/frontend/web/src/services/api/endpoints/images.ts index 36c00ee1c9..166d00a3db 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/images.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/images.ts @@ -1,5 +1,4 @@ import { EntityState, Update } from '@reduxjs/toolkit'; -import { fetchBaseQuery } from '@reduxjs/toolkit/dist/query'; import { PatchCollection } from '@reduxjs/toolkit/dist/query/core/buildThunks'; import { logger } from 'app/logging/logger'; import { @@ -8,15 +7,9 @@ import { IMAGE_CATEGORIES, IMAGE_LIMIT, } from 'features/gallery/store/types'; -import { - CoreMetadata, - ImageMetadataAndWorkflow, - zCoreMetadata, -} from 'features/nodes/types/types'; -import { getMetadataAndWorkflowFromImageBlob } from 'features/nodes/util/getMetadataAndWorkflowFromImageBlob'; +import { CoreMetadata, zCoreMetadata } from 'features/nodes/types/types'; import { keyBy } from 'lodash-es'; import { ApiTagDescription, LIST_TAG, api } from '..'; -import { $authToken, $projectId } from '../client'; import { components, paths } from '../schema'; import { DeleteBoardResult, @@ -135,68 +128,6 @@ export const imagesApi = api.injectEndpoints({ }, keepUnusedDataFor: 86400, // 24 hours }), - getImageMetadataFromFile: build.query< - ImageMetadataAndWorkflow, - { image: ImageDTO; shouldFetchMetadataFromApi: boolean } - >({ - queryFn: async ( - args: { image: ImageDTO; shouldFetchMetadataFromApi: boolean }, - api, - extraOptions, - fetchWithBaseQuery - ) => { - if (args.shouldFetchMetadataFromApi) { - let metadata; - const metadataResponse = await fetchWithBaseQuery( - `images/i/${args.image.image_name}/metadata` - ); - if (metadataResponse.data) { - const metadataResult = zCoreMetadata.safeParse( - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (metadataResponse.data as any)?.metadata - ); - if (metadataResult.success) { - metadata = metadataResult.data; - } - } - return { data: { metadata } }; - } else { - const authToken = $authToken.get(); - const projectId = $projectId.get(); - const customBaseQuery = fetchBaseQuery({ - baseUrl: '', - prepareHeaders: (headers) => { - if (authToken) { - headers.set('Authorization', `Bearer ${authToken}`); - } - if (projectId) { - headers.set('project-id', projectId); - } - - return headers; - }, - responseHandler: async (res) => { - return await res.blob(); - }, - }); - - const response = await customBaseQuery( - args.image.image_url, - api, - extraOptions - ); - const data = await getMetadataAndWorkflowFromImageBlob( - response.data as Blob - ); - - return { data }; - } - }, - providesTags: (result, error, { image }) => [ - { type: 'ImageMetadataFromFile', id: image.image_name }, - ], - keepUnusedDataFor: 86400, // 24 hours - }), deleteImage: build.mutation({ query: ({ image_name }) => ({ url: `images/i/${image_name}`, @@ -1643,6 +1574,5 @@ export const { useDeleteBoardMutation, useStarImagesMutation, useUnstarImagesMutation, - useGetImageMetadataFromFileQuery, useBulkDownloadImagesMutation, } = imagesApi; From dcd11327c1105715b52b6ccb2e13ff1edf2dd956 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 20 Oct 2023 11:41:05 +1100 
Subject: [PATCH 139/202] fix(db): remove unused, commented out methods --- .../workflow_records_sqlite.py | 43 ------------------- 1 file changed, 43 deletions(-) diff --git a/invokeai/app/services/workflow_records/workflow_records_sqlite.py b/invokeai/app/services/workflow_records/workflow_records_sqlite.py index 2d9e1f26e8..e9e2bdca3a 100644 --- a/invokeai/app/services/workflow_records/workflow_records_sqlite.py +++ b/invokeai/app/services/workflow_records/workflow_records_sqlite.py @@ -100,46 +100,3 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase): raise finally: self._lock.release() - - # def update(self, workflow_id: str, workflow: Workflow) -> Workflow: - # """Updates a workflow record.""" - # try: - # workflow_id = workflow.get("id", None) - # if type(workflow_id) is not str: - # raise WorkflowNotFoundError(f"Workflow does not have a valid id, got {workflow_id}") - # self._lock.acquire() - # self._cursor.execute( - # """--sql - # UPDATE workflows - # SET workflow = ? - # WHERE workflow_id = ? - # """, - # (workflow, workflow_id), - # ) - # self._conn.commit() - # except Exception: - # self._conn.rollback() - # raise - # finally: - # self._lock.release() - # return self.get(workflow_id) - - # def delete(self, workflow_id: str) -> Workflow: - # """Updates a workflow record.""" - # workflow = self.get(workflow_id) - # try: - # self._lock.acquire() - # self._cursor.execute( - # """--sql - # DELETE FROM workflows - # WHERE workflow_id = ? - # """, - # (workflow_id,), - # ) - # self._conn.commit() - # except Exception: - # self._conn.rollback() - # raise - # finally: - # self._lock.release() - # return workflow From b7f63a40653bb92ab4dd128e154fac18aa8a30e9 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 20 Oct 2023 13:58:53 +1100 Subject: [PATCH 140/202] fix(ui): fix canvas color picker when value is zero good ol' zero is false-y --- .../web/src/features/canvas/hooks/useColorUnderCursor.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/canvas/hooks/useColorUnderCursor.ts b/invokeai/frontend/web/src/features/canvas/hooks/useColorUnderCursor.ts index 0ade036987..5bdc59d345 100644 --- a/invokeai/frontend/web/src/features/canvas/hooks/useColorUnderCursor.ts +++ b/invokeai/frontend/web/src/features/canvas/hooks/useColorUnderCursor.ts @@ -37,7 +37,12 @@ const useColorPicker = () => { 1 ).data; - if (!(a && r && g && b)) { + if ( + r === undefined || + g === undefined || + b === undefined || + a === undefined + ) { return; } From 8604943e89fb26c2c74ef9c4b4501e4b1a4a4dbd Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Thu, 19 Oct 2023 17:51:55 +1100 Subject: [PATCH 141/202] feat(nodes): simple custom nodes Custom nodes may be placed in `$INVOKEAI_ROOT/nodes/` (configurable with the `custom_nodes_dir` option). On app startup, an `__init__.py` is copied into the custom nodes dir, which recursively loads all python files in the directory as modules (files starting with `_` are ignored). The custom nodes dir is now a python module itself. When we `from invocations import *` to initialize all invocations, we load the custom nodes dir, registering all custom nodes.
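For readers unfamiliar with programmatic imports, the loading described above boils down to Python's standard `importlib` machinery: build a module spec from a file path, register the module in `sys.modules`, and execute it. The following is a minimal sketch of that pattern only; the directory path is hypothetical, and the actual loader added in this patch additionally logs failures and resolves the path from the app config.

```py
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

# Hypothetical location; the real path is resolved from the app config (`custom_nodes_dir`).
nodes_dir = Path("/path/to/invokeai_root/nodes")

for f in nodes_dir.rglob("*.py"):
    if f.name.startswith("_"):
        continue  # skip private/utility files
    spec = spec_from_file_location(f.stem, str(f.absolute()))
    if spec is None or spec.loader is None:
        continue  # file is not importable as a module
    module = module_from_spec(spec)
    sys.modules[spec.name] = module
    spec.loader.exec_module(module)  # executing the file registers any nodes it defines
```

Executing each module at import time is what registers its nodes alongside the core nodes, which is also why the app must be restarted to pick up new or changed custom node files.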
--- invokeai/app/invocations/__init__.py | 29 ++++++++++++--- .../app/invocations/_custom_nodes_init.py | 37 +++++++++++++++++++ .../app/services/config/config_default.py | 8 ++++ 3 files changed, 68 insertions(+), 6 deletions(-) create mode 100644 invokeai/app/invocations/_custom_nodes_init.py diff --git a/invokeai/app/invocations/__init__.py b/invokeai/app/invocations/__init__.py index 6407a1cdee..91a2edc680 100644 --- a/invokeai/app/invocations/__init__.py +++ b/invokeai/app/invocations/__init__.py @@ -1,8 +1,25 @@ -import os +import shutil +import sys +from importlib.util import module_from_spec, spec_from_file_location +from pathlib import Path -__all__ = [] +from invokeai.app.services.config.config_default import InvokeAIAppConfig -dirname = os.path.dirname(os.path.abspath(__file__)) -for f in os.listdir(dirname): - if f != "__init__.py" and os.path.isfile("%s/%s" % (dirname, f)) and f[-3:] == ".py": - __all__.append(f[:-3]) +custom_nodes_path = Path(InvokeAIAppConfig.get_config().custom_nodes_path.absolute()) +custom_nodes_path.mkdir(parents=True, exist_ok=True) +custom_nodes_init_path = str(custom_nodes_path / "__init__.py") + +# copy our custom nodes __init__.py to the custom nodes directory +shutil.copy(Path(__file__).parent / "_custom_nodes_init.py", custom_nodes_init_path) + +# Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically +spec = spec_from_file_location("custom_nodes", custom_nodes_init_path) +if spec is None or spec.loader is None: + raise RuntimeError(f"Could not load custom nodes from {custom_nodes_init_path}") +module = module_from_spec(spec) +sys.modules[spec.name] = module +spec.loader.exec_module(module) + +# add core nodes to __all__ +python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.rglob("*.py")) +__all__ = list(f.stem for f in python_files) # type: ignore diff --git a/invokeai/app/invocations/_custom_nodes_init.py b/invokeai/app/invocations/_custom_nodes_init.py new file mode 100644 index 0000000000..561f6de382 --- /dev/null +++ b/invokeai/app/invocations/_custom_nodes_init.py @@ -0,0 +1,37 @@ +""" +InvokeAI custom nodes initialization + +This file is responsible for loading all custom nodes from this directory. + +All python files are loaded on app startup. Custom nodes will be initialized and available for use +in workflows. + +The app must be restarted for changes to be picked up. + +This file is overwritten on launch. Do not edit this file directly. 
+""" +import sys +from importlib import import_module +from importlib.util import module_from_spec, spec_from_file_location +from pathlib import Path + +from invokeai.backend.util.logging import InvokeAILogger + +logger = InvokeAILogger.get_logger() +count = 0 +for f in Path(__file__).parent.rglob("*.py"): + module_name = f.stem + if (not module_name.startswith("_")) and (module_name not in globals()): + spec = spec_from_file_location(module_name, f.absolute()) + if spec is None or spec.loader is None: + logger.warn(f"Could not load {f}") + continue + module = module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + count += 1 + del f, module_name + +logger.info(f"Loaded {count} modules from {Path(__file__).parent}") + +del import_module, Path diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py index df01b65882..a877c465d2 100644 --- a/invokeai/app/services/config/config_default.py +++ b/invokeai/app/services/config/config_default.py @@ -243,6 +243,7 @@ class InvokeAIAppConfig(InvokeAISettings): db_dir : Optional[Path] = Field(default=Path('databases'), description='Path to InvokeAI databases directory', json_schema_extra=Categories.Paths) outdir : Optional[Path] = Field(default=Path('outputs'), description='Default folder for output images', json_schema_extra=Categories.Paths) use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', json_schema_extra=Categories.Paths) + custom_nodes_dir : Path = Field(default=Path('nodes'), description='Path to directory for custom nodes', json_schema_extra=Categories.Paths) from_file : Optional[Path] = Field(default=None, description='Take command input from the indicated file (command-line client only)', json_schema_extra=Categories.Paths) # LOGGING @@ -410,6 +411,13 @@ class InvokeAIAppConfig(InvokeAISettings): """ return self._resolve(self.models_dir) + @property + def custom_nodes_path(self) -> Path: + """ + Path to the custom nodes directory + """ + return self._resolve(self.custom_nodes_dir) + # the following methods support legacy calls leftover from the Globals era @property def full_precision(self) -> bool: From 824702de99a09d18e5fae065bda2cc67568908f2 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 20 Oct 2023 12:50:55 +1100 Subject: [PATCH 142/202] feat(nodes): change expected structure for custom nodes --- invokeai/app/invocations/__init__.py | 7 ++- .../app/invocations/_custom_nodes_init.py | 37 -------------- .../app/invocations/custom_nodes/README.md | 51 +++++++++++++++++++ invokeai/app/invocations/custom_nodes/init.py | 51 +++++++++++++++++++ 4 files changed, 107 insertions(+), 39 deletions(-) delete mode 100644 invokeai/app/invocations/_custom_nodes_init.py create mode 100644 invokeai/app/invocations/custom_nodes/README.md create mode 100644 invokeai/app/invocations/custom_nodes/init.py diff --git a/invokeai/app/invocations/__init__.py b/invokeai/app/invocations/__init__.py index 91a2edc680..32cf73d215 100644 --- a/invokeai/app/invocations/__init__.py +++ b/invokeai/app/invocations/__init__.py @@ -7,10 +7,13 @@ from invokeai.app.services.config.config_default import InvokeAIAppConfig custom_nodes_path = Path(InvokeAIAppConfig.get_config().custom_nodes_path.absolute()) custom_nodes_path.mkdir(parents=True, exist_ok=True) + custom_nodes_init_path = str(custom_nodes_path / "__init__.py") +custom_nodes_readme_path = str(custom_nodes_path 
/ "README.md") # copy our custom nodes __init__.py to the custom nodes directory -shutil.copy(Path(__file__).parent / "_custom_nodes_init.py", custom_nodes_init_path) +shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path) +shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path) # Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically spec = spec_from_file_location("custom_nodes", custom_nodes_init_path) @@ -21,5 +24,5 @@ sys.modules[spec.name] = module spec.loader.exec_module(module) # add core nodes to __all__ -python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.rglob("*.py")) +python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.glob("*.py")) __all__ = list(f.stem for f in python_files) # type: ignore diff --git a/invokeai/app/invocations/_custom_nodes_init.py b/invokeai/app/invocations/_custom_nodes_init.py deleted file mode 100644 index 561f6de382..0000000000 --- a/invokeai/app/invocations/_custom_nodes_init.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -InvokeAI custom nodes initialization - -This file is responsible for loading all custom nodes from this directory. - -All python files are loaded on app startup. Custom nodes will be initialized and available for use -in workflows. - -The app must be restarted for changes to be picked up. - -This file is overwritten on launch. Do not edit this file directly. -""" -import sys -from importlib import import_module -from importlib.util import module_from_spec, spec_from_file_location -from pathlib import Path - -from invokeai.backend.util.logging import InvokeAILogger - -logger = InvokeAILogger.get_logger() -count = 0 -for f in Path(__file__).parent.rglob("*.py"): - module_name = f.stem - if (not module_name.startswith("_")) and (module_name not in globals()): - spec = spec_from_file_location(module_name, f.absolute()) - if spec is None or spec.loader is None: - logger.warn(f"Could not load {f}") - continue - module = module_from_spec(spec) - sys.modules[spec.name] = module - spec.loader.exec_module(module) - count += 1 - del f, module_name - -logger.info(f"Loaded {count} modules from {Path(__file__).parent}") - -del import_module, Path diff --git a/invokeai/app/invocations/custom_nodes/README.md b/invokeai/app/invocations/custom_nodes/README.md new file mode 100644 index 0000000000..d93bb65539 --- /dev/null +++ b/invokeai/app/invocations/custom_nodes/README.md @@ -0,0 +1,51 @@ +# Custom Nodes / Node Packs + +Copy your node packs to this directory. + +When nodes are added or changed, you must restart the app to see the changes. + +## Directory Structure + +For a node pack to be loaded, it must be placed in a directory alongside this +file. Here's an example structure: + +```py +. +├── __init__.py # Invoke-managed custom node loader +│ +├── cool_node +│ ├── __init__.py # see example below +│ └── cool_node.py +│ +└── my_node_pack + ├── __init__.py # see example below + ├── tasty_node.py + ├── bodacious_node.py + ├── utils.py + └── extra_nodes + └── fancy_node.py +``` + +## Node Pack `__init__.py` + +Each node pack must have an `__init__.py` file that imports its nodes. + +The structure of each node or node pack is otherwise not important. + +Here are examples, based on the example directory structure. 
+ +### `cool_node/__init__.py` + +```py +from .cool_node import CoolInvocation +``` + +### `my_node_pack/__init__.py` + +```py +from .tasty_node import TastyInvocation +from .bodacious_node import BodaciousInvocation +from .extra_nodes.fancy_node import FancyInvocation +``` + +Only nodes imported in the `__init__.py` file are loaded. diff --git a/invokeai/app/invocations/custom_nodes/init.py b/invokeai/app/invocations/custom_nodes/init.py new file mode 100644 index 0000000000..c6708e95a7 --- /dev/null +++ b/invokeai/app/invocations/custom_nodes/init.py @@ -0,0 +1,51 @@ +""" +Invoke-managed custom node loader. See README.md for more information. +""" + +import sys +from importlib.util import module_from_spec, spec_from_file_location +from pathlib import Path + +from invokeai.backend.util.logging import InvokeAILogger + +logger = InvokeAILogger.get_logger() +loaded_count = 0 + + +for d in Path(__file__).parent.iterdir(): + # skip files + if not d.is_dir(): + continue + + # skip hidden directories + if d.name.startswith("_") or d.name.startswith("."): + continue + + # skip directories without an `__init__.py` + init = d / "__init__.py" + if not init.exists(): + continue + + module_name = init.parent.stem + + # skip if already imported + if module_name in globals(): + continue + + # we have a legit module to import + spec = spec_from_file_location(module_name, init.absolute()) + + if spec is None or spec.loader is None: + logger.warn(f"Could not load {init}") + continue + + module = module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + + loaded_count += 1 + + del init, module_name + + +logger.info(f"Loaded {loaded_count} modules from {Path(__file__).parent}") From 3d33b3e1f5e81a76dba69d9d9446463ae7772346 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 20 Oct 2023 15:18:29 +1100 Subject: [PATCH 143/202] fix(nodes): explicitly include custom nodes files setuptools ignores markdown files - explicitly include all files in `"invokeai.app.invocations"` to ensure all custom node files are included --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 2bcaea2efa..693d4c9ed1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -171,6 +171,7 @@ version = { attr = "invokeai.version.__version__" } "invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"] "invokeai.frontend.web.dist" = ["**"] "invokeai.frontend.web.static" = ["**"] +"invokeai.app.invocations" = ["**"] #=== Begin: PyTest and Coverage [tool.pytest.ini_options] From eeeb5dc4513515a16a7a4019cebd66228e332e2e Mon Sep 17 00:00:00 2001 From: Dennis Date: Fri, 20 Oct 2023 04:11:01 +0000 Subject: [PATCH 144/202] translationBot(ui): update translation (Dutch) Currently translated at 99.9% (1216 of 1217 strings) Co-authored-by: Dennis Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/nl/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/nl.json | 114 +++++++++---------- 1 file changed, 57 insertions(+), 57 deletions(-) diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 5c08f65d21..2d50a602d1 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -106,9 +106,9 @@ "allImagesLoaded": "Alle afbeeldingen geladen", "loadMore": "Laad meer", "noImagesInGallery": "Geen afbeeldingen om te tonen", - "deleteImage": "Wis afbeelding", - "deleteImageBin": "Gewiste 
afbeeldingen worden naar de prullenbak van je besturingssysteem gestuurd.", - "deleteImagePermanent": "Gewiste afbeeldingen kunnen niet worden hersteld.", + "deleteImage": "Verwijder afbeelding", + "deleteImageBin": "Verwijderde afbeeldingen worden naar de prullenbak van je besturingssysteem gestuurd.", + "deleteImagePermanent": "Verwijderde afbeeldingen kunnen niet worden hersteld.", "assets": "Eigen onderdelen", "images": "Afbeeldingen", "autoAssignBoardOnClick": "Ken automatisch bord toe bij klikken", @@ -386,11 +386,11 @@ "deleteModel": "Verwijder model", "deleteConfig": "Verwijder configuratie", "deleteMsg1": "Weet je zeker dat je dit model wilt verwijderen uit InvokeAI?", - "deleteMsg2": "Hiermee ZAL het model van schijf worden verwijderd als het zich bevindt in de InvokeAI-beginmap. Als je het model vanaf een eigen locatie gebruikt, dan ZAL het model NIET van schijf worden verwijderd.", + "deleteMsg2": "Hiermee ZAL het model van schijf worden verwijderd als het zich bevindt in de beginmap van InvokeAI. Als je het model vanaf een eigen locatie gebruikt, dan ZAL het model NIET van schijf worden verwijderd.", "formMessageDiffusersVAELocationDesc": "Indien niet opgegeven, dan zal InvokeAI kijken naar het VAE-bestand in de hierboven gegeven modellocatie.", "repoIDValidationMsg": "Online repository van je model", "formMessageDiffusersModelLocation": "Locatie Diffusers-model", - "convertToDiffusersHelpText3": "Je checkpoint-bestand op schijf ZAL worden verwijderd als het zich in de InvokeAI root map bevindt. Het zal NIET worden verwijderd als het zich in een andere locatie bevindt.", + "convertToDiffusersHelpText3": "Je checkpoint-bestand op de schijf ZAL worden verwijderd als het zich in de beginmap van InvokeAI bevindt. Het ZAL NIET worden verwijderd als het zich in een andere locatie bevindt.", "convertToDiffusersHelpText6": "Wil je dit model omzetten?", "allModels": "Alle modellen", "checkpointModels": "Checkpoints", @@ -458,11 +458,11 @@ "noCustomLocationProvided": "Geen Aangepaste Locatie Opgegeven", "syncModels": "Synchroniseer Modellen", "modelsSynced": "Modellen Gesynchroniseerd", - "modelSyncFailed": "Synchronisatie Modellen Gefaald", + "modelSyncFailed": "Synchronisatie modellen mislukt", "modelDeleteFailed": "Model kon niet verwijderd worden", "convertingModelBegin": "Model aan het converteren. Even geduld.", "importModels": "Importeer Modellen", - "syncModelsDesc": "Als je modellen niet meer synchroon zijn met de backend, kan je ze met deze optie verversen. Dit wordt typisch gebruikt in het geval je het models.yaml bestand met de hand bewerkt of als je modellen aan de InvokeAI root map toevoegt nadat de applicatie gestart werd.", + "syncModelsDesc": "Als je modellen niet meer synchroon zijn met de backend, kan je ze met deze optie vernieuwen. Dit wordt meestal gebruikt in het geval je het bestand models.yaml met de hand bewerkt of als je modellen aan de beginmap van InvokeAI toevoegt nadat de applicatie gestart is.", "loraModels": "LoRA's", "onnxModels": "Onnx", "oliveModels": "Olives", @@ -615,14 +615,14 @@ "resetWebUI": "Herstel web-UI", "resetWebUIDesc1": "Herstel web-UI herstelt alleen de lokale afbeeldingscache en de onthouden instellingen van je browser. 
Het verwijdert geen afbeeldingen van schijf.", "resetWebUIDesc2": "Als afbeeldingen niet getoond worden in de galerij of iets anders werkt niet, probeer dan eerst deze herstelfunctie voordat je een fout aanmeldt op GitHub.", - "resetComplete": "Webgebruikersinterface is hersteld.", + "resetComplete": "Webinterface is hersteld.", "useSlidersForAll": "Gebruik schuifbalken voor alle opties", - "consoleLogLevel": "Logboekniveau", + "consoleLogLevel": "Niveau logboek", "shouldLogToConsole": "Schrijf logboek naar console", "developer": "Ontwikkelaar", "general": "Algemeen", "showProgressInViewer": "Toon voortgangsafbeeldingen in viewer", - "generation": "Generatie", + "generation": "Genereren", "ui": "Gebruikersinterface", "antialiasProgressImages": "Voer anti-aliasing uit op voortgangsafbeeldingen", "showAdvancedOptions": "Toon uitgebreide opties", @@ -631,16 +631,16 @@ "beta": "Bèta", "experimental": "Experimenteel", "alternateCanvasLayout": "Omwisselen Canvas Layout", - "enableNodesEditor": "Knopen Editor Inschakelen", - "autoChangeDimensions": "Werk bij wijziging afmetingen bij naar modelstandaard", + "enableNodesEditor": "Schakel Knooppunteditor in", + "autoChangeDimensions": "Werk B/H bij naar modelstandaard bij wijziging", "clearIntermediates": "Wis tussentijdse afbeeldingen", "clearIntermediatesDesc3": "Je galerijafbeeldingen zullen niet worden verwijderd.", "clearIntermediatesWithCount_one": "Wis {{count}} tussentijdse afbeelding", "clearIntermediatesWithCount_other": "Wis {{count}} tussentijdse afbeeldingen", - "clearIntermediatesDesc2": "Tussentijdse afbeeldingen zijn nevenproducten bij een generatie, die afwijken van de uitvoerafbeeldingen in de galerij. Het wissen van tussentijdse afbeeldingen zal schijfruimte vrijmaken.", + "clearIntermediatesDesc2": "Tussentijdse afbeeldingen zijn nevenproducten bij het genereren. Deze wijken af van de uitvoerafbeeldingen in de galerij. 
Als je tussentijdse afbeeldingen wist, wordt schijfruimte vrijgemaakt.", "intermediatesCleared_one": "{{count}} tussentijdse afbeelding gewist", "intermediatesCleared_other": "{{count}} tussentijdse afbeeldingen gewist", - "clearIntermediatesDesc1": "Het wissen van tussentijdse onderdelen zet de staat van je canvas en ControlNet terug.", + "clearIntermediatesDesc1": "Als je tussentijdse afbeeldingen wist, dan wordt de staat hersteld van je canvas en van ControlNet.", "intermediatesClearedFailed": "Fout bij wissen van tussentijdse afbeeldingen" }, "toast": { @@ -881,7 +881,7 @@ "conditioningCollectionDescription": "Conditionering kan worden doorgegeven tussen knooppunten.", "colorPolymorphic": "Polymorfisme kleur", "colorCodeEdgesHelp": "Kleurgecodeerde randen op basis van hun verbonden velden", - "collectionDescription": "Beschrijving", + "collectionDescription": "TODO", "float": "Zwevende-kommagetal", "workflowContact": "Contactpersoon", "skippingReservedFieldType": "Overslaan van gereserveerd veldsoort", @@ -898,7 +898,7 @@ "sourceNode": "Bronknooppunt", "nodeOpacity": "Dekking knooppunt", "pickOne": "Kies er een", - "collectionItemDescription": "Beschrijving", + "collectionItemDescription": "TODO", "integerDescription": "Gehele getallen zijn getallen zonder een decimaalteken.", "outputField": "Uitvoerveld", "unableToLoadWorkflow": "Kan werkstroom niet valideren", @@ -944,7 +944,7 @@ "inputNode": "Invoerknooppunt", "enumDescription": "Enumeraties zijn waarden die uit een aantal opties moeten worden gekozen.", "unkownInvocation": "Onbekende aanroepsoort", - "loRAModelFieldDescription": "Beschrijving", + "loRAModelFieldDescription": "TODO", "imageField": "Afbeelding", "skippedReservedOutput": "Overgeslagen gereserveerd uitvoerveld", "animatedEdgesHelp": "Animeer gekozen randen en randen verbonden met de gekozen knooppunten", @@ -953,7 +953,7 @@ "unknownTemplate": "Onbekend sjabloon", "noWorkflow": "Geen werkstroom", "removeLinearView": "Verwijder uit lineaire weergave", - "colorCollectionDescription": "Beschrijving", + "colorCollectionDescription": "TODO", "integerCollectionDescription": "Een verzameling gehele getallen.", "colorPolymorphicDescription": "Een verzameling kleuren.", "sDXLMainModelField": "SDXL-model", @@ -1028,7 +1028,7 @@ "loadingNodes": "Bezig met laden van knooppunten...", "snapToGridHelp": "Lijn knooppunten uit op raster bij verplaatsing", "workflowSettings": "Instellingen werkstroomeditor", - "mainModelFieldDescription": "Beschrijving", + "mainModelFieldDescription": "TODO", "sDXLRefinerModelField": "Verfijningsmodel", "loRAModelField": "LoRA", "unableToParseEdge": "Kan rand niet inlezen", @@ -1039,13 +1039,13 @@ "controlnet": { "amult": "a_mult", "resize": "Schaal", - "showAdvanced": "Toon uitgebreid", + "showAdvanced": "Toon uitgebreide opties", "contentShuffleDescription": "Verschuift het materiaal in de afbeelding", "bgth": "bg_th", "addT2IAdapter": "Voeg $t(common.t2iAdapter) toe", "pidi": "PIDI", "importImageFromCanvas": "Importeer afbeelding uit canvas", - "lineartDescription": "Zet afbeelding om naar lineart", + "lineartDescription": "Zet afbeelding om naar line-art", "normalBae": "Normale BAE", "importMaskFromCanvas": "Importeer masker uit canvas", "hed": "HED", @@ -1053,7 +1053,7 @@ "contentShuffle": "Verschuif materiaal", "controlNetEnabledT2IDisabled": "$t(common.controlNet) ingeschakeld, $t(common.t2iAdapter)s uitgeschakeld", "ipAdapterModel": "Adaptermodel", - "resetControlImage": "Zet controle-afbeelding terug", + "resetControlImage": "Herstel 
controle-afbeelding", "beginEndStepPercent": "Percentage begin-/eindstap", "mlsdDescription": "Minimalistische herkenning lijnsegmenten", "duplicate": "Maak kopie", @@ -1061,8 +1061,8 @@ "f": "F", "h": "H", "prompt": "Prompt", - "depthMidasDescription": "Generatie van diepteblad via Midas", - "controlnet": "$t(controlnet.controlAdapter) #{{number}} ($t(common.controlNet))", + "depthMidasDescription": "Genereer diepteblad via Midas", + "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))", "openPoseDescription": "Menselijke pose-benadering via Openpose", "control": "Controle", "resizeMode": "Modus schaling", @@ -1080,7 +1080,7 @@ "enableControlnet": "Schakel ControlNet in", "detectResolution": "Herken resolutie", "controlNetT2IMutexDesc": "Gelijktijdig gebruik van $t(common.controlNet) en $t(common.t2iAdapter) wordt op dit moment niet ondersteund.", - "ip_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.ipAdapter))", + "ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))", "pidiDescription": "PIDI-afbeeldingsverwerking", "mediapipeFace": "Mediapipe - Gezicht", "mlsd": "M-LSD", @@ -1088,10 +1088,10 @@ "fill": "Vul", "cannyDescription": "Herkenning Canny-rand", "addIPAdapter": "Voeg $t(common.ipAdapter) toe", - "lineart": "Lineart", + "lineart": "Line-art", "colorMapDescription": "Genereert een kleurenblad van de afbeelding", "lineartAnimeDescription": "Lineartverwerking in anime-stijl", - "t2i_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.t2iAdapter))", + "t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))", "minConfidence": "Min. vertrouwensniveau", "imageResolution": "Resolutie afbeelding", "megaControl": "Zeer veel controle", @@ -1110,15 +1110,15 @@ "controlAdapter_other": "Control-adapters", "safe": "Veilig", "colorMapTileSize": "Grootte tegel", - "lineartAnime": "Lineart-anime", + "lineartAnime": "Line-art voor anime", "ipAdapterImageFallback": "Geen IP-adapterafbeelding gekozen", "mediapipeFaceDescription": "Gezichtsherkenning met Mediapipe", "canny": "Canny", - "depthZoeDescription": "Generatie van diepteblad via Zoe", + "depthZoeDescription": "Genereer diepteblad via Zoe", "hedDescription": "Herkenning van holistisch-geneste randen", "setControlImageDimensions": "Stel afmetingen controle-afbeelding in op B/H", "scribble": "Krabbel", - "resetIPAdapterImage": "Zet IP-adapterafbeelding terug", + "resetIPAdapterImage": "Herstel IP-adapterafbeelding", "handAndFace": "Hand en gezicht", "enableIPAdapter": "Schakel IP-adapter in", "maxFaces": "Max. gezichten" @@ -1132,7 +1132,7 @@ "label": "Gedrag seedwaarde" }, "enableDynamicPrompts": "Schakel dynamische prompts in", - "combinatorial": "Combinatorische generatie", + "combinatorial": "Combinatorisch genereren", "maxPrompts": "Max. prompts", "promptsWithCount_one": "{{count}} prompt", "promptsWithCount_other": "{{count}} prompts", @@ -1141,7 +1141,7 @@ "popovers": { "noiseUseCPU": { "paragraphs": [ - "Bestuurt of ruis wordt gegenereerd op de CPU of de GPU.", + "Bepaalt of ruis wordt gegenereerd op de CPU of de GPU.", "Met CPU-ruis ingeschakeld zal een bepaalde seedwaarde dezelfde afbeelding opleveren op welke machine dan ook.", "Er is geen prestatieverschil bij het inschakelen van CPU-ruis." ], @@ -1149,7 +1149,7 @@ }, "paramScheduler": { "paragraphs": [ - "De planner bepaalt hoe per keer ruis wordt toegevoegd aan een afbeelding of hoe een monster wordt bijgewerkt op basis van de uitvoer van een model." 
+ "De planner bepaalt hoe ruis per iteratie wordt toegevoegd aan een afbeelding of hoe een monster wordt bijgewerkt op basis van de uitvoer van een model." ], "heading": "Planner" }, @@ -1220,8 +1220,8 @@ }, "paramSeed": { "paragraphs": [ - "Bestuurt de startruis die gebruikt wordt bij het genereren.", - "Schakel \"Willekeurige seedwaarde\" uit om identieke resultaten te krijgen met dezelfde generatie-instellingen." + "Bepaalt de startruis die gebruikt wordt bij het genereren.", + "Schakel \"Willekeurige seedwaarde\" uit om identieke resultaten te krijgen met dezelfde genereer-instellingen." ], "heading": "Seedwaarde" }, @@ -1240,7 +1240,7 @@ }, "dynamicPromptsSeedBehaviour": { "paragraphs": [ - "Bestuurt hoe de seedwaarde wordt gebruikt bij het genereren van prompts.", + "Bepaalt hoe de seedwaarde wordt gebruikt bij het genereren van prompts.", "Per iteratie zal een unieke seedwaarde worden gebruikt voor elke iteratie. Gebruik dit om de promptvariaties binnen een enkele seedwaarde te verkennen.", "Bijvoorbeeld: als je vijf prompts heb, dan zal voor elke afbeelding dezelfde seedwaarde gebruikt worden.", "De optie Per afbeelding zal een unieke seedwaarde voor elke afbeelding gebruiken. Dit biedt meer variatie." @@ -1259,7 +1259,7 @@ "heading": "Model", "paragraphs": [ "Model gebruikt voor de ontruisingsstappen.", - "Verschillende modellen zijn meestal getraind zich te specialiseren in het maken van bepaalde esthetische resultaten en materiaal." + "Verschillende modellen zijn meestal getraind om zich te specialiseren in het maken van bepaalde esthetische resultaten en materiaal." ] }, "compositingCoherencePass": { @@ -1271,7 +1271,7 @@ "paramDenoisingStrength": { "paragraphs": [ "Hoeveel ruis wordt toegevoegd aan de invoerafbeelding.", - "0 geeft een identieke afbeelding, waarbij 1 een volledig nieuwe afbeelding geeft." + "0 levert een identieke afbeelding op, waarbij 1 een volledig nieuwe afbeelding oplevert." ], "heading": "Ontruisingssterkte" }, @@ -1284,7 +1284,7 @@ }, "paramNegativeConditioning": { "paragraphs": [ - "Het generatieproces voorkomt de gegeven begrippen in de negatieve prompt. Gebruik dit om bepaalde zaken of voorwerpen uit te sluiten van de uitvoerafbeelding.", + "Het genereerproces voorkomt de gegeven begrippen in de negatieve prompt. Gebruik dit om bepaalde zaken of voorwerpen uit te sluiten van de uitvoerafbeelding.", "Ondersteunt Compel-syntax en -embeddingen." ], "heading": "Negatieve prompt" @@ -1316,13 +1316,13 @@ "controlNet": { "heading": "ControlNet", "paragraphs": [ - "ControlNets biedt begeleiding aan het generatieproces, waarbij hulp wordt geboden bij het maken van afbeelding met aangestuurde compositie, structuur of stijl, afhankelijk van het gekozen model." + "ControlNets begeleidt het genereerproces, waarbij geholpen wordt bij het maken van afbeeldingen met aangestuurde compositie, structuur of stijl, afhankelijk van het gekozen model." ] }, "paramCFGScale": { "heading": "CFG-schaal", "paragraphs": [ - "Bestuurt hoeveel je prompt invloed heeft op het generatieproces." + "Bepaalt hoeveel je prompt invloed heeft op het genereerproces." ] }, "controlNetControlMode": { @@ -1335,7 +1335,7 @@ "heading": "Stappen", "paragraphs": [ "Het aantal uit te voeren stappen tijdens elke generatie.", - "Hogere stappenaantallen geven meestal betere afbeeldingen ten koste van een grotere benodigde generatietijd." + "Een hoger aantal stappen geven meestal betere afbeeldingen, ten koste van een hogere benodigde tijd om te genereren." 
] }, "paramPositiveConditioning": { @@ -1356,7 +1356,7 @@ "seamless": "Naadloos", "positivePrompt": "Positieve prompt", "negativePrompt": "Negatieve prompt", - "generationMode": "Generatiemodus", + "generationMode": "Genereermodus", "Threshold": "Drempelwaarde ruis", "metadata": "Metagegevens", "strength": "Sterkte Afbeelding naar afbeelding", @@ -1382,13 +1382,13 @@ }, "queue": { "status": "Status", - "pruneSucceeded": "{{item_count}} voltooide onderdelen uit wachtrij gesnoeid", + "pruneSucceeded": "{{item_count}} voltooide onderdelen uit wachtrij opgeruimd", "cancelTooltip": "Annuleer huidig onderdeel", "queueEmpty": "Wachtrij leeg", "pauseSucceeded": "Verwerker onderbroken", "in_progress": "Bezig", - "queueFront": "Voeg toe aan voorkant van wachtrij", - "notReady": "Kan niet in wachtrij plaatsen", + "queueFront": "Voeg vooraan toe in wachtrij", + "notReady": "Fout bij plaatsen in wachtrij", "batchFailedToQueue": "Fout bij reeks in wachtrij plaatsen", "completed": "Voltooid", "queueBack": "Voeg toe aan wachtrij", @@ -1402,22 +1402,22 @@ "front": "begin", "clearSucceeded": "Wachtrij gewist", "pause": "Onderbreek", - "pruneTooltip": "Snoei {{item_count}} voltooide onderdelen", + "pruneTooltip": "Ruim {{item_count}} voltooide onderdelen op", "cancelSucceeded": "Onderdeel geannuleerd", "batchQueuedDesc_one": "Voeg {{count}} sessie toe aan het {{direction}} van de wachtrij", "batchQueuedDesc_other": "Voeg {{count}} sessies toe aan het {{direction}} van de wachtrij", "graphQueued": "Graaf in wachtrij geplaatst", "queue": "Wachtrij", "batch": "Reeks", - "clearQueueAlertDialog": "Als je de wachtrij onmiddellijk wist, dan worden alle onderdelen die bezig zijn geannuleerd en wordt de gehele wachtrij gewist.", + "clearQueueAlertDialog": "Als je de wachtrij onmiddellijk wist, dan worden alle onderdelen die bezig zijn geannuleerd en wordt de wachtrij volledig gewist.", "pending": "Wachtend", "completedIn": "Voltooid na", "resumeFailed": "Fout bij hervatten verwerker", "clear": "Wis", - "prune": "Snoei", + "prune": "Ruim op", "total": "Totaal", "canceled": "Geannuleerd", - "pruneFailed": "Fout bij snoeien van wachtrij", + "pruneFailed": "Fout bij opruimen van wachtrij", "cancelBatchSucceeded": "Reeks geannuleerd", "clearTooltip": "Annuleer en wis alle onderdelen", "current": "Huidig", @@ -1431,7 +1431,7 @@ "session": "Sessie", "queueTotal": "Totaal {{total}}", "resumeSucceeded": "Verwerker hervat", - "enqueueing": "Toevoegen van reeks aan wachtrij", + "enqueueing": "Bezig met toevoegen van reeks aan wachtrij", "resumeTooltip": "Hervat verwerker", "queueMaxExceeded": "Max. 
aantal van {{max_queue_size}} overschreden, {{skip}} worden overgeslagen", "resume": "Hervat", @@ -1441,18 +1441,18 @@ "graphFailedToQueue": "Fout bij toevoegen graaf aan wachtrij" }, "sdxl": { - "refinerStart": "Startwaarde verfijner", + "refinerStart": "Startwaarde verfijning", "selectAModel": "Kies een model", "scheduler": "Planner", "cfgScale": "CFG-schaal", "negStylePrompt": "Negatieve-stijlprompt", "noModelsAvailable": "Geen modellen beschikbaar", - "refiner": "Verfijner", - "negAestheticScore": "Negatieve aantrekkelijkheidsscore", - "useRefiner": "Gebruik verfijner", + "refiner": "Verfijning", + "negAestheticScore": "Negatieve esthetische score", + "useRefiner": "Gebruik verfijning", "denoisingStrength": "Sterkte ontruising", - "refinermodel": "Verfijnermodel", - "posAestheticScore": "Positieve aantrekkelijkheidsscore", + "refinermodel": "Verfijningsmodel", + "posAestheticScore": "Positieve esthetische score", "concatPromptStyle": "Plak prompt- en stijltekst aan elkaar", "loading": "Bezig met laden...", "steps": "Stappen", From 8615d53e654ecaf20960c9110b47a098756b5962 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 20 Oct 2023 16:22:56 +1100 Subject: [PATCH 145/202] fix(nodes): fix missing generation modes Lax typing on the metadata util functions allowed a typing issue to slip through. Fixed the lax typing, updated core metadata node. --- invokeai/app/invocations/metadata.py | 7 +- .../graphBuilders/buildAdHocUpscaleGraph.ts | 5 +- .../nodes/util/graphBuilders/metadata.ts | 2 +- .../frontend/web/src/services/api/schema.d.ts | 152 +++++++++++++----- 4 files changed, 126 insertions(+), 40 deletions(-) diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 1ed399873b..0f45fc5a36 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -107,11 +107,16 @@ class MergeMetadataInvocation(BaseInvocation): return MetadataOutput(metadata=MetadataField.model_validate(data)) +GENERATION_MODES = Literal[ + "txt2img", "img2img", "inpaint", "outpaint", "sdxl_txt2img", "sdxl_img2img", "sdxl_inpaint", "sdxl_outpaint" +] + + @invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.0.0") class CoreMetadataInvocation(BaseInvocation): """Collects core generation metadata into a MetadataField""" - generation_mode: Literal["txt2img", "img2img", "inpaint", "outpaint"] = InputField( + generation_mode: Optional[GENERATION_MODES] = InputField( default=None, description="The generation mode that output this image", ) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildAdHocUpscaleGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildAdHocUpscaleGraph.ts index 46e415a886..5af8edbdfc 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildAdHocUpscaleGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildAdHocUpscaleGraph.ts @@ -7,7 +7,7 @@ import { SaveImageInvocation, } from 'services/api/types'; import { REALESRGAN as ESRGAN, SAVE_IMAGE } from './constants'; -import { addCoreMetadataNode } from './metadata'; +import { addCoreMetadataNode, upsertMetadata } from './metadata'; type Arg = { image_name: string; @@ -56,7 +56,8 @@ export const buildAdHocUpscaleGraph = ({ ], }; - addCoreMetadataNode(graph, { + addCoreMetadataNode(graph, {}); + upsertMetadata(graph, { esrgan_model: esrganModelName, }); diff --git 
a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts index 5cc397ce68..b673be9e4a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/metadata.ts @@ -5,7 +5,7 @@ import { METADATA, SAVE_IMAGE } from './constants'; export const addCoreMetadataNode = ( graph: NonNullableGraph, - metadata: Partial | JsonObject + metadata: Partial ): void => { graph.nodes[METADATA] = { id: METADATA, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 5541fa20e9..6ff9d49bf9 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -1842,9 +1842,8 @@ export type components = { /** * Generation Mode * @description The generation mode that output this image - * @enum {string} */ - generation_mode?: "txt2img" | "img2img" | "inpaint" | "outpaint"; + generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint") | null; /** * Positive Prompt * @description The positive prompt parameter @@ -2942,7 +2941,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["LoraLoaderInvocation"] | 
components["schemas"]["RangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | 
components["schemas"]["CvInpaintInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ImageMultiplyInvocation"]; + [key: string]: components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["TestInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | 
components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["TestInvocation3"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["TestInvocation2"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["StringReplaceInvocation"]; }; /** * Edges @@ -2979,7 +2978,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["FaceOffOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | 
components["schemas"]["BooleanOutput"] | components["schemas"]["String2Output"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["FaceMaskOutput"]; + [key: string]: components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["String2Output"] | components["schemas"]["ImageOutput"]; }; /** * Errors @@ -3200,21 +3199,18 @@ export type components = { /** * Weight * @description The weight given to the IP-Adapter - * @default 1 */ - weight?: number | number[]; + weight: number | number[]; /** * Begin Step Percent * @description 
When the IP-Adapter is first applied (% of total steps) - * @default 0 */ - begin_step_percent?: number; + begin_step_percent: number; /** * End Step Percent * @description When the IP-Adapter is last applied (% of total steps) - * @default 1 */ - end_step_percent?: number; + end_step_percent: number; }; /** IPAdapterModelField */ IPAdapterModelField: { @@ -8402,6 +8398,90 @@ export type components = { */ type: "t2i_adapter_output"; }; + /** TestInvocation */ + TestInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** A */ + a?: string; + /** + * type + * @default test_invocation + * @constant + */ + type: "test_invocation"; + }; + /** TestInvocation2 */ + TestInvocation2: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** A */ + a?: string; + /** + * type + * @default test_invocation_2 + * @constant + */ + type: "test_invocation_2"; + }; + /** TestInvocation3 */ + TestInvocation3: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** A */ + a?: string; + /** + * type + * @default test_invocation_3 + * @constant + */ + type: "test_invocation_3"; + }; /** TextualInversionModelConfig */ TextualInversionModelConfig: { /** Model Name */ @@ -8727,30 +8807,6 @@ export type components = { /** Ui Order */ ui_order: number | null; }; - /** - * StableDiffusion2ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * CLIPVisionModelFormat - * @description An enumeration. - * @enum {string} - */ - CLIPVisionModelFormat: "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionOnnxModelFormat * @description An enumeration. @@ -8763,6 +8819,30 @@ export type components = { * @enum {string} */ IPAdapterModelFormat: "invokeai"; + /** + * T2IAdapterModelFormat + * @description An enumeration. + * @enum {string} + */ + T2IAdapterModelFormat: "diffusers"; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * CLIPVisionModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + CLIPVisionModelFormat: "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * ControlNetModelFormat * @description An enumeration. @@ -8770,11 +8850,11 @@ export type components = { */ ControlNetModelFormat: "checkpoint" | "diffusers"; /** - * T2IAdapterModelFormat + * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ - T2IAdapterModelFormat: "diffusers"; + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; From 8e948d3f17c7a93fcae95f71fda863d069cfa4de Mon Sep 17 00:00:00 2001 From: Eugene Brodsky Date: Thu, 19 Oct 2023 12:13:36 -0400 Subject: [PATCH 146/202] fix(assets): re-add missing caution image --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 693d4c9ed1..d67b096ddc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -166,6 +166,7 @@ version = { attr = "invokeai.version.__version__" } ] [tool.setuptools.package-data] +"invokeai.app.assets" = ["**/*.png"] "invokeai.assets.fonts" = ["**/*.ttf"] "invokeai.backend" = ["**.png"] "invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"] From 3546c41f4a361dfc4be0f7cd97005b41ba191e73 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 23 Oct 2023 18:48:14 -0400 Subject: [PATCH 147/202] close #4975 --- invokeai/app/services/model_manager/__init__.py | 1 + invokeai/backend/training/textual_inversion_training.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/invokeai/app/services/model_manager/__init__.py b/invokeai/app/services/model_manager/__init__.py index e69de29bb2..da54cbf89f 100644 --- a/invokeai/app/services/model_manager/__init__.py +++ b/invokeai/app/services/model_manager/__init__.py @@ -0,0 +1 @@ +from .model_manager_default import ModelManagerService diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py index 153bd0fcc4..9bc1d188bc 100644 --- a/invokeai/backend/training/textual_inversion_training.py +++ b/invokeai/backend/training/textual_inversion_training.py @@ -41,7 +41,7 @@ from transformers import CLIPTextModel, CLIPTokenizer # invokeai stuff from invokeai.app.services.config import InvokeAIAppConfig, PagingArgumentParser -from invokeai.app.services.model_manager_service import ModelManagerService +from invokeai.app.services.model_manager import ModelManagerService from invokeai.backend.model_management.models import SubModelType if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): From c14aa30956c4255f49dce00d20c3e5a2b1df6a1e Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 23 Oct 2023 20:37:33 -0400 Subject: [PATCH 148/202] fix the merge script to correctly display models sorted by base --- invokeai/frontend/merge/merge_diffusers.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/merge/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py index 8fa02cb49c..f3672acbf2 100644 --- a/invokeai/frontend/merge/merge_diffusers.py +++ b/invokeai/frontend/merge/merge_diffusers.py @@ -131,6 +131,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): values=[ "Models Built on SD-1.x", "Models Built on SD-2.x", + "Models Built on SDXL", ], value=[self.current_base], columns=4, @@ -309,7 +310,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): else: return True - 
def get_model_names(self, base_model: Optional[BaseModelType] = None) -> List[str]: + def get_model_names(self, base_model: BaseModelType = BaseModelType.StableDiffusion1) -> List[str]: model_names = [ info["model_name"] for info in self.model_manager.list_models(model_type=ModelType.Main, base_model=base_model) @@ -318,7 +319,8 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): return sorted(model_names) def _populate_models(self, value=None): - base_model = tuple(BaseModelType)[value[0]] + bases = ["sd-1", "sd-2", "sdxl"] + base_model = BaseModelType(bases[value[0]]) self.model_names = self.get_model_names(base_model) models_plus_none = self.model_names.copy() From 6cbc69f3b7785531b49470ca93399d917fff832d Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 23 Oct 2023 22:06:10 -0400 Subject: [PATCH 149/202] support conversion of controlnets from safetensors to diffusers --- .../backend/install/model_install_backend.py | 6 ++ .../model_management/models/controlnet.py | 3 +- invokeai/configs/controlnet/cldm_v15.yaml | 79 +++++++++++++++++ invokeai/configs/controlnet/cldm_v21.yaml | 85 +++++++++++++++++++ 4 files changed, 172 insertions(+), 1 deletion(-) create mode 100644 invokeai/configs/controlnet/cldm_v15.yaml create mode 100644 invokeai/configs/controlnet/cldm_v21.yaml diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 9224f5c8b2..9784aa0ac2 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -460,6 +460,12 @@ class ModelInstall(object): possible_conf = path.with_suffix(".yaml") if possible_conf.exists(): legacy_conf = str(self.relative_to_root(possible_conf)) + else: + legacy_conf = Path( + self.config.root_path, + "configs/controlnet", + ("cldm_v15.yaml" if info.base_type == BaseModelType("sd-1") else "cldm_v21.yaml"), + ) if legacy_conf: attributes.update(dict(config=str(legacy_conf))) diff --git a/invokeai/backend/model_management/models/controlnet.py b/invokeai/backend/model_management/models/controlnet.py index 359df91a82..6a42b59fe1 100644 --- a/invokeai/backend/model_management/models/controlnet.py +++ b/invokeai/backend/model_management/models/controlnet.py @@ -132,13 +132,14 @@ def _convert_controlnet_ckpt_and_cache( model_path: str, output_path: str, base_model: BaseModelType, - model_config: ControlNetModel.CheckpointConfig, + model_config: str, ) -> str: """ Convert the controlnet from checkpoint format to diffusers format, cache it to disk, and return Path to converted file. If already on disk then just returns Path. 
""" + print(f"DEBUG: controlnet config = {model_config}") app_config = InvokeAIAppConfig.get_config() weights = app_config.root_path / model_path output_path = Path(output_path) diff --git a/invokeai/configs/controlnet/cldm_v15.yaml b/invokeai/configs/controlnet/cldm_v15.yaml new file mode 100644 index 0000000000..fde1825577 --- /dev/null +++ b/invokeai/configs/controlnet/cldm_v15.yaml @@ -0,0 +1,79 @@ +model: + target: cldm.cldm.ControlLDM + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + control_key: "hint" + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + only_mid_control: False + + control_stage_config: + target: cldm.cldm.ControlNet + params: + image_size: 32 # unused + in_channels: 4 + hint_channels: 3 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + unet_config: + target: cldm.cldm.ControlledUnetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenCLIPEmbedder diff --git a/invokeai/configs/controlnet/cldm_v21.yaml b/invokeai/configs/controlnet/cldm_v21.yaml new file mode 100644 index 0000000000..fc65193647 --- /dev/null +++ b/invokeai/configs/controlnet/cldm_v21.yaml @@ -0,0 +1,85 @@ +model: + target: cldm.cldm.ControlLDM + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + control_key: "hint" + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + only_mid_control: False + + control_stage_config: + target: cldm.cldm.ControlNet + params: + use_checkpoint: True + image_size: 32 # unused + in_channels: 4 + hint_channels: 3 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + unet_config: + target: cldm.cldm.ControlledUnetModel + params: + use_checkpoint: True + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + first_stage_config: + target: 
ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + #attn_type: "vanilla-xformers" + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: True + layer: "penultimate" From 3b7e17c0ccafa5bc8a074d52820e1317b4cdcdc6 Mon Sep 17 00:00:00 2001 From: Gille Date: Mon, 23 Oct 2023 21:46:51 -0600 Subject: [PATCH 150/202] Update communityNodes.md Update to Load Video Frame node to reflect changes made in link locations... a.k.a. fixing broken links. --- docs/nodes/communityNodes.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/nodes/communityNodes.md b/docs/nodes/communityNodes.md index cec5d18df6..9f997776fe 100644 --- a/docs/nodes/communityNodes.md +++ b/docs/nodes/communityNodes.md @@ -177,12 +177,8 @@ This includes 15 Nodes: **Node Link:** https://github.com/helix4u/load_video_frame -**Example Node Graph:** https://github.com/helix4u/load_video_frame/blob/main/Example_Workflow.json - **Output Example:** - - -[Full mp4 of Example Output test.mp4](https://github.com/helix4u/load_video_frame/blob/main/test.mp4) + -------------------------------- ### Make 3D From 4f74549f17bf50e61978468cef84b0c567b716ea Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 27 Oct 2023 19:12:48 -0400 Subject: [PATCH 151/202] prevent prereleases from showing up in updater --- invokeai/frontend/install/invokeai_update.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/install/invokeai_update.py b/invokeai/frontend/install/invokeai_update.py index 87661da79f..cf017dcfeb 100644 --- a/invokeai/frontend/install/invokeai_update.py +++ b/invokeai/frontend/install/invokeai_update.py @@ -91,7 +91,7 @@ def get_extras(): def main(): - versions = get_versions() + versions = [x for x in get_versions() if not (x['draft'] or x['prerelease'])] if invokeai_is_running(): print(":exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]") input("Press any key to continue...") From fe5d2c023b21ca958a8e439a72c96bd3bb727bd7 Mon Sep 17 00:00:00 2001 From: Jonathan <34005131+JPPhoto@users.noreply.github.com> Date: Sat, 28 Oct 2023 08:13:51 -0500 Subject: [PATCH 152/202] Update communityNodes.md Added Average Images node --- docs/nodes/communityNodes.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/nodes/communityNodes.md b/docs/nodes/communityNodes.md index 9f997776fe..f394a7c9bd 100644 --- a/docs/nodes/communityNodes.md +++ b/docs/nodes/communityNodes.md @@ -9,6 +9,7 @@ To download a node, simply download the `.py` node file from the link and add it To use a community workflow, download the the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor. - Community Nodes + + [Average Images](#average-images) + [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj) + [Film Grain](#film-grain) + [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes) @@ -33,6 +34,13 @@ To use a community workflow, download the the `.json` node graph file and load i - [Help](#help) +-------------------------------- +### Average Images + +**Description:** This node takes in a collection of images of the same size and averages them as output. 
It converts everything to RGB mode first. + +**Node Link:** https://github.com/JPPhoto/average-images-node + -------------------------------- ### Depth Map from Wavefront OBJ From 859e3d5a6156fcad8d102a8ac751e27fef78ae8a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 27 Oct 2023 14:49:07 -0500 Subject: [PATCH 153/202] chore: flake8 --- invokeai/app/services/model_manager/__init__.py | 2 +- invokeai/frontend/merge/merge_diffusers.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/app/services/model_manager/__init__.py b/invokeai/app/services/model_manager/__init__.py index da54cbf89f..3d6a9c248c 100644 --- a/invokeai/app/services/model_manager/__init__.py +++ b/invokeai/app/services/model_manager/__init__.py @@ -1 +1 @@ -from .model_manager_default import ModelManagerService +from .model_manager_default import ModelManagerService # noqa F401 diff --git a/invokeai/frontend/merge/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py index f3672acbf2..51602b75d1 100644 --- a/invokeai/frontend/merge/merge_diffusers.py +++ b/invokeai/frontend/merge/merge_diffusers.py @@ -9,7 +9,7 @@ import curses import sys from argparse import Namespace from pathlib import Path -from typing import List, Optional +from typing import List import npyscreen from npyscreen import widget From 6e052928135fd0f64c69ca8b085a9390097474d1 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Sun, 29 Oct 2023 12:10:12 +0100 Subject: [PATCH 154/202] translationBot(ui): update translation (Italian) Currently translated at 97.6% (1188 of 1217 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index c69879cfcf..49b258c95d 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -1116,7 +1116,8 @@ "controlAdapter_other": "Adattatori di Controllo", "megaControl": "Mega ControlNet", "minConfidence": "Confidenza minima", - "scribble": "Scribble" + "scribble": "Scribble", + "amult": "Angolo di illuminazione" }, "queue": { "queueFront": "Aggiungi all'inizio della coda", From 69ba3a72787257ee32bc42dbc0d0a61b7a6e842a Mon Sep 17 00:00:00 2001 From: Gohsuke Shimada Date: Sun, 29 Oct 2023 12:10:12 +0100 Subject: [PATCH 155/202] translationBot(ui): update translation (Japanese) Currently translated at 56.1% (683 of 1217 strings) translationBot(ui): update translation (Japanese) Currently translated at 40.3% (491 of 1217 strings) Co-authored-by: Gohsuke Shimada Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ja/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ja.json | 370 ++++++++++++++++++- 1 file changed, 360 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ja.json b/invokeai/frontend/web/public/locales/ja.json index a53ea50b46..c7718e7b7c 100644 --- a/invokeai/frontend/web/public/locales/ja.json +++ b/invokeai/frontend/web/public/locales/ja.json @@ -1,6 +1,6 @@ { "common": { - "languagePickerLabel": "言語選択", + "languagePickerLabel": "言語", "reportBugLabel": "バグ報告", "settingsLabel": "設定", "langJapanese": "日本語", @@ -63,11 +63,34 @@ "langFrench": "Français", "langGerman": "Deutsch", "langPortuguese": "Português", - 
"nodes": "ノード", + "nodes": "ワークフローエディター", "langKorean": "한국어", "langPolish": "Polski", "txt2img": "txt2img", - "postprocessing": "Post Processing" + "postprocessing": "Post Processing", + "t2iAdapter": "T2I アダプター", + "communityLabel": "コミュニティ", + "dontAskMeAgain": "次回から確認しない", + "areYouSure": "本当によろしいですか?", + "on": "オン", + "nodeEditor": "ノードエディター", + "ipAdapter": "IPアダプター", + "controlAdapter": "コントロールアダプター", + "auto": "自動", + "openInNewTab": "新しいタブで開く", + "controlNet": "コントロールネット", + "statusProcessing": "処理中", + "linear": "リニア", + "imageFailedToLoad": "画像が読み込めません", + "imagePrompt": "画像プロンプト", + "modelManager": "モデルマネージャー", + "lightMode": "ライトモード", + "generate": "生成", + "learnMore": "もっと学ぶ", + "darkMode": "ダークモード", + "random": "ランダム", + "batch": "バッチマネージャー", + "advanced": "高度な設定" }, "gallery": { "uploads": "アップロード", @@ -274,7 +297,7 @@ "config": "Config", "configValidationMsg": "モデルの設定ファイルへのパス", "modelLocation": "モデルの場所", - "modelLocationValidationMsg": "モデルが配置されている場所へのパス。", + "modelLocationValidationMsg": "ディフューザーモデルのあるローカルフォルダーのパスを入力してください", "repo_id": "Repo ID", "repoIDValidationMsg": "モデルのリモートリポジトリ", "vaeLocation": "VAEの場所", @@ -309,12 +332,79 @@ "delete": "削除", "deleteModel": "モデルを削除", "deleteConfig": "設定を削除", - "deleteMsg1": "InvokeAIからこのモデルエントリーを削除してよろしいですか?", - "deleteMsg2": "これは、ドライブからモデルのCheckpointファイルを削除するものではありません。必要であればそれらを読み込むことができます。", + "deleteMsg1": "InvokeAIからこのモデルを削除してよろしいですか?", + "deleteMsg2": "これは、モデルがInvokeAIルートフォルダ内にある場合、ディスクからモデルを削除します。カスタム保存場所を使用している場合、モデルはディスクから削除されません。", "formMessageDiffusersModelLocation": "Diffusersモデルの場所", "formMessageDiffusersModelLocationDesc": "最低でも1つは入力してください。", "formMessageDiffusersVAELocation": "VAEの場所s", - "formMessageDiffusersVAELocationDesc": "指定しない場合、InvokeAIは上記のモデルの場所にあるVAEファイルを探します。" + "formMessageDiffusersVAELocationDesc": "指定しない場合、InvokeAIは上記のモデルの場所にあるVAEファイルを探します。", + "importModels": "モデルをインポート", + "custom": "カスタム", + "none": "なし", + "convert": "変換", + "statusConverting": "変換中", + "cannotUseSpaces": "スペースは使えません", + "convertToDiffusersHelpText6": "このモデルを変換しますか?", + "checkpointModels": "チェックポイント", + "settings": "設定", + "convertingModelBegin": "モデルを変換しています...", + "baseModel": "ベースモデル", + "modelDeleteFailed": "モデルの削除ができませんでした", + "convertToDiffusers": "ディフューザーに変換", + "alpha": "アルファ", + "diffusersModels": "ディフューザー", + "pathToCustomConfig": "カスタム設定のパス", + "noCustomLocationProvided": "カスタムロケーションが指定されていません", + "modelConverted": "モデル変換が完了しました", + "weightedSum": "重み付け総和", + "inverseSigmoid": "逆シグモイド", + "invokeAIFolder": "Invoke AI フォルダ", + "syncModelsDesc": "モデルがバックエンドと同期していない場合、このオプションを使用してモデルを更新できます。通常、モデル.yamlファイルを手動で更新したり、アプリケーションの起動後にモデルをInvokeAIルートフォルダに追加した場合に便利です。", + "noModels": "モデルが見つかりません", + "sigmoid": "シグモイド", + "merge": "マージ", + "modelMergeInterpAddDifferenceHelp": "このモードでは、モデル3がまずモデル2から減算されます。その結果得られたバージョンが、上記で設定されたアルファ率でモデル1とブレンドされます。", + "customConfig": "カスタム設定", + "predictionType": "予測タイプ(安定したディフュージョン 2.x モデルおよび一部の安定したディフュージョン 1.x モデル用)", + "selectModel": "モデルを選択", + "modelSyncFailed": "モデルの同期に失敗しました", + "quickAdd": "クイック追加", + "simpleModelDesc": "ローカルのDiffusersモデル、ローカルのチェックポイント/safetensorsモデル、HuggingFaceリポジトリのID、またはチェックポイント/ DiffusersモデルのURLへのパスを指定してください。", + "customSaveLocation": "カスタム保存場所", + "advanced": "高度な設定", + "modelDeleted": "モデルが削除されました", + "convertToDiffusersHelpText2": "このプロセスでは、モデルマネージャーのエントリーを同じモデルのディフューザーバージョンに置き換えます。", + "modelUpdateFailed": "モデル更新が失敗しました", + "useCustomConfig": "カスタム設定を使用する", + "convertToDiffusersHelpText5": "十分なディスク空き容量があることを確認してください。モデルは一般的に2GBから7GBのサイズがあります。", + 
"modelConversionFailed": "モデル変換が失敗しました", + "modelEntryDeleted": "モデルエントリーが削除されました", + "syncModels": "モデルを同期", + "mergedModelSaveLocation": "保存場所", + "closeAdvanced": "高度な設定を閉じる", + "modelType": "モデルタイプ", + "modelsMerged": "モデルマージ完了", + "modelsMergeFailed": "モデルマージ失敗", + "scanForModels": "モデルをスキャン", + "customConfigFileLocation": "カスタム設定ファイルの場所", + "convertToDiffusersHelpText1": "このモデルは 🧨 Diffusers フォーマットに変換されます。", + "modelsSynced": "モデルが同期されました", + "invokeRoot": "InvokeAIフォルダ", + "mergedModelCustomSaveLocation": "カスタムパス", + "mergeModels": "マージモデル", + "interpolationType": "補間タイプ", + "modelMergeHeaderHelp2": "マージできるのはDiffusersのみです。チェックポイントモデルをマージしたい場合は、まずDiffusersに変換してください。", + "convertToDiffusersSaveLocation": "保存場所", + "pickModelType": "モデルタイプを選択", + "sameFolder": "同じフォルダ", + "convertToDiffusersHelpText3": "チェックポイントファイルは、InvokeAIルートフォルダ内にある場合、ディスクから削除されます。カスタムロケーションにある場合は、削除されません。", + "loraModels": "LoRA", + "modelMergeAlphaHelp": "アルファはモデルのブレンド強度を制御します。アルファ値が低いと、2番目のモデルの影響が低くなります。", + "addDifference": "差分を追加", + "modelMergeHeaderHelp1": "あなたのニーズに適したブレンドを作成するために、異なるモデルを最大3つまでマージすることができます。", + "ignoreMismatch": "選択されたモデル間の不一致を無視する", + "convertToDiffusersHelpText4": "これは一回限りのプロセスです。コンピュータの仕様によっては、約30秒から60秒かかる可能性があります。", + "mergedModelName": "マージされたモデル名" }, "parameters": { "images": "画像", @@ -440,7 +530,8 @@ "next": "次", "accept": "同意", "showHide": "表示/非表示", - "discardAll": "すべて破棄" + "discardAll": "すべて破棄", + "snapToGrid": "グリッドにスナップ" }, "accessibility": { "modelSelect": "モデルを選択", @@ -452,7 +543,7 @@ "useThisParameter": "このパラメータを使用する", "copyMetadataJson": "メタデータをコピー(JSON)", "zoomIn": "ズームイン", - "exitViewer": "ExitViewer", + "exitViewer": "ビューアーを終了", "zoomOut": "ズームアウト", "rotateCounterClockwise": "反時計回りに回転", "rotateClockwise": "時計回りに回転", @@ -461,6 +552,265 @@ "toggleAutoscroll": "自動スクロールの切替", "modifyConfig": "Modify Config", "toggleLogViewer": "Log Viewerの切替", - "showOptionsPanel": "オプションパネルを表示" + "showOptionsPanel": "サイドパネルを表示", + "showGalleryPanel": "ギャラリーパネルを表示", + "menu": "メニュー", + "loadMore": "さらに読み込む" + }, + "controlnet": { + "resize": "リサイズ", + "showAdvanced": "高度な設定を表示", + "addT2IAdapter": "$t(common.t2iAdapter)を追加", + "importImageFromCanvas": "キャンバスから画像をインポート", + "lineartDescription": "画像を線画に変換", + "importMaskFromCanvas": "キャンバスからマスクをインポート", + "hideAdvanced": "高度な設定を非表示", + "ipAdapterModel": "アダプターモデル", + "resetControlImage": "コントロール画像をリセット", + "beginEndStepPercent": "開始 / 終了ステップパーセンテージ", + "duplicate": "複製", + "balanced": "バランス", + "prompt": "プロンプト", + "depthMidasDescription": "Midasを使用して深度マップを生成", + "openPoseDescription": "Openposeを使用してポーズを推定", + "control": "コントロール", + "resizeMode": "リサイズモード", + "weight": "重み", + "selectModel": "モデルを選択", + "crop": "切り抜き", + "w": "幅", + "processor": "プロセッサー", + "addControlNet": "$t(common.controlNet)を追加", + "none": "なし", + "incompatibleBaseModel": "互換性のないベースモデル:", + "enableControlnet": "コントロールネットを有効化", + "detectResolution": "検出解像度", + "controlNetT2IMutexDesc": "$t(common.controlNet)と$t(common.t2iAdapter)の同時使用は現在サポートされていません。", + "pidiDescription": "PIDI画像処理", + "controlMode": "コントロールモード", + "fill": "塗りつぶし", + "cannyDescription": "Canny 境界検出", + "addIPAdapter": "$t(common.ipAdapter)を追加", + "colorMapDescription": "画像からカラーマップを生成", + "lineartAnimeDescription": "アニメスタイルの線画処理", + "imageResolution": "画像解像度", + "megaControl": "メガコントロール", + "lowThreshold": "最低閾値", + "autoConfigure": "プロセッサーを自動設定", + "highThreshold": "最大閾値", + "saveControlImage": "コントロール画像を保存", + "toggleControlNet": "このコントロールネットを切り替え", + "delete": "削除", + "controlAdapter_other": "コントロールアダプター", + 
"colorMapTileSize": "タイルサイズ", + "ipAdapterImageFallback": "IP Adapterの画像が選択されていません", + "mediapipeFaceDescription": "Mediapipeを使用して顔を検出", + "depthZoeDescription": "Zoeを使用して深度マップを生成", + "setControlImageDimensions": "コントロール画像のサイズを幅と高さにセット", + "resetIPAdapterImage": "IP Adapterの画像をリセット", + "handAndFace": "手と顔", + "enableIPAdapter": "IP Adapterを有効化", + "amult": "a_mult", + "contentShuffleDescription": "画像の内容をシャッフルします", + "bgth": "bg_th", + "controlNetEnabledT2IDisabled": "$t(common.controlNet) が有効化され、$t(common.t2iAdapter)s が無効化されました", + "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))", + "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) が有効化され、$t(common.controlNet)s が無効化されました", + "ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))", + "t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))", + "minConfidence": "最小確信度", + "colorMap": "Color", + "noneDescription": "処理は行われていません", + "canny": "Canny", + "hedDescription": "階層的エッジ検出", + "maxFaces": "顔の最大数" + }, + "metadata": { + "seamless": "シームレス", + "Threshold": "ノイズ閾値", + "seed": "シード", + "width": "幅", + "workflow": "ワークフロー", + "steps": "ステップ", + "scheduler": "スケジューラー", + "positivePrompt": "ポジティブプロンプト", + "strength": "Image to Image 強度", + "perlin": "パーリンノイズ", + "recallParameters": "パラメータを呼び出す" + }, + "queue": { + "queueEmpty": "キューが空です", + "pauseSucceeded": "処理が一時停止されました", + "queueFront": "キューの先頭へ追加", + "queueBack": "キューに追加", + "queueCountPrediction": "{{predicted}}をキューに追加", + "queuedCount": "保留中 {{pending}}", + "pause": "一時停止", + "queue": "キュー", + "pauseTooltip": "処理を一時停止", + "cancel": "キャンセル", + "queueTotal": "合計 {{total}}", + "resumeSucceeded": "処理が再開されました", + "resumeTooltip": "処理を再開", + "resume": "再会", + "status": "ステータス", + "pruneSucceeded": "キューから完了アイテム{{item_count}}件を削除しました", + "cancelTooltip": "現在のアイテムをキャンセル", + "in_progress": "進行中", + "notReady": "キューに追加できません", + "batchFailedToQueue": "バッチをキューに追加できませんでした", + "completed": "完了", + "batchValues": "バッチの値", + "cancelFailed": "アイテムのキャンセルに問題があります", + "batchQueued": "バッチをキューに追加しました", + "pauseFailed": "処理の一時停止に問題があります", + "clearFailed": "キューのクリアに問題があります", + "front": "先頭", + "clearSucceeded": "キューがクリアされました", + "pruneTooltip": "{{item_count}} の完了アイテムを削除", + "cancelSucceeded": "アイテムがキャンセルされました", + "batchQueuedDesc_other": "{{count}} セッションをキューの{{direction}}に追加しました", + "graphQueued": "グラフをキューに追加しました", + "batch": "バッチ", + "clearQueueAlertDialog": "キューをクリアすると、処理中のアイテムは直ちにキャンセルされ、キューは完全にクリアされます。", + "pending": "保留中", + "resumeFailed": "処理の再開に問題があります", + "clear": "クリア", + "total": "合計", + "canceled": "キャンセル", + "pruneFailed": "キューの削除に問題があります", + "cancelBatchSucceeded": "バッチがキャンセルされました", + "clearTooltip": "全てのアイテムをキャンセルしてクリア", + "current": "現在", + "failed": "失敗", + "cancelItem": "項目をキャンセル", + "next": "次", + "cancelBatch": "バッチをキャンセル", + "session": "セッション", + "enqueueing": "バッチをキューに追加", + "queueMaxExceeded": "{{max_queue_size}} の最大値を超えたため、{{skip}} をスキップします", + "cancelBatchFailed": "バッチのキャンセルに問題があります", + "clearQueueAlertDialog2": "キューをクリアしてもよろしいですか?", + "item": "アイテム", + "graphFailedToQueue": "グラフをキューに追加できませんでした" + }, + "models": { + "noMatchingModels": "一致するモデルがありません", + "loading": "読み込み中", + "noMatchingLoRAs": "一致するLoRAがありません", + "noLoRAsAvailable": "使用可能なLoRAがありません", + "noModelsAvailable": "使用可能なモデルがありません", + "selectModel": "モデルを選択してください", + "selectLoRA": "LoRAを選択してください" + }, + "nodes": { + "addNode": "ノードを追加", + "boardField": "ボード", + "boolean": "ブーリアン", + "boardFieldDescription": "ギャラリーボード", + 
"addNodeToolTip": "ノードを追加 (Shift+A, Space)", + "booleanPolymorphicDescription": "ブーリアンのコレクション。", + "inputField": "入力フィールド", + "latentsFieldDescription": "潜在空間はノード間で伝達できます。", + "floatCollectionDescription": "浮動小数点のコレクション。", + "missingTemplate": "テンプレートが見つかりません", + "ipAdapterPolymorphicDescription": "IP-Adaptersのコレクション。", + "latentsPolymorphicDescription": "潜在空間はノード間で伝達できます。", + "colorFieldDescription": "RGBAカラー。", + "ipAdapterCollection": "IP-Adapterコレクション", + "conditioningCollection": "条件付きコレクション", + "hideGraphNodes": "グラフオーバーレイを非表示", + "loadWorkflow": "ワークフローを読み込み", + "integerPolymorphicDescription": "整数のコレクション。", + "hideLegendNodes": "フィールドタイプの凡例を非表示", + "float": "浮動小数点", + "booleanCollectionDescription": "ブーリアンのコレクション。", + "integer": "整数", + "colorField": "カラー", + "nodeTemplate": "ノードテンプレート", + "integerDescription": "整数は小数点を持たない数値です。", + "imagePolymorphicDescription": "画像のコレクション。", + "doesNotExist": "存在しません", + "ipAdapterCollectionDescription": "IP-Adaptersのコレクション。", + "inputMayOnlyHaveOneConnection": "入力は1つの接続しか持つことができません", + "nodeOutputs": "ノード出力", + "currentImageDescription": "ノードエディタ内の現在の画像を表示", + "downloadWorkflow": "ワークフローのJSONをダウンロード", + "integerCollection": "整数コレクション", + "collectionItem": "コレクションアイテム", + "fieldTypesMustMatch": "フィールドタイプが一致している必要があります", + "edge": "輪郭", + "inputNode": "入力ノード", + "imageField": "画像", + "animatedEdgesHelp": "選択したエッジおよび選択したノードに接続されたエッジをアニメーション化します", + "cannotDuplicateConnection": "重複した接続は作れません", + "noWorkflow": "ワークフローがありません", + "integerCollectionDescription": "整数のコレクション。", + "colorPolymorphicDescription": "カラーのコレクション。", + "missingCanvaInitImage": "キャンバスの初期画像が見つかりません", + "clipFieldDescription": "トークナイザーとテキストエンコーダーサブモデル。", + "fullyContainNodesHelp": "ノードは選択ボックス内に完全に存在する必要があります", + "clipField": "クリップ", + "nodeType": "ノードタイプ", + "executionStateInProgress": "処理中", + "executionStateError": "エラー", + "ipAdapterModel": "IP-Adapterモデル", + "ipAdapterDescription": "イメージプロンプトアダプター(IP-Adapter)。", + "missingCanvaInitMaskImages": "キャンバスの初期画像およびマスクが見つかりません", + "hideMinimapnodes": "ミニマップを非表示", + "fitViewportNodes": "全体を表示", + "executionStateCompleted": "完了", + "node": "ノード", + "currentImage": "現在の画像", + "controlField": "コントロール", + "booleanDescription": "ブーリアンはtrueかfalseです。", + "collection": "コレクション", + "ipAdapterModelDescription": "IP-Adapterモデルフィールド", + "cannotConnectInputToInput": "入力から入力には接続できません", + "invalidOutputSchema": "無効な出力スキーマ", + "floatDescription": "浮動小数点は、小数点を持つ数値です。", + "floatPolymorphicDescription": "浮動小数点のコレクション。", + "floatCollection": "浮動小数点コレクション", + "latentsField": "潜在空間", + "cannotConnectOutputToOutput": "出力から出力には接続できません", + "booleanCollection": "ブーリアンコレクション", + "cannotConnectToSelf": "自身のノードには接続できません", + "inputFields": "入力フィールド(複数)", + "colorCodeEdges": "カラー-Code Edges", + "imageCollectionDescription": "画像のコレクション。", + "loadingNodes": "ノードを読み込み中...", + "imageCollection": "画像コレクション" + }, + "boards": { + "autoAddBoard": "自動追加するボード", + "move": "移動", + "menuItemAutoAdd": "このボードに自動追加", + "myBoard": "マイボード", + "searchBoard": "ボードを検索...", + "noMatching": "一致するボードがありません", + "selectBoard": "ボードを選択", + "cancel": "キャンセル", + "addBoard": "ボードを追加", + "uncategorized": "未分類", + "downloadBoard": "ボードをダウンロード", + "changeBoard": "ボードを変更", + "loading": "ロード中...", + "topMessage": "このボードには、以下の機能で使用されている画像が含まれています:", + "bottomMessage": "このボードおよび画像を削除すると、現在これらを利用している機能はリセットされます。", + "clearSearch": "検索をクリア" + }, + "embedding": { + "noMatchingEmbedding": "一致する埋め込みがありません", + "addEmbedding": "埋め込みを追加", + "incompatibleModel": "互換性のないベースモデル:" + }, + "invocationCache": { + 
"invocationCache": "呼び出しキャッシュ", + "clearSucceeded": "呼び出しキャッシュをクリアしました", + "clearFailed": "呼び出しキャッシュのクリアに問題があります", + "enable": "有効", + "clear": "クリア", + "maxCacheSize": "最大キャッシュサイズ", + "cacheSize": "キャッシュサイズ" } } From 09bb61f630ed50a95b6645534ba8a3de09120e67 Mon Sep 17 00:00:00 2001 From: Fabian Bahl Date: Sun, 29 Oct 2023 12:10:12 +0100 Subject: [PATCH 156/202] translationBot(ui): update translation (English) Currently translated at 100.0% (1217 of 1217 strings) Co-authored-by: Fabian Bahl Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/en/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/en.json | 25 +++++++++++++------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 818f9f4fa9..04d817428c 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -70,8 +70,8 @@ "langDutch": "Nederlands", "langEnglish": "English", "langFrench": "Français", - "langGerman": "Deutsch", - "langHebrew": "עברית", + "langGerman": "German", + "langHebrew": "Hebrew", "langItalian": "Italiano", "langJapanese": "日本語", "langKorean": "한국어", @@ -1122,7 +1122,6 @@ "clearIntermediates": "Clear Intermediates", "clearIntermediatesWithCount_one": "Clear {{count}} Intermediate", "clearIntermediatesWithCount_other": "Clear {{count}} Intermediates", - "clearIntermediatesWithCount_zero": "No Intermediates to Clear", "intermediatesCleared_one": "Cleared {{count}} Intermediate", "intermediatesCleared_other": "Cleared {{count}} Intermediates", "intermediatesClearedFailed": "Problem Clearing Intermediates" @@ -1257,11 +1256,15 @@ }, "compositingBlur": { "heading": "Blur", - "paragraphs": ["The blur radius of the mask."] + "paragraphs": [ + "The blur radius of the mask." + ] }, "compositingBlurMethod": { "heading": "Blur Method", - "paragraphs": ["The method of blur applied to the masked area."] + "paragraphs": [ + "The method of blur applied to the masked area." + ] }, "compositingCoherencePass": { "heading": "Coherence Pass", @@ -1271,7 +1274,9 @@ }, "compositingCoherenceMode": { "heading": "Mode", - "paragraphs": ["The mode of the Coherence Pass."] + "paragraphs": [ + "The mode of the Coherence Pass." + ] }, "compositingCoherenceSteps": { "heading": "Steps", @@ -1289,7 +1294,9 @@ }, "compositingMaskAdjustments": { "heading": "Mask Adjustments", - "paragraphs": ["Adjust the mask."] + "paragraphs": [ + "Adjust the mask." + ] }, "controlNetBeginEnd": { "heading": "Begin / End Step Percentage", @@ -1347,7 +1354,9 @@ }, "infillMethod": { "heading": "Infill Method", - "paragraphs": ["Method to infill the selected area."] + "paragraphs": [ + "Method to infill the selected area." 
+ ] }, "lora": { "heading": "LoRA Weight", From 67fb2c81295206340e49cc78055e454ed96ae358 Mon Sep 17 00:00:00 2001 From: Alexander Eichhorn Date: Sun, 29 Oct 2023 12:10:12 +0100 Subject: [PATCH 157/202] translationBot(ui): update translation (German) Currently translated at 35.5% (433 of 1217 strings) Co-authored-by: Alexander Eichhorn Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/de.json | 61 +++++++++++++++++--- 1 file changed, 54 insertions(+), 7 deletions(-) diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 9baa6eb6a2..c8e653d971 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -4,7 +4,7 @@ "reportBugLabel": "Fehler melden", "settingsLabel": "Einstellungen", "img2img": "Bild zu Bild", - "nodes": "Knoten", + "nodes": "Knoten Editor", "langGerman": "Deutsch", "nodesDesc": "Ein knotenbasiertes System, für die Erzeugung von Bildern, ist derzeit in der Entwicklung. Bleiben Sie gespannt auf Updates zu dieser fantastischen Funktion.", "postProcessing": "Nachbearbeitung", @@ -38,7 +38,7 @@ "statusUpscalingESRGAN": "Hochskalierung (ESRGAN)", "statusLoadingModel": "Laden des Modells", "statusModelChanged": "Modell Geändert", - "cancel": "Abbruch", + "cancel": "Abbrechen", "accept": "Annehmen", "back": "Zurück", "langEnglish": "Englisch", @@ -58,7 +58,33 @@ "langArabic": "Arabisch", "langKorean": "Koreanisch", "langHebrew": "Hebräisch", - "langSpanish": "Spanisch" + "langSpanish": "Spanisch", + "t2iAdapter": "T2I Adapter", + "communityLabel": "Gemeinschaft", + "dontAskMeAgain": "Frag mich nicht nochmal", + "loadingInvokeAI": "Lade Invoke AI", + "statusMergedModels": "Modelle zusammengeführt", + "areYouSure": "Bist du dir sicher?", + "statusConvertingModel": "Model konvertieren", + "on": "An", + "nodeEditor": "Knoten Editor", + "statusMergingModels": "Modelle zusammenführen", + "langSimplifiedChinese": "Vereinfachtes Chinesisch", + "ipAdapter": "IP Adapter", + "controlAdapter": "Control Adapter", + "auto": "Automatisch", + "controlNet": "ControlNet", + "imageFailedToLoad": "Kann Bild nicht laden", + "statusModelConverted": "Model konvertiert", + "modelManager": "Model Manager", + "lightMode": "Heller Modus", + "generate": "Erstellen", + "learnMore": "Mehr lernen", + "darkMode": "Dunkler Modus", + "loading": "Laden", + "random": "Zufall", + "batch": "Batch-Manager", + "advanced": "Erweitert" }, "gallery": { "generations": "Erzeugungen", @@ -166,7 +192,7 @@ }, "toggleGalleryPin": { "title": "Galerie anheften umschalten", - "desc": "Heftet die Galerie an die Benutzeroberfläche bzw. löst die sie." + "desc": "Heftet die Galerie an die Benutzeroberfläche bzw. 
löst die sie" }, "increaseGalleryThumbSize": { "title": "Größe der Galeriebilder erhöhen", @@ -295,7 +321,7 @@ "config": "Konfiguration", "configValidationMsg": "Pfad zur Konfigurationsdatei Ihres Models.", "modelLocation": "Ort des Models", - "modelLocationValidationMsg": "Pfad zum Speicherort Ihres Models.", + "modelLocationValidationMsg": "Pfad zum Speicherort Ihres Models", "vaeLocation": "VAE Ort", "vaeLocationValidationMsg": "Pfad zum Speicherort Ihres VAE.", "width": "Breite", @@ -508,7 +534,7 @@ "uploadImage": "Bild hochladen", "previousImage": "Voriges Bild", "useThisParameter": "Benutze diesen Parameter", - "copyMetadataJson": "Kopiere metadata JSON", + "copyMetadataJson": "Kopiere Metadaten JSON", "zoomIn": "Vergrößern", "rotateClockwise": "Im Uhrzeigersinn drehen", "flipHorizontally": "Horizontal drehen", @@ -520,6 +546,27 @@ "reset": "Zurücksetzen", "nextImage": "Nächstes Bild", "zoomOut": "Verkleinern", - "rotateCounterClockwise": "Gegen den Uhrzeigersinn verdrehen" + "rotateCounterClockwise": "Gegen den Uhrzeigersinn verdrehen", + "showGalleryPanel": "Galeriefenster anzeigen", + "exitViewer": "Betrachten beenden", + "menu": "Menü", + "loadMore": "Mehr laden" + }, + "boards": { + "autoAddBoard": "Automatisches Hinzufügen zum Ordner", + "topMessage": "Dieser Ordner enthält Bilder die in den folgenden Funktionen verwendet werden:", + "move": "Bewegen", + "menuItemAutoAdd": "Automatisches Hinzufügen zu diesem Ordner", + "myBoard": "Meine Ordner", + "searchBoard": "Ordner durchsuchen...", + "noMatching": "Keine passenden Ordner", + "selectBoard": "Ordner aussuchen", + "cancel": "Abbrechen", + "addBoard": "Ordner hinzufügen", + "uncategorized": "Nicht kategorisiert", + "downloadBoard": "Ordner runterladen", + "changeBoard": "Ordner wechseln", + "loading": "Laden...", + "clearSearch": "Suche leeren" } } From 60a105103b2bd8ab90b64a108229e3a8700d7a2a Mon Sep 17 00:00:00 2001 From: Jaulustus Date: Sun, 29 Oct 2023 12:10:12 +0100 Subject: [PATCH 158/202] translationBot(ui): update translation (German) Currently translated at 36.0% (439 of 1217 strings) translationBot(ui): update translation (German) Currently translated at 35.5% (433 of 1217 strings) Co-authored-by: Jaulustus Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/de.json | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index c8e653d971..381d5ecbb4 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -543,14 +543,15 @@ "toggleAutoscroll": "Auroscroll ein/ausschalten", "toggleLogViewer": "Log Betrachter ein/ausschalten", "showOptionsPanel": "Zeige Optionen", - "reset": "Zurücksetzen", + "reset": "Zurücksetzten", "nextImage": "Nächstes Bild", "zoomOut": "Verkleinern", "rotateCounterClockwise": "Gegen den Uhrzeigersinn verdrehen", "showGalleryPanel": "Galeriefenster anzeigen", "exitViewer": "Betrachten beenden", "menu": "Menü", - "loadMore": "Mehr laden" + "loadMore": "Mehr laden", + "invokeProgressBar": "Invoke Fortschrittsanzeige" }, "boards": { "autoAddBoard": "Automatisches Hinzufügen zum Ordner", From 54b0c4f3c967da9c9e2054e1f03749266bef8e87 Mon Sep 17 00:00:00 2001 From: Fabian Bahl Date: Sun, 29 Oct 2023 12:10:12 +0100 Subject: [PATCH 159/202] translationBot(ui): update translation (German) Currently translated at 37.7% (460 of 1217 strings) translationBot(ui): 
update translation (German) Currently translated at 36.4% (444 of 1217 strings) translationBot(ui): update translation (German) Currently translated at 36.4% (443 of 1217 strings) translationBot(ui): update translation (German) Currently translated at 36.0% (439 of 1217 strings) translationBot(ui): update translation (German) Currently translated at 35.5% (433 of 1217 strings) Co-authored-by: Fabian Bahl Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/de.json | 32 ++++++++++++++++++-- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 381d5ecbb4..46fc3f32ac 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -11,7 +11,7 @@ "postProcessDesc1": "InvokeAI bietet eine breite Palette von Nachbearbeitungsfunktionen. Bildhochskalierung und Gesichtsrekonstruktion sind bereits in der WebUI verfügbar. Sie können sie über das Menü Erweiterte Optionen der Reiter Text in Bild und Bild in Bild aufrufen. Sie können Bilder auch direkt bearbeiten, indem Sie die Schaltflächen für Bildaktionen oberhalb der aktuellen Bildanzeige oder im Viewer verwenden.", "postProcessDesc2": "Eine spezielle Benutzeroberfläche wird in Kürze veröffentlicht, um erweiterte Nachbearbeitungs-Workflows zu erleichtern.", "postProcessDesc3": "Die InvokeAI Kommandozeilen-Schnittstelle bietet verschiedene andere Funktionen, darunter Embiggen.", - "training": "Training", + "training": "trainieren", "trainingDesc1": "Ein spezieller Arbeitsablauf zum Trainieren Ihrer eigenen Embeddings und Checkpoints mit Textual Inversion und Dreambooth über die Weboberfläche.", "trainingDesc2": "InvokeAI unterstützt bereits das Training von benutzerdefinierten Embeddings mit Textual Inversion unter Verwendung des Hauptskripts.", "upload": "Hochladen", @@ -84,7 +84,8 @@ "loading": "Laden", "random": "Zufall", "batch": "Batch-Manager", - "advanced": "Erweitert" + "advanced": "Erweitert", + "langBrPortuguese": "Portugiesisch (Brasilien)" }, "gallery": { "generations": "Erzeugungen", @@ -358,7 +359,32 @@ "customConfig": "Benutzerdefinierte Konfiguration", "invokeRoot": "InvokeAI Ordner", "formMessageDiffusersVAELocationDesc": "Falls nicht angegeben, sucht InvokeAI nach der VAE-Datei innerhalb des oben angegebenen Modell Speicherortes.", - "checkpointModels": "Kontrollpunkte" + "checkpointModels": "Kontrollpunkte", + "convert": "Umwandeln", + "addCheckpointModel": "Kontrollpunkt / SafeTensors Modell hinzufügen", + "allModels": "Alle Modelle", + "alpha": "Alpha", + "addDifference": "Unterschied hinzufügen", + "convertToDiffusersHelpText2": "Bei diesem Vorgang wird Ihr Eintrag im Modell-Manager durch die Diffusor-Version desselben Modells ersetzt.", + "convertToDiffusersHelpText5": "Bitte stellen Sie sicher, dass Sie über genügend Speicherplatz verfügen. Die Modelle sind in der Regel zwischen 4 GB und 7 GB groß.", + "convertToDiffusersHelpText3": "Ihre Kontrollpunktdatei auf der Festplatte wird NICHT gelöscht oder in irgendeiner Weise verändert. Sie können Ihren Kontrollpunkt dem Modell-Manager wieder hinzufügen, wenn Sie dies wünschen.", + "convertToDiffusersHelpText4": "Dies ist ein einmaliger Vorgang. 
Er kann je nach den Spezifikationen Ihres Computers etwa 30-60 Sekunden dauern.", + "convertToDiffusersHelpText6": "Möchten Sie dieses Modell umbauen?", + "custom": "Benutzerdefiniert", + "modelConverted": "Modell umgewandelt", + "inverseSigmoid": "Inverses Sigmoid", + "invokeAIFolder": "Invoke AI Ordner", + "formMessageDiffusersModelLocationDesc": "Bitte geben Sie mindestens einen an.", + "customSaveLocation": "Benutzerdefinierter Speicherort", + "formMessageDiffusersVAELocation": "VAE Speicherort", + "mergedModelCustomSaveLocation": "Benutzerdefinierter Pfad", + "modelMergeHeaderHelp2": "Nur Diffuser sind für die Zusammenführung verfügbar. Wenn Sie ein Kontrollpunktmodell zusammenführen möchten, konvertieren Sie es bitte zuerst in Diffusoren.", + "manual": "Manuell", + "modelManager": "Modell Manager", + "modelMergeAlphaHelp": "Alpha steuert die Überblendungsstärke für die Modelle. Niedrigere Alphawerte führen zu einem geringeren Einfluss des zweiten Modells.", + "modelMergeHeaderHelp1": "Sie können bis zu drei verschiedene Modelle miteinander kombinieren, um eine Mischung zu erstellen, die Ihren Bedürfnissen entspricht.", + "ignoreMismatch": "Unstimmigkeiten zwischen ausgewählten Modellen ignorieren", + "model": "Modell" }, "parameters": { "images": "Bilder", From 015cec197b6f9b512c37dfa57ea5847e60d461fc Mon Sep 17 00:00:00 2001 From: Alexander Eichhorn Date: Sun, 29 Oct 2023 12:10:12 +0100 Subject: [PATCH 160/202] translationBot(ui): update translation (German) Currently translated at 37.7% (460 of 1217 strings) translationBot(ui): update translation (German) Currently translated at 36.4% (444 of 1217 strings) translationBot(ui): update translation (German) Currently translated at 36.0% (439 of 1217 strings) Co-authored-by: Alexander Eichhorn Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/de.json | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 46fc3f32ac..9f3bd847a0 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -45,7 +45,7 @@ "langDutch": "Niederländisch", "langFrench": "Französisch", "langItalian": "Italienisch", - "langPortuguese": "Portogisisch", + "langPortuguese": "Portugiesisch", "langRussian": "Russisch", "langUkranian": "Ukrainisch", "hotkeysLabel": "Tastenkombinationen", @@ -81,16 +81,17 @@ "generate": "Erstellen", "learnMore": "Mehr lernen", "darkMode": "Dunkler Modus", - "loading": "Laden", + "loading": "Lade", "random": "Zufall", "batch": "Batch-Manager", "advanced": "Erweitert", - "langBrPortuguese": "Portugiesisch (Brasilien)" + "langBrPortuguese": "Portugiesisch (Brasilien)", + "unifiedCanvas": "Einheitliche Leinwand" }, "gallery": { "generations": "Erzeugungen", "showGenerations": "Zeige Erzeugnisse", - "uploads": "Hochgelades", + "uploads": "Uploads", "showUploads": "Zeige Uploads", "galleryImageSize": "Bildgröße", "galleryImageResetSize": "Größe zurücksetzen", @@ -366,10 +367,10 @@ "alpha": "Alpha", "addDifference": "Unterschied hinzufügen", "convertToDiffusersHelpText2": "Bei diesem Vorgang wird Ihr Eintrag im Modell-Manager durch die Diffusor-Version desselben Modells ersetzt.", - "convertToDiffusersHelpText5": "Bitte stellen Sie sicher, dass Sie über genügend Speicherplatz verfügen. 
Die Modelle sind in der Regel zwischen 4 GB und 7 GB groß.", + "convertToDiffusersHelpText5": "Bitte stellen Sie sicher, dass Sie über genügend Speicherplatz verfügen. Die Modelle sind in der Regel zwischen 2 GB und 7 GB groß.", "convertToDiffusersHelpText3": "Ihre Kontrollpunktdatei auf der Festplatte wird NICHT gelöscht oder in irgendeiner Weise verändert. Sie können Ihren Kontrollpunkt dem Modell-Manager wieder hinzufügen, wenn Sie dies wünschen.", "convertToDiffusersHelpText4": "Dies ist ein einmaliger Vorgang. Er kann je nach den Spezifikationen Ihres Computers etwa 30-60 Sekunden dauern.", - "convertToDiffusersHelpText6": "Möchten Sie dieses Modell umbauen?", + "convertToDiffusersHelpText6": "Möchten Sie dieses Modell konvertieren?", "custom": "Benutzerdefiniert", "modelConverted": "Modell umgewandelt", "inverseSigmoid": "Inverses Sigmoid", @@ -384,7 +385,8 @@ "modelMergeAlphaHelp": "Alpha steuert die Überblendungsstärke für die Modelle. Niedrigere Alphawerte führen zu einem geringeren Einfluss des zweiten Modells.", "modelMergeHeaderHelp1": "Sie können bis zu drei verschiedene Modelle miteinander kombinieren, um eine Mischung zu erstellen, die Ihren Bedürfnissen entspricht.", "ignoreMismatch": "Unstimmigkeiten zwischen ausgewählten Modellen ignorieren", - "model": "Modell" + "model": "Modell", + "convertToDiffusersSaveLocation": "Speicherort" }, "parameters": { "images": "Bilder", From 8109bc5316bdad78f07178484a50a0016862cb0d Mon Sep 17 00:00:00 2001 From: Fabian Bahl Date: Sun, 29 Oct 2023 14:49:38 +0000 Subject: [PATCH 161/202] translationBot(ui): update translation (German) Currently translated at 40.3% (491 of 1217 strings) Co-authored-by: Fabian Bahl Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/de.json | 30 +++++++++++++++++--- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 9f3bd847a0..19a6eb51e1 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -386,7 +386,20 @@ "modelMergeHeaderHelp1": "Sie können bis zu drei verschiedene Modelle miteinander kombinieren, um eine Mischung zu erstellen, die Ihren Bedürfnissen entspricht.", "ignoreMismatch": "Unstimmigkeiten zwischen ausgewählten Modellen ignorieren", "model": "Modell", - "convertToDiffusersSaveLocation": "Speicherort" + "convertToDiffusersSaveLocation": "Speicherort", + "pathToCustomConfig": "Pfad zur benutzerdefinierten Konfiguration", + "v1": "v1", + "modelMergeInterpAddDifferenceHelp": "In diesem Modus wird zunächst Modell 3 von Modell 2 subtrahiert. 
Die resultierende Version wird mit Modell 1 mit dem oben eingestellten Alphasatz gemischt.", + "modelTwo": "Modell 2", + "modelOne": "Modell 1", + "v2_base": "v2 (512px)", + "scanForModels": "Nach Modellen suchen", + "name": "Name", + "safetensorModels": "SafeTensors", + "pickModelType": "Modell Typ auswählen", + "sameFolder": "Gleicher Ordner", + "modelThree": "Modell 3", + "v2_768": "v2 (768px)" }, "parameters": { "images": "Bilder", @@ -423,7 +436,7 @@ "seamCorrectionHeader": "Nahtkorrektur", "infillScalingHeader": "Infill und Skalierung", "img2imgStrength": "Bild-zu-Bild-Stärke", - "toggleLoopback": "Toggle Loopback", + "toggleLoopback": "Loopback umschalten", "sendTo": "Senden an", "sendToImg2Img": "Senden an Bild zu Bild", "sendToUnifiedCanvas": "Senden an Unified Canvas", @@ -438,8 +451,17 @@ "initialImage": "Ursprüngliches Bild", "showOptionsPanel": "Optionsleiste zeigen", "cancel": { - "setType": "Abbruchart festlegen" - } + "setType": "Abbruchart festlegen", + "immediate": "Sofort abbrechen", + "schedule": "Abbrechen nach der aktuellen Iteration", + "isScheduled": "Abbrechen" + }, + "copyImage": "Bild kopieren", + "denoisingStrength": "Stärke der Entrauschung", + "symmetry": "Symmetrie", + "imageToImage": "Bild zu Bild", + "info": "Information", + "general": "Allgemein" }, "settings": { "displayInProgress": "Bilder in Bearbeitung anzeigen", From 249618f6b43afd2acee116d3e0185c05daec9ad6 Mon Sep 17 00:00:00 2001 From: Alexander Eichhorn Date: Sun, 29 Oct 2023 14:49:39 +0000 Subject: [PATCH 162/202] translationBot(ui): update translation (German) Currently translated at 40.3% (491 of 1217 strings) Co-authored-by: Alexander Eichhorn Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/de.json | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 19a6eb51e1..b483d61928 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -379,7 +379,7 @@ "customSaveLocation": "Benutzerdefinierter Speicherort", "formMessageDiffusersVAELocation": "VAE Speicherort", "mergedModelCustomSaveLocation": "Benutzerdefinierter Pfad", - "modelMergeHeaderHelp2": "Nur Diffuser sind für die Zusammenführung verfügbar. Wenn Sie ein Kontrollpunktmodell zusammenführen möchten, konvertieren Sie es bitte zuerst in Diffusoren.", + "modelMergeHeaderHelp2": "Nur Diffusers sind für die Zusammenführung verfügbar. Wenn Sie ein Kontrollpunktmodell zusammenführen möchten, konvertieren Sie es bitte zuerst in Diffusers.", "manual": "Manuell", "modelManager": "Modell Manager", "modelMergeAlphaHelp": "Alpha steuert die Überblendungsstärke für die Modelle. 
Niedrigere Alphawerte führen zu einem geringeren Einfluss des zweiten Modells.", @@ -399,7 +399,10 @@ "pickModelType": "Modell Typ auswählen", "sameFolder": "Gleicher Ordner", "modelThree": "Modell 3", - "v2_768": "v2 (768px)" + "v2_768": "v2 (768px)", + "none": "Nix", + "repoIDValidationMsg": "Online Repo Ihres Modells", + "vaeRepoIDValidationMsg": "Online Repo Ihrer VAE" }, "parameters": { "images": "Bilder", @@ -461,7 +464,10 @@ "symmetry": "Symmetrie", "imageToImage": "Bild zu Bild", "info": "Information", - "general": "Allgemein" + "general": "Allgemein", + "hiresStrength": "High Res Stärke", + "hidePreview": "Verstecke Vorschau", + "showPreview": "Zeige Vorschau" }, "settings": { "displayInProgress": "Bilder in Bearbeitung anzeigen", @@ -472,7 +478,9 @@ "resetWebUI": "Web-Oberfläche zurücksetzen", "resetWebUIDesc1": "Das Zurücksetzen der Web-Oberfläche setzt nur den lokalen Cache des Browsers mit Ihren Bildern und gespeicherten Einstellungen zurück. Es werden keine Bilder von der Festplatte gelöscht.", "resetWebUIDesc2": "Wenn die Bilder nicht in der Galerie angezeigt werden oder etwas anderes nicht funktioniert, versuchen Sie bitte, die Einstellungen zurückzusetzen, bevor Sie einen Fehler auf GitHub melden.", - "resetComplete": "Die Web-Oberfläche wurde zurückgesetzt. Aktualisieren Sie die Seite, um sie neu zu laden." + "resetComplete": "Die Web-Oberfläche wurde zurückgesetzt. Aktualisieren Sie die Seite, um sie neu zu laden.", + "models": "Modelle", + "useSlidersForAll": "Schieberegler für alle Optionen verwenden" }, "toast": { "tempFoldersEmptied": "Temp-Ordner geleert", @@ -552,7 +560,7 @@ "autoSaveToGallery": "Automatisch in Galerie speichern", "saveBoxRegionOnly": "Nur Auswahlbox speichern", "limitStrokesToBox": "Striche auf Box beschränken", - "showCanvasDebugInfo": "Leinwand-Debug-Infos anzeigen", + "showCanvasDebugInfo": "Zusätzliche Informationen zur Leinwand anzeigen", "clearCanvasHistory": "Leinwand-Verlauf löschen", "clearHistory": "Verlauf löschen", "clearCanvasHistoryMessage": "Wenn Sie den Verlauf der Leinwand löschen, bleibt die aktuelle Leinwand intakt, aber der Verlauf der Rückgängig- und Wiederherstellung wird unwiderruflich gelöscht.", From 224b09f8fd83b394ffa060a31124099304dd7d9f Mon Sep 17 00:00:00 2001 From: Eugene Brodsky Date: Mon, 30 Oct 2023 12:34:30 -0400 Subject: [PATCH 163/202] Enforce Unix line endings in container (#4990) * (fix) enforce Unix (LF) line endings in docker/ directory * (fix) update docker docs wrt line endings on Windows * (fix) static check fixes --- .gitattributes | 1 + docs/installation/040_INSTALL_DOCKER.md | 11 +++++++++++ invokeai/frontend/install/invokeai_update.py | 2 +- pyproject.toml | 1 + 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/.gitattributes b/.gitattributes index 23dfb4efbd..4c9fc0120e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,3 +2,4 @@ # Only affects text files and ignores other file types. # For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/ * text=auto +docker/** text eol=lf \ No newline at end of file diff --git a/docs/installation/040_INSTALL_DOCKER.md b/docs/installation/040_INSTALL_DOCKER.md index a672f71ba8..a550056ce1 100644 --- a/docs/installation/040_INSTALL_DOCKER.md +++ b/docs/installation/040_INSTALL_DOCKER.md @@ -99,3 +99,14 @@ If using an AMD GPU: Use the standard `docker compose up` command, and generally the `docker compose` [CLI](https://docs.docker.com/compose/reference/) as usual. 
Once the container starts up (and configures the InvokeAI root directory if this is a new installation), you can access InvokeAI at [http://localhost:9090](http://localhost:9090) + +## Troubleshooting / FAQ + +- Q: I am running on Windows under WSL2, and am seeing a "no such file or directory" error. +- A: Your `docker-entrypoint.sh` file likely has Windows (CRLF) as opposed to Unix (LF) line endings, + and you may have cloned this repository before the issue was fixed. To solve this, please change + the line endings in the `docker-entrypoint.sh` file to `LF`. You can do this in VSCode + (`Ctrl+P` and search for "line endings"), or by using the `dos2unix` utility in WSL. + Finally, you may delete `docker-entrypoint.sh` followed by `git pull; git checkout docker/docker-entrypoint.sh` + to reset the file to its most recent version. + For more information on this issue, please see the [Docker Desktop documentation](https://docs.docker.com/desktop/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers) diff --git a/invokeai/frontend/install/invokeai_update.py b/invokeai/frontend/install/invokeai_update.py index cf017dcfeb..ed54a8f10b 100644 --- a/invokeai/frontend/install/invokeai_update.py +++ b/invokeai/frontend/install/invokeai_update.py @@ -91,7 +91,7 @@ def get_extras(): def main(): - versions = [x for x in get_versions() if not (x['draft'] or x['prerelease'])] + versions = [x for x in get_versions() if not (x["draft"] or x["prerelease"])] if invokeai_is_running(): print(":exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]") input("Press any key to continue...") diff --git a/pyproject.toml b/pyproject.toml index d67b096ddc..c0d91cf330 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -206,6 +206,7 @@ exclude = [ "build", "dist", "invokeai/frontend/web/node_modules/", + ".venv*", ] [tool.black] From 55bfadfd0b2d642aee54f4ac11d77c678f269177 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 27 Oct 2023 14:36:16 -0500 Subject: [PATCH 164/202] fix(nodes): fix DenoiseMaskField.masked_latents_name This optional field needs to have a default of `None`. 
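
A minimal sketch of the behaviour this relies on, assuming Pydantic v2 semantics and a hypothetical `DenoiseMaskExample` model standing in for `DenoiseMaskField` (field names taken from the diff below): under v2 an `Optional` annotation alone no longer implies a default, so the field must declare `default=None` to stay truly optional.

    from typing import Optional
    from pydantic import BaseModel, Field

    class DenoiseMaskExample(BaseModel):
        # Hypothetical stand-in for DenoiseMaskField.
        mask_name: str = Field(description="The name of the mask image")
        # Without default=None, Pydantic v2 would treat this Optional field as required.
        masked_latents_name: Optional[str] = Field(
            default=None, description="The name of the masked image latents"
        )

    # Omitting masked_latents_name now validates cleanly:
    DenoiseMaskExample(mask_name="mask_1")

With the explicit default in place, constructing the model without `masked_latents_name` succeeds; without it, Pydantic v2 raises a missing-field validation error.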
--- invokeai/app/invocations/primitives.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index 88ede88cde..537d616f1f 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -293,7 +293,7 @@ class DenoiseMaskField(BaseModel): """An inpaint mask field""" mask_name: str = Field(description="The name of the mask image") - masked_latents_name: Optional[str] = Field(description="The name of the masked image latents") + masked_latents_name: Optional[str] = Field(default=None, description="The name of the masked image latents") @invocation_output("denoise_mask_output") From 03a64275c696b4bdc8ddb0b0a815f056502d354a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:15:26 -0500 Subject: [PATCH 165/202] fix(db): fix deprecated pydantic `.json()` method --- .../app/services/workflow_records/workflow_records_sqlite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/services/workflow_records/workflow_records_sqlite.py b/invokeai/app/services/workflow_records/workflow_records_sqlite.py index e9e2bdca3a..b0952e8234 100644 --- a/invokeai/app/services/workflow_records/workflow_records_sqlite.py +++ b/invokeai/app/services/workflow_records/workflow_records_sqlite.py @@ -57,7 +57,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase): INSERT INTO workflows(workflow) VALUES (?); """, - (workflow.json(),), + (workflow.model_dump_json(),), ) self._conn.commit() except Exception: From 9721e1382d417ebe4ec665e33159b15c6075295b Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 30 Oct 2023 15:49:27 -0400 Subject: [PATCH 166/202] add option to install latest prerelease --- invokeai/frontend/install/invokeai_update.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/invokeai/frontend/install/invokeai_update.py b/invokeai/frontend/install/invokeai_update.py index ed54a8f10b..b065fcfe5b 100644 --- a/invokeai/frontend/install/invokeai_update.py +++ b/invokeai/frontend/install/invokeai_update.py @@ -50,7 +50,7 @@ def invokeai_is_running() -> bool: return False -def welcome(versions: dict): +def welcome(latest_release: str, latest_prerelease: str): @group() def text(): yield f"InvokeAI Version: [bold yellow]{__version__}" @@ -61,7 +61,8 @@ def welcome(versions: dict): yield "making the web frontend unusable. Please downgrade to the latest release if this happens." yield "" yield "[bold yellow]Options:" - yield f"""[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic]) + yield f"""[1] Update to the latest [bold]official release[/bold] ([italic]{latest_release}[/italic]) +[2] Update to the latest [bold]pre-release[/bold] (may be buggy; caveat emptor!) 
([italic]{latest_prerelease}[/italic]) [2] Manually enter the [bold]tag name[/bold] for the version you wish to update to [3] Manually enter the [bold]branch name[/bold] for the version you wish to update to""" @@ -91,13 +92,18 @@ def get_extras(): def main(): - versions = [x for x in get_versions() if not (x["draft"] or x["prerelease"])] + versions = get_versions() + released_versions = [x for x in versions if not (x["draft"] or x["prerelease"])] + prerelease_versions = [x for x in versions if not x["draft"] and x["prerelease"]] + latest_release = released_versions[0]["tag_name"] if len(released_versions) else None + latest_prerelease = prerelease_versions[0]["tag_name"] if len(prerelease_versions) else None + if invokeai_is_running(): print(":exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]") input("Press any key to continue...") return - welcome(versions) + welcome(latest_release, latest_prerelease) tag = None branch = None @@ -105,11 +111,13 @@ def main(): choice = Prompt.ask("Choice:", choices=["1", "2", "3", "4"], default="1") if choice == "1": - release = versions[0]["tag_name"] + release = latest_release elif choice == "2": + release = latest_prerelease + elif choice == "3": while not tag: tag = Prompt.ask("Enter an InvokeAI tag name") - elif choice == "3": + elif choice == "4": while not branch: branch = Prompt.ask("Enter an InvokeAI branch name") From bb68175fd050cdd46f5d61504f154ed5c8613e0e Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> Date: Fri, 20 Oct 2023 23:01:13 -0400 Subject: [PATCH 167/202] Add negative IP Adapter support --- invokeai/app/invocations/ip_adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py index 8902152538..4fa256552d 100644 --- a/invokeai/app/invocations/ip_adapter.py +++ b/invokeai/app/invocations/ip_adapter.py @@ -67,7 +67,7 @@ class IPAdapterInvocation(BaseInvocation): # weight: float = InputField(default=1.0, description="The weight of the IP-Adapter.", ui_type=UIType.Float) weight: Union[float, List[float]] = InputField( - default=1, ge=0, description="The weight given to the IP-Adapter", ui_type=UIType.Float, title="Weight" + default=1, ge=-1, description="The weight given to the IP-Adapter", ui_type=UIType.Float, title="Weight" ) begin_step_percent: float = InputField( From 8481db96edd3df2cae9e0af56be4eb5b06aee824 Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Tue, 31 Oct 2023 16:06:48 +1100 Subject: [PATCH 168/202] Updated workflows --- docs/nodes/exampleWorkflows.md | 12 +- ...N_img2img_upscale_w_Canny_ControlNet.json} | 1382 ++++++----- docs/workflows/FaceMask.json | 822 +++---- docs/workflows/FaceOff_FaceScale2x.json | 1356 +++++------ ...ce_Detailer_with_IP-Adapter_and_Canny.json | 2032 +++++++++++++++++ .../Multi_ControlNet_Canny_and_Depth.json | 985 ++++++++ docs/workflows/Prompt_from_File.json | 719 ++++++ docs/workflows/QR_Code_Monster.json | 758 ++++++ docs/workflows/SDXL_Text_to_Image.json | 432 ++-- .../SDXL_w_Refiner_Text_to_Image.json | 973 ++++---- docs/workflows/Text_to_Image.json | 366 +-- 11 files changed, 7230 insertions(+), 2607 deletions(-) rename docs/workflows/{ESRGAN_img2img_upscale w_Canny_ControlNet.json => ESRGAN_img2img_upscale_w_Canny_ControlNet.json} (55%) create mode 100644 docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json create mode 100644 docs/workflows/Multi_ControlNet_Canny_and_Depth.json create 
mode 100644 docs/workflows/Prompt_from_File.json create mode 100644 docs/workflows/QR_Code_Monster.json diff --git a/docs/nodes/exampleWorkflows.md b/docs/nodes/exampleWorkflows.md index f0159bd77c..568283585c 100644 --- a/docs/nodes/exampleWorkflows.md +++ b/docs/nodes/exampleWorkflows.md @@ -2,13 +2,17 @@ We've curated some example workflows for you to get started with Workflows in InvokeAI -To use them, right click on your desired workflow, press "Download Linked File". You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images! +To use them, right click on your desired workflow, follow the link to GitHub and click the "⬇" button to download the raw file. You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images! If you're interested in finding more workflows, checkout the [#share-your-workflows](https://discord.com/channels/1020123559063990373/1130291608097661000) channel in the InvokeAI Discord. * [SD1.5 / SD2 Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Text_to_Image.json) -* [SDXL Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_Text_to_Image.json) -* [SDXL (with Refiner) Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_Text_to_Image.json) -* [Tiled Upscaling with ControlNet](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/ESRGAN_img2img_upscale w_Canny_ControlNet.json) +* [SDXL Text to Image](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/SDXL_Text_to_Image.json) +* [SDXL Text to Image with Refiner](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/SDXL_w_Refiner_Text_to_Image.json) +* [Multi ControlNet (Canny & Depth)](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Multi_ControlNet_Canny_and_Depth.json) +* [Tiled Upscaling with ControlNet](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/ESRGAN_img2img_upscale_w_Canny_ControlNet.json) +* [Prompt From File](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Prompt_from_File.json) +* [Face Detailer with IP-Adapter & ControlNet](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json.json) * [FaceMask](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/FaceMask.json) * [FaceOff with 2x Face Scaling](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/FaceOff_FaceScale2x.json) +* [QR Code Monster](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/QR_Code_Monster.json) \ No newline at end of file diff --git a/docs/workflows/ESRGAN_img2img_upscale w_Canny_ControlNet.json b/docs/workflows/ESRGAN_img2img_upscale_w_Canny_ControlNet.json similarity index 55% rename from docs/workflows/ESRGAN_img2img_upscale w_Canny_ControlNet.json rename to docs/workflows/ESRGAN_img2img_upscale_w_Canny_ControlNet.json index c963ace025..17222aa002 100644 --- a/docs/workflows/ESRGAN_img2img_upscale w_Canny_ControlNet.json +++ b/docs/workflows/ESRGAN_img2img_upscale_w_Canny_ControlNet.json @@ -1,5 +1,5 @@ { - "name": "ESRGAN img2img upscale w_ Lineart ControlNet", + "name": "ESRGAN img2img upscale w_ Canny ControlNet", "author": "InvokeAI", "description": "Sample workflow for using Upscaling with ControlNet with SD1.5", "version": "1.0.1", @@ -8,20 +8,16 @@ "notes": "", "exposedFields": [ { - "nodeId": "aba70c57-2495-4ec1-8d23-02b1d11445c7", + "nodeId": 
"d8ace142-c05f-4f1d-8982-88dc7473958d", "fieldName": "model" }, { - "nodeId": "c394834e-cab7-4c0c-919e-2e35eba7f34e", - "fieldName": "prompt" - }, - { - "nodeId": "465c7e6e-278f-49b0-87ab-642e88cd076f", - "fieldName": "prompt" - }, - { - "nodeId": "4e2833b2-5d35-45ec-ae65-89ea1846a3f4", + "nodeId": "771bdf6a-0813-4099-a5d8-921a138754d4", "fieldName": "image" + }, + { + "nodeId": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b", + "fieldName": "prompt" } ], "meta": { @@ -29,14 +25,60 @@ }, "nodes": [ { - "id": "aba70c57-2495-4ec1-8d23-02b1d11445c7", + "id": "e8bf67fe-67de-4227-87eb-79e86afdfc74", "type": "invocation", "data": { - "id": "aba70c57-2495-4ec1-8d23-02b1d11445c7", + "id": "e8bf67fe-67de-4227-87eb-79e86afdfc74", + "type": "compel", + "inputs": { + "prompt": { + "id": "5f762fae-d791-42d9-8ab5-2b830c33ff20", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "", + "value": "" + }, + "clip": { + "id": "8ac95f40-317d-4513-bbba-b99effd3b438", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "46c65b2b-c0b5-40c2-b183-74e9451c6d56", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 1261.0015571435993, + "y": 1513.9276360694537 + } + }, + { + "id": "d8ace142-c05f-4f1d-8982-88dc7473958d", + "type": "invocation", + "data": { + "id": "d8ace142-c05f-4f1d-8982-88dc7473958d", "type": "main_model_loader", "inputs": { "model": { - "id": "4fcc98ee-1c70-4ad3-aaee-df72e0d4ecb9", + "id": "b35ae88a-f2d2-43f6-958c-8c624391250f", "name": "model", "type": "MainModelField", "fieldKind": "input", @@ -50,19 +92,19 @@ }, "outputs": { "unet": { - "id": "3ecbc0ff-a7b0-43de-a81f-039210cbda50", + "id": "02f243cb-c6e2-42c5-8be9-ef0519d54383", "name": "unet", "type": "UNetField", "fieldKind": "output" }, "clip": { - "id": "4e55bd72-5409-4fba-9929-4177e4ae9c34", + "id": "7762ed13-5b28-40f4-85f1-710942ceb92a", "name": "clip", "type": "ClipField", "fieldKind": "output" }, "vae": { - "id": "0095495c-4424-451f-a8f5-26dc840a3c56", + "id": "69566153-1918-417d-a3bb-32e9e857ef6b", "name": "vae", "type": "VaeField", "fieldKind": "output" @@ -72,256 +114,26 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 202, + "height": 226, "position": { - "x": 175, - "y": 300 + "x": 433.44132965778, + "y": 1419.9552496403696 } }, { - "id": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe", + "id": "771bdf6a-0813-4099-a5d8-921a138754d4", "type": "invocation", "data": { - "id": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe", - "type": "noise", - "inputs": { - "seed": { - "id": "a64e30f7-f9f4-4d67-a5aa-af5924205f92", - "name": "seed", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 0 - }, - "width": { - "id": "054eaa35-63af-41c9-b13f-e9c2aaeeca43", - "name": "width", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 512 - }, - "height": { - "id": "6e9a642c-fe1d-477f-ae66-1706471f7d73", - "name": "height", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 512 - }, - "use_cpu": { - "id": "caa3c8ba-aed5-44d8-88d9-4f48a75d59a4", - "name": "use_cpu", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": true - } - }, - 
"outputs": { - "noise": { - "id": "133dbb18-f862-430b-b9a0-613aa8e61c7d", - "name": "noise", - "type": "LatentsField", - "fieldKind": "output" - }, - "width": { - "id": "2e31961a-af0c-497b-9ae2-429cb6c2f5a1", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "eca33bb8-37bb-4bfd-b7c7-3dba01207374", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": false, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true - }, - "width": 320, - "height": 32, - "position": { - "x": 1400, - "y": 650 - } - }, - { - "id": "c2172a8b-1b5f-4330-acbe-dd2565c3b988", - "type": "invocation", - "data": { - "id": "c2172a8b-1b5f-4330-acbe-dd2565c3b988", - "type": "l2i", - "inputs": { - "tiled": { - "id": "406bccc1-d757-4578-b46e-be6141c03385", - "name": "tiled", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "fp32": { - "id": "960ec115-547c-45c8-af2a-569214d9409c", - "name": "fp32", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "latents": { - "id": "c8658cc5-3762-499d-9cad-eceb8f9dde4e", - "name": "latents", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "vae": { - "id": "c35cf05c-0985-4bc4-9b05-0c89799bb888", - "name": "vae", - "type": "VaeField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "image": { - "id": "25afb2bc-c964-4cd3-8332-4d0e9ea65d3a", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "aff7065e-8ce0-44aa-bb6e-d16925279fdd", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "6f5e8706-7a9b-4455-beb8-3d2e2ceabbc2", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": true, - "isIntermediate": false - }, - "width": 320, - "height": 266, - "position": { - "x": 2225, - "y": 450 - } - }, - { - "id": "c394834e-cab7-4c0c-919e-2e35eba7f34e", - "type": "invocation", - "data": { - "id": "c394834e-cab7-4c0c-919e-2e35eba7f34e", - "type": "compel", - "inputs": { - "prompt": { - "id": "fef594dd-07d3-47e6-97d0-1803b55a0f26", - "name": "prompt", - "type": "string", - "fieldKind": "input", - "label": "Positive Prompt", - "value": "tiger" - }, - "clip": { - "id": "e5639045-aa13-48c2-a172-869774aecab6", - "name": "clip", - "type": "ClipField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "conditioning": { - "id": "3249093e-0bc4-42a7-8a9b-2172fb89e915", - "name": "conditioning", - "type": "ConditioningField", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true - }, - "width": 320, - "height": 235, - "position": { - "x": 975, - "y": -25 - } - }, - { - "id": "465c7e6e-278f-49b0-87ab-642e88cd076f", - "type": "invocation", - "data": { - "id": "465c7e6e-278f-49b0-87ab-642e88cd076f", - "type": "compel", - "inputs": { - "prompt": { - "id": "9a3ac8ea-7655-4806-ab8c-b7a18a253181", - "name": "prompt", - "type": "string", - "fieldKind": "input", - "label": "Negative Prompt", - "value": "" - }, - "clip": { - "id": "1d348def-bb7d-4bab-b983-9f55c933ea67", - "name": "clip", - "type": "ClipField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "conditioning": { - "id": "f92febc1-67c4-45d4-b2e4-9ba470e4ccef", - "name": "conditioning", - "type": "ConditioningField", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": 
"", - "embedWorkflow": false, - "isIntermediate": true - }, - "width": 320, - "height": 235, - "position": { - "x": 975, - "y": 250 - } - }, - { - "id": "4e2833b2-5d35-45ec-ae65-89ea1846a3f4", - "type": "invocation", - "data": { - "id": "4e2833b2-5d35-45ec-ae65-89ea1846a3f4", + "id": "771bdf6a-0813-4099-a5d8-921a138754d4", "type": "image", "inputs": { "image": { - "id": "0b2f59b4-9994-4c99-9309-5434c746abb9", + "id": "0f6d68a2-38bd-4f65-a112-0a256c7a2678", "name": "image", "type": "ImageField", "fieldKind": "input", @@ -330,19 +142,19 @@ }, "outputs": { "image": { - "id": "9a41092e-50ec-4530-95b2-33d9207a8f50", + "id": "76f6f9b6-755b-4373-93fa-6a779998d2c8", "name": "image", "type": "ImageField", "fieldKind": "output" }, "width": { - "id": "6462e3b2-6450-45fd-9fee-0fbe25537ed0", + "id": "6858e46b-707c-444f-beda-9b5f4aecfdf8", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "94c0d477-8753-4976-ba91-b9eb7ad71285", + "id": "421bdc6e-ecd1-4935-9665-d38ab8314f79", "name": "height", "type": "integer", "fieldKind": "output" @@ -352,299 +164,70 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 225, "position": { - "x": 50, - "y": 750 + "x": 11.612243766002848, + "y": 1989.909405085168 } }, { - "id": "a7f1336d-516d-4735-826f-3c633dfaa5e8", + "id": "f7564dd2-9539-47f2-ac13-190804461f4e", "type": "invocation", "data": { - "id": "a7f1336d-516d-4735-826f-3c633dfaa5e8", - "type": "i2l", - "inputs": { - "image": { - "id": "b69df743-8045-4ffe-bb14-71b7f9c17c5f", - "name": "image", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "vae": { - "id": "a98a3497-34c5-46f7-9eaf-c24eab5d481a", - "name": "vae", - "type": "VaeField", - "fieldKind": "input", - "label": "" - }, - "tiled": { - "id": "706dc2c8-1820-42a7-98e1-bcd631737e7b", - "name": "tiled", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "fp32": { - "id": "5fc3bc04-c66e-46db-be2a-470c9d64b0d8", - "name": "fp32", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - } - }, - "outputs": { - "latents": { - "id": "50b89da5-e7ed-45cd-b74e-83c07e510ccd", - "name": "latents", - "type": "LatentsField", - "fieldKind": "output" - }, - "width": { - "id": "7be69dad-837f-4c98-8ae2-c7aacaa44b52", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "66e3c7ec-3848-4afb-84bb-ff3a09e47089", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true - }, - "width": 320, - "height": 299, - "position": { - "x": 975, - "y": 525 - } - }, - { - "id": "61613ab2-784d-4a5c-8576-18fd5da065ef", - "type": "invocation", - "data": { - "id": "61613ab2-784d-4a5c-8576-18fd5da065ef", - "type": "denoise_latents", - "inputs": { - "noise": { - "id": "6b894db2-ee45-45b6-b531-573311ddea73", - "name": "noise", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "steps": { - "id": "7e7693fe-6c0c-464a-8535-2ed517766c19", - "name": "steps", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 64 - }, - "cfg_scale": { - "id": "890ba738-690c-44a3-97cd-c589257b531a", - "name": "cfg_scale", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 7.5 - }, - "denoising_start": { - "id": "0e157ca8-5c06-499a-be6d-283fb834df32", - "name": "denoising_start", - "type": 
"float", - "fieldKind": "input", - "label": "", - "value": 0.7 - }, - "denoising_end": { - "id": "915bc497-cb58-40a5-8089-2ece7213be21", - "name": "denoising_end", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 1 - }, - "scheduler": { - "id": "694471cd-8708-411b-9d88-2ae725254ff0", - "name": "scheduler", - "type": "Scheduler", - "fieldKind": "input", - "label": "", - "value": "euler_a" - }, - "control": { - "id": "b14cff5e-30ea-4d61-b27c-8247063699ad", - "name": "control", - "type": "ControlField", - "fieldKind": "input", - "label": "" - }, - "latents": { - "id": "0528dbfd-d661-4c73-b493-722b27e37201", - "name": "latents", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "denoise_mask": { - "id": "ea7d6070-4f00-4b77-b49e-ffe1ca0e3fea", - "name": "denoise_mask", - "type": "DenoiseMaskField", - "fieldKind": "input", - "label": "" - }, - "positive_conditioning": { - "id": "56a3fa76-9eb4-4680-8a4d-169696034525", - "name": "positive_conditioning", - "type": "ConditioningField", - "fieldKind": "input", - "label": "" - }, - "negative_conditioning": { - "id": "0eed85bc-d5bd-4de4-8155-0c08f1ac5e32", - "name": "negative_conditioning", - "type": "ConditioningField", - "fieldKind": "input", - "label": "" - }, - "unet": { - "id": "87ab9e45-a92f-4d47-a9f6-a65fe23373de", - "name": "unet", - "type": "UNetField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "latents": { - "id": "d12e0bd6-7d35-4d12-84c0-540c26ba01c8", - "name": "latents", - "type": "LatentsField", - "fieldKind": "output" - }, - "width": { - "id": "201ca444-2d10-4bab-b6be-e1b5be97e1b0", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "74ce84a5-b68c-4503-8a7d-bc017024678a", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true - }, - "width": 320, - "height": 558, - "position": { - "x": 1800, - "y": 350 - } - }, - { - "id": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e", - "type": "invocation", - "data": { - "id": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e", - "type": "rand_int", - "inputs": { - "low": { - "id": "10eff2d0-929f-45ca-a1ba-68c3e742db71", - "name": "low", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 0 - }, - "high": { - "id": "b6e39169-e6ee-496a-8046-5444497036c2", - "name": "high", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 2147483647 - } - }, - "outputs": { - "value": { - "id": "cc477a7a-36bc-458f-b7ac-6717bac6f12b", - "name": "value", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": false, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true - }, - "width": 320, - "height": 32, - "position": { - "x": 975, - "y": 850 - } - }, - { - "id": "08c41d50-fb10-43a9-a58b-fc15ee678a83", - "type": "invocation", - "data": { - "id": "08c41d50-fb10-43a9-a58b-fc15ee678a83", + "id": "f7564dd2-9539-47f2-ac13-190804461f4e", "type": "esrgan", "inputs": { + "metadata": { + "id": "8fa0c7eb-5bd3-4575-98e7-72285c532504", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, "image": { - "id": "01b92996-26aa-412b-9eba-d13cb7b370a8", + "id": "3c949799-a504-41c9-b342-cff4b8146c48", "name": "image", "type": "ImageField", "fieldKind": "input", "label": "" }, "model_name": { - "id": "3fc7bf6a-e181-4236-b746-48b011351af1", + "id": "77cb4750-53d6-4c2c-bb5c-145981acbf17", "name": "model_name", "type": "enum", 
"fieldKind": "input", "label": "", - "value": "RealESRGAN_x2plus.pth" + "value": "RealESRGAN_x4plus.pth" + }, + "tile_size": { + "id": "7787b3ad-46ee-4248-995f-bc740e1f988b", + "name": "tile_size", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 400 } }, "outputs": { "image": { - "id": "6feb0664-c61d-4fcd-8226-ed81591dcb0c", + "id": "37e6308e-e926-4e07-b0db-4e8601f495d0", "name": "image", "type": "ImageField", "fieldKind": "output" }, "width": { - "id": "61650a80-7d2f-4509-8600-574c5cc6e569", + "id": "c194d84a-fac7-4856-b646-d08477a5ad2b", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "fcb00841-f068-475e-ac90-0874313fd7fa", + "id": "b2a6206c-a9c8-4271-a055-0b93a7f7d505", "name": "height", "type": "integer", "fieldKind": "output" @@ -654,31 +237,106 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.1.0" }, "width": 320, - "height": 282, + "height": 339, "position": { - "x": 475, - "y": 725 + "x": 436.07457889056195, + "y": 1967.3109314112623 } }, { - "id": "30598d37-cf80-4e12-896a-7683cf727e77", + "id": "1d887701-df21-4966-ae6e-a7d82307d7bd", "type": "invocation", "data": { - "id": "30598d37-cf80-4e12-896a-7683cf727e77", + "id": "1d887701-df21-4966-ae6e-a7d82307d7bd", + "type": "canny_image_processor", + "inputs": { + "metadata": { + "id": "52c877c8-25d9-4949-8518-f536fcdd152d", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "e0af11fe-4f95-4193-a599-cf40b6a963f5", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "low_threshold": { + "id": "ab775f7b-f556-4298-a9d6-2274f3a6c77c", + "name": "low_threshold", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 100 + }, + "high_threshold": { + "id": "9e58b615-06e4-417f-b0d8-63f1574cd174", + "name": "high_threshold", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 200 + } + }, + "outputs": { + "image": { + "id": "61feb8bf-95c9-4634-87e2-887fc43edbdf", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "9e203e41-73f7-4cfa-bdca-5040e5e60c55", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "ec7d99dc-0d82-4495-a759-6423808bff1c", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 339, + "position": { + "x": 1221.7155516160597, + "y": 1971.0099052871012 + } + }, + { + "id": "ca1d020c-89a8-4958-880a-016d28775cfa", + "type": "invocation", + "data": { + "id": "ca1d020c-89a8-4958-880a-016d28775cfa", "type": "controlnet", "inputs": { "image": { - "id": "d44259eb-bc93-4d4b-9665-a7895e5a77ab", + "id": "2973c126-e301-4595-a7dc-d6e1729ccdbf", "name": "image", "type": "ImageField", "fieldKind": "input", "label": "" }, "control_model": { - "id": "2021065f-d5c9-47ad-bfea-03eea03a19ce", + "id": "4bb4d987-8491-4839-b41b-6e2f546fe2d0", "name": "control_model", "type": "ControlNetModelField", "fieldKind": "input", @@ -689,15 +347,15 @@ } }, "control_weight": { - "id": "f856e29f-303f-4507-8c39-71ccc636f67c", + "id": "a3cf387a-b58f-4058-858f-6a918efac609", "name": "control_weight", - "type": "float", + "type": "FloatPolymorphic", "fieldKind": "input", "label": "", "value": 1 }, "begin_step_percent": 
{ - "id": "f9187fa7-1510-439d-9c9b-e5fa990639b0", + "id": "e0614f69-8a58-408b-9238-d3a44a4db4e0", "name": "begin_step_percent", "type": "float", "fieldKind": "input", @@ -705,7 +363,7 @@ "value": 0 }, "end_step_percent": { - "id": "3f7c15c3-2b80-49a8-8eec-57d277c37364", + "id": "ac683539-b6ed-4166-9294-2040e3ede206", "name": "end_step_percent", "type": "float", "fieldKind": "input", @@ -713,7 +371,7 @@ "value": 1 }, "control_mode": { - "id": "f34cb991-ecb5-423c-865f-9890b2fa9d23", + "id": "f00b21de-cbd7-4901-8efc-e7134a2dc4c8", "name": "control_mode", "type": "enum", "fieldKind": "input", @@ -721,7 +379,7 @@ "value": "balanced" }, "resize_mode": { - "id": "552142f6-f6a6-4291-803c-68caefec7c6d", + "id": "cafb60ee-3959-4d57-a06c-13b83be6ea4f", "name": "resize_mode", "type": "enum", "fieldKind": "input", @@ -731,7 +389,7 @@ }, "outputs": { "control": { - "id": "6160265b-4a8d-4fc5-8e1f-d793a353d2db", + "id": "dfb88dd1-12bf-4034-9268-e726f894c131", "name": "control", "type": "ControlField", "fieldKind": "output" @@ -741,101 +399,218 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 480, + "height": 508, "position": { - "x": 1375, - "y": 875 + "x": 1681.7783532660528, + "y": 1845.0516454465633 } }, { - "id": "b79732f8-6126-4d06-9c8a-4e84bd5b1ac9", + "id": "f50624ce-82bf-41d0-bdf7-8aab11a80d48", "type": "invocation", "data": { - "id": "b79732f8-6126-4d06-9c8a-4e84bd5b1ac9", - "type": "vae_loader", + "id": "f50624ce-82bf-41d0-bdf7-8aab11a80d48", + "type": "noise", "inputs": { - "vae_model": { - "id": "b68038ae-b8cb-4e29-9581-da50d55af462", - "name": "vae_model", - "type": "VaeModelField", - "fieldKind": "input", - "label": "", - "value": { - "model_name": "sd-vae-ft-mse", - "base_model": "sd-1" - } - } - }, - "outputs": { - "vae": { - "id": "82f4ba9d-5e3a-4b48-b7a4-37de956663d7", - "name": "vae", - "type": "VaeField", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true - }, - "width": 320, - "height": 138, - "position": { - "x": 175, - "y": 525 - } - }, - { - "id": "b10d39ab-7bc9-48bc-b883-2fd50920876d", - "type": "invocation", - "data": { - "id": "b10d39ab-7bc9-48bc-b883-2fd50920876d", - "type": "canny_image_processor", - "inputs": { - "image": { - "id": "dee9cdf2-9b3f-4d20-8433-ef7d6f6526bd", - "name": "image", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "low_threshold": { - "id": "4eaefd37-9fbe-4b73-8a17-c60d4e1d7e39", - "name": "low_threshold", + "seed": { + "id": "f76b0e01-b601-423f-9b5f-ab7a1f10fe82", + "name": "seed", "type": "integer", "fieldKind": "input", "label": "", - "value": 100 + "value": 0 }, - "high_threshold": { - "id": "c394720b-546b-464e-8f53-d20bfda4ee04", - "name": "high_threshold", + "width": { + "id": "eec326d6-710c-45de-a25c-95704c80d7e2", + "name": "width", "type": "integer", "fieldKind": "input", "label": "", - "value": 200 + "value": 512 + }, + "height": { + "id": "2794a27d-5337-43ca-95d9-41b673642c94", + "name": "height", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "use_cpu": { + "id": "ae7654e3-979e-44a1-8968-7e3199e91e66", + "name": "use_cpu", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": true } }, "outputs": { - "image": { - "id": "80fcdf76-dfc3-41a1-99ea-5ab2aa5fde07", - "name": "image", - "type": "ImageField", + "noise": { + "id": "8b6dc166-4ead-4124-8ac9-529814b0cbb9", + 
"name": "noise", + "type": "LatentsField", "fieldKind": "output" }, "width": { - "id": "3fa5e5a9-ff60-44d2-8d91-635d0c798f15", + "id": "e3fe3940-a277-4838-a448-5f81f2a7d99d", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "01a10c42-d485-4436-af8b-03d71c59bc8c", + "id": "48ecd6ef-c216-40d5-9d1b-d37bd00c82e7", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": false, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 32, + "position": { + "x": 1660.5387878479382, + "y": 1664.7391082353483 + } + }, + { + "id": "c3737554-8d87-48ff-a6f8-e71d2867f434", + "type": "invocation", + "data": { + "id": "c3737554-8d87-48ff-a6f8-e71d2867f434", + "type": "denoise_latents", + "inputs": { + "positive_conditioning": { + "id": "e127084b-72f5-4fe4-892b-84f34f88bce9", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "72cde4ee-55de-4d3e-9057-74e741c04e20", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "noise": { + "id": "747f7023-1c19-465b-bec8-1d9695dd3505", + "name": "noise", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "80860292-633c-46f2-83d0-60d0029b65d2", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "ebc71e6f-9148-4f12-b455-5e1f179d1c3a", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "ced44b8f-3bad-4c34-8113-13bc0faed28a", + "name": "denoising_start", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "79bf4b77-3502-4f72-ba8b-269c4c3c5c72", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "ed56e2b8-f477-41a2-b9f5-f15f4933ae65", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "unet": { + "id": "146b790c-b08e-437c-a2e1-e393c2c1c41a", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + }, + "control": { + "id": "75ed3df1-d261-4b8e-a89b-341c4d7161fb", + "name": "control", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "eab9a61d-9b64-44d3-8d90-4686f5887cb0", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "2dc8d637-58fd-4069-ad33-85c32d958b7b", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "6767e40a-97c6-4487-b3c9-cad1c150bf9f", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "6251efda-d97d-4ff1-94b5-8cc6b458c184", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "4e7986a4-dff2-4448-b16b-1af477b81f8b", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "dad525dd-d2f8-4f07-8c8d-51f2a3c5456e", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "af03a089-4739-40c6-8b48-25d458d63c2f", "name": 
"height", "type": "integer", "fieldKind": "output" @@ -845,165 +620,344 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.4.0" }, "width": 320, - "height": 339, + "height": 646, "position": { - "x": 925, - "y": 925 + "x": 2128.740065979906, + "y": 1232.6219060454753 + } + }, + { + "id": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0", + "type": "invocation", + "data": { + "id": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0", + "type": "l2i", + "inputs": { + "metadata": { + "id": "9f7a1a9f-7861-4f09-874b-831af89b7474", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "a5b42432-8ee7-48cd-b61c-b97be6e490a2", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "vae": { + "id": "890de106-e6c3-4c2c-8d67-b368def64894", + "name": "vae", + "type": "VaeField", + "fieldKind": "input", + "label": "" + }, + "tiled": { + "id": "b8e5a2ca-5fbc-49bd-ad4c-ea0e109d46e3", + "name": "tiled", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + }, + "fp32": { + "id": "fdaf6264-4593-4bd2-ac71-8a0acff261af", + "name": "fp32", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + "id": "94c5877d-6c78-4662-a836-8a84fc75d0a0", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "2a854e42-1616-42f5-b9ef-7b73c40afc1d", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "dd649053-1433-4f31-90b3-8bb103efc5b1", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": false, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 267, + "position": { + "x": 2559.4751127537957, + "y": 1246.6000376741406 + } + }, + { + "id": "5ca498a4-c8c8-4580-a396-0c984317205d", + "type": "invocation", + "data": { + "id": "5ca498a4-c8c8-4580-a396-0c984317205d", + "type": "i2l", + "inputs": { + "image": { + "id": "9e6c4010-0f79-4587-9062-29d9a8f96b3b", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "vae": { + "id": "b9ed2ec4-e8e3-4d69-8a42-27f2d983bcd6", + "name": "vae", + "type": "VaeField", + "fieldKind": "input", + "label": "" + }, + "tiled": { + "id": "bb48d10b-2440-4c46-b835-646ae5ebc013", + "name": "tiled", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + }, + "fp32": { + "id": "1048612c-c0f4-4abf-a684-0045e7d158f8", + "name": "fp32", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "latents": { + "id": "55301367-0578-4dee-8060-031ae13c7bf8", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "2eb65690-1f20-4070-afbd-1e771b9f8ca9", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "d5bf64c7-c30f-43b8-9bc2-95e7718c1bdc", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 325, + "position": { + "x": 848.091172736516, + "y": 1618.7467772496016 + } + }, + { + "id": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b", + "type": "invocation", + "data": { + "id": 
"63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b", + "type": "compel", + "inputs": { + "prompt": { + "id": "5f762fae-d791-42d9-8ab5-2b830c33ff20", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "", + "value": "" + }, + "clip": { + "id": "8ac95f40-317d-4513-bbba-b99effd3b438", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "46c65b2b-c0b5-40c2-b183-74e9451c6d56", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 1280.0309709777139, + "y": 1213.3027983934699 } } ], "edges": [ { - "source": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e", - "target": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe", - "id": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e-167ffc36-4cb7-425c-ae55-e88f80a1d6fe-collapsed", - "type": "collapsed" - }, - { - "source": "aba70c57-2495-4ec1-8d23-02b1d11445c7", - "sourceHandle": "clip", - "target": "c394834e-cab7-4c0c-919e-2e35eba7f34e", - "targetHandle": "clip", - "id": "reactflow__edge-aba70c57-2495-4ec1-8d23-02b1d11445c7clip-c394834e-cab7-4c0c-919e-2e35eba7f34eclip", + "source": "771bdf6a-0813-4099-a5d8-921a138754d4", + "sourceHandle": "image", + "target": "f7564dd2-9539-47f2-ac13-190804461f4e", + "targetHandle": "image", + "id": "reactflow__edge-771bdf6a-0813-4099-a5d8-921a138754d4image-f7564dd2-9539-47f2-ac13-190804461f4eimage", "type": "default" }, { - "source": "aba70c57-2495-4ec1-8d23-02b1d11445c7", - "sourceHandle": "clip", - "target": "465c7e6e-278f-49b0-87ab-642e88cd076f", - "targetHandle": "clip", - "id": "reactflow__edge-aba70c57-2495-4ec1-8d23-02b1d11445c7clip-465c7e6e-278f-49b0-87ab-642e88cd076fclip", + "source": "f7564dd2-9539-47f2-ac13-190804461f4e", + "sourceHandle": "image", + "target": "1d887701-df21-4966-ae6e-a7d82307d7bd", + "targetHandle": "image", + "id": "reactflow__edge-f7564dd2-9539-47f2-ac13-190804461f4eimage-1d887701-df21-4966-ae6e-a7d82307d7bdimage", "type": "default" }, { - "source": "aba70c57-2495-4ec1-8d23-02b1d11445c7", - "sourceHandle": "vae", - "target": "a7f1336d-516d-4735-826f-3c633dfaa5e8", - "targetHandle": "vae", - "id": "reactflow__edge-aba70c57-2495-4ec1-8d23-02b1d11445c7vae-a7f1336d-516d-4735-826f-3c633dfaa5e8vae", - "type": "default" - }, - { - "source": "aba70c57-2495-4ec1-8d23-02b1d11445c7", - "sourceHandle": "unet", - "target": "61613ab2-784d-4a5c-8576-18fd5da065ef", - "targetHandle": "unet", - "id": "reactflow__edge-aba70c57-2495-4ec1-8d23-02b1d11445c7unet-61613ab2-784d-4a5c-8576-18fd5da065efunet", - "type": "default" - }, - { - "source": "465c7e6e-278f-49b0-87ab-642e88cd076f", - "sourceHandle": "conditioning", - "target": "61613ab2-784d-4a5c-8576-18fd5da065ef", - "targetHandle": "negative_conditioning", - "id": "reactflow__edge-465c7e6e-278f-49b0-87ab-642e88cd076fconditioning-61613ab2-784d-4a5c-8576-18fd5da065efnegative_conditioning", - "type": "default" - }, - { - "source": "c394834e-cab7-4c0c-919e-2e35eba7f34e", - "sourceHandle": "conditioning", - "target": "61613ab2-784d-4a5c-8576-18fd5da065ef", - "targetHandle": "positive_conditioning", - "id": "reactflow__edge-c394834e-cab7-4c0c-919e-2e35eba7f34econditioning-61613ab2-784d-4a5c-8576-18fd5da065efpositive_conditioning", - "type": "default" - }, - { - "source": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe", - "sourceHandle": "noise", - "target": 
"61613ab2-784d-4a5c-8576-18fd5da065ef", - "targetHandle": "noise", - "id": "reactflow__edge-167ffc36-4cb7-425c-ae55-e88f80a1d6fenoise-61613ab2-784d-4a5c-8576-18fd5da065efnoise", - "type": "default" - }, - { - "source": "61613ab2-784d-4a5c-8576-18fd5da065ef", - "sourceHandle": "latents", - "target": "c2172a8b-1b5f-4330-acbe-dd2565c3b988", - "targetHandle": "latents", - "id": "reactflow__edge-61613ab2-784d-4a5c-8576-18fd5da065eflatents-c2172a8b-1b5f-4330-acbe-dd2565c3b988latents", - "type": "default" - }, - { - "source": "a7f1336d-516d-4735-826f-3c633dfaa5e8", - "sourceHandle": "latents", - "target": "61613ab2-784d-4a5c-8576-18fd5da065ef", - "targetHandle": "latents", - "id": "reactflow__edge-a7f1336d-516d-4735-826f-3c633dfaa5e8latents-61613ab2-784d-4a5c-8576-18fd5da065eflatents", - "type": "default" - }, - { - "source": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e", - "sourceHandle": "value", - "target": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe", - "targetHandle": "seed", - "id": "reactflow__edge-39abdb3e-f8d1-4b0a-bab1-547d9e25b78evalue-167ffc36-4cb7-425c-ae55-e88f80a1d6feseed", - "type": "default" - }, - { - "source": "a7f1336d-516d-4735-826f-3c633dfaa5e8", + "source": "5ca498a4-c8c8-4580-a396-0c984317205d", "sourceHandle": "width", - "target": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe", + "target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48", "targetHandle": "width", - "id": "reactflow__edge-a7f1336d-516d-4735-826f-3c633dfaa5e8width-167ffc36-4cb7-425c-ae55-e88f80a1d6fewidth", + "id": "reactflow__edge-5ca498a4-c8c8-4580-a396-0c984317205dwidth-f50624ce-82bf-41d0-bdf7-8aab11a80d48width", "type": "default" }, { - "source": "a7f1336d-516d-4735-826f-3c633dfaa5e8", + "source": "5ca498a4-c8c8-4580-a396-0c984317205d", "sourceHandle": "height", - "target": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe", + "target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48", "targetHandle": "height", - "id": "reactflow__edge-a7f1336d-516d-4735-826f-3c633dfaa5e8height-167ffc36-4cb7-425c-ae55-e88f80a1d6feheight", + "id": "reactflow__edge-5ca498a4-c8c8-4580-a396-0c984317205dheight-f50624ce-82bf-41d0-bdf7-8aab11a80d48height", "type": "default" }, { - "source": "4e2833b2-5d35-45ec-ae65-89ea1846a3f4", + "source": "f50624ce-82bf-41d0-bdf7-8aab11a80d48", + "sourceHandle": "noise", + "target": "c3737554-8d87-48ff-a6f8-e71d2867f434", + "targetHandle": "noise", + "id": "reactflow__edge-f50624ce-82bf-41d0-bdf7-8aab11a80d48noise-c3737554-8d87-48ff-a6f8-e71d2867f434noise", + "type": "default" + }, + { + "source": "5ca498a4-c8c8-4580-a396-0c984317205d", + "sourceHandle": "latents", + "target": "c3737554-8d87-48ff-a6f8-e71d2867f434", + "targetHandle": "latents", + "id": "reactflow__edge-5ca498a4-c8c8-4580-a396-0c984317205dlatents-c3737554-8d87-48ff-a6f8-e71d2867f434latents", + "type": "default" + }, + { + "source": "e8bf67fe-67de-4227-87eb-79e86afdfc74", + "sourceHandle": "conditioning", + "target": "c3737554-8d87-48ff-a6f8-e71d2867f434", + "targetHandle": "negative_conditioning", + "id": "reactflow__edge-e8bf67fe-67de-4227-87eb-79e86afdfc74conditioning-c3737554-8d87-48ff-a6f8-e71d2867f434negative_conditioning", + "type": "default" + }, + { + "source": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b", + "sourceHandle": "conditioning", + "target": "c3737554-8d87-48ff-a6f8-e71d2867f434", + "targetHandle": "positive_conditioning", + "id": "reactflow__edge-63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16bconditioning-c3737554-8d87-48ff-a6f8-e71d2867f434positive_conditioning", + "type": "default" + }, + { + "source": "d8ace142-c05f-4f1d-8982-88dc7473958d", + "sourceHandle": "clip", + 
"target": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b", + "targetHandle": "clip", + "id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dclip-63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16bclip", + "type": "default" + }, + { + "source": "d8ace142-c05f-4f1d-8982-88dc7473958d", + "sourceHandle": "clip", + "target": "e8bf67fe-67de-4227-87eb-79e86afdfc74", + "targetHandle": "clip", + "id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dclip-e8bf67fe-67de-4227-87eb-79e86afdfc74clip", + "type": "default" + }, + { + "source": "1d887701-df21-4966-ae6e-a7d82307d7bd", "sourceHandle": "image", - "target": "08c41d50-fb10-43a9-a58b-fc15ee678a83", + "target": "ca1d020c-89a8-4958-880a-016d28775cfa", "targetHandle": "image", - "id": "reactflow__edge-4e2833b2-5d35-45ec-ae65-89ea1846a3f4image-08c41d50-fb10-43a9-a58b-fc15ee678a83image", + "id": "reactflow__edge-1d887701-df21-4966-ae6e-a7d82307d7bdimage-ca1d020c-89a8-4958-880a-016d28775cfaimage", "type": "default" }, { - "source": "08c41d50-fb10-43a9-a58b-fc15ee678a83", - "sourceHandle": "image", - "target": "a7f1336d-516d-4735-826f-3c633dfaa5e8", - "targetHandle": "image", - "id": "reactflow__edge-08c41d50-fb10-43a9-a58b-fc15ee678a83image-a7f1336d-516d-4735-826f-3c633dfaa5e8image", - "type": "default" - }, - { - "source": "30598d37-cf80-4e12-896a-7683cf727e77", + "source": "ca1d020c-89a8-4958-880a-016d28775cfa", "sourceHandle": "control", - "target": "61613ab2-784d-4a5c-8576-18fd5da065ef", + "target": "c3737554-8d87-48ff-a6f8-e71d2867f434", "targetHandle": "control", - "id": "reactflow__edge-30598d37-cf80-4e12-896a-7683cf727e77control-61613ab2-784d-4a5c-8576-18fd5da065efcontrol", + "id": "reactflow__edge-ca1d020c-89a8-4958-880a-016d28775cfacontrol-c3737554-8d87-48ff-a6f8-e71d2867f434control", "type": "default" }, { - "source": "b79732f8-6126-4d06-9c8a-4e84bd5b1ac9", + "source": "c3737554-8d87-48ff-a6f8-e71d2867f434", + "sourceHandle": "latents", + "target": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0", + "targetHandle": "latents", + "id": "reactflow__edge-c3737554-8d87-48ff-a6f8-e71d2867f434latents-3ed9b2ef-f4ec-40a7-94db-92e63b583ec0latents", + "type": "default" + }, + { + "source": "d8ace142-c05f-4f1d-8982-88dc7473958d", "sourceHandle": "vae", - "target": "c2172a8b-1b5f-4330-acbe-dd2565c3b988", + "target": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0", "targetHandle": "vae", - "id": "reactflow__edge-b79732f8-6126-4d06-9c8a-4e84bd5b1ac9vae-c2172a8b-1b5f-4330-acbe-dd2565c3b988vae", + "id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dvae-3ed9b2ef-f4ec-40a7-94db-92e63b583ec0vae", "type": "default" }, { - "source": "08c41d50-fb10-43a9-a58b-fc15ee678a83", + "source": "f7564dd2-9539-47f2-ac13-190804461f4e", "sourceHandle": "image", - "target": "b10d39ab-7bc9-48bc-b883-2fd50920876d", + "target": "5ca498a4-c8c8-4580-a396-0c984317205d", "targetHandle": "image", - "id": "reactflow__edge-08c41d50-fb10-43a9-a58b-fc15ee678a83image-b10d39ab-7bc9-48bc-b883-2fd50920876dimage", + "id": "reactflow__edge-f7564dd2-9539-47f2-ac13-190804461f4eimage-5ca498a4-c8c8-4580-a396-0c984317205dimage", "type": "default" }, { - "source": "b10d39ab-7bc9-48bc-b883-2fd50920876d", - "sourceHandle": "image", - "target": "30598d37-cf80-4e12-896a-7683cf727e77", - "targetHandle": "image", - "id": "reactflow__edge-b10d39ab-7bc9-48bc-b883-2fd50920876dimage-30598d37-cf80-4e12-896a-7683cf727e77image", + "source": "d8ace142-c05f-4f1d-8982-88dc7473958d", + "sourceHandle": "unet", + "target": "c3737554-8d87-48ff-a6f8-e71d2867f434", + "targetHandle": "unet", + "id": 
"reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dunet-c3737554-8d87-48ff-a6f8-e71d2867f434unet", "type": "default" } ] diff --git a/docs/workflows/FaceMask.json b/docs/workflows/FaceMask.json index 589ac097e4..54c1f800b6 100644 --- a/docs/workflows/FaceMask.json +++ b/docs/workflows/FaceMask.json @@ -1,7 +1,7 @@ { "name": "FaceMask", "author": "YMGenesis", - "description": "21 September 2023\n\nPlace an image with recognizable face(s) in Image Primitive, and write what sort of new face you want in the top prompt text box. See Notes for more info.", + "description": "Place an image with recognizable face(s) in Image Primitive, and write what sort of new face you want in the top prompt text box. See Notes for more info.", "version": "1.0", "contact": "YMGenesis on InvokeAI Discord", "tags": "facemask, facetools", @@ -56,14 +56,14 @@ "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.0.0", - "useCache": true + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 226, "position": { - "x": 4625, - "y": 1275 + "x": 4103.832039728059, + "y": 1987.4435345183065 } }, { @@ -102,14 +102,14 @@ "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.0.0", - "useCache": true + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 259, + "height": 261, "position": { - "x": 5025, - "y": 1275 + "x": 4988.591898842789, + "y": 1700.8901379603535 } }, { @@ -148,14 +148,14 @@ "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.0.0", - "useCache": true + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 259, + "height": 261, "position": { - "x": 5025, - "y": 1550 + "x": 4986.228031951785, + "y": 1987.19695578231 } }, { @@ -219,91 +219,18 @@ } }, "label": "", - "isOpen": true, + "isOpen": false, "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.0.0", - "useCache": true + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 388, + "height": 32, "position": { - "x": 5425, - "y": 1525 - } - }, - { - "id": "3b2ebc7f-251d-4726-b1b8-5331355f8626", - "type": "invocation", - "data": { - "id": "3b2ebc7f-251d-4726-b1b8-5331355f8626", - "type": "l2i", - "inputs": { - "tiled": { - "id": "8265e595-8f76-4615-8b70-3a0fca854cbf", - "name": "tiled", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "fp32": { - "id": "cc2dd2ee-63f6-40e7-937e-d82086685abe", - "name": "fp32", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "latents": { - "id": "fbbab00e-b47f-4f45-801a-a719873429ca", - "name": "latents", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "vae": { - "id": "476e6075-c62d-4d42-afcf-a18e4c4a47fb", - "name": "vae", - "type": "VaeField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "image": { - "id": "65086674-a0f7-4d6c-a02e-101010b34d74", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "e32514a9-50d5-423e-95d3-5d9fd460adaa", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "4d6557ec-2967-40ec-bcae-52725ffe766c", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true, - "version": "1.0.0", - "useCache": true - }, - "width": 320, - "height": 266, - "position": { - "x": 5825, - "y": 1275 + "x": 5398.389401611981, + "y": 2019.4053462371755 } }, { @@ -339,18 
+266,18 @@ } }, "label": "", - "isOpen": true, + "isOpen": false, "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.0.0", - "useCache": false + "useCache": false, + "version": "1.0.0" }, "width": 320, - "height": 218, + "height": 32, "position": { - "x": 5425, - "y": 1275 + "x": 5386.304039775159, + "y": 1979.791644235275 } }, { @@ -407,18 +334,18 @@ } }, "label": "", - "isOpen": true, + "isOpen": false, "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.0.0", - "useCache": true + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 316, + "height": 32, "position": { - "x": 5025, - "y": 2175 + "x": 5009.179957658444, + "y": 2346.7322639404283 } }, { @@ -461,86 +388,14 @@ "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.0.0", - "useCache": true + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 225, "position": { - "x": 4625, - "y": 1525 - } - }, - { - "id": "31e99fe1-c39d-4cca-bd89-9ec27cb3a8a7", - "type": "invocation", - "data": { - "id": "31e99fe1-c39d-4cca-bd89-9ec27cb3a8a7", - "type": "color_correct", - "inputs": { - "image": { - "id": "8523ebb4-b6ef-4ba2-a0fc-32a5beb69c65", - "name": "image", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "reference": { - "id": "b4d476d4-cdca-4757-a7e3-509614ce956e", - "name": "reference", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "mask": { - "id": "66b192f5-50a6-4241-aac8-4ed68a34ac90", - "name": "mask", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "mask_blur_radius": { - "id": "deac73dc-2ad0-4e31-9519-920c56746c4e", - "name": "mask_blur_radius", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 8 - } - }, - "outputs": { - "image": { - "id": "d5eb4f4e-e277-435c-a960-0cf1731cac4b", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "9fef7aab-0294-43ac-adcf-f53fe3188823", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "148b6497-7a4e-43b4-bbba-a18e9426db37", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": true, - "isIntermediate": false, - "version": "1.0.0", - "useCache": true - }, - "width": 320, - "height": 396, - "position": { - "x": 5825, - "y": 1575 + "x": 4107.933245141945, + "y": 2255.443448115275 } }, { @@ -602,44 +457,197 @@ } }, "label": "", + "isOpen": false, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 32, + "position": { + "x": 5006.155303630073, + "y": 2277.2727128782517 + } + }, + { + "id": "d14b4d95-bf74-4ec5-827b-4c9e797c7ae9", + "type": "invocation", + "data": { + "id": "d14b4d95-bf74-4ec5-827b-4c9e797c7ae9", + "type": "denoise_latents", + "inputs": { + "noise": { + "id": "175b4d0a-3017-46e2-933f-c02f1cfb29b2", + "name": "noise", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "dd174b3e-3f6c-46cb-a703-3c6f3b3c72f1", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "0ea30aa7-8747-4c93-87e8-3c84e0dfd187", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "a6392edb-8895-41ed-918b-0ba8d2ac72ac", + "name": "denoising_start", + "type": "float", + "fieldKind": 
"input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "1d1807cc-a24d-426e-9de5-a7e61d45c006", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "947a5212-8923-4d5d-934c-dbc5879b9d07", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "control": { + "id": "585378b9-2686-4573-b762-3dc2d6179193", + "name": "control", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "191f4687-fdcc-45da-859f-71fd5091a8bd", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "39fc54ae-2141-4cab-9c01-c6c415f964cd", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "7f1a388e-8355-496c-a45d-fce5b8685a63", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "de922941-b2d8-4c57-92a7-201f9ddaf262", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + }, + "positive_conditioning": { + "id": "2b42b4e8-4795-4fcc-bef1-c08cb8e25e0a", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "ff29b7d7-1bff-4aa9-b5c0-f8786a55023a", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "unet": { + "id": "0155f1cb-152b-4097-9395-afcc745c697b", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "03cfb327-02a1-4fbe-b7ce-b07fd501d2b8", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "3ffb87d7-b5a6-4b35-bdf1-2bb9b718d815", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "2c1f0588-943a-4fd1-b75b-48d04c944296", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", "isOpen": true, "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.0.0", - "useCache": true + "useCache": true, + "version": "1.3.0" }, "width": 320, - "height": 323, + "height": 646, "position": { - "x": 5025, - "y": 1825 + "x": 5512.059705982663, + "y": 2103.8364934988267 } }, { - "id": "c2032405-817c-494d-b1e5-441e74d248d2", + "id": "eb725b0b-1fa6-4f79-aedb-52c19afcfad9", "type": "invocation", "data": { - "id": "c2032405-817c-494d-b1e5-441e74d248d2", + "id": "eb725b0b-1fa6-4f79-aedb-52c19afcfad9", "type": "face_mask_detection", "inputs": { + "metadata": { + "id": "a56bf310-b5c3-4440-8ba6-79f5e434a9e6", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, "image": { - "id": "e04ef12b-5eb8-40ba-9e46-93f360be12e6", + "id": "11ece2fd-57ee-4504-b1f5-8d0c9332d785", "name": "image", "type": "ImageField", "fieldKind": "input", "label": "" }, "face_ids": { - "id": "63ce222d-2dd3-41a3-976d-a15e5c6bc6d2", + "id": "c5eaafc3-70f4-4bcd-9df7-0d4cd26e1734", "name": "face_ids", "type": "string", "fieldKind": "input", "label": "", - "value": "0" + "value": "" }, "minimum_confidence": { - "id": "0b5969ae-afa6-4007-be70-4dc2a86dda4a", + "id": "ab85fdbd-d61e-4584-8595-c1cea1ffb288", "name": "minimum_confidence", "type": "float", "fieldKind": "input", @@ -647,7 +655,7 @@ "value": 0.5 }, "x_offset": { - "id": 
"5dc14f5e-02ad-42c9-809c-e708f107c2cd", + "id": "e381e04d-b54d-4457-9b6e-b3b554a8e343", "name": "x_offset", "type": "float", "fieldKind": "input", @@ -655,7 +663,7 @@ "value": 0 }, "y_offset": { - "id": "0d84f0c3-6983-4902-b3ed-d8cd8797e35d", + "id": "98e771c8-df97-4a57-a5d4-9601dac68338", "name": "y_offset", "type": "float", "fieldKind": "input", @@ -663,7 +671,7 @@ "value": 0 }, "chunk": { - "id": "00edd0c0-ee3f-4739-8a22-895b88d83d78", + "id": "ae4af045-99f9-4d84-81d2-438dc3d13b8d", "name": "chunk", "type": "boolean", "fieldKind": "input", @@ -671,7 +679,7 @@ "value": false }, "invert_mask": { - "id": "cbf70aeb-ceec-45c2-8042-3921c3134baf", + "id": "a6a0fe16-da45-46aa-9f85-9469fde40d71", "name": "invert_mask", "type": "boolean", "fieldKind": "input", @@ -681,25 +689,25 @@ }, "outputs": { "image": { - "id": "fe460f4c-7aa2-409a-bc02-a07e69cf9f2a", + "id": "7de8643a-c6b1-4260-b843-728c8d0fc6d4", "name": "image", "type": "ImageField", "fieldKind": "output" }, "width": { - "id": "26cf35c6-2ccd-4298-a7db-b1c2fddb3fc9", + "id": "fb1450b6-de42-465a-98d7-1dc93ceb20d7", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "3c4e7666-d039-49a5-b4c6-c5d9ba11f1bb", + "id": "cea5abb6-1584-440a-9ee2-1e4c926235e7", "name": "height", "type": "integer", "fieldKind": "output" }, "mask": { - "id": "35f88fb5-94c1-4643-a5c6-cc288602f37d", + "id": "272e4224-1736-42de-895d-096309259ac7", "name": "mask", "type": "ImageField", "fieldKind": "output" @@ -710,135 +718,76 @@ "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.0.0", - "useCache": true + "useCache": true, + "version": "1.0.2" }, "width": 320, - "height": 583, + "height": 585, "position": { - "x": 4625, - "y": 1900 + "x": 4559.385043470649, + "y": 2082.7157021692556 } }, { - "id": "441fdd15-1a49-413c-bd2a-637a105ad370", + "id": "e4681270-ea7e-4063-9116-880408854eee", "type": "invocation", "data": { - "id": "441fdd15-1a49-413c-bd2a-637a105ad370", - "type": "denoise_latents", + "id": "e4681270-ea7e-4063-9116-880408854eee", + "type": "l2i", "inputs": { - "noise": { - "id": "86b6226c-1780-49d7-bbed-c4a5440eebb2", - "name": "noise", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "steps": { - "id": "f3cae478-9f7c-4836-b8f8-3a83ef2fdcc3", - "name": "steps", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 20 - }, - "cfg_scale": { - "id": "f7d64dc5-ea3d-44a8-8951-56622f0548ee", - "name": "cfg_scale", - "type": "FloatPolymorphic", - "fieldKind": "input", - "label": "", - "value": 5 - }, - "denoising_start": { - "id": "01409326-116d-41b5-9c3d-abaa2a5182aa", - "name": "denoising_start", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0.3 - }, - "denoising_end": { - "id": "445b857b-a136-4736-b677-b2a701d651d4", - "name": "denoising_end", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 1 - }, - "scheduler": { - "id": "8e7d75ef-6cf0-467e-8067-595bb96c9636", - "name": "scheduler", - "type": "Scheduler", - "fieldKind": "input", - "label": "", - "value": "euler" - }, - "control": { - "id": "ad0c589a-b5e9-4892-9173-a709ee8672db", - "name": "control", - "type": "ControlPolymorphic", - "fieldKind": "input", - "label": "" - }, - "ip_adapter": { - "id": "843f92c7-2714-46ea-ab9d-fc568aa684f3", - "name": "ip_adapter", - "type": "IPAdapterField", + "metadata": { + "id": "1632b6ac-605d-42a7-853c-65539a6f664e", + "name": "metadata", + "type": "MetadataField", "fieldKind": "input", "label": "" }, "latents": { - "id": 
"d21e2b79-344d-438f-8841-c5c2dcb82e77", + "id": "9986c874-6d4b-47fc-895a-88933ef2b473", "name": "latents", "type": "LatentsField", "fieldKind": "input", "label": "" }, - "denoise_mask": { - "id": "e8cf20d0-76e0-4543-b11f-2a4d209a6489", - "name": "denoise_mask", - "type": "DenoiseMaskField", + "vae": { + "id": "d5842416-e575-4e35-a5d0-fd1ce4401b52", + "name": "vae", + "type": "VaeField", "fieldKind": "input", "label": "" }, - "positive_conditioning": { - "id": "3ab3bd3b-4ecd-4b87-9ab3-5de24828f3cd", - "name": "positive_conditioning", - "type": "ConditioningField", + "tiled": { + "id": "6a84e45e-5f54-4952-9285-9dedc6d056d5", + "name": "tiled", + "type": "boolean", "fieldKind": "input", - "label": "" + "label": "", + "value": false }, - "negative_conditioning": { - "id": "6e2907e9-779d-4984-9c79-6032bf75ab0c", - "name": "negative_conditioning", - "type": "ConditioningField", + "fp32": { + "id": "2fa37674-8685-4ec4-87d9-d4683131d79c", + "name": "fp32", + "type": "boolean", "fieldKind": "input", - "label": "" - }, - "unet": { - "id": "bb038dd9-cb2e-41df-a830-a5e1d5696c31", - "name": "unet", - "type": "UNetField", - "fieldKind": "input", - "label": "" + "label": "", + "value": false } }, "outputs": { - "latents": { - "id": "82c61fed-7cf5-44d4-8d00-f42350619487", - "name": "latents", - "type": "LatentsField", + "image": { + "id": "b1856446-1b03-4825-aae0-0859e27c3c8c", + "name": "image", + "type": "ImageField", "fieldKind": "output" }, "width": { - "id": "73ab2f4f-898f-44a1-a46c-c0d4aaa9c2e3", + "id": "c95d92f6-2416-4fc8-a542-998c7c3fac73", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "92b783b2-8741-4089-8125-1ff2f2d15372", + "id": "7033a118-3b20-4c25-8450-e15d7fc8657c", "name": "height", "type": "integer", "fieldKind": "output" @@ -849,18 +798,109 @@ "notes": "", "embedWorkflow": false, "isIntermediate": true, - "version": "1.1.0", - "useCache": true + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 614, + "height": 267, "position": { - "x": 5425, - "y": 1925 + "x": 5941.909912847396, + "y": 2111.4771842290065 + } + }, + { + "id": "7bc3c331-4658-46fd-8736-fe3043fcd9d1", + "type": "invocation", + "data": { + "id": "7bc3c331-4658-46fd-8736-fe3043fcd9d1", + "type": "color_correct", + "inputs": { + "metadata": { + "id": "203d08d1-586f-47c9-95a1-2afa1db23751", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "50bc42c6-c3b8-44c4-89fe-ef2edd9b67f4", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "reference": { + "id": "2eef99df-b1f8-441c-a316-466a46812df0", + "name": "reference", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "mask": { + "id": "6cfde62b-e6fc-4f13-91dc-e679b26ec04b", + "name": "mask", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "mask_blur_radius": { + "id": "51125aec-bbcb-4ae0-b618-a9bc632a5a86", + "name": "mask_blur_radius", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 8 + } + }, + "outputs": { + "image": { + "id": "4c82dcb2-1c55-4b6d-a42c-061478881393", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "b7d23fea-e361-468d-96e7-9b243592e904", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "d056d182-d261-4037-bb5e-caec9bda9ca6", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, 
+ "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 396, + "position": { + "x": 6399.947577155154, + "y": 2127.7477011465667 } } ], "edges": [ + { + "source": "27dd9fc3-8c6e-4602-8754-e9ca2f478d68", + "target": "098898c8-7a20-4d78-9363-296d42e3d8da", + "id": "27dd9fc3-8c6e-4602-8754-e9ca2f478d68-098898c8-7a20-4d78-9363-296d42e3d8da-collapsed", + "type": "collapsed" + }, + { + "source": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d", + "target": "098898c8-7a20-4d78-9363-296d42e3d8da", + "id": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d-098898c8-7a20-4d78-9363-296d42e3d8da-collapsed", + "type": "collapsed" + }, { "source": "c9897be0-7f59-4388-816d-86cb72cc4036", "sourceHandle": "clip", @@ -877,14 +917,6 @@ "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036clip-7c4e5071-5b76-4d42-b340-68b52c5ded7aclip", "type": "default" }, - { - "source": "c9897be0-7f59-4388-816d-86cb72cc4036", - "sourceHandle": "vae", - "target": "3b2ebc7f-251d-4726-b1b8-5331355f8626", - "targetHandle": "vae", - "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036vae-3b2ebc7f-251d-4726-b1b8-5331355f8626vae", - "type": "default" - }, { "source": "27dd9fc3-8c6e-4602-8754-e9ca2f478d68", "sourceHandle": "value", @@ -926,115 +958,123 @@ "type": "default" }, { - "source": "3b2ebc7f-251d-4726-b1b8-5331355f8626", - "sourceHandle": "image", - "target": "31e99fe1-c39d-4cca-bd89-9ec27cb3a8a7", - "targetHandle": "image", - "id": "reactflow__edge-3b2ebc7f-251d-4726-b1b8-5331355f8626image-31e99fe1-c39d-4cca-bd89-9ec27cb3a8a7image", - "type": "default" - }, - { - "source": "3fac3aa6-910a-4a90-a8b6-5b7e1611efba", - "sourceHandle": "image", - "target": "31e99fe1-c39d-4cca-bd89-9ec27cb3a8a7", - "targetHandle": "reference", - "id": "reactflow__edge-3fac3aa6-910a-4a90-a8b6-5b7e1611efbaimage-31e99fe1-c39d-4cca-bd89-9ec27cb3a8a7reference", - "type": "default" - }, - { - "source": "c2032405-817c-494d-b1e5-441e74d248d2", - "sourceHandle": "mask", - "target": "31e99fe1-c39d-4cca-bd89-9ec27cb3a8a7", - "targetHandle": "mask", - "id": "reactflow__edge-c2032405-817c-494d-b1e5-441e74d248d2mask-31e99fe1-c39d-4cca-bd89-9ec27cb3a8a7mask", - "type": "default" - }, - { - "source": "3fac3aa6-910a-4a90-a8b6-5b7e1611efba", - "sourceHandle": "image", - "target": "c2032405-817c-494d-b1e5-441e74d248d2", - "targetHandle": "image", - "id": "reactflow__edge-3fac3aa6-910a-4a90-a8b6-5b7e1611efbaimage-c2032405-817c-494d-b1e5-441e74d248d2image", - "type": "default" - }, - { - "source": "c2032405-817c-494d-b1e5-441e74d248d2", - "sourceHandle": "image", - "target": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d", - "targetHandle": "image", - "id": "reactflow__edge-c2032405-817c-494d-b1e5-441e74d248d2image-a6d08bcb-0b52-4dd8-9247-8b6480238c6dimage", - "type": "default" - }, - { - "source": "c2032405-817c-494d-b1e5-441e74d248d2", - "sourceHandle": "image", - "target": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", - "targetHandle": "image", - "id": "reactflow__edge-c2032405-817c-494d-b1e5-441e74d248d2image-bcbdc4ea-1fad-40d4-8632-70f84116f4b6image", - "type": "default" - }, - { - "source": "c2032405-817c-494d-b1e5-441e74d248d2", - "sourceHandle": "mask", - "target": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", - "targetHandle": "mask", - "id": "reactflow__edge-c2032405-817c-494d-b1e5-441e74d248d2mask-bcbdc4ea-1fad-40d4-8632-70f84116f4b6mask", - "type": "default" - }, - { - "source": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d", - "sourceHandle": "latents", - "target": "441fdd15-1a49-413c-bd2a-637a105ad370", - "targetHandle": "latents", - "id": 
"reactflow__edge-a6d08bcb-0b52-4dd8-9247-8b6480238c6dlatents-441fdd15-1a49-413c-bd2a-637a105ad370latents", - "type": "default" - }, - { - "source": "fb7e72d9-51cb-432a-b511-c6c608d07413", - "sourceHandle": "conditioning", - "target": "441fdd15-1a49-413c-bd2a-637a105ad370", - "targetHandle": "positive_conditioning", - "id": "reactflow__edge-fb7e72d9-51cb-432a-b511-c6c608d07413conditioning-441fdd15-1a49-413c-bd2a-637a105ad370positive_conditioning", + "source": "c9897be0-7f59-4388-816d-86cb72cc4036", + "sourceHandle": "unet", + "target": "d14b4d95-bf74-4ec5-827b-4c9e797c7ae9", + "targetHandle": "unet", + "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036unet-d14b4d95-bf74-4ec5-827b-4c9e797c7ae9unet", "type": "default" }, { "source": "7c4e5071-5b76-4d42-b340-68b52c5ded7a", "sourceHandle": "conditioning", - "target": "441fdd15-1a49-413c-bd2a-637a105ad370", + "target": "d14b4d95-bf74-4ec5-827b-4c9e797c7ae9", "targetHandle": "negative_conditioning", - "id": "reactflow__edge-7c4e5071-5b76-4d42-b340-68b52c5ded7aconditioning-441fdd15-1a49-413c-bd2a-637a105ad370negative_conditioning", + "id": "reactflow__edge-7c4e5071-5b76-4d42-b340-68b52c5ded7aconditioning-d14b4d95-bf74-4ec5-827b-4c9e797c7ae9negative_conditioning", "type": "default" }, { - "source": "c9897be0-7f59-4388-816d-86cb72cc4036", - "sourceHandle": "unet", - "target": "441fdd15-1a49-413c-bd2a-637a105ad370", - "targetHandle": "unet", - "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036unet-441fdd15-1a49-413c-bd2a-637a105ad370unet", + "source": "fb7e72d9-51cb-432a-b511-c6c608d07413", + "sourceHandle": "conditioning", + "target": "d14b4d95-bf74-4ec5-827b-4c9e797c7ae9", + "targetHandle": "positive_conditioning", + "id": "reactflow__edge-fb7e72d9-51cb-432a-b511-c6c608d07413conditioning-d14b4d95-bf74-4ec5-827b-4c9e797c7ae9positive_conditioning", "type": "default" }, { "source": "098898c8-7a20-4d78-9363-296d42e3d8da", "sourceHandle": "noise", - "target": "441fdd15-1a49-413c-bd2a-637a105ad370", + "target": "d14b4d95-bf74-4ec5-827b-4c9e797c7ae9", "targetHandle": "noise", - "id": "reactflow__edge-098898c8-7a20-4d78-9363-296d42e3d8danoise-441fdd15-1a49-413c-bd2a-637a105ad370noise", + "id": "reactflow__edge-098898c8-7a20-4d78-9363-296d42e3d8danoise-d14b4d95-bf74-4ec5-827b-4c9e797c7ae9noise", "type": "default" }, { "source": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", "sourceHandle": "denoise_mask", - "target": "441fdd15-1a49-413c-bd2a-637a105ad370", + "target": "d14b4d95-bf74-4ec5-827b-4c9e797c7ae9", "targetHandle": "denoise_mask", - "id": "reactflow__edge-bcbdc4ea-1fad-40d4-8632-70f84116f4b6denoise_mask-441fdd15-1a49-413c-bd2a-637a105ad370denoise_mask", + "id": "reactflow__edge-bcbdc4ea-1fad-40d4-8632-70f84116f4b6denoise_mask-d14b4d95-bf74-4ec5-827b-4c9e797c7ae9denoise_mask", "type": "default" }, { - "source": "441fdd15-1a49-413c-bd2a-637a105ad370", + "source": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d", "sourceHandle": "latents", - "target": "3b2ebc7f-251d-4726-b1b8-5331355f8626", + "target": "d14b4d95-bf74-4ec5-827b-4c9e797c7ae9", "targetHandle": "latents", - "id": "reactflow__edge-441fdd15-1a49-413c-bd2a-637a105ad370latents-3b2ebc7f-251d-4726-b1b8-5331355f8626latents", + "id": "reactflow__edge-a6d08bcb-0b52-4dd8-9247-8b6480238c6dlatents-d14b4d95-bf74-4ec5-827b-4c9e797c7ae9latents", + "type": "default" + }, + { + "source": "3fac3aa6-910a-4a90-a8b6-5b7e1611efba", + "sourceHandle": "image", + "target": "eb725b0b-1fa6-4f79-aedb-52c19afcfad9", + "targetHandle": "image", + "id": 
"reactflow__edge-3fac3aa6-910a-4a90-a8b6-5b7e1611efbaimage-eb725b0b-1fa6-4f79-aedb-52c19afcfad9image", + "type": "default" + }, + { + "source": "eb725b0b-1fa6-4f79-aedb-52c19afcfad9", + "sourceHandle": "image", + "target": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", + "targetHandle": "image", + "id": "reactflow__edge-eb725b0b-1fa6-4f79-aedb-52c19afcfad9image-bcbdc4ea-1fad-40d4-8632-70f84116f4b6image", + "type": "default" + }, + { + "source": "eb725b0b-1fa6-4f79-aedb-52c19afcfad9", + "sourceHandle": "mask", + "target": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", + "targetHandle": "mask", + "id": "reactflow__edge-eb725b0b-1fa6-4f79-aedb-52c19afcfad9mask-bcbdc4ea-1fad-40d4-8632-70f84116f4b6mask", + "type": "default" + }, + { + "source": "eb725b0b-1fa6-4f79-aedb-52c19afcfad9", + "sourceHandle": "image", + "target": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d", + "targetHandle": "image", + "id": "reactflow__edge-eb725b0b-1fa6-4f79-aedb-52c19afcfad9image-a6d08bcb-0b52-4dd8-9247-8b6480238c6dimage", + "type": "default" + }, + { + "source": "c9897be0-7f59-4388-816d-86cb72cc4036", + "sourceHandle": "vae", + "target": "e4681270-ea7e-4063-9116-880408854eee", + "targetHandle": "vae", + "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036vae-e4681270-ea7e-4063-9116-880408854eeevae", + "type": "default" + }, + { + "source": "d14b4d95-bf74-4ec5-827b-4c9e797c7ae9", + "sourceHandle": "latents", + "target": "e4681270-ea7e-4063-9116-880408854eee", + "targetHandle": "latents", + "id": "reactflow__edge-d14b4d95-bf74-4ec5-827b-4c9e797c7ae9latents-e4681270-ea7e-4063-9116-880408854eeelatents", + "type": "default" + }, + { + "source": "3fac3aa6-910a-4a90-a8b6-5b7e1611efba", + "sourceHandle": "image", + "target": "7bc3c331-4658-46fd-8736-fe3043fcd9d1", + "targetHandle": "reference", + "id": "reactflow__edge-3fac3aa6-910a-4a90-a8b6-5b7e1611efbaimage-7bc3c331-4658-46fd-8736-fe3043fcd9d1reference", + "type": "default" + }, + { + "source": "e4681270-ea7e-4063-9116-880408854eee", + "sourceHandle": "image", + "target": "7bc3c331-4658-46fd-8736-fe3043fcd9d1", + "targetHandle": "image", + "id": "reactflow__edge-e4681270-ea7e-4063-9116-880408854eeeimage-7bc3c331-4658-46fd-8736-fe3043fcd9d1image", + "type": "default" + }, + { + "source": "eb725b0b-1fa6-4f79-aedb-52c19afcfad9", + "sourceHandle": "mask", + "target": "7bc3c331-4658-46fd-8736-fe3043fcd9d1", + "targetHandle": "mask", + "id": "reactflow__edge-eb725b0b-1fa6-4f79-aedb-52c19afcfad9mask-7bc3c331-4658-46fd-8736-fe3043fcd9d1mask", "type": "default" } ] diff --git a/docs/workflows/FaceOff_FaceScale2x.json b/docs/workflows/FaceOff_FaceScale2x.json index fa3cb80e9b..d1707e5e73 100644 --- a/docs/workflows/FaceOff_FaceScale2x.json +++ b/docs/workflows/FaceOff_FaceScale2x.json @@ -227,85 +227,12 @@ "version": "1.0.0" }, "width": 320, - "height": 388, + "height": 389, "position": { "x": 5425, "y": 1525 } }, - { - "id": "3b2ebc7f-251d-4726-b1b8-5331355f8626", - "type": "invocation", - "data": { - "id": "3b2ebc7f-251d-4726-b1b8-5331355f8626", - "type": "l2i", - "inputs": { - "tiled": { - "id": "8265e595-8f76-4615-8b70-3a0fca854cbf", - "name": "tiled", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "fp32": { - "id": "cc2dd2ee-63f6-40e7-937e-d82086685abe", - "name": "fp32", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "latents": { - "id": "fbbab00e-b47f-4f45-801a-a719873429ca", - "name": "latents", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "vae": { - "id": 
"476e6075-c62d-4d42-afcf-a18e4c4a47fb", - "name": "vae", - "type": "VaeField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "image": { - "id": "65086674-a0f7-4d6c-a02e-101010b34d74", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "e32514a9-50d5-423e-95d3-5d9fd460adaa", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "4d6557ec-2967-40ec-bcae-52725ffe766c", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true, - "useCache": true, - "version": "1.0.0" - }, - "width": 320, - "height": 266, - "position": { - "x": 5825, - "y": 1275 - } - }, { "id": "27dd9fc3-8c6e-4602-8754-e9ca2f478d68", "type": "invocation", @@ -415,7 +342,7 @@ "version": "1.0.0" }, "width": 320, - "height": 316, + "height": 318, "position": { "x": 5025, "y": 2175 @@ -471,94 +398,6 @@ "y": 1525 } }, - { - "id": "5b15322a-1ea2-4f23-b422-c2dea5f594e2", - "type": "invocation", - "data": { - "id": "5b15322a-1ea2-4f23-b422-c2dea5f594e2", - "type": "img_paste", - "inputs": { - "base_image": { - "id": "67487235-b302-4636-b8d5-b893bbd56db1", - "name": "base_image", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "image": { - "id": "049e467a-2a5f-4017-b330-3ec97519eeef", - "name": "image", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "mask": { - "id": "39396a0b-c12d-4384-bd24-f9358ece80b0", - "name": "mask", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "x": { - "id": "39bf90a6-8db3-4b12-8c3c-97a665e0d319", - "name": "x", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 0 - }, - "y": { - "id": "7352707c-a833-45b3-83a5-c7b733741a5e", - "name": "y", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 0 - }, - "crop": { - "id": "c9225c97-04a8-42f5-9cda-333a9c475abb", - "name": "crop", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - } - }, - "outputs": { - "image": { - "id": "a8a59a34-59d6-4edf-972c-d5b189e488a2", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "151b99f7-4736-4d87-b49b-a82d18b70a6f", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "a3b4856e-6136-4d29-a2d5-b8db7d1f7f55", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": true, - "isIntermediate": false, - "useCache": true, - "version": "1.0.1" - }, - "width": 320, - "height": 502, - "position": { - "x": 5825, - "y": 2300 - } - }, { "id": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d", "type": "invocation", @@ -626,340 +465,51 @@ "version": "1.0.0" }, "width": 320, - "height": 323, + "height": 325, "position": { "x": 5025, "y": 1825 } }, { - "id": "233aeb43-980a-4578-bdb7-5488d54d1bbf", + "id": "01a35dfd-b4bd-4901-8088-49972eac7582", "type": "invocation", "data": { - "id": "233aeb43-980a-4578-bdb7-5488d54d1bbf", - "type": "img_scale", + "id": "01a35dfd-b4bd-4901-8088-49972eac7582", + "type": "l2i", "inputs": { - "image": { - "id": "81fe81ca-7532-472f-a1a7-84123ce007cd", - "name": "image", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "scale_factor": { - "id": "0c0092eb-90c9-44d3-9388-421071352571", - "name": "scale_factor", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 2 - }, - "resample_mode": { - 
"id": "521fa4d3-7117-48e6-82c5-27c83822cf2f", - "name": "resample_mode", - "type": "enum", - "fieldKind": "input", - "label": "", - "value": "bicubic" - } - }, - "outputs": { - "image": { - "id": "0110d34f-0cd0-456a-9f76-e6e2feb8e495", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "e9daa3a1-11ff-41f8-bc72-aa96c497ff13", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "0d52aba2-9181-4822-ba97-c947e455c9e2", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "Scale Image (Mask Up)", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true, - "useCache": true, - "version": "1.0.0" - }, - "width": 320, - "height": 339, - "position": { - "x": 5025, - "y": 2500 - } - }, - { - "id": "8bdaaacf-077d-4b73-b03c-ad7a2fdcc0b6", - "type": "invocation", - "data": { - "id": "8bdaaacf-077d-4b73-b03c-ad7a2fdcc0b6", - "type": "img_scale", - "inputs": { - "image": { - "id": "432f3c88-285d-42b7-a42e-24ae97a4751a", - "name": "image", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "scale_factor": { - "id": "b3dd9aff-0c84-4ad7-bfd9-a8fa1ccae58b", - "name": "scale_factor", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0.5 - }, - "resample_mode": { - "id": "2eb9d0a7-d0f8-474e-b238-548d864fbd8e", - "name": "resample_mode", - "type": "enum", - "fieldKind": "input", - "label": "", - "value": "bicubic" - } - }, - "outputs": { - "image": { - "id": "9c089c7e-930c-4d1f-b2ab-a957ecf288aa", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "f880ce2a-5f1c-4f0c-9398-095cf082d353", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "fde5f8e6-d5fc-4d06-a1b1-9880baabf3a6", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "Scale Image (Face Down)", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true, - "useCache": true, - "version": "1.0.0" - }, - "width": 320, - "height": 339, - "position": { - "x": 5825, - "y": 1550 - } - }, - { - "id": "9e356e8e-d03d-4de9-b522-026c8751d6d4", - "type": "invocation", - "data": { - "id": "9e356e8e-d03d-4de9-b522-026c8751d6d4", - "type": "denoise_latents", - "inputs": { - "noise": { - "id": "d7a78bb2-e3d2-4e5d-b494-1cc1e5f0cc84", - "name": "noise", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "steps": { - "id": "2d9aba03-011e-4858-b5e2-a1301565dde9", - "name": "steps", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 20 - }, - "cfg_scale": { - "id": "3528c5bb-12a2-4094-a35f-c5d07ddae5ee", - "name": "cfg_scale", - "type": "FloatPolymorphic", - "fieldKind": "input", - "label": "", - "value": 5 - }, - "denoising_start": { - "id": "c997229c-8f35-4427-8fcc-99a3dcaaf5a3", - "name": "denoising_start", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0.3 - }, - "denoising_end": { - "id": "abbb8f6b-f643-4cb2-9147-227feafd8aa6", - "name": "denoising_end", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 1 - }, - "scheduler": { - "id": "2fcd2bdc-de01-422f-963d-551ff58f7b70", - "name": "scheduler", - "type": "Scheduler", - "fieldKind": "input", - "label": "", - "value": "euler" - }, - "control": { - "id": "1ad815aa-0957-420b-a782-7e07a5e16a05", - "name": "control", - "type": "ControlPolymorphic", - "fieldKind": "input", - "label": "" - }, - "ip_adapter": { - "id": 
"8350154f-b179-44d6-88c8-08c8fa940648", - "name": "ip_adapter", - "type": "IPAdapterField", + "metadata": { + "id": "ce479dbf-d12f-43e7-9047-ec0e6bd838a7", + "name": "metadata", + "type": "MetadataField", "fieldKind": "input", "label": "" }, "latents": { - "id": "bf8c9465-743e-4b27-b370-e63053dc2175", + "id": "0d3f0abc-5c60-495e-af10-8136b25532d0", "name": "latents", "type": "LatentsField", "fieldKind": "input", "label": "" }, - "denoise_mask": { - "id": "4487f154-4f2f-459b-8a69-561ea892eea7", - "name": "denoise_mask", - "type": "DenoiseMaskField", + "vae": { + "id": "c0563226-c977-456a-8cf5-2873ab56a9f4", + "name": "vae", + "type": "VaeField", "fieldKind": "input", "label": "" }, - "positive_conditioning": { - "id": "a757a5d4-3aaa-4cd9-9a9d-bfb01b0676e4", - "name": "positive_conditioning", - "type": "ConditioningField", - "fieldKind": "input", - "label": "" - }, - "negative_conditioning": { - "id": "68d0ce84-615a-4062-96bc-3599029a4b63", - "name": "negative_conditioning", - "type": "ConditioningField", - "fieldKind": "input", - "label": "" - }, - "unet": { - "id": "0de640f1-2170-4983-88d3-5af648da1b5d", - "name": "unet", - "type": "UNetField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "latents": { - "id": "3198ada0-5926-41b3-89f0-8b572af78824", - "name": "latents", - "type": "LatentsField", - "fieldKind": "output" - }, - "width": { - "id": "3cea129c-3145-4f89-a5f1-849863e05001", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "ed204db5-ce67-4795-873a-67b5d6a099a6", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true, - "useCache": true, - "version": "1.1.0" - }, - "width": 320, - "height": 614, - "position": { - "x": 5425, - "y": 1925 - } - }, - { - "id": "cd886d23-b786-4d0e-83e6-b6f1cf0ddc95", - "type": "invocation", - "data": { - "id": "cd886d23-b786-4d0e-83e6-b6f1cf0ddc95", - "type": "face_off", - "inputs": { - "image": { - "id": "4c4611d3-baab-47e8-a814-18ea86f1aff2", - "name": "image", - "type": "ImageField", - "fieldKind": "input", - "label": "" - }, - "face_id": { - "id": "cfc00e4b-c60e-44b4-b0da-f26da1b4060e", - "name": "face_id", - "type": "integer", + "tiled": { + "id": "f909d67c-f789-4daa-8528-50113bba78ea", + "name": "tiled", + "type": "boolean", "fieldKind": "input", "label": "", - "value": 0 + "value": false }, - "minimum_confidence": { - "id": "4ba35ea2-d1b3-4d24-88c5-737fdd396210", - "name": "minimum_confidence", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0.5 - }, - "x_offset": { - "id": "c79b5f0a-df14-41e1-9f17-11f6b3ace52f", - "name": "x_offset", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0 - }, - "y_offset": { - "id": "ec8eb73f-2853-41b8-aa9d-c93b6f9f08bd", - "name": "y_offset", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0 - }, - "padding": { - "id": "a7a649f9-0b79-4f12-a482-b3af800b6584", - "name": "padding", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 0 - }, - "chunk": { - "id": "df654e9c-dda3-414d-b11b-51b36071ae62", - "name": "chunk", + "fp32": { + "id": "d7c26ac3-df0f-47ab-aa91-0c5bfbe1ea38", + "name": "fp32", "type": "boolean", "fieldKind": "input", "label": "", @@ -968,40 +518,22 @@ }, "outputs": { "image": { - "id": "4292e151-cd37-4cdf-9f1b-2b3e0cc6d831", + "id": "380a7af7-bba5-434b-9996-47b8194d69d4", "name": "image", "type": "ImageField", "fieldKind": "output" }, "width": { 
- "id": "8e83877f-36ed-470c-889a-174a35259e6f", + "id": "a0f2a9cf-77f6-42a5-801d-30f635d1de26", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "40487efd-937a-491e-a1c3-e791afe32cd6", + "id": "890ec07c-9bc0-46f6-b962-8a6fd149d562", "name": "height", "type": "integer", "fieldKind": "output" - }, - "mask": { - "id": "2cc63f32-0f8e-46a9-bed3-9f47a8b834f2", - "name": "mask", - "type": "ImageField", - "fieldKind": "output" - }, - "x": { - "id": "12451165-986b-4eab-8159-55b8d7ce87ab", - "name": "x", - "type": "integer", - "fieldKind": "output" - }, - "y": { - "id": "928c0d08-613c-4c03-9a7e-a3087f45032b", - "name": "y", - "type": "integer", - "fieldKind": "output" } }, "label": "", @@ -1013,28 +545,35 @@ "version": "1.0.0" }, "width": 320, - "height": 655, + "height": 267, "position": { - "x": 4625, - "y": 1800 + "x": 5810.2137275110845, + "y": 1270.641572039504 } }, { - "id": "7ad41b86-c089-430c-b70c-3c1cbf886ed9", + "id": "43c00342-9ca3-498a-8635-c4c716e32d5f", "type": "invocation", "data": { - "id": "7ad41b86-c089-430c-b70c-3c1cbf886ed9", + "id": "43c00342-9ca3-498a-8635-c4c716e32d5f", "type": "img_scale", "inputs": { + "metadata": { + "id": "d35fa75e-439b-4fda-9064-4ed73f378de6", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, "image": { - "id": "6db4f3c8-ade5-46c7-8c39-f8b8bd949912", + "id": "e8c46885-2ec1-4fb9-8d34-07534e62ec97", "name": "image", "type": "ImageField", "fieldKind": "input", "label": "" }, "scale_factor": { - "id": "ac4f04dc-2fc8-424f-bcab-292d6139a763", + "id": "94b04846-9f70-44ac-ae82-99af13632255", "name": "scale_factor", "type": "float", "fieldKind": "input", @@ -1042,7 +581,7 @@ "value": 2 }, "resample_mode": { - "id": "1f2fdad6-1d75-42a9-8f20-dfe5361804ad", + "id": "2fbcd8bb-6668-4661-868a-1452f0e73e6d", "name": "resample_mode", "type": "enum", "fieldKind": "input", @@ -1052,25 +591,25 @@ }, "outputs": { "image": { - "id": "ab6ca82e-f936-47be-8640-1b1649032dfd", + "id": "96600931-0da3-4c68-842f-d06f2d93f383", "name": "image", "type": "ImageField", "fieldKind": "output" }, "width": { - "id": "bf4f0ecd-7628-46b8-9654-e728dc69985b", + "id": "ff158681-580c-4a51-a2c8-c4bd2058a011", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "07e63e0c-2da3-445f-bb34-82b9eef6edfb", + "id": "2a332d0e-4442-4a6e-8cf9-5e2e42f713c6", "name": "height", "type": "integer", "fieldKind": "output" } }, - "label": "Scale Image (Face Up)", + "label": "", "isOpen": true, "notes": "", "embedWorkflow": false, @@ -1081,40 +620,47 @@ "width": 320, "height": 339, "position": { - "x": 4625, - "y": 2500 + "x": 5808.235568293352, + "y": 1551.540180957498 } }, { - "id": "98a614c8-24df-413f-b001-a728b354e931", + "id": "0c71919b-a030-44fb-8c09-1baf37088d20", "type": "invocation", "data": { - "id": "98a614c8-24df-413f-b001-a728b354e931", + "id": "0c71919b-a030-44fb-8c09-1baf37088d20", "type": "color_correct", "inputs": { + "metadata": { + "id": "89536a0d-1c62-4975-ada9-33f359837481", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, "image": { - "id": "0f45f05a-192c-4951-9b94-32b32cc68cc2", + "id": "10d7c1da-e8e1-4226-ad9f-1bde9816c118", "name": "image", "type": "ImageField", "fieldKind": "input", "label": "" }, "reference": { - "id": "4a430d07-f13e-4889-a58e-a241e05bf142", + "id": "9c7354ed-33dc-462a-bc57-717082eb3f45", "name": "reference", "type": "ImageField", "fieldKind": "input", "label": "" }, "mask": { - "id": "c3bf8c13-0766-49e5-96cc-26a6e9dbafc9", + "id": 
"03aee643-6873-421c-8caf-1e05e85f6a9d", "name": "mask", "type": "ImageField", "fieldKind": "input", "label": "" }, "mask_blur_radius": { - "id": "c0a35154-41dc-494c-8807-f4cca2b996e9", + "id": "2d2dffa6-f719-4ec9-a201-c07a3a914c77", "name": "mask_blur_radius", "type": "float", "fieldKind": "input", @@ -1124,19 +670,19 @@ }, "outputs": { "image": { - "id": "27559b60-0a58-4615-8f5a-a46d85be1d89", + "id": "b652c45e-e797-47d2-9175-aa77ede6a8f5", "name": "image", "type": "ImageField", "fieldKind": "output" }, "width": { - "id": "f03d5175-d0d8-4f4f-970f-7792d72a69c7", + "id": "385015ed-2c6b-4b72-adda-ba5639841352", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "7a7b98e5-8b34-411c-b51c-e3174a9de7f6", + "id": "fe6632e0-560c-4c56-9b52-6e5e5d7eb642", "name": "height", "type": "integer", "fieldKind": "output" @@ -1153,8 +699,518 @@ "width": 320, "height": 396, "position": { - "x": 5825, - "y": 1900 + "x": 5806.25740907562, + "y": 1903.6525217138576 + } + }, + { + "id": "4e11665c-b932-493d-ab4f-019bed730b47", + "type": "invocation", + "data": { + "id": "4e11665c-b932-493d-ab4f-019bed730b47", + "type": "img_paste", + "inputs": { + "metadata": { + "id": "6f365d9d-bf84-4053-b936-8035a4c7c991", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "base_image": { + "id": "fb323f35-d1fd-4d20-b145-9a3db7a57a28", + "name": "base_image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "ea76a063-ec51-4b2b-bfa8-8df9c64ecc2e", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "mask": { + "id": "9b3f194a-d7f1-45f3-989a-6b2096dd3fd6", + "name": "mask", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "x": { + "id": "d74b8e26-bc59-4a27-bb00-f769b81e8ef0", + "name": "x", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "y": { + "id": "6ada744a-8f67-49c6-a8b6-df03b3397d73", + "name": "y", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "crop": { + "id": "dec1e8e1-2fb0-43e8-9ddf-0eb478e5e567", + "name": "crop", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + "id": "4a01e54d-9a7b-458d-aa87-640ce8c5cd62", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "be903c7c-b867-4938-9a0e-de8a6676ae89", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "7dbb23a1-bedc-418c-8c27-09cf24e0e777", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.1" + }, + "width": 320, + "height": 504, + "position": { + "x": 5810.103371182836, + "y": 2315.9071582023707 + } + }, + { + "id": "9a6a35cd-5c05-4df1-81bf-e40a1954c618", + "type": "invocation", + "data": { + "id": "9a6a35cd-5c05-4df1-81bf-e40a1954c618", + "type": "denoise_latents", + "inputs": { + "positive_conditioning": { + "id": "1039d676-b54f-4848-903d-a5e3eef84781", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "52e4b032-e32f-48b1-ac02-b17f0dff78f6", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "noise": { + "id": "7c6c35bb-576c-42fa-8191-169afff13d73", + "name": "noise", + "type": 
"LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "e0df62c7-edb8-4777-8ca1-c7a6fb30f6a2", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "e1a87d18-671d-4a6e-a528-7d4376500211", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "60ab8906-162c-4817-bd46-f5d7a93aa213", + "name": "denoising_start", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "c8f73ade-d5a4-4e69-8462-c0f9ee3dfb96", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "e2872b56-4250-4a0d-86b3-37c60485fdc7", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "unet": { + "id": "b1f93d41-b399-4774-a913-4a0c0c6d5f66", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + }, + "control": { + "id": "89315aa7-95d7-463b-86fd-a8064092503c", + "name": "control", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "370ded4f-ef23-4741-9e4c-1f3d1d429cfd", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "dac77b99-0cc9-46c5-9d17-66ddbabdb7f8", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "229e7492-abfd-47cb-9218-67c1ba75c4b4", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "1248eb2e-d362-446c-8ffc-ddab52b7c20f", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "de5c8012-df50-4fae-a4b7-ca09aaa181ca", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "ba1e5e4f-3ba9-467b-b80d-3c85aa97afab", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "b0fa6029-ede9-45be-b1ec-b1ec469bfeb6", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.4.0" + }, + "width": 320, + "height": 646, + "position": { + "x": 5468.65700494231, + "y": 1926.1754472409636 + } + }, + { + "id": "98048fa7-dd08-49ec-92b8-76c9017e5444", + "type": "invocation", + "data": { + "id": "98048fa7-dd08-49ec-92b8-76c9017e5444", + "type": "img_scale", + "inputs": { + "metadata": { + "id": "02fa128c-3d3b-4083-aa73-fb7acbfb072a", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "8008b9db-bd61-4a89-b2df-de444961bc5b", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "scale_factor": { + "id": "45268d5f-b8f9-49ee-b770-a6876100878a", + "name": "scale_factor", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 2 + }, + "resample_mode": { + "id": "645b99b1-fd64-429b-b61d-caf739830efa", + "name": "resample_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "bicubic" + } + }, + "outputs": { + "image": { + "id": "ff088e78-dd52-4605-a14d-d10d7c0015a5", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": 
"b0bede16-55d6-4fee-8709-6bce92118431", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "88dc53cd-9d12-4f95-b23a-382b3db92db9", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 339, + "position": { + "x": 5026.491824134117, + "y": 2513.056664333261 + } + }, + { + "id": "894b9ca6-e7bc-428e-87e9-cce64095bce9", + "type": "invocation", + "data": { + "id": "894b9ca6-e7bc-428e-87e9-cce64095bce9", + "type": "img_scale", + "inputs": { + "metadata": { + "id": "ae97ac6c-6dc7-4c01-955a-ae98a608db4b", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "56d9afd5-b1c6-4b62-b332-a5585e863000", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "scale_factor": { + "id": "cdeeecb8-8f12-4d2b-b961-4d18cca858de", + "name": "scale_factor", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 2 + }, + "resample_mode": { + "id": "b120b6a1-dbdb-4989-80fb-af0a682e124c", + "name": "resample_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "bicubic" + } + }, + "outputs": { + "image": { + "id": "7888a231-4f89-4ad7-a216-d5664a96eb7d", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "9030b8fd-4963-4bc7-86a2-5a964443fc41", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "4beb6975-4ec2-4c76-8916-81331a2d2230", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 339, + "position": { + "x": 4624.131880974906, + "y": 2614.5694940293906 + } + }, + { + "id": "efea8306-de20-418f-806c-31ae4e5eb6bf", + "type": "invocation", + "data": { + "id": "efea8306-de20-418f-806c-31ae4e5eb6bf", + "type": "face_off", + "inputs": { + "metadata": { + "id": "4e7eb36f-3742-49d9-91b4-092b955ef588", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "003c3ff3-d0d0-464e-85ed-fb4df75fd513", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "face_id": { + "id": "9281074a-be25-4e43-8b80-9504b2365999", + "name": "face_id", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "minimum_confidence": { + "id": "808efdd1-41d5-4c3c-9ea3-0a1854503087", + "name": "minimum_confidence", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0.5 + }, + "x_offset": { + "id": "fd4fedf5-7fba-4687-864b-bfd018c296cf", + "name": "x_offset", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "y_offset": { + "id": "3d64697c-707e-404b-8261-9665c78f686d", + "name": "y_offset", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "padding": { + "id": "1f147de3-8fde-4bcc-8c68-a6617feed334", + "name": "padding", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "chunk": { + "id": "e60fae93-2056-4621-a6bf-d0248df6f538", + "name": "chunk", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + "id": "1c6aa0cf-4bb8-4236-b92e-a862036c9522", + 
"name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "8dac27bd-7d72-471b-a310-bb7dae686a0b", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "4bfb3364-fe37-4fb8-b8a0-75d1038ce726", + "name": "height", + "type": "integer", + "fieldKind": "output" + }, + "mask": { + "id": "221c6022-5314-4f8f-afb0-4b00ceea8ecd", + "name": "mask", + "type": "ImageField", + "fieldKind": "output" + }, + "x": { + "id": "657fa37b-6e9c-4c04-b5d2-e3e659fcb0f3", + "name": "x", + "type": "integer", + "fieldKind": "output" + }, + "y": { + "id": "bc5cf72e-0389-4b8c-b886-40d145b6b73a", + "name": "y", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.2" + }, + "width": 320, + "height": 656, + "position": { + "x": 4618.594817536934, + "y": 1836.6663791833205 } } ], @@ -1175,14 +1231,6 @@ "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036clip-7c4e5071-5b76-4d42-b340-68b52c5ded7aclip", "type": "default" }, - { - "source": "c9897be0-7f59-4388-816d-86cb72cc4036", - "sourceHandle": "vae", - "target": "3b2ebc7f-251d-4726-b1b8-5331355f8626", - "targetHandle": "vae", - "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036vae-3b2ebc7f-251d-4726-b1b8-5331355f8626vae", - "type": "default" - }, { "source": "27dd9fc3-8c6e-4602-8754-e9ca2f478d68", "sourceHandle": "value", @@ -1191,22 +1239,6 @@ "id": "reactflow__edge-27dd9fc3-8c6e-4602-8754-e9ca2f478d68value-098898c8-7a20-4d78-9363-296d42e3d8daseed", "type": "default" }, - { - "source": "3fac3aa6-910a-4a90-a8b6-5b7e1611efba", - "sourceHandle": "image", - "target": "5b15322a-1ea2-4f23-b422-c2dea5f594e2", - "targetHandle": "base_image", - "id": "reactflow__edge-3fac3aa6-910a-4a90-a8b6-5b7e1611efbaimage-5b15322a-1ea2-4f23-b422-c2dea5f594e2base_image", - "type": "default" - }, - { - "source": "233aeb43-980a-4578-bdb7-5488d54d1bbf", - "sourceHandle": "image", - "target": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", - "targetHandle": "mask", - "id": "reactflow__edge-233aeb43-980a-4578-bdb7-5488d54d1bbfimage-bcbdc4ea-1fad-40d4-8632-70f84116f4b6mask", - "type": "default" - }, { "source": "c9897be0-7f59-4388-816d-86cb72cc4036", "sourceHandle": "vae", @@ -1239,156 +1271,180 @@ "id": "reactflow__edge-a6d08bcb-0b52-4dd8-9247-8b6480238c6dheight-098898c8-7a20-4d78-9363-296d42e3d8daheight", "type": "default" }, - { - "source": "3b2ebc7f-251d-4726-b1b8-5331355f8626", - "sourceHandle": "image", - "target": "8bdaaacf-077d-4b73-b03c-ad7a2fdcc0b6", - "targetHandle": "image", - "id": "reactflow__edge-3b2ebc7f-251d-4726-b1b8-5331355f8626image-8bdaaacf-077d-4b73-b03c-ad7a2fdcc0b6image", - "type": "default" - }, - { - "source": "fb7e72d9-51cb-432a-b511-c6c608d07413", - "sourceHandle": "conditioning", - "target": "9e356e8e-d03d-4de9-b522-026c8751d6d4", - "targetHandle": "positive_conditioning", - "id": "reactflow__edge-fb7e72d9-51cb-432a-b511-c6c608d07413conditioning-9e356e8e-d03d-4de9-b522-026c8751d6d4positive_conditioning", - "type": "default" - }, - { - "source": "7c4e5071-5b76-4d42-b340-68b52c5ded7a", - "sourceHandle": "conditioning", - "target": "9e356e8e-d03d-4de9-b522-026c8751d6d4", - "targetHandle": "negative_conditioning", - "id": "reactflow__edge-7c4e5071-5b76-4d42-b340-68b52c5ded7aconditioning-9e356e8e-d03d-4de9-b522-026c8751d6d4negative_conditioning", - "type": "default" - }, { "source": "c9897be0-7f59-4388-816d-86cb72cc4036", - "sourceHandle": "unet", - "target": 
"9e356e8e-d03d-4de9-b522-026c8751d6d4", - "targetHandle": "unet", - "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036unet-9e356e8e-d03d-4de9-b522-026c8751d6d4unet", + "sourceHandle": "vae", + "target": "01a35dfd-b4bd-4901-8088-49972eac7582", + "targetHandle": "vae", + "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036vae-01a35dfd-b4bd-4901-8088-49972eac7582vae", "type": "default" }, { - "source": "098898c8-7a20-4d78-9363-296d42e3d8da", - "sourceHandle": "noise", - "target": "9e356e8e-d03d-4de9-b522-026c8751d6d4", - "targetHandle": "noise", - "id": "reactflow__edge-098898c8-7a20-4d78-9363-296d42e3d8danoise-9e356e8e-d03d-4de9-b522-026c8751d6d4noise", + "source": "01a35dfd-b4bd-4901-8088-49972eac7582", + "sourceHandle": "image", + "target": "43c00342-9ca3-498a-8635-c4c716e32d5f", + "targetHandle": "image", + "id": "reactflow__edge-01a35dfd-b4bd-4901-8088-49972eac7582image-43c00342-9ca3-498a-8635-c4c716e32d5fimage", "type": "default" }, { - "source": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", - "sourceHandle": "denoise_mask", - "target": "9e356e8e-d03d-4de9-b522-026c8751d6d4", - "targetHandle": "denoise_mask", - "id": "reactflow__edge-bcbdc4ea-1fad-40d4-8632-70f84116f4b6denoise_mask-9e356e8e-d03d-4de9-b522-026c8751d6d4denoise_mask", - "type": "default" - }, - { - "source": "9e356e8e-d03d-4de9-b522-026c8751d6d4", - "sourceHandle": "latents", - "target": "3b2ebc7f-251d-4726-b1b8-5331355f8626", - "targetHandle": "latents", - "id": "reactflow__edge-9e356e8e-d03d-4de9-b522-026c8751d6d4latents-3b2ebc7f-251d-4726-b1b8-5331355f8626latents", - "type": "default" - }, - { - "source": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d", - "sourceHandle": "latents", - "target": "9e356e8e-d03d-4de9-b522-026c8751d6d4", - "targetHandle": "latents", - "id": "reactflow__edge-a6d08bcb-0b52-4dd8-9247-8b6480238c6dlatents-9e356e8e-d03d-4de9-b522-026c8751d6d4latents", + "source": "43c00342-9ca3-498a-8635-c4c716e32d5f", + "sourceHandle": "image", + "target": "0c71919b-a030-44fb-8c09-1baf37088d20", + "targetHandle": "image", + "id": "reactflow__edge-43c00342-9ca3-498a-8635-c4c716e32d5fimage-0c71919b-a030-44fb-8c09-1baf37088d20image", "type": "default" }, { "source": "3fac3aa6-910a-4a90-a8b6-5b7e1611efba", "sourceHandle": "image", - "target": "cd886d23-b786-4d0e-83e6-b6f1cf0ddc95", - "targetHandle": "image", - "id": "reactflow__edge-3fac3aa6-910a-4a90-a8b6-5b7e1611efbaimage-cd886d23-b786-4d0e-83e6-b6f1cf0ddc95image", + "target": "4e11665c-b932-493d-ab4f-019bed730b47", + "targetHandle": "base_image", + "id": "reactflow__edge-3fac3aa6-910a-4a90-a8b6-5b7e1611efbaimage-4e11665c-b932-493d-ab4f-019bed730b47base_image", "type": "default" }, { - "source": "cd886d23-b786-4d0e-83e6-b6f1cf0ddc95", - "sourceHandle": "mask", - "target": "233aeb43-980a-4578-bdb7-5488d54d1bbf", - "targetHandle": "image", - "id": "reactflow__edge-cd886d23-b786-4d0e-83e6-b6f1cf0ddc95mask-233aeb43-980a-4578-bdb7-5488d54d1bbfimage", - "type": "default" - }, - { - "source": "cd886d23-b786-4d0e-83e6-b6f1cf0ddc95", - "sourceHandle": "x", - "target": "5b15322a-1ea2-4f23-b422-c2dea5f594e2", - "targetHandle": "x", - "id": "reactflow__edge-cd886d23-b786-4d0e-83e6-b6f1cf0ddc95x-5b15322a-1ea2-4f23-b422-c2dea5f594e2x", - "type": "default" - }, - { - "source": "cd886d23-b786-4d0e-83e6-b6f1cf0ddc95", - "sourceHandle": "y", - "target": "5b15322a-1ea2-4f23-b422-c2dea5f594e2", - "targetHandle": "y", - "id": "reactflow__edge-cd886d23-b786-4d0e-83e6-b6f1cf0ddc95y-5b15322a-1ea2-4f23-b422-c2dea5f594e2y", - "type": "default" - }, - { - "source": 
"cd886d23-b786-4d0e-83e6-b6f1cf0ddc95", + "source": "0c71919b-a030-44fb-8c09-1baf37088d20", "sourceHandle": "image", - "target": "7ad41b86-c089-430c-b70c-3c1cbf886ed9", + "target": "4e11665c-b932-493d-ab4f-019bed730b47", "targetHandle": "image", - "id": "reactflow__edge-cd886d23-b786-4d0e-83e6-b6f1cf0ddc95image-7ad41b86-c089-430c-b70c-3c1cbf886ed9image", + "id": "reactflow__edge-0c71919b-a030-44fb-8c09-1baf37088d20image-4e11665c-b932-493d-ab4f-019bed730b47image", "type": "default" }, { - "source": "7ad41b86-c089-430c-b70c-3c1cbf886ed9", + "source": "9a6a35cd-5c05-4df1-81bf-e40a1954c618", + "sourceHandle": "latents", + "target": "01a35dfd-b4bd-4901-8088-49972eac7582", + "targetHandle": "latents", + "id": "reactflow__edge-9a6a35cd-5c05-4df1-81bf-e40a1954c618latents-01a35dfd-b4bd-4901-8088-49972eac7582latents", + "type": "default" + }, + { + "source": "c9897be0-7f59-4388-816d-86cb72cc4036", + "sourceHandle": "unet", + "target": "9a6a35cd-5c05-4df1-81bf-e40a1954c618", + "targetHandle": "unet", + "id": "reactflow__edge-c9897be0-7f59-4388-816d-86cb72cc4036unet-9a6a35cd-5c05-4df1-81bf-e40a1954c618unet", + "type": "default" + }, + { + "source": "7c4e5071-5b76-4d42-b340-68b52c5ded7a", + "sourceHandle": "conditioning", + "target": "9a6a35cd-5c05-4df1-81bf-e40a1954c618", + "targetHandle": "negative_conditioning", + "id": "reactflow__edge-7c4e5071-5b76-4d42-b340-68b52c5ded7aconditioning-9a6a35cd-5c05-4df1-81bf-e40a1954c618negative_conditioning", + "type": "default" + }, + { + "source": "fb7e72d9-51cb-432a-b511-c6c608d07413", + "sourceHandle": "conditioning", + "target": "9a6a35cd-5c05-4df1-81bf-e40a1954c618", + "targetHandle": "positive_conditioning", + "id": "reactflow__edge-fb7e72d9-51cb-432a-b511-c6c608d07413conditioning-9a6a35cd-5c05-4df1-81bf-e40a1954c618positive_conditioning", + "type": "default" + }, + { + "source": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", + "sourceHandle": "denoise_mask", + "target": "9a6a35cd-5c05-4df1-81bf-e40a1954c618", + "targetHandle": "denoise_mask", + "id": "reactflow__edge-bcbdc4ea-1fad-40d4-8632-70f84116f4b6denoise_mask-9a6a35cd-5c05-4df1-81bf-e40a1954c618denoise_mask", + "type": "default" + }, + { + "source": "098898c8-7a20-4d78-9363-296d42e3d8da", + "sourceHandle": "noise", + "target": "9a6a35cd-5c05-4df1-81bf-e40a1954c618", + "targetHandle": "noise", + "id": "reactflow__edge-098898c8-7a20-4d78-9363-296d42e3d8danoise-9a6a35cd-5c05-4df1-81bf-e40a1954c618noise", + "type": "default" + }, + { + "source": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d", + "sourceHandle": "latents", + "target": "9a6a35cd-5c05-4df1-81bf-e40a1954c618", + "targetHandle": "latents", + "id": "reactflow__edge-a6d08bcb-0b52-4dd8-9247-8b6480238c6dlatents-9a6a35cd-5c05-4df1-81bf-e40a1954c618latents", + "type": "default" + }, + { + "source": "98048fa7-dd08-49ec-92b8-76c9017e5444", "sourceHandle": "image", "target": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", - "targetHandle": "image", - "id": "reactflow__edge-7ad41b86-c089-430c-b70c-3c1cbf886ed9image-bcbdc4ea-1fad-40d4-8632-70f84116f4b6image", + "targetHandle": "mask", + "id": "reactflow__edge-98048fa7-dd08-49ec-92b8-76c9017e5444image-bcbdc4ea-1fad-40d4-8632-70f84116f4b6mask", "type": "default" }, { - "source": "7ad41b86-c089-430c-b70c-3c1cbf886ed9", + "source": "894b9ca6-e7bc-428e-87e9-cce64095bce9", "sourceHandle": "image", "target": "a6d08bcb-0b52-4dd8-9247-8b6480238c6d", "targetHandle": "image", - "id": "reactflow__edge-7ad41b86-c089-430c-b70c-3c1cbf886ed9image-a6d08bcb-0b52-4dd8-9247-8b6480238c6dimage", + "id": 
"reactflow__edge-894b9ca6-e7bc-428e-87e9-cce64095bce9image-a6d08bcb-0b52-4dd8-9247-8b6480238c6dimage", "type": "default" }, { - "source": "8bdaaacf-077d-4b73-b03c-ad7a2fdcc0b6", + "source": "894b9ca6-e7bc-428e-87e9-cce64095bce9", "sourceHandle": "image", - "target": "98a614c8-24df-413f-b001-a728b354e931", + "target": "bcbdc4ea-1fad-40d4-8632-70f84116f4b6", "targetHandle": "image", - "id": "reactflow__edge-8bdaaacf-077d-4b73-b03c-ad7a2fdcc0b6image-98a614c8-24df-413f-b001-a728b354e931image", + "id": "reactflow__edge-894b9ca6-e7bc-428e-87e9-cce64095bce9image-bcbdc4ea-1fad-40d4-8632-70f84116f4b6image", "type": "default" }, { - "source": "cd886d23-b786-4d0e-83e6-b6f1cf0ddc95", - "sourceHandle": "mask", - "target": "98a614c8-24df-413f-b001-a728b354e931", - "targetHandle": "mask", - "id": "reactflow__edge-cd886d23-b786-4d0e-83e6-b6f1cf0ddc95mask-98a614c8-24df-413f-b001-a728b354e931mask", - "type": "default" - }, - { - "source": "cd886d23-b786-4d0e-83e6-b6f1cf0ddc95", + "source": "efea8306-de20-418f-806c-31ae4e5eb6bf", "sourceHandle": "image", - "target": "98a614c8-24df-413f-b001-a728b354e931", + "target": "894b9ca6-e7bc-428e-87e9-cce64095bce9", + "targetHandle": "image", + "id": "reactflow__edge-efea8306-de20-418f-806c-31ae4e5eb6bfimage-894b9ca6-e7bc-428e-87e9-cce64095bce9image", + "type": "default" + }, + { + "source": "efea8306-de20-418f-806c-31ae4e5eb6bf", + "sourceHandle": "image", + "target": "0c71919b-a030-44fb-8c09-1baf37088d20", "targetHandle": "reference", - "id": "reactflow__edge-cd886d23-b786-4d0e-83e6-b6f1cf0ddc95image-98a614c8-24df-413f-b001-a728b354e931reference", + "id": "reactflow__edge-efea8306-de20-418f-806c-31ae4e5eb6bfimage-0c71919b-a030-44fb-8c09-1baf37088d20reference", "type": "default" }, { - "source": "98a614c8-24df-413f-b001-a728b354e931", - "sourceHandle": "image", - "target": "5b15322a-1ea2-4f23-b422-c2dea5f594e2", + "source": "efea8306-de20-418f-806c-31ae4e5eb6bf", + "sourceHandle": "mask", + "target": "98048fa7-dd08-49ec-92b8-76c9017e5444", "targetHandle": "image", - "id": "reactflow__edge-98a614c8-24df-413f-b001-a728b354e931image-5b15322a-1ea2-4f23-b422-c2dea5f594e2image", + "id": "reactflow__edge-efea8306-de20-418f-806c-31ae4e5eb6bfmask-98048fa7-dd08-49ec-92b8-76c9017e5444image", + "type": "default" + }, + { + "source": "efea8306-de20-418f-806c-31ae4e5eb6bf", + "sourceHandle": "mask", + "target": "0c71919b-a030-44fb-8c09-1baf37088d20", + "targetHandle": "mask", + "id": "reactflow__edge-efea8306-de20-418f-806c-31ae4e5eb6bfmask-0c71919b-a030-44fb-8c09-1baf37088d20mask", + "type": "default" + }, + { + "source": "efea8306-de20-418f-806c-31ae4e5eb6bf", + "sourceHandle": "x", + "target": "4e11665c-b932-493d-ab4f-019bed730b47", + "targetHandle": "x", + "id": "reactflow__edge-efea8306-de20-418f-806c-31ae4e5eb6bfx-4e11665c-b932-493d-ab4f-019bed730b47x", + "type": "default" + }, + { + "source": "efea8306-de20-418f-806c-31ae4e5eb6bf", + "sourceHandle": "y", + "target": "4e11665c-b932-493d-ab4f-019bed730b47", + "targetHandle": "y", + "id": "reactflow__edge-efea8306-de20-418f-806c-31ae4e5eb6bfy-4e11665c-b932-493d-ab4f-019bed730b47y", + "type": "default" + }, + { + "source": "3fac3aa6-910a-4a90-a8b6-5b7e1611efba", + "sourceHandle": "image", + "target": "efea8306-de20-418f-806c-31ae4e5eb6bf", + "targetHandle": "image", + "id": "reactflow__edge-3fac3aa6-910a-4a90-a8b6-5b7e1611efbaimage-efea8306-de20-418f-806c-31ae4e5eb6bfimage", "type": "default" } ] diff --git a/docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json 
b/docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json new file mode 100644 index 0000000000..3ef8d720f8 --- /dev/null +++ b/docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json @@ -0,0 +1,2032 @@ +{ + "name": "Face Detailer with IP-Adapter & Canny", + "author": "@kosmoskatten", + "description": "A workflow to automatically improve faces in an image using IP-Adapter and Canny ControlNet. ", + "version": "0.1.0", + "contact": "@kosmoskatten via Discord", + "tags": "face, detailer, SD1.5", + "notes": "", + "exposedFields": [ + { + "nodeId": "cdfa5ab0-b3e2-43ed-85bb-2ac4aa83bc05", + "fieldName": "value" + }, + { + "nodeId": "64712037-92e8-483f-9f6e-87588539c1b8", + "fieldName": "value" + }, + { + "nodeId": "f0de6c44-4515-4f79-bcc0-dee111bcfe31", + "fieldName": "value" + }, + { + "nodeId": "2c9bc2a6-6c03-4861-aad4-db884a7682f8", + "fieldName": "image" + } + ], + "meta": { + "version": "1.0.0" + }, + "nodes": [ + { + "id": "44f2c190-eb03-460d-8d11-a94d13b33f19", + "type": "invocation", + "data": { + "id": "44f2c190-eb03-460d-8d11-a94d13b33f19", + "type": "compel", + "inputs": { + "prompt": { + "id": "916b229a-38e1-45a2-a433-cca97495b143", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "", + "value": "" + }, + "clip": { + "id": "ae9aeb1a-4ebd-4bc3-b6e6-a8c9adca01f6", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "4d59bad1-99a9-43e2-bdb4-7a0f3dd5b787", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 3176.5748307120457, + "y": -605.7792243912572 + } + }, + { + "id": "2c9bc2a6-6c03-4861-aad4-db884a7682f8", + "type": "invocation", + "data": { + "id": "2c9bc2a6-6c03-4861-aad4-db884a7682f8", + "type": "image", + "inputs": { + "image": { + "id": "729c571b-d5a0-4b53-8f50-5e11eb744f66", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "Original Image", + "value": { + "image_name": "013a13e0-3af3-4fd8-8e6e-9dafb658e0bb.png" + } + } + }, + "outputs": { + "image": { + "id": "3632a144-58d6-4447-bafc-e4f7d6ca96bf", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "30faefcc-81a1-445b-a3fe-0110ceb56772", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "d173d225-849a-4498-a75d-ba17210dbd3e", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 225, + "position": { + "x": 1787.0260885895482, + "y": 171.61158302175093 + } + }, + { + "id": "de8b1a48-a2e4-42ca-90bb-66058bffd534", + "type": "invocation", + "data": { + "id": "de8b1a48-a2e4-42ca-90bb-66058bffd534", + "type": "i2l", + "inputs": { + "image": { + "id": "6c4d2827-4995-49d4-94ce-0ba0541d8839", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "vae": { + "id": "9d6e3ab6-b6a4-45ac-ad75-0a96efba4c2f", + "name": "vae", + "type": "VaeField", + "fieldKind": "input", + "label": "" + }, + "tiled": { + "id": "9c258141-a75d-4ffd-bce5-f3fb3d90b720", + "name": "tiled", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + }, + "fp32": { + 
"id": "2235cc48-53c9-4e8a-a74a-ed41c61f2993", + "name": "fp32", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": true + } + }, + "outputs": { + "latents": { + "id": "8eb9293f-8f43-4c0c-b0fb-8c4db1200f87", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "ce493959-d308-423c-b0f5-db05912e0318", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "827bf290-94fb-455f-a970-f98ba8800eac", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 325, + "position": { + "x": 3165.534313998739, + "y": -261.54699233490976 + } + }, + { + "id": "35623411-ba3a-4eaa-91fd-1e0fda0a5b42", + "type": "invocation", + "data": { + "id": "35623411-ba3a-4eaa-91fd-1e0fda0a5b42", + "type": "noise", + "inputs": { + "seed": { + "id": "c6b5bc5e-ef09-4f9c-870e-1110a0f5017f", + "name": "seed", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 123451234 + }, + "width": { + "id": "7bdd24b6-4f14-4d0a-b8fc-9b24145b4ba9", + "name": "width", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "height": { + "id": "dc15bf97-b8d5-49c6-999b-798b33679418", + "name": "height", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "use_cpu": { + "id": "00626297-19dd-4989-9688-e8d527c9eacf", + "name": "use_cpu", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": true + } + }, + "outputs": { + "noise": { + "id": "2915f8ae-0f6e-4f26-8541-0ebf477586b6", + "name": "noise", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "26587461-a24a-434d-9ae5-8d8f36fea221", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "335d08fc-8bf1-4393-8902-2c579f327b51", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 389, + "position": { + "x": 4101.873817022418, + "y": -826.2333237957032 + } + }, + { + "id": "2974e5b3-3d41-4b6f-9953-cd21e8f3a323", + "type": "invocation", + "data": { + "id": "2974e5b3-3d41-4b6f-9953-cd21e8f3a323", + "type": "lscale", + "inputs": { + "latents": { + "id": "79e8f073-ddc3-416e-b818-6ef8ec73cc07", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "scale_factor": { + "id": "23f78d24-72df-4bde-8d3c-8593ce507205", + "name": "scale_factor", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1.5 + }, + "mode": { + "id": "4ab30c38-57d3-480d-8b34-918887e92340", + "name": "mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "bilinear" + }, + "antialias": { + "id": "22b39171-0003-44f0-9c04-d241581d2a39", + "name": "antialias", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": true + } + }, + "outputs": { + "latents": { + "id": "f6d71aef-6251-4d51-afa8-f692a72bfd1f", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "8db4cf33-5489-4887-a5f6-5e926d959c40", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "74e1ec7c-50b6-4e97-a7b8-6602e6d78c08", + "name": "height", + "type": "integer", + "fieldKind": 
"output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 332, + "position": { + "x": 3693.9622528252976, + "y": -448.17155148430743 + } + }, + { + "id": "cdfa5ab0-b3e2-43ed-85bb-2ac4aa83bc05", + "type": "invocation", + "data": { + "id": "cdfa5ab0-b3e2-43ed-85bb-2ac4aa83bc05", + "type": "float", + "inputs": { + "value": { + "id": "d5d8063d-44f6-4e20-b557-2f4ce093c7ef", + "name": "value", + "type": "float", + "fieldKind": "input", + "label": "Orignal Image Percentage", + "value": 0.4 + } + }, + "outputs": { + "value": { + "id": "562416a4-0d75-48aa-835e-5e2d221dfbb7", + "name": "value", + "type": "float", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 161, + "position": { + "x": 3709.006050756633, + "y": -84.77200251095934 + } + }, + { + "id": "64712037-92e8-483f-9f6e-87588539c1b8", + "type": "invocation", + "data": { + "id": "64712037-92e8-483f-9f6e-87588539c1b8", + "type": "float", + "inputs": { + "value": { + "id": "750358d5-251d-4fe6-a673-2cde21995da2", + "name": "value", + "type": "float", + "fieldKind": "input", + "label": "CFG Main", + "value": 6 + } + }, + "outputs": { + "value": { + "id": "eea7f6d2-92e4-4581-b555-64a44fda2be9", + "name": "value", + "type": "float", + "fieldKind": "output" + } + }, + "label": "CFG Main", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 161, + "position": { + "x": 4064.218597371266, + "y": 221.28424598733164 + } + }, + { + "id": "c865f39f-f830-4ed7-88a5-e935cfe050a9", + "type": "invocation", + "data": { + "id": "c865f39f-f830-4ed7-88a5-e935cfe050a9", + "type": "rand_int", + "inputs": { + "low": { + "id": "31e29709-9f19-45b0-a2de-fdee29a50393", + "name": "low", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "high": { + "id": "d47d875c-509d-4fa3-9112-e335d3144a17", + "name": "high", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 2147483647 + } + }, + "outputs": { + "value": { + "id": "15b8d1ea-d2ac-4b3a-9619-57bba9a6da75", + "name": "value", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": false, + "version": "1.0.0" + }, + "width": 320, + "height": 218, + "position": { + "x": 3662.7851649243958, + "y": -784.114001413615 + } + }, + { + "id": "76ea1e31-eabe-4080-935e-e74ce20e2805", + "type": "invocation", + "data": { + "id": "76ea1e31-eabe-4080-935e-e74ce20e2805", + "type": "main_model_loader", + "inputs": { + "model": { + "id": "54e737f9-2547-4bd9-a607-733d02f0c990", + "name": "model", + "type": "MainModelField", + "fieldKind": "input", + "label": "", + "value": { + "model_name": "stable-diffusion-v1-5", + "base_model": "sd-1", + "model_type": "main" + } + } + }, + "outputs": { + "unet": { + "id": "3483ea21-f0b3-4422-894b-36c5d7701365", + "name": "unet", + "type": "UNetField", + "fieldKind": "output" + }, + "clip": { + "id": "dddd055f-5c1b-4e61-977b-6393da9006fa", + "name": "clip", + "type": "ClipField", + "fieldKind": "output" + }, + "vae": { + "id": "879893b4-3415-4879-8dff-aa1727ef5e63", + "name": "vae", + "type": "VaeField", + "fieldKind": "output" + } + }, + "label": "", + 
"isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 226, + "position": { + "x": 2517.1672231749144, + "y": -751.385499561434 + } + }, + { + "id": "f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65", + "type": "invocation", + "data": { + "id": "f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65", + "type": "compel", + "inputs": { + "prompt": { + "id": "916b229a-38e1-45a2-a433-cca97495b143", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "", + "value": "" + }, + "clip": { + "id": "ae9aeb1a-4ebd-4bc3-b6e6-a8c9adca01f6", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "4d59bad1-99a9-43e2-bdb4-7a0f3dd5b787", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 3174.140478653321, + "y": -874.0896905277255 + } + }, + { + "id": "f60b6161-8f26-42f6-89ff-545e6011e501", + "type": "invocation", + "data": { + "id": "f60b6161-8f26-42f6-89ff-545e6011e501", + "type": "controlnet", + "inputs": { + "image": { + "id": "96434c75-abd8-4b73-ab82-0b358e4735bf", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "control_model": { + "id": "21551ac2-ee50-4fe8-b06c-5be00680fb5c", + "name": "control_model", + "type": "ControlNetModelField", + "fieldKind": "input", + "label": "", + "value": { + "model_name": "sd-controlnet-canny", + "base_model": "sd-1" + } + }, + "control_weight": { + "id": "1dacac0a-b985-4bdf-b4b5-b960f4cff6ed", + "name": "control_weight", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 0.5 + }, + "begin_step_percent": { + "id": "b2a3f128-7fc1-4c12-acc8-540f013c856b", + "name": "begin_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "end_step_percent": { + "id": "0e701834-f7ba-4a6e-b9cb-6d4aff5dacd8", + "name": "end_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0.5 + }, + "control_mode": { + "id": "f9a5f038-ae80-4b6e-8a48-362a2c858299", + "name": "control_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "balanced" + }, + "resize_mode": { + "id": "5369dd44-a708-4b66-8182-fea814d2a0ae", + "name": "resize_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "just_resize" + } + }, + "outputs": { + "control": { + "id": "f470a1af-7b68-4849-a144-02bc345fd810", + "name": "control", + "type": "ControlField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 508, + "position": { + "x": 3926.9358191964657, + "y": 538.0129522372209 + } + }, + { + "id": "f0de6c44-4515-4f79-bcc0-dee111bcfe31", + "type": "invocation", + "data": { + "id": "f0de6c44-4515-4f79-bcc0-dee111bcfe31", + "type": "float", + "inputs": { + "value": { + "id": "9b51a26f-af3c-4caa-940a-5183234b1ed7", + "name": "value", + "type": "float", + "fieldKind": "input", + "label": "Face Detail Scale", + "value": 1.5 + } + }, + "outputs": { + "value": { + "id": "c7c87b77-c149-4e9c-8ed1-beb1ba013055", + "name": "value", + "type": "float", + "fieldKind": "output" + } + }, + "label": 
"Face Detail Scale", + "isOpen": true, + "notes": "The image is cropped to the face and scaled to 512x512. This value can scale even more. Best result with value between 1-2.\n\n1 = 512\n2 = 1024\n\n", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 161, + "position": { + "x": 2505.1027422955735, + "y": -447.7244534284996 + } + }, + { + "id": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "type": "invocation", + "data": { + "id": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "type": "denoise_latents", + "inputs": { + "noise": { + "id": "2223c30c-e3ef-4a2b-9ae8-31f9f2b38f68", + "name": "noise", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "09c2a8c9-0771-4e35-b674-26633fe5d740", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "dcef4b34-926f-46c1-9e57-c6401f1ac901", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "efb48776-93b4-4a1b-9b9e-ea173720850d", + "name": "denoising_start", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "acedd753-b95c-4c23-91a8-398bc85f03cb", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "d6aedf09-ad0e-433f-9d70-fcf43303840d", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "control": { + "id": "f83850ba-d6d6-4ba9-9f41-424427a005f4", + "name": "control", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "6e42ab48-8eaf-4d05-ba5e-0b7d7667249d", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "32c2f9c1-ae8a-4f9b-9e06-30debb6b1190", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "79479fdb-82b6-4b7f-8a4a-e658c2ff0d2f", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "c76b77a3-bcde-4425-bfdd-638becefb4ba", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + }, + "positive_conditioning": { + "id": "f14ea66a-5e7b-400b-a835-049286aa0c82", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "ce94941f-9e4e-41f4-b33f-1d6907919964", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "unet": { + "id": "290f45fa-1543-4e41-ab6f-63e139556dfc", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "ab8a8305-e6e7-4569-956c-02d5d0fa2989", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "161773a7-4e7b-44b6-98d9-f565036f190b", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "4fe54d15-d9b9-4fd0-b8ac-a1b2a11f3243", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.3.0" + }, + "width": 320, + "height": 646, + "position": { + "x": 
4482.62568856733, + "y": -385.4092574423216 + } + }, + { + "id": "1bb5d85c-fc4d-41ed-94c6-88559f497491", + "type": "invocation", + "data": { + "id": "1bb5d85c-fc4d-41ed-94c6-88559f497491", + "type": "face_off", + "inputs": { + "metadata": { + "id": "6ad02931-9067-43e3-bc2b-29ce5388b317", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "01e0d368-7712-4c5b-8f73-18d3d3459d82", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "face_id": { + "id": "563cd209-2adf-4249-97df-0d13818ae057", + "name": "face_id", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "minimum_confidence": { + "id": "2cb09928-911c-43d7-8cbb-be13aed8f9a4", + "name": "minimum_confidence", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0.5 + }, + "x_offset": { + "id": "80d5fa06-a719-454f-9641-8ca5e2889d2d", + "name": "x_offset", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "y_offset": { + "id": "ff03fe44-26d1-4d8b-a012-3e05c918d56f", + "name": "y_offset", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "padding": { + "id": "05db8638-24f4-4a0a-9040-ae9beabd194b", + "name": "padding", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "chunk": { + "id": "d6a556b3-f1c7-480b-b1a2-e9441a3bb344", + "name": "chunk", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + "id": "963b445d-a35f-476b-b164-c61a35b86ef1", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "fa64b386-f45f-4979-8004-c1cf21b83d9c", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "bf7711ec-7788-4d9b-9abf-8cc0638f264d", + "name": "height", + "type": "integer", + "fieldKind": "output" + }, + "mask": { + "id": "c6dcf700-e903-4c9e-bd84-94f169c6d04c", + "name": "mask", + "type": "ImageField", + "fieldKind": "output" + }, + "x": { + "id": "d4993748-913b-4490-b71f-9400c1fe9f2d", + "name": "x", + "type": "integer", + "fieldKind": "output" + }, + "y": { + "id": "af9c4994-4f1b-4715-aa44-6c5b21274af4", + "name": "y", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.2" + }, + "width": 320, + "height": 656, + "position": { + "x": 2414.612131259088, + "y": 291.78659156828013 + } + }, + { + "id": "e3383c2f-828e-4b9c-94bb-9f10de86cc7f", + "type": "invocation", + "data": { + "id": "e3383c2f-828e-4b9c-94bb-9f10de86cc7f", + "type": "img_resize", + "inputs": { + "metadata": { + "id": "01bd4648-7c80-497c-823d-48ad41ce1cd2", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "1893f2cc-344a-4f15-8f01-85ba99da518b", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "width": { + "id": "123d1849-df1b-4e36-8329-f45235f857cf", + "name": "width", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "height": { + "id": "ebdf1684-abe5-42e1-b16e-797ffdfee7a2", + "name": "height", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "resample_mode": { + "id": "e83edb34-9391-4830-a1fb-fe0e0f5087bd", + "name": "resample_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "bicubic" + } + }, + 
"outputs": { + "image": { + "id": "0c369a81-bdd3-4b4c-9626-95ce47375ed1", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "560d5588-8df8-41e8-aa69-fb8c95cdb4e2", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "2b04576b-7b22-4b2a-9fb6-57c1fd86a506", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 396, + "position": { + "x": 2447.826252430936, + "y": -201.68035155632666 + } + }, + { + "id": "60343212-208e-4ec5-97c4-df78e30a3066", + "type": "invocation", + "data": { + "id": "60343212-208e-4ec5-97c4-df78e30a3066", + "type": "img_scale", + "inputs": { + "metadata": { + "id": "607e3b73-c76e-477e-8479-8130d3a25028", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "b04d11ba-cc0c-48d5-ab9e-f3958a5e8839", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "scale_factor": { + "id": "79656494-c8c0-4096-b25a-628d9c37f071", + "name": "scale_factor", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 2 + }, + "resample_mode": { + "id": "92176f78-e21a-4085-884d-c66c7135c714", + "name": "resample_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "bicubic" + } + }, + "outputs": { + "image": { + "id": "2321a8bf-3f55-470d-afe3-b8760240d59a", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "62005d4a-7961-439c-9357-9de5fc53b51b", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "3f0a7f7f-ea40-48de-8b1b-8570595e94ef", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 339, + "position": { + "x": 3007.7214378992408, + "y": 211.12372586521934 + } + }, + { + "id": "07099eb0-9d58-4e0b-82f8-3063c3af2689", + "type": "invocation", + "data": { + "id": "07099eb0-9d58-4e0b-82f8-3063c3af2689", + "type": "canny_image_processor", + "inputs": { + "metadata": { + "id": "7d38d9d6-029b-4de6-aabf-5aa46c508291", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "656409a8-aa89-4ecb-bb6c-8d5e612aa814", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "low_threshold": { + "id": "1879f7c5-dd3a-4770-a260-b18111577e63", + "name": "low_threshold", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 100 + }, + "high_threshold": { + "id": "b84b6aad-cb83-40d3-85a6-e18dc440d89b", + "name": "high_threshold", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 200 + } + }, + "outputs": { + "image": { + "id": "98e8c22c-43cb-41a3-9e8c-c4fdc3cbe749", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "a8eaa116-2bc1-4dab-84bf-4e21c6e14296", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "ddab1f63-c0c8-4ff7-9124-f0c6cdda81f8", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + 
"version": "1.0.0" + }, + "width": 320, + "height": 339, + "position": { + "x": 3444.7466139600724, + "y": 586.5162666396674 + } + }, + { + "id": "a50af8a0-7b7e-4cb8-a116-89f2d83f33ed", + "type": "invocation", + "data": { + "id": "a50af8a0-7b7e-4cb8-a116-89f2d83f33ed", + "type": "ip_adapter", + "inputs": { + "image": { + "id": "1cee5c9b-8d23-4b11-a699-5919c86a972f", + "name": "image", + "type": "ImagePolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter_model": { + "id": "3ca282f7-be7c-44a1-8a5a-94afdecae0ae", + "name": "ip_adapter_model", + "type": "IPAdapterModelField", + "fieldKind": "input", + "label": "", + "value": { + "model_name": "ip_adapter_plus_face_sd15", + "base_model": "sd-1" + } + }, + "weight": { + "id": "eee84151-d5a0-4970-a984-9b0c26e3a58a", + "name": "weight", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "begin_step_percent": { + "id": "0ed4cc66-5688-4554-9f2b-acf0a33415a1", + "name": "begin_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "end_step_percent": { + "id": "9544971a-8963-43da-abf4-0920781209ca", + "name": "end_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + } + }, + "outputs": { + "ip_adapter": { + "id": "03814df3-0117-434e-b35b-7902fc519e80", + "name": "ip_adapter", + "type": "IPAdapterField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.1.0" + }, + "width": 320, + "height": 394, + "position": { + "x": 3486.4422814170716, + "y": 112.39273647258193 + } + }, + { + "id": "03df59d0-a061-430c-a76f-5a4cf3e14378", + "type": "invocation", + "data": { + "id": "03df59d0-a061-430c-a76f-5a4cf3e14378", + "type": "img_resize", + "inputs": { + "metadata": { + "id": "9fbe66f2-700c-457a-ab06-434a8f8169e9", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "37914278-90ba-4095-8c13-067a90a256cc", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "width": { + "id": "9d2b22b0-0a2a-4da8-aec0-c50f419bd134", + "name": "width", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "height": { + "id": "d5590b6b-96c3-4d7d-b472-56269515e1ad", + "name": "height", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "resample_mode": { + "id": "975d601c-8801-4881-b138-a43afccb89e9", + "name": "resample_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "bicubic" + } + }, + "outputs": { + "image": { + "id": "75099113-a677-4a05-96a6-e680c00c2c01", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "15132cfa-f46d-44df-9272-0b5abdd101b2", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "0f80c001-cee0-4a20-92a4-3a7b01130ffe", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 396, + "position": { + "x": 4423.278808515647, + "y": 350.94607680079463 + } + }, + { + "id": "8bb08a28-0a64-4da9-9bf4-4526ea40f437", + "type": "invocation", + "data": { + "id": "8bb08a28-0a64-4da9-9bf4-4526ea40f437", + "type": "img_blur", + "inputs": { + "metadata": { + "id": 
"ccbff6e7-4138-462c-bc2f-ea14bda4faa8", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "1c47e5ea-4f6e-4608-a15a-2eed4052d895", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "radius": { + "id": "646349d2-25ac-49ac-9aa9-bcd78a3362ab", + "name": "radius", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 8 + }, + "blur_type": { + "id": "0d3c8ada-0743-4f73-a29a-b359022b3745", + "name": "blur_type", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "gaussian" + } + }, + "outputs": { + "image": { + "id": "f775bed9-bb8e-47b1-b08a-cfb9a5d2501d", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "1ec74094-1181-41cb-9a11-6f93e4997913", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "7b91bdb5-27c0-4cee-8005-854675c2dedd", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 339, + "position": { + "x": 4876.996101782848, + "y": 349.1456113513215 + } + }, + { + "id": "227f0e19-3a4e-44f0-9f52-a8591b719bbe", + "type": "invocation", + "data": { + "id": "227f0e19-3a4e-44f0-9f52-a8591b719bbe", + "type": "mask_combine", + "inputs": { + "metadata": { + "id": "39eb7bba-0668-4349-9093-6aede07bec66", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "mask1": { + "id": "0ee7c237-b775-4470-8061-29eee0b1c9ae", + "name": "mask1", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "mask2": { + "id": "6231f048-0165-4bb8-b468-446f71b882bb", + "name": "mask2", + "type": "ImageField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "image": { + "id": "321dde14-3a6d-4606-af05-477e92735a14", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "0801ee9e-a19b-46ca-b919-747e81323aa9", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "7d2614d3-3ac8-44f0-9726-b1591057670f", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 282, + "position": { + "x": 5346.9175840953085, + "y": 374.352127643944 + } + }, + { + "id": "0d05a218-0208-4038-a3e7-867bd9367593", + "type": "invocation", + "data": { + "id": "0d05a218-0208-4038-a3e7-867bd9367593", + "type": "l2i", + "inputs": { + "metadata": { + "id": "a2f3806c-f4ea-41f2-b213-c049197bbeb4", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "e5a25c64-bafe-4fae-b066-21062f18582c", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "vae": { + "id": "af671339-cc72-4fab-8908-1ede72ffbe23", + "name": "vae", + "type": "VaeField", + "fieldKind": "input", + "label": "" + }, + "tiled": { + "id": "a5d71aea-2e23-4517-be62-948fc70f0cfa", + "name": "tiled", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + }, + "fp32": { + "id": "c2b8897e-67cb-43a5-a448-834ea9ce0ba0", + "name": "fp32", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + 
"id": "93f1ba9e-5cfe-4282-91ed-fc5b586d3961", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "8f844f95-9ee6-469d-9706-9b48fb616a23", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "bac22e85-9bdd-468d-b3e3-a94325ea2f85", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 267, + "position": { + "x": 4966.928144382504, + "y": -294.018133443524 + } + }, + { + "id": "db4981fd-e6eb-42b3-910b-b15443fa863d", + "type": "invocation", + "data": { + "id": "db4981fd-e6eb-42b3-910b-b15443fa863d", + "type": "img_resize", + "inputs": { + "metadata": { + "id": "37f62260-8cf9-443d-8838-8acece688821", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "9e95616d-2079-4d48-bb7d-b641b4cd1d69", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "width": { + "id": "96cc2cac-69e8-40f8-9e0e-b23ee7db9e08", + "name": "width", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "height": { + "id": "eb9b8960-6f63-43be-aba5-2c165a46bfcf", + "name": "height", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "resample_mode": { + "id": "bafe9aef-0e59-49d3-97bf-00df38dd7e11", + "name": "resample_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "bicubic" + } + }, + "outputs": { + "image": { + "id": "9f3e1433-5ab1-4c2e-8adb-aafcc051054b", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "a8433208-ea4a-417e-a1ac-6fa4ca8a6537", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "29bceb69-05a9-463a-b003-75e337ad5be0", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 396, + "position": { + "x": 5379.153347937034, + "y": -198.406964558253 + } + }, + { + "id": "ab1387b7-6b00-4e20-acae-2ca2c1597896", + "type": "invocation", + "data": { + "id": "ab1387b7-6b00-4e20-acae-2ca2c1597896", + "type": "img_paste", + "inputs": { + "metadata": { + "id": "91fa397b-cadb-42ca-beee-e9437f6ddd3d", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "base_image": { + "id": "d2073c40-9299-438f-ab7b-b9fe85f5550d", + "name": "base_image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "c880fd54-a830-4f16-a1fe-c07d8ecd5d96", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "mask": { + "id": "a8ae99d0-7e67-4064-9f34-4c4cebfcdca9", + "name": "mask", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "x": { + "id": "bd874e6d-ec17-496e-864a-0a6f613ef3f2", + "name": "x", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "y": { + "id": "8fcb1a55-3275-448a-8571-f93909468cdc", + "name": "y", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "crop": { + "id": "54fad883-e0d8-46e8-a933-7bac0e8977eb", + "name": "crop", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + 
"image": { + "id": "c9e38cb4-daad-4ed2-87e1-cf36cc4fccc4", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "93832b00-1691-475d-a04b-277598ca33e0", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "9e57e4da-e349-4356-956f-b50f41f7f19b", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": false, + "useCache": true, + "version": "1.0.1" + }, + "width": 320, + "height": 504, + "position": { + "x": 6054.981342632396, + "y": -107.76717121720509 + } + } + ], + "edges": [ + { + "source": "de8b1a48-a2e4-42ca-90bb-66058bffd534", + "sourceHandle": "latents", + "target": "2974e5b3-3d41-4b6f-9953-cd21e8f3a323", + "targetHandle": "latents", + "id": "reactflow__edge-de8b1a48-a2e4-42ca-90bb-66058bffd534latents-2974e5b3-3d41-4b6f-9953-cd21e8f3a323latents", + "type": "default" + }, + { + "source": "2974e5b3-3d41-4b6f-9953-cd21e8f3a323", + "sourceHandle": "width", + "target": "35623411-ba3a-4eaa-91fd-1e0fda0a5b42", + "targetHandle": "width", + "id": "reactflow__edge-2974e5b3-3d41-4b6f-9953-cd21e8f3a323width-35623411-ba3a-4eaa-91fd-1e0fda0a5b42width", + "type": "default" + }, + { + "source": "2974e5b3-3d41-4b6f-9953-cd21e8f3a323", + "sourceHandle": "height", + "target": "35623411-ba3a-4eaa-91fd-1e0fda0a5b42", + "targetHandle": "height", + "id": "reactflow__edge-2974e5b3-3d41-4b6f-9953-cd21e8f3a323height-35623411-ba3a-4eaa-91fd-1e0fda0a5b42height", + "type": "default" + }, + { + "source": "c865f39f-f830-4ed7-88a5-e935cfe050a9", + "sourceHandle": "value", + "target": "35623411-ba3a-4eaa-91fd-1e0fda0a5b42", + "targetHandle": "seed", + "id": "reactflow__edge-c865f39f-f830-4ed7-88a5-e935cfe050a9value-35623411-ba3a-4eaa-91fd-1e0fda0a5b42seed", + "type": "default" + }, + { + "source": "76ea1e31-eabe-4080-935e-e74ce20e2805", + "sourceHandle": "clip", + "target": "f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65", + "targetHandle": "clip", + "id": "reactflow__edge-76ea1e31-eabe-4080-935e-e74ce20e2805clip-f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65clip", + "type": "default" + }, + { + "source": "76ea1e31-eabe-4080-935e-e74ce20e2805", + "sourceHandle": "vae", + "target": "de8b1a48-a2e4-42ca-90bb-66058bffd534", + "targetHandle": "vae", + "id": "reactflow__edge-76ea1e31-eabe-4080-935e-e74ce20e2805vae-de8b1a48-a2e4-42ca-90bb-66058bffd534vae", + "type": "default" + }, + { + "source": "76ea1e31-eabe-4080-935e-e74ce20e2805", + "sourceHandle": "clip", + "target": "44f2c190-eb03-460d-8d11-a94d13b33f19", + "targetHandle": "clip", + "id": "reactflow__edge-76ea1e31-eabe-4080-935e-e74ce20e2805clip-44f2c190-eb03-460d-8d11-a94d13b33f19clip", + "type": "default" + }, + { + "source": "f0de6c44-4515-4f79-bcc0-dee111bcfe31", + "sourceHandle": "value", + "target": "2974e5b3-3d41-4b6f-9953-cd21e8f3a323", + "targetHandle": "scale_factor", + "id": "reactflow__edge-f0de6c44-4515-4f79-bcc0-dee111bcfe31value-2974e5b3-3d41-4b6f-9953-cd21e8f3a323scale_factor", + "type": "default" + }, + { + "source": "2974e5b3-3d41-4b6f-9953-cd21e8f3a323", + "sourceHandle": "latents", + "target": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "targetHandle": "latents", + "id": "reactflow__edge-2974e5b3-3d41-4b6f-9953-cd21e8f3a323latents-e41c9dcc-a460-439a-85f3-eb18c74d9411latents", + "type": "default" + }, + { + "source": "f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65", + "sourceHandle": "conditioning", + "target": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "targetHandle": "positive_conditioning", 
+ "id": "reactflow__edge-f4d15b64-c4a6-42a5-90fc-e4ed07a0ca65conditioning-e41c9dcc-a460-439a-85f3-eb18c74d9411positive_conditioning", + "type": "default" + }, + { + "source": "44f2c190-eb03-460d-8d11-a94d13b33f19", + "sourceHandle": "conditioning", + "target": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "targetHandle": "negative_conditioning", + "id": "reactflow__edge-44f2c190-eb03-460d-8d11-a94d13b33f19conditioning-e41c9dcc-a460-439a-85f3-eb18c74d9411negative_conditioning", + "type": "default" + }, + { + "source": "76ea1e31-eabe-4080-935e-e74ce20e2805", + "sourceHandle": "unet", + "target": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "targetHandle": "unet", + "id": "reactflow__edge-76ea1e31-eabe-4080-935e-e74ce20e2805unet-e41c9dcc-a460-439a-85f3-eb18c74d9411unet", + "type": "default" + }, + { + "source": "f60b6161-8f26-42f6-89ff-545e6011e501", + "sourceHandle": "control", + "target": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "targetHandle": "control", + "id": "reactflow__edge-f60b6161-8f26-42f6-89ff-545e6011e501control-e41c9dcc-a460-439a-85f3-eb18c74d9411control", + "type": "default" + }, + { + "source": "64712037-92e8-483f-9f6e-87588539c1b8", + "sourceHandle": "value", + "target": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "targetHandle": "cfg_scale", + "id": "reactflow__edge-64712037-92e8-483f-9f6e-87588539c1b8value-e41c9dcc-a460-439a-85f3-eb18c74d9411cfg_scale", + "type": "default" + }, + { + "source": "35623411-ba3a-4eaa-91fd-1e0fda0a5b42", + "sourceHandle": "noise", + "target": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "targetHandle": "noise", + "id": "reactflow__edge-35623411-ba3a-4eaa-91fd-1e0fda0a5b42noise-e41c9dcc-a460-439a-85f3-eb18c74d9411noise", + "type": "default" + }, + { + "source": "cdfa5ab0-b3e2-43ed-85bb-2ac4aa83bc05", + "sourceHandle": "value", + "target": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "targetHandle": "denoising_start", + "id": "reactflow__edge-cdfa5ab0-b3e2-43ed-85bb-2ac4aa83bc05value-e41c9dcc-a460-439a-85f3-eb18c74d9411denoising_start", + "type": "default" + }, + { + "source": "2c9bc2a6-6c03-4861-aad4-db884a7682f8", + "sourceHandle": "image", + "target": "1bb5d85c-fc4d-41ed-94c6-88559f497491", + "targetHandle": "image", + "id": "reactflow__edge-2c9bc2a6-6c03-4861-aad4-db884a7682f8image-1bb5d85c-fc4d-41ed-94c6-88559f497491image", + "type": "default" + }, + { + "source": "1bb5d85c-fc4d-41ed-94c6-88559f497491", + "sourceHandle": "image", + "target": "e3383c2f-828e-4b9c-94bb-9f10de86cc7f", + "targetHandle": "image", + "id": "reactflow__edge-1bb5d85c-fc4d-41ed-94c6-88559f497491image-e3383c2f-828e-4b9c-94bb-9f10de86cc7fimage", + "type": "default" + }, + { + "source": "e3383c2f-828e-4b9c-94bb-9f10de86cc7f", + "sourceHandle": "image", + "target": "de8b1a48-a2e4-42ca-90bb-66058bffd534", + "targetHandle": "image", + "id": "reactflow__edge-e3383c2f-828e-4b9c-94bb-9f10de86cc7fimage-de8b1a48-a2e4-42ca-90bb-66058bffd534image", + "type": "default" + }, + { + "source": "e3383c2f-828e-4b9c-94bb-9f10de86cc7f", + "sourceHandle": "image", + "target": "60343212-208e-4ec5-97c4-df78e30a3066", + "targetHandle": "image", + "id": "reactflow__edge-e3383c2f-828e-4b9c-94bb-9f10de86cc7fimage-60343212-208e-4ec5-97c4-df78e30a3066image", + "type": "default" + }, + { + "source": "f0de6c44-4515-4f79-bcc0-dee111bcfe31", + "sourceHandle": "value", + "target": "60343212-208e-4ec5-97c4-df78e30a3066", + "targetHandle": "scale_factor", + "id": "reactflow__edge-f0de6c44-4515-4f79-bcc0-dee111bcfe31value-60343212-208e-4ec5-97c4-df78e30a3066scale_factor", + "type": "default" + }, + { + "source": 
"60343212-208e-4ec5-97c4-df78e30a3066", + "sourceHandle": "image", + "target": "07099eb0-9d58-4e0b-82f8-3063c3af2689", + "targetHandle": "image", + "id": "reactflow__edge-60343212-208e-4ec5-97c4-df78e30a3066image-07099eb0-9d58-4e0b-82f8-3063c3af2689image", + "type": "default" + }, + { + "source": "07099eb0-9d58-4e0b-82f8-3063c3af2689", + "sourceHandle": "image", + "target": "f60b6161-8f26-42f6-89ff-545e6011e501", + "targetHandle": "image", + "id": "reactflow__edge-07099eb0-9d58-4e0b-82f8-3063c3af2689image-f60b6161-8f26-42f6-89ff-545e6011e501image", + "type": "default" + }, + { + "source": "60343212-208e-4ec5-97c4-df78e30a3066", + "sourceHandle": "image", + "target": "a50af8a0-7b7e-4cb8-a116-89f2d83f33ed", + "targetHandle": "image", + "id": "reactflow__edge-60343212-208e-4ec5-97c4-df78e30a3066image-a50af8a0-7b7e-4cb8-a116-89f2d83f33edimage", + "type": "default" + }, + { + "source": "a50af8a0-7b7e-4cb8-a116-89f2d83f33ed", + "sourceHandle": "ip_adapter", + "target": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "targetHandle": "ip_adapter", + "id": "reactflow__edge-a50af8a0-7b7e-4cb8-a116-89f2d83f33edip_adapter-e41c9dcc-a460-439a-85f3-eb18c74d9411ip_adapter", + "type": "default" + }, + { + "source": "1bb5d85c-fc4d-41ed-94c6-88559f497491", + "sourceHandle": "width", + "target": "03df59d0-a061-430c-a76f-5a4cf3e14378", + "targetHandle": "width", + "id": "reactflow__edge-1bb5d85c-fc4d-41ed-94c6-88559f497491width-03df59d0-a061-430c-a76f-5a4cf3e14378width", + "type": "default" + }, + { + "source": "1bb5d85c-fc4d-41ed-94c6-88559f497491", + "sourceHandle": "height", + "target": "03df59d0-a061-430c-a76f-5a4cf3e14378", + "targetHandle": "height", + "id": "reactflow__edge-1bb5d85c-fc4d-41ed-94c6-88559f497491height-03df59d0-a061-430c-a76f-5a4cf3e14378height", + "type": "default" + }, + { + "source": "1bb5d85c-fc4d-41ed-94c6-88559f497491", + "sourceHandle": "mask", + "target": "8bb08a28-0a64-4da9-9bf4-4526ea40f437", + "targetHandle": "image", + "id": "reactflow__edge-1bb5d85c-fc4d-41ed-94c6-88559f497491mask-8bb08a28-0a64-4da9-9bf4-4526ea40f437image", + "type": "default" + }, + { + "source": "8bb08a28-0a64-4da9-9bf4-4526ea40f437", + "sourceHandle": "image", + "target": "227f0e19-3a4e-44f0-9f52-a8591b719bbe", + "targetHandle": "mask1", + "id": "reactflow__edge-8bb08a28-0a64-4da9-9bf4-4526ea40f437image-227f0e19-3a4e-44f0-9f52-a8591b719bbemask1", + "type": "default" + }, + { + "source": "03df59d0-a061-430c-a76f-5a4cf3e14378", + "sourceHandle": "image", + "target": "227f0e19-3a4e-44f0-9f52-a8591b719bbe", + "targetHandle": "mask2", + "id": "reactflow__edge-03df59d0-a061-430c-a76f-5a4cf3e14378image-227f0e19-3a4e-44f0-9f52-a8591b719bbemask2", + "type": "default" + }, + { + "source": "e41c9dcc-a460-439a-85f3-eb18c74d9411", + "sourceHandle": "latents", + "target": "0d05a218-0208-4038-a3e7-867bd9367593", + "targetHandle": "latents", + "id": "reactflow__edge-e41c9dcc-a460-439a-85f3-eb18c74d9411latents-0d05a218-0208-4038-a3e7-867bd9367593latents", + "type": "default" + }, + { + "source": "0d05a218-0208-4038-a3e7-867bd9367593", + "sourceHandle": "image", + "target": "db4981fd-e6eb-42b3-910b-b15443fa863d", + "targetHandle": "image", + "id": "reactflow__edge-0d05a218-0208-4038-a3e7-867bd9367593image-db4981fd-e6eb-42b3-910b-b15443fa863dimage", + "type": "default" + }, + { + "source": "db4981fd-e6eb-42b3-910b-b15443fa863d", + "sourceHandle": "image", + "target": "ab1387b7-6b00-4e20-acae-2ca2c1597896", + "targetHandle": "image", + "id": 
"reactflow__edge-db4981fd-e6eb-42b3-910b-b15443fa863dimage-ab1387b7-6b00-4e20-acae-2ca2c1597896image", + "type": "default" + }, + { + "source": "2c9bc2a6-6c03-4861-aad4-db884a7682f8", + "sourceHandle": "image", + "target": "ab1387b7-6b00-4e20-acae-2ca2c1597896", + "targetHandle": "base_image", + "id": "reactflow__edge-2c9bc2a6-6c03-4861-aad4-db884a7682f8image-ab1387b7-6b00-4e20-acae-2ca2c1597896base_image", + "type": "default" + }, + { + "source": "227f0e19-3a4e-44f0-9f52-a8591b719bbe", + "sourceHandle": "image", + "target": "ab1387b7-6b00-4e20-acae-2ca2c1597896", + "targetHandle": "mask", + "id": "reactflow__edge-227f0e19-3a4e-44f0-9f52-a8591b719bbeimage-ab1387b7-6b00-4e20-acae-2ca2c1597896mask", + "type": "default" + } + ] +} \ No newline at end of file diff --git a/docs/workflows/Multi_ControlNet_Canny_and_Depth.json b/docs/workflows/Multi_ControlNet_Canny_and_Depth.json new file mode 100644 index 0000000000..09c9ff72ea --- /dev/null +++ b/docs/workflows/Multi_ControlNet_Canny_and_Depth.json @@ -0,0 +1,985 @@ +{ + "name": "Multi ControlNet (Canny & Depth)", + "author": "Millu", + "description": "A sample workflow using canny & depth ControlNets to guide the generation process. ", + "version": "0.1.0", + "contact": "millun@invoke.ai", + "tags": "ControlNet, canny, depth", + "notes": "", + "exposedFields": [ + { + "nodeId": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "fieldName": "model" + }, + { + "nodeId": "7ce68934-3419-42d4-ac70-82cfc9397306", + "fieldName": "prompt" + }, + { + "nodeId": "273e3f96-49ea-4dc5-9d5b-9660390f14e1", + "fieldName": "prompt" + }, + { + "nodeId": "c4b23e64-7986-40c4-9cad-46327b12e204", + "fieldName": "image" + }, + { + "nodeId": "8e860e51-5045-456e-bf04-9a62a2a5c49e", + "fieldName": "image" + } + ], + "meta": { + "version": "1.0.0" + }, + "nodes": [ + { + "id": "8e860e51-5045-456e-bf04-9a62a2a5c49e", + "type": "invocation", + "data": { + "id": "8e860e51-5045-456e-bf04-9a62a2a5c49e", + "type": "image", + "inputs": { + "image": { + "id": "189c8adf-68cc-4774-a729-49da89f6fdf1", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "Depth Input Image" + } + }, + "outputs": { + "image": { + "id": "1a31cacd-9d19-4f32-b558-c5e4aa39ce73", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "12f298fd-1d11-4cca-9426-01240f7ec7cf", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "c47dabcb-44e8-40c9-992d-81dca59f598e", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 225, + "position": { + "x": 3617.163483500202, + "y": 40.5529847930888 + } + }, + { + "id": "a33199c2-8340-401e-b8a2-42ffa875fc1c", + "type": "invocation", + "data": { + "id": "a33199c2-8340-401e-b8a2-42ffa875fc1c", + "type": "controlnet", + "inputs": { + "image": { + "id": "4e0a3172-d3c2-4005-a84c-fa12a404f8a0", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "control_model": { + "id": "8cb2d998-4086-430a-8b13-94cbc81e3ca3", + "name": "control_model", + "type": "ControlNetModelField", + "fieldKind": "input", + "label": "", + "value": { + "model_name": "sd-controlnet-depth", + "base_model": "sd-1" + } + }, + "control_weight": { + "id": "5e32bd8a-9dc8-42d8-9bcc-c2b0460c0b0f", + "name": "control_weight", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", 
+ "value": 1 + }, + "begin_step_percent": { + "id": "c258a276-352a-416c-8358-152f11005c0c", + "name": "begin_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "end_step_percent": { + "id": "43001125-0d70-4f87-8e79-da6603ad6c33", + "name": "end_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "control_mode": { + "id": "d2f14561-9443-4374-9270-e2f05007944e", + "name": "control_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "balanced" + }, + "resize_mode": { + "id": "727ee7d3-8bf6-4c7d-8b8a-43546b3b59cd", + "name": "resize_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "just_resize" + } + }, + "outputs": { + "control": { + "id": "b034aa0f-4d0d-46e4-b5e3-e25a9588d087", + "name": "control", + "type": "ControlField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 508, + "position": { + "x": 4477.604342844504, + "y": -49.39005411272677 + } + }, + { + "id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1", + "type": "invocation", + "data": { + "id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1", + "type": "compel", + "inputs": { + "prompt": { + "id": "7c2c4771-2161-4d77-aced-ff8c4b3f1c15", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "Negative Prompt", + "value": "" + }, + "clip": { + "id": "06d59e91-9cca-411d-bf05-86b099b3e8f7", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "858bc33c-134c-4bf6-8855-f943e1d26f14", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 4444.706437017514, + "y": -924.0715320874991 + } + }, + { + "id": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "type": "invocation", + "data": { + "id": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "type": "main_model_loader", + "inputs": { + "model": { + "id": "f4a915a5-593e-4b6d-9198-c78eb5cefaed", + "name": "model", + "type": "MainModelField", + "fieldKind": "input", + "label": "", + "value": { + "model_name": "stable-diffusion-v1-5", + "base_model": "sd-1", + "model_type": "main" + } + } + }, + "outputs": { + "unet": { + "id": "ee24fb16-da38-4c66-9fbc-e8f296ed40d2", + "name": "unet", + "type": "UNetField", + "fieldKind": "output" + }, + "clip": { + "id": "f3fb0524-8803-41c1-86db-a61a13ee6a33", + "name": "clip", + "type": "ClipField", + "fieldKind": "output" + }, + "vae": { + "id": "5c4878a8-b40f-44ab-b146-1c1f42c860b3", + "name": "vae", + "type": "VaeField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 226, + "position": { + "x": 3837.096149678291, + "y": -1050.015351148365 + } + }, + { + "id": "7ce68934-3419-42d4-ac70-82cfc9397306", + "type": "invocation", + "data": { + "id": "7ce68934-3419-42d4-ac70-82cfc9397306", + "type": "compel", + "inputs": { + "prompt": { + "id": "7c2c4771-2161-4d77-aced-ff8c4b3f1c15", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "Positive Prompt", + "value": "" + }, + "clip": { + "id": 
"06d59e91-9cca-411d-bf05-86b099b3e8f7", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "858bc33c-134c-4bf6-8855-f943e1d26f14", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 4449.356038911986, + "y": -1201.659695420063 + } + }, + { + "id": "d204d184-f209-4fae-a0a1-d152800844e1", + "type": "invocation", + "data": { + "id": "d204d184-f209-4fae-a0a1-d152800844e1", + "type": "controlnet", + "inputs": { + "image": { + "id": "4e0a3172-d3c2-4005-a84c-fa12a404f8a0", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "control_model": { + "id": "8cb2d998-4086-430a-8b13-94cbc81e3ca3", + "name": "control_model", + "type": "ControlNetModelField", + "fieldKind": "input", + "label": "", + "value": { + "model_name": "sd-controlnet-canny", + "base_model": "sd-1" + } + }, + "control_weight": { + "id": "5e32bd8a-9dc8-42d8-9bcc-c2b0460c0b0f", + "name": "control_weight", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "begin_step_percent": { + "id": "c258a276-352a-416c-8358-152f11005c0c", + "name": "begin_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "end_step_percent": { + "id": "43001125-0d70-4f87-8e79-da6603ad6c33", + "name": "end_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "control_mode": { + "id": "d2f14561-9443-4374-9270-e2f05007944e", + "name": "control_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "balanced" + }, + "resize_mode": { + "id": "727ee7d3-8bf6-4c7d-8b8a-43546b3b59cd", + "name": "resize_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "just_resize" + } + }, + "outputs": { + "control": { + "id": "b034aa0f-4d0d-46e4-b5e3-e25a9588d087", + "name": "control", + "type": "ControlField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 508, + "position": { + "x": 4479.68542130465, + "y": -618.4221638099414 + } + }, + { + "id": "c4b23e64-7986-40c4-9cad-46327b12e204", + "type": "invocation", + "data": { + "id": "c4b23e64-7986-40c4-9cad-46327b12e204", + "type": "image", + "inputs": { + "image": { + "id": "189c8adf-68cc-4774-a729-49da89f6fdf1", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "Canny Input Image" + } + }, + "outputs": { + "image": { + "id": "1a31cacd-9d19-4f32-b558-c5e4aa39ce73", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "12f298fd-1d11-4cca-9426-01240f7ec7cf", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "c47dabcb-44e8-40c9-992d-81dca59f598e", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 225, + "position": { + "x": 3593.7474460420153, + "y": -538.1200472386865 + } + }, + { + "id": "ca4d5059-8bfb-447f-b415-da0faba5a143", + "type": "invocation", + "data": { + "id": 
"ca4d5059-8bfb-447f-b415-da0faba5a143", + "type": "collect", + "inputs": { + "item": { + "id": "b16ae602-8708-4b1b-8d4f-9e0808d429ab", + "name": "item", + "type": "CollectionItem", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "collection": { + "id": "d8987dd8-dec8-4d94-816a-3e356af29884", + "name": "collection", + "type": "Collection", + "fieldKind": "output" + } + }, + "label": "ControlNet Collection", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 104, + "position": { + "x": 4866.191497139488, + "y": -299.0538619537037 + } + }, + { + "id": "018b1214-c2af-43a7-9910-fb687c6726d7", + "type": "invocation", + "data": { + "id": "018b1214-c2af-43a7-9910-fb687c6726d7", + "type": "midas_depth_image_processor", + "inputs": { + "metadata": { + "id": "77f91980-c696-4a18-a9ea-6e2fc329a747", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "50710a20-2af5-424d-9d17-aa08167829c6", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "a_mult": { + "id": "f3b26f9d-2498-415e-9c01-197a8d06c0a5", + "name": "a_mult", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 2 + }, + "bg_th": { + "id": "4b1eb3ae-9d4a-47d6-b0ed-da62501e007f", + "name": "bg_th", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0.1 + } + }, + "outputs": { + "image": { + "id": "b4ed637c-c4a0-4fdd-a24e-36d6412e4ccf", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "6bf9b609-d72c-4239-99bd-390a73cc3a9c", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "3e8aef09-cf44-4e3e-a490-d3c9e7b23119", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 339, + "position": { + "x": 4054.229311491893, + "y": -31.611411056365725 + } + }, + { + "id": "c826ba5e-9676-4475-b260-07b85e88753c", + "type": "invocation", + "data": { + "id": "c826ba5e-9676-4475-b260-07b85e88753c", + "type": "canny_image_processor", + "inputs": { + "metadata": { + "id": "08331ea6-99df-4e61-a919-204d9bfa8fb2", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "image": { + "id": "33a37284-06ac-459c-ba93-1655e4f69b2d", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "low_threshold": { + "id": "21ec18a3-50c5-4ba1-9642-f921744d594f", + "name": "low_threshold", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 100 + }, + "high_threshold": { + "id": "ebeab271-a5ff-4c88-acfd-1d0271ab6ed4", + "name": "high_threshold", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 200 + } + }, + "outputs": { + "image": { + "id": "c0caadbf-883f-4cb4-a62d-626b9c81fc4e", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "df225843-8098-49c0-99d1-3b0b6600559f", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "e4abe0de-aa16-41f3-9cd7-968b49db5da3", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + 
}, + "width": 320, + "height": 339, + "position": { + "x": 4095.757337055795, + "y": -455.63440891935863 + } + }, + { + "id": "9db25398-c869-4a63-8815-c6559341ef12", + "type": "invocation", + "data": { + "id": "9db25398-c869-4a63-8815-c6559341ef12", + "type": "l2i", + "inputs": { + "metadata": { + "id": "2f269793-72e5-4ff3-b76c-fab4f93e983f", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "4aaedd3b-cc77-420c-806e-c7fa74ec4cdf", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "vae": { + "id": "432b066a-2462-4d18-83d9-64620b72df45", + "name": "vae", + "type": "VaeField", + "fieldKind": "input", + "label": "" + }, + "tiled": { + "id": "61f86e0f-7c46-40f8-b3f5-fe2f693595ca", + "name": "tiled", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + }, + "fp32": { + "id": "39b6c89a-37ef-4a7e-9509-daeca49d5092", + "name": "fp32", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + "id": "6204e9b0-61dd-4250-b685-2092ba0e28e6", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "b4140649-8d5d-4d2d-bfa6-09e389ede5f9", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "f3a0c0c8-fc24-4646-8be1-ed8cdd140828", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": false, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 267, + "position": { + "x": 5678.726701377887, + "y": -351.6792416734579 + } + }, + { + "id": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce", + "type": "invocation", + "data": { + "id": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce", + "type": "denoise_latents", + "inputs": { + "positive_conditioning": { + "id": "869cd309-c238-444b-a1a0-5021f99785ba", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "343447b4-1e37-4e9e-8ac7-4d04864066af", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "noise": { + "id": "b556571e-0cf9-4e03-8cfc-5caad937d957", + "name": "noise", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "a3b3d2de-9308-423e-b00d-c209c3e6e808", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "b13c50a4-ec7e-4579-b0ef-2fe5df2605ea", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "57d5d755-f58f-4347-b991-f0bca4a0ab29", + "name": "denoising_start", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "323e78a6-880a-4d73-a62c-70faff965aa6", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "c25fdc17-a089-43ac-953e-067c45d5c76b", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "unet": { + "id": "6cde662b-e633-4569-b6b4-ec87c52c9c11", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + }, + "control": { + "id": "276a4df9-bb26-4505-a4d3-a94e18c7b541", + "name": "control", + "type": "ControlPolymorphic", + 
"fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "48d40c51-b5e2-4457-a428-eef0696695e8", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "75dd8af2-e7d7-48b4-a574-edd9f6e686ad", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "9223d67b-1dd7-4b34-a45f-ed0a725d9702", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "4ee99177-6923-4b7f-8fe0-d721dd7cb05b", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "7fb4e326-a974-43e8-9ee7-2e3ab235819d", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "6bb8acd0-8973-4195-a095-e376385dc705", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "795dea52-1c7d-4e64-99f7-2f60ec6e3ab9", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.4.0" + }, + "width": 320, + "height": 646, + "position": { + "x": 5274.672987098195, + "y": -823.0752416664332 + } + } + ], + "edges": [ + { + "source": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "sourceHandle": "clip", + "target": "7ce68934-3419-42d4-ac70-82cfc9397306", + "targetHandle": "clip", + "id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9clip-7ce68934-3419-42d4-ac70-82cfc9397306clip", + "type": "default" + }, + { + "source": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "sourceHandle": "clip", + "target": "273e3f96-49ea-4dc5-9d5b-9660390f14e1", + "targetHandle": "clip", + "id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9clip-273e3f96-49ea-4dc5-9d5b-9660390f14e1clip", + "type": "default" + }, + { + "source": "a33199c2-8340-401e-b8a2-42ffa875fc1c", + "sourceHandle": "control", + "target": "ca4d5059-8bfb-447f-b415-da0faba5a143", + "targetHandle": "item", + "id": "reactflow__edge-a33199c2-8340-401e-b8a2-42ffa875fc1ccontrol-ca4d5059-8bfb-447f-b415-da0faba5a143item", + "type": "default" + }, + { + "source": "d204d184-f209-4fae-a0a1-d152800844e1", + "sourceHandle": "control", + "target": "ca4d5059-8bfb-447f-b415-da0faba5a143", + "targetHandle": "item", + "id": "reactflow__edge-d204d184-f209-4fae-a0a1-d152800844e1control-ca4d5059-8bfb-447f-b415-da0faba5a143item", + "type": "default" + }, + { + "source": "8e860e51-5045-456e-bf04-9a62a2a5c49e", + "sourceHandle": "image", + "target": "018b1214-c2af-43a7-9910-fb687c6726d7", + "targetHandle": "image", + "id": "reactflow__edge-8e860e51-5045-456e-bf04-9a62a2a5c49eimage-018b1214-c2af-43a7-9910-fb687c6726d7image", + "type": "default" + }, + { + "source": "018b1214-c2af-43a7-9910-fb687c6726d7", + "sourceHandle": "image", + "target": "a33199c2-8340-401e-b8a2-42ffa875fc1c", + "targetHandle": "image", + "id": "reactflow__edge-018b1214-c2af-43a7-9910-fb687c6726d7image-a33199c2-8340-401e-b8a2-42ffa875fc1cimage", + "type": "default" + }, + { + "source": "c4b23e64-7986-40c4-9cad-46327b12e204", + "sourceHandle": "image", + "target": "c826ba5e-9676-4475-b260-07b85e88753c", + "targetHandle": "image", + "id": "reactflow__edge-c4b23e64-7986-40c4-9cad-46327b12e204image-c826ba5e-9676-4475-b260-07b85e88753cimage", + "type": "default" + }, + { + "source": "c826ba5e-9676-4475-b260-07b85e88753c", + "sourceHandle": 
"image", + "target": "d204d184-f209-4fae-a0a1-d152800844e1", + "targetHandle": "image", + "id": "reactflow__edge-c826ba5e-9676-4475-b260-07b85e88753cimage-d204d184-f209-4fae-a0a1-d152800844e1image", + "type": "default" + }, + { + "source": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "sourceHandle": "vae", + "target": "9db25398-c869-4a63-8815-c6559341ef12", + "targetHandle": "vae", + "id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9vae-9db25398-c869-4a63-8815-c6559341ef12vae", + "type": "default" + }, + { + "source": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce", + "sourceHandle": "latents", + "target": "9db25398-c869-4a63-8815-c6559341ef12", + "targetHandle": "latents", + "id": "reactflow__edge-ac481b7f-08bf-4a9d-9e0c-3a82ea5243celatents-9db25398-c869-4a63-8815-c6559341ef12latents", + "type": "default" + }, + { + "source": "ca4d5059-8bfb-447f-b415-da0faba5a143", + "sourceHandle": "collection", + "target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce", + "targetHandle": "control", + "id": "reactflow__edge-ca4d5059-8bfb-447f-b415-da0faba5a143collection-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cecontrol", + "type": "default" + }, + { + "source": "54486974-835b-4d81-8f82-05f9f32ce9e9", + "sourceHandle": "unet", + "target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce", + "targetHandle": "unet", + "id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9unet-ac481b7f-08bf-4a9d-9e0c-3a82ea5243ceunet", + "type": "default" + }, + { + "source": "273e3f96-49ea-4dc5-9d5b-9660390f14e1", + "sourceHandle": "conditioning", + "target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce", + "targetHandle": "negative_conditioning", + "id": "reactflow__edge-273e3f96-49ea-4dc5-9d5b-9660390f14e1conditioning-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cenegative_conditioning", + "type": "default" + }, + { + "source": "7ce68934-3419-42d4-ac70-82cfc9397306", + "sourceHandle": "conditioning", + "target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce", + "targetHandle": "positive_conditioning", + "id": "reactflow__edge-7ce68934-3419-42d4-ac70-82cfc9397306conditioning-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cepositive_conditioning", + "type": "default" + } + ] +} \ No newline at end of file diff --git a/docs/workflows/Prompt_from_File.json b/docs/workflows/Prompt_from_File.json new file mode 100644 index 0000000000..0a273d3b50 --- /dev/null +++ b/docs/workflows/Prompt_from_File.json @@ -0,0 +1,719 @@ +{ + "name": "Prompt from File", + "author": "InvokeAI", + "description": "Sample workflow using prompt from file capabilities of InvokeAI ", + "version": "0.1.0", + "contact": "millun@invoke.ai", + "tags": "text2image, prompt from file, default", + "notes": "", + "exposedFields": [ + { + "nodeId": "d6353b7f-b447-4e17-8f2e-80a88c91d426", + "fieldName": "model" + }, + { + "nodeId": "1b7e0df8-8589-4915-a4ea-c0088f15d642", + "fieldName": "file_path" + } + ], + "meta": { + "version": "1.0.0" + }, + "nodes": [ + { + "id": "c2eaf1ba-5708-4679-9e15-945b8b432692", + "type": "invocation", + "data": { + "id": "c2eaf1ba-5708-4679-9e15-945b8b432692", + "type": "compel", + "inputs": { + "prompt": { + "id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "", + "value": "" + }, + "clip": { + "id": "3f1981c9-d8a9-42eb-a739-4f120eb80745", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": 
true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 1177.3417789657444, + "y": -102.0924766641035 + } + }, + { + "id": "1b7e0df8-8589-4915-a4ea-c0088f15d642", + "type": "invocation", + "data": { + "id": "1b7e0df8-8589-4915-a4ea-c0088f15d642", + "type": "prompt_from_file", + "inputs": { + "file_path": { + "id": "37e37684-4f30-4ec8-beae-b333e550f904", + "name": "file_path", + "type": "string", + "fieldKind": "input", + "label": "Prompts File Path", + "value": "" + }, + "pre_prompt": { + "id": "7de02feb-819a-4992-bad3-72a30920ddea", + "name": "pre_prompt", + "type": "string", + "fieldKind": "input", + "label": "", + "value": "" + }, + "post_prompt": { + "id": "95f191d8-a282-428e-bd65-de8cb9b7513a", + "name": "post_prompt", + "type": "string", + "fieldKind": "input", + "label": "", + "value": "" + }, + "start_line": { + "id": "efee9a48-05ab-4829-8429-becfa64a0782", + "name": "start_line", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "max_prompts": { + "id": "abebb428-3d3d-49fd-a482-4e96a16fff08", + "name": "max_prompts", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 1 + } + }, + "outputs": { + "collection": { + "id": "77d5d7f1-9877-4ab1-9a8c-33e9ffa9abf3", + "name": "collection", + "type": "StringCollection", + "fieldKind": "output" + } + }, + "label": "Prompts from File", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 589, + "position": { + "x": 394.181884547075, + "y": -423.5345157864633 + } + }, + { + "id": "1b89067c-3f6b-42c8-991f-e3055789b251", + "type": "invocation", + "data": { + "id": "1b89067c-3f6b-42c8-991f-e3055789b251", + "type": "iterate", + "inputs": { + "collection": { + "id": "4c564bf8-5ed6-441e-ad2c-dda265d5785f", + "name": "collection", + "type": "Collection", + "fieldKind": "input", + "label": "", + "value": [] + } + }, + "outputs": { + "item": { + "id": "36340f9a-e7a5-4afa-b4b5-313f4e292380", + "name": "item", + "type": "CollectionItem", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 104, + "position": { + "x": 792.8735298060233, + "y": -432.6964953027252 + } + }, + { + "id": "d6353b7f-b447-4e17-8f2e-80a88c91d426", + "type": "invocation", + "data": { + "id": "d6353b7f-b447-4e17-8f2e-80a88c91d426", + "type": "main_model_loader", + "inputs": { + "model": { + "id": "3f264259-3418-47d5-b90d-b6600e36ae46", + "name": "model", + "type": "MainModelField", + "fieldKind": "input", + "label": "", + "value": { + "model_name": "stable-diffusion-v1-5", + "base_model": "sd-1", + "model_type": "main" + } + } + }, + "outputs": { + "unet": { + "id": "8e182ea2-9d0a-4c02-9407-27819288d4b5", + "name": "unet", + "type": "UNetField", + "fieldKind": "output" + }, + "clip": { + "id": "d67d9d30-058c-46d5-bded-3d09d6d1aa39", + "name": "clip", + "type": "ClipField", + "fieldKind": "output" + }, + "vae": { + "id": "89641601-0429-4448-98d5-190822d920d8", + "name": "vae", + "type": "VaeField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 226, + "position": { + "x": -47.66201354137797, + "y": 
-299.218193067033 + } + }, + { + "id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", + "type": "invocation", + "data": { + "id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", + "type": "compel", + "inputs": { + "prompt": { + "id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "", + "value": "" + }, + "clip": { + "id": "3f1981c9-d8a9-42eb-a739-4f120eb80745", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 1175.0187896425462, + "y": -420.64289413577114 + } + }, + { + "id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77", + "type": "invocation", + "data": { + "id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77", + "type": "noise", + "inputs": { + "seed": { + "id": "b722d84a-eeee-484f-bef2-0250c027cb67", + "name": "seed", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "width": { + "id": "d5f8ce11-0502-4bfc-9a30-5757dddf1f94", + "name": "width", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "height": { + "id": "f187d5ff-38a5-4c3f-b780-fc5801ef34af", + "name": "height", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "use_cpu": { + "id": "12f112b8-8b76-4816-b79e-662edc9f9aa5", + "name": "use_cpu", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": true + } + }, + "outputs": { + "noise": { + "id": "08576ad1-96d9-42d2-96ef-6f5c1961933f", + "name": "noise", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "f3e1f94a-258d-41ff-9789-bd999bd9f40d", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "6cefc357-4339-415e-a951-49b9c2be32f4", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 389, + "position": { + "x": 809.1964864135837, + "y": 183.2735123359796 + } + }, + { + "id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5", + "type": "invocation", + "data": { + "id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5", + "type": "rand_int", + "inputs": { + "low": { + "id": "b9fc6cf1-469c-4037-9bf0-04836965826f", + "name": "low", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "high": { + "id": "06eac725-0f60-4ba2-b8cd-7ad9f757488c", + "name": "high", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 2147483647 + } + }, + "outputs": { + "value": { + "id": "df08c84e-7346-4e92-9042-9e5cb773aaff", + "name": "value", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": false, + "version": "1.0.0" + }, + "width": 320, + "height": 218, + "position": { + "x": 354.19913145404166, + "y": 301.86324846905165 + } + }, + { + "id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", + "type": "invocation", + "data": { + "id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", + "type": "l2i", + "inputs": { + "metadata": { + "id": 
"022e4b33-562b-438d-b7df-41c3fd931f40", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "67cb6c77-a394-4a66-a6a9-a0a7dcca69ec", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "vae": { + "id": "7b3fd9ad-a4ef-4e04-89fa-3832a9902dbd", + "name": "vae", + "type": "VaeField", + "fieldKind": "input", + "label": "" + }, + "tiled": { + "id": "5ac5680d-3add-4115-8ec0-9ef5bb87493b", + "name": "tiled", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + }, + "fp32": { + "id": "db8297f5-55f8-452f-98cf-6572c2582152", + "name": "fp32", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + "id": "d8778d0c-592a-4960-9280-4e77e00a7f33", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "c8b0a75a-f5de-4ff2-9227-f25bb2b97bec", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "83c05fbf-76b9-49ab-93c4-fa4b10e793e4", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 267, + "position": { + "x": 2037.861329274915, + "y": -329.8393457509562 + } + }, + { + "id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", + "type": "invocation", + "data": { + "id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", + "type": "denoise_latents", + "inputs": { + "positive_conditioning": { + "id": "751fb35b-3f23-45ce-af1c-053e74251337", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "b9dc06b6-7481-4db1-a8c2-39d22a5eacff", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "noise": { + "id": "6e15e439-3390-48a4-8031-01e0e19f0e1d", + "name": "noise", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "bfdfb3df-760b-4d51-b17b-0abb38b976c2", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "47770858-322e-41af-8494-d8b63ed735f3", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "2ba78720-ee02-4130-a348-7bc3531f790b", + "name": "denoising_start", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "a874dffb-d433-4d1a-9f59-af4367bb05e4", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "36e021ad-b762-4fe4-ad4d-17f0291c40b2", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "unet": { + "id": "98d3282d-f9f6-4b5e-b9e8-58658f1cac78", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + }, + "control": { + "id": "f2ea3216-43d5-42b4-887f-36e8f7166d53", + "name": "control", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "d0780610-a298-47c8-a54e-70e769e0dfe2", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "fdb40970-185e-4ea8-8bb5-88f06f91f46a", + "name": "t2i_adapter", + "type": 
"T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "e05b538a-1b5a-4aa5-84b1-fd2361289a81", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "463a419e-df30-4382-8ffb-b25b25abe425", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "559ee688-66cf-4139-8b82-3d3aa69995ce", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "0b4285c2-e8b9-48e5-98f6-0a49d3f98fd2", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "8b0881b9-45e5-47d5-b526-24b6661de0ee", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.4.0" + }, + "width": 320, + "height": 646, + "position": { + "x": 1570.9941088179146, + "y": -407.6505491604564 + } + } + ], + "edges": [ + { + "source": "1b7e0df8-8589-4915-a4ea-c0088f15d642", + "sourceHandle": "collection", + "target": "1b89067c-3f6b-42c8-991f-e3055789b251", + "targetHandle": "collection", + "id": "reactflow__edge-1b7e0df8-8589-4915-a4ea-c0088f15d642collection-1b89067c-3f6b-42c8-991f-e3055789b251collection", + "type": "default" + }, + { + "source": "d6353b7f-b447-4e17-8f2e-80a88c91d426", + "sourceHandle": "clip", + "target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", + "targetHandle": "clip", + "id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-fc9d0e35-a6de-4a19-84e1-c72497c823f6clip", + "type": "default" + }, + { + "source": "1b89067c-3f6b-42c8-991f-e3055789b251", + "sourceHandle": "item", + "target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", + "targetHandle": "prompt", + "id": "reactflow__edge-1b89067c-3f6b-42c8-991f-e3055789b251item-fc9d0e35-a6de-4a19-84e1-c72497c823f6prompt", + "type": "default" + }, + { + "source": "d6353b7f-b447-4e17-8f2e-80a88c91d426", + "sourceHandle": "clip", + "target": "c2eaf1ba-5708-4679-9e15-945b8b432692", + "targetHandle": "clip", + "id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-c2eaf1ba-5708-4679-9e15-945b8b432692clip", + "type": "default" + }, + { + "source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5", + "sourceHandle": "value", + "target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77", + "targetHandle": "seed", + "id": "reactflow__edge-dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5value-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77seed", + "type": "default" + }, + { + "source": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", + "sourceHandle": "conditioning", + "target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", + "targetHandle": "positive_conditioning", + "id": "reactflow__edge-fc9d0e35-a6de-4a19-84e1-c72497c823f6conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5epositive_conditioning", + "type": "default" + }, + { + "source": "c2eaf1ba-5708-4679-9e15-945b8b432692", + "sourceHandle": "conditioning", + "target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", + "targetHandle": "negative_conditioning", + "id": "reactflow__edge-c2eaf1ba-5708-4679-9e15-945b8b432692conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enegative_conditioning", + "type": "default" + }, + { + "source": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77", + "sourceHandle": "noise", + "target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", + "targetHandle": "noise", + "id": "reactflow__edge-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77noise-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enoise", + 
"type": "default" + }, + { + "source": "d6353b7f-b447-4e17-8f2e-80a88c91d426", + "sourceHandle": "unet", + "target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", + "targetHandle": "unet", + "id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426unet-2fb1577f-0a56-4f12-8711-8afcaaaf1d5eunet", + "type": "default" + }, + { + "source": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", + "sourceHandle": "latents", + "target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", + "targetHandle": "latents", + "id": "reactflow__edge-2fb1577f-0a56-4f12-8711-8afcaaaf1d5elatents-491ec988-3c77-4c37-af8a-39a0c4e7a2a1latents", + "type": "default" + }, + { + "source": "d6353b7f-b447-4e17-8f2e-80a88c91d426", + "sourceHandle": "vae", + "target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", + "targetHandle": "vae", + "id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426vae-491ec988-3c77-4c37-af8a-39a0c4e7a2a1vae", + "type": "default" + } + ] +} \ No newline at end of file diff --git a/docs/workflows/QR_Code_Monster.json b/docs/workflows/QR_Code_Monster.json new file mode 100644 index 0000000000..f0ae5d74fd --- /dev/null +++ b/docs/workflows/QR_Code_Monster.json @@ -0,0 +1,758 @@ +{ + "name": "QR Code Monster", + "author": "InvokeAI", + "description": "Sample workflow for create images with QR code Monster ControlNet", + "version": "1.0.1", + "contact": "invoke@invoke.ai", + "tags": "qrcode, controlnet, default", + "notes": "", + "exposedFields": [ + { + "nodeId": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a", + "fieldName": "image" + }, + { + "nodeId": "aca3b054-bfba-4392-bd20-6476f59504df", + "fieldName": "prompt" + }, + { + "nodeId": "3db7cee0-31e2-4a3d-94a1-268cb16177dd", + "fieldName": "prompt" + } + ], + "meta": { + "version": "1.0.0" + }, + "nodes": [ + { + "id": "3db7cee0-31e2-4a3d-94a1-268cb16177dd", + "type": "invocation", + "data": { + "id": "3db7cee0-31e2-4a3d-94a1-268cb16177dd", + "type": "compel", + "inputs": { + "prompt": { + "id": "6a1fe244-5656-4f8c-91d1-1fb474e28807", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "Negative Prompt", + "value": "" + }, + "clip": { + "id": "f24688f3-29b8-4a2d-8603-046e5a5c7250", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "700528eb-3f8b-4745-b540-34f919b5b228", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "Prompt", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 773.0502679628016, + "y": 1622.4836086770556 + } + }, + { + "id": "610384f1-6f0c-4847-a9a2-37ce7f456ed1", + "type": "invocation", + "data": { + "id": "610384f1-6f0c-4847-a9a2-37ce7f456ed1", + "type": "main_model_loader", + "inputs": { + "model": { + "id": "cb36b6d3-6c1f-4911-a200-646745b0ff74", + "name": "model", + "type": "MainModelField", + "fieldKind": "input", + "label": "", + "value": { + "model_name": "stable-diffusion-v1-5", + "base_model": "sd-1", + "model_type": "main" + } + } + }, + "outputs": { + "unet": { + "id": "7246895b-b252-49bc-b952-8d801b4672f7", + "name": "unet", + "type": "UNetField", + "fieldKind": "output" + }, + "clip": { + "id": "3c2aedb8-30d5-4d4b-99df-d06a0d7bedc6", + "name": "clip", + "type": "ClipField", + "fieldKind": "output" + }, + "vae": { + "id": "b9743815-5501-4bbb-8bde-8bd6ba298a4e", + "name": "vae", + "type": "VaeField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + 
"notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 226, + "position": { + "x": 211.58866462619744, + "y": 1376.0542388105248 + } + }, + { + "id": "aca3b054-bfba-4392-bd20-6476f59504df", + "type": "invocation", + "data": { + "id": "aca3b054-bfba-4392-bd20-6476f59504df", + "type": "compel", + "inputs": { + "prompt": { + "id": "6a1fe244-5656-4f8c-91d1-1fb474e28807", + "name": "prompt", + "type": "string", + "fieldKind": "input", + "label": "Positive Prompt", + "value": "" + }, + "clip": { + "id": "f24688f3-29b8-4a2d-8603-046e5a5c7250", + "name": "clip", + "type": "ClipField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "conditioning": { + "id": "700528eb-3f8b-4745-b540-34f919b5b228", + "name": "conditioning", + "type": "ConditioningField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 261, + "position": { + "x": 770.6491131680111, + "y": 1316.379247112241 + } + }, + { + "id": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a", + "type": "invocation", + "data": { + "id": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a", + "type": "image", + "inputs": { + "image": { + "id": "89ba5d58-28c9-4e04-a5df-79fb7a6f3531", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "QR Code / Hidden Image" + } + }, + "outputs": { + "image": { + "id": "54335653-0e17-42da-b9e8-83c5fb5af670", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "a3c65953-39ea-4d97-8858-d65154ff9d11", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "2c7db511-ebc9-4286-a46b-bc11e0fd779f", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 225, + "position": { + "x": 700.5034176864369, + "y": 1981.749600549388 + } + }, + { + "id": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a", + "type": "invocation", + "data": { + "id": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a", + "type": "noise", + "inputs": { + "seed": { + "id": "7c6c76dd-127b-4829-b1ec-430790cb7ed7", + "name": "seed", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "width": { + "id": "8ec6a525-a421-40d8-a17e-39e7b6836438", + "name": "width", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "height": { + "id": "6af1e58a-e2ee-4ec4-9f06-d8d0412922ca", + "name": "height", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 512 + }, + "use_cpu": { + "id": "26662e99-5720-43a6-a5d8-06c9dab0e261", + "name": "use_cpu", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": true + } + }, + "outputs": { + "noise": { + "id": "cb4c4dfc-a744-49eb-af4f-677448e28407", + "name": "noise", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "97e87be6-e81f-40a3-a522-28ebe4aad0ac", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "80784420-f1e1-47b0-bd1d-1d381a15e22d", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": false, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + 
"height": 32, + "position": { + "x": 1182.460291960481, + "y": 1759.592972960265 + } + }, + { + "id": "2ac03cf6-0326-454a-bed0-d8baef2bf30d", + "type": "invocation", + "data": { + "id": "2ac03cf6-0326-454a-bed0-d8baef2bf30d", + "type": "controlnet", + "inputs": { + "image": { + "id": "1f683889-9f14-40c8-af29-4b991b211a3a", + "name": "image", + "type": "ImageField", + "fieldKind": "input", + "label": "" + }, + "control_model": { + "id": "a933b21d-22c1-4e06-818f-15416b971282", + "name": "control_model", + "type": "ControlNetModelField", + "fieldKind": "input", + "label": "", + "value": { + "model_name": "qrcode_monster", + "base_model": "sd-1" + } + }, + "control_weight": { + "id": "198a0825-e55e-4496-bc54-c3d7b02f3d75", + "name": "control_weight", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 1.4 + }, + "begin_step_percent": { + "id": "c85ce42f-22af-42a0-8993-676002fb275e", + "name": "begin_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "end_step_percent": { + "id": "a61a65c4-9e6f-4fe2-96a5-1294d17ec6e4", + "name": "end_step_percent", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "control_mode": { + "id": "1aa45cfa-0249-46b7-bf24-3e38e92f5fa0", + "name": "control_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "balanced" + }, + "resize_mode": { + "id": "a89d3cb9-a141-4cea-bb49-977bf267377b", + "name": "resize_mode", + "type": "enum", + "fieldKind": "input", + "label": "", + "value": "just_resize" + } + }, + "outputs": { + "control": { + "id": "c9a1fc7e-cb25-45a9-adff-1a97c9ff04d6", + "name": "control", + "type": "ControlField", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 508, + "position": { + "x": 1165.434407461108, + "y": 1862.916856351665 + } + }, + { + "id": "28542b66-5a00-4780-a318-0a036d2df914", + "type": "invocation", + "data": { + "id": "28542b66-5a00-4780-a318-0a036d2df914", + "type": "l2i", + "inputs": { + "metadata": { + "id": "a38e8f55-7f2c-4fcc-a71f-d51e2eb0374a", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "80e97bc8-e716-4175-9115-5b58495aa30c", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "vae": { + "id": "5641bce6-ac2b-47eb-bb32-2f290026b7e1", + "name": "vae", + "type": "VaeField", + "fieldKind": "input", + "label": "" + }, + "tiled": { + "id": "9e75eb16-ae48-47ed-b180-e0409d377436", + "name": "tiled", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + }, + "fp32": { + "id": "0518b0ce-ee37-437b-8437-cc2976a3279f", + "name": "fp32", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + "id": "ec2ff985-a7eb-401f-92c4-1217cddad6a2", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "ba1d1720-6d67-4eca-9e9d-b97d08636774", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "10bcf8f4-6394-422f-b0c0-51680f3bfb25", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 267, + "position": { + "x": 
2110.8415693683014, + "y": 1487.253341116115 + } + }, + { + "id": "9755ae4c-ef30-4db3-80f6-a31f98979a11", + "type": "invocation", + "data": { + "id": "9755ae4c-ef30-4db3-80f6-a31f98979a11", + "type": "denoise_latents", + "inputs": { + "positive_conditioning": { + "id": "8e6aceaa-a986-4ab2-9c04-5b1027b3daf6", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "fbbaa712-ca1a-420b-9016-763f2a29d68c", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "noise": { + "id": "a3b3d5d2-c0f9-4b89-a9b3-8de9418f7bb5", + "name": "noise", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "e491e664-2f8c-4f49-b3e4-57b051fbb9c5", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "f0318abd-ed65-4cad-86a7-48d1c19a6d14", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "f7c24c51-496f-44c4-836a-c734e529fec0", + "name": "denoising_start", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "54f7656a-fb0d-4d9e-a459-f700f7dccd2e", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "363ee440-040d-499b-bf84-bf5391b08681", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "unet": { + "id": "5c93d4e5-1064-4700-ab1d-d12e1e9b5ba7", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + }, + "control": { + "id": "e1948eb3-7407-43b0-93e3-139470f186b7", + "name": "control", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "5675b2c3-adfb-49ee-b33c-26bdbfab1fed", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "89cd4ab3-3bfc-4063-9de5-91d42305c651", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "ec01df90-5042-418d-b6d6-86b251c13770", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "561cde00-cb20-42ae-9bd3-4f477f73fbe1", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "f9addefe-efcc-4e01-8945-6ebbc934b002", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "6d48f78b-d681-422a-8677-0111bd0625f1", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "f25997b8-6316-44ce-b696-b82e4ed51ae5", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.4.0" + }, + "width": 320, + "height": 646, + "position": { + "x": 1597.9598293300219, + "y": 1420.4637727891632 + } + }, + { + "id": "59349822-af20-4e0e-a53f-3ba135d00c3f", + "type": "invocation", + "data": { + "id": "59349822-af20-4e0e-a53f-3ba135d00c3f", + "type": "rand_int", + "inputs": { + "low": { + "id": "051f22f9-2d4f-414f-bc51-84af2d626efa", + "name": "low", + "type": "integer", + "fieldKind": "input", + 
"label": "", + "value": 0 + }, + "high": { + "id": "77206186-f264-4224-9589-f925cf903dc9", + "name": "high", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 2147483647 + } + }, + "outputs": { + "value": { + "id": "a7ed9387-3a24-4d34-b7c5-f713bd544ab1", + "name": "value", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": false, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": false, + "version": "1.0.0" + }, + "width": 320, + "height": 32, + "position": { + "x": 1178.16746986153, + "y": 1663.9433412808876 + } + } + ], + "edges": [ + { + "source": "59349822-af20-4e0e-a53f-3ba135d00c3f", + "target": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a", + "id": "59349822-af20-4e0e-a53f-3ba135d00c3f-280fd8a7-3b0c-49fe-8be4-6246e08b6c9a-collapsed", + "type": "collapsed" + }, + { + "source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1", + "sourceHandle": "clip", + "target": "aca3b054-bfba-4392-bd20-6476f59504df", + "targetHandle": "clip", + "id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1clip-aca3b054-bfba-4392-bd20-6476f59504dfclip", + "type": "default" + }, + { + "source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1", + "sourceHandle": "clip", + "target": "3db7cee0-31e2-4a3d-94a1-268cb16177dd", + "targetHandle": "clip", + "id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1clip-3db7cee0-31e2-4a3d-94a1-268cb16177ddclip", + "type": "default" + }, + { + "source": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a", + "sourceHandle": "image", + "target": "2ac03cf6-0326-454a-bed0-d8baef2bf30d", + "targetHandle": "image", + "id": "reactflow__edge-a6cc0986-f928-4a7e-8d44-ba2d4b36f54aimage-2ac03cf6-0326-454a-bed0-d8baef2bf30dimage", + "type": "default" + }, + { + "source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1", + "sourceHandle": "vae", + "target": "28542b66-5a00-4780-a318-0a036d2df914", + "targetHandle": "vae", + "id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1vae-28542b66-5a00-4780-a318-0a036d2df914vae", + "type": "default" + }, + { + "source": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a", + "sourceHandle": "noise", + "target": "9755ae4c-ef30-4db3-80f6-a31f98979a11", + "targetHandle": "noise", + "id": "reactflow__edge-280fd8a7-3b0c-49fe-8be4-6246e08b6c9anoise-9755ae4c-ef30-4db3-80f6-a31f98979a11noise", + "type": "default" + }, + { + "source": "3db7cee0-31e2-4a3d-94a1-268cb16177dd", + "sourceHandle": "conditioning", + "target": "9755ae4c-ef30-4db3-80f6-a31f98979a11", + "targetHandle": "negative_conditioning", + "id": "reactflow__edge-3db7cee0-31e2-4a3d-94a1-268cb16177ddconditioning-9755ae4c-ef30-4db3-80f6-a31f98979a11negative_conditioning", + "type": "default" + }, + { + "source": "aca3b054-bfba-4392-bd20-6476f59504df", + "sourceHandle": "conditioning", + "target": "9755ae4c-ef30-4db3-80f6-a31f98979a11", + "targetHandle": "positive_conditioning", + "id": "reactflow__edge-aca3b054-bfba-4392-bd20-6476f59504dfconditioning-9755ae4c-ef30-4db3-80f6-a31f98979a11positive_conditioning", + "type": "default" + }, + { + "source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1", + "sourceHandle": "unet", + "target": "9755ae4c-ef30-4db3-80f6-a31f98979a11", + "targetHandle": "unet", + "id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1unet-9755ae4c-ef30-4db3-80f6-a31f98979a11unet", + "type": "default" + }, + { + "source": "2ac03cf6-0326-454a-bed0-d8baef2bf30d", + "sourceHandle": "control", + "target": "9755ae4c-ef30-4db3-80f6-a31f98979a11", + "targetHandle": "control", + "id": 
"reactflow__edge-2ac03cf6-0326-454a-bed0-d8baef2bf30dcontrol-9755ae4c-ef30-4db3-80f6-a31f98979a11control", + "type": "default" + }, + { + "source": "9755ae4c-ef30-4db3-80f6-a31f98979a11", + "sourceHandle": "latents", + "target": "28542b66-5a00-4780-a318-0a036d2df914", + "targetHandle": "latents", + "id": "reactflow__edge-9755ae4c-ef30-4db3-80f6-a31f98979a11latents-28542b66-5a00-4780-a318-0a036d2df914latents", + "type": "default" + }, + { + "source": "59349822-af20-4e0e-a53f-3ba135d00c3f", + "sourceHandle": "value", + "target": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a", + "targetHandle": "seed", + "id": "reactflow__edge-59349822-af20-4e0e-a53f-3ba135d00c3fvalue-280fd8a7-3b0c-49fe-8be4-6246e08b6c9aseed", + "type": "default" + } + ] +} \ No newline at end of file diff --git a/docs/workflows/SDXL_Text_to_Image.json b/docs/workflows/SDXL_Text_to_Image.json index 57f95293eb..af11731703 100644 --- a/docs/workflows/SDXL_Text_to_Image.json +++ b/docs/workflows/SDXL_Text_to_Image.json @@ -26,10 +26,6 @@ { "nodeId": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", "fieldName": "style" - }, - { - "nodeId": "87ee6243-fb0d-4f77-ad5f-56591659339e", - "fieldName": "steps" } ], "meta": { @@ -40,7 +36,6 @@ "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", "type": "invocation", "data": { - "version": "1.0.0", "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", "type": "sdxl_compel_prompt", "inputs": { @@ -135,10 +130,12 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 764, + "height": 793, "position": { "x": 1275, "y": -350 @@ -148,7 +145,6 @@ "id": "55705012-79b9-4aac-9f26-c0b10309785b", "type": "invocation", "data": { - "version": "1.0.0", "id": "55705012-79b9-4aac-9f26-c0b10309785b", "type": "noise", "inputs": { @@ -209,7 +205,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -218,83 +216,10 @@ "y": -300 } }, - { - "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "type": "invocation", - "data": { - "version": "1.0.0", - "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "type": "l2i", - "inputs": { - "tiled": { - "id": "24f5bc7b-f6a1-425d-8ab1-f50b4db5d0df", - "name": "tiled", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "fp32": { - "id": "b146d873-ffb9-4767-986a-5360504841a2", - "name": "fp32", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": true - }, - "latents": { - "id": "65441abd-7713-4b00-9d8d-3771404002e8", - "name": "latents", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "vae": { - "id": "a478b833-6e13-4611-9a10-842c89603c74", - "name": "vae", - "type": "VaeField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "image": { - "id": "c87ae925-f858-417a-8940-8708ba9b4b53", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "4bcb8512-b5a1-45f1-9e52-6e92849f9d6c", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "23e41c00-a354-48e8-8f59-5875679c27ab", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": true, - "isIntermediate": false - }, - "width": 320, - "height": 224, - "position": { - "x": 2025, - "y": -250 - } - }, { "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "type": "invocation", "data": { 
- "version": "1.0.0", "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "type": "rand_int", "inputs": { @@ -327,7 +252,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": false, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -340,7 +267,6 @@ "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "type": "invocation", "data": { - "version": "1.0.0", "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "type": "sdxl_model_loader", "inputs": { @@ -351,7 +277,7 @@ "fieldKind": "input", "label": "", "value": { - "model_name": "stable-diffusion-xl-base-1.0", + "model_name": "stable-diffusion-xl-base-1-0", "base_model": "sdxl", "model_type": "main" } @@ -387,10 +313,12 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 234, + "height": 258, "position": { "x": 475, "y": 25 @@ -400,7 +328,6 @@ "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", "type": "invocation", "data": { - "version": "1.0.0", "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", "type": "sdxl_compel_prompt", "inputs": { @@ -495,128 +422,77 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 764, + "height": 793, "position": { "x": 900, "y": -350 } }, { - "id": "87ee6243-fb0d-4f77-ad5f-56591659339e", + "id": "63e91020-83b2-4f35-b174-ad9692aabb48", "type": "invocation", "data": { - "version": "1.0.0", - "id": "87ee6243-fb0d-4f77-ad5f-56591659339e", - "type": "denoise_latents", + "id": "63e91020-83b2-4f35-b174-ad9692aabb48", + "type": "l2i", "inputs": { - "noise": { - "id": "4884a4b7-cc19-4fea-83c7-1f940e6edd24", - "name": "noise", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "steps": { - "id": "4c61675c-b6b9-41ac-b187-b5c13b587039", - "name": "steps", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 36 - }, - "cfg_scale": { - "id": "f8213f35-4637-4a1a-83f4-1f8cfb9ccd2c", - "name": "cfg_scale", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 7.5 - }, - "denoising_start": { - "id": "01e2f30d-0acd-4e21-98b9-a9b8e24c6db2", - "name": "denoising_start", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0 - }, - "denoising_end": { - "id": "3db95479-a73b-4c75-9b44-08daec16b224", - "name": "denoising_end", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 1 - }, - "scheduler": { - "id": "db8430a9-64c3-4c54-ae38-9f597cf7b6d5", - "name": "scheduler", - "type": "Scheduler", - "fieldKind": "input", - "label": "", - "value": "euler" - }, - "control": { - "id": "599b49e8-6435-4576-be41-a5155f3a17e3", - "name": "control", - "type": "ControlField", + "metadata": { + "id": "88971324-3fdb-442d-b8b7-7612478a8622", + "name": "metadata", + "type": "MetadataField", "fieldKind": "input", "label": "" }, "latents": { - "id": "226f9e91-454e-4159-9fa6-019c0cf29277", + "id": "da0e40cb-c49f-4fa5-9856-338b91a65f6b", "name": "latents", "type": "LatentsField", "fieldKind": "input", "label": "" }, - "denoise_mask": { - "id": "de019cb6-7fb5-45bf-a266-22e20889893f", - "name": "denoise_mask", - "type": "DenoiseMaskField", + "vae": { + "id": "ae5164ce-1710-4ec5-a83a-6113a0d1b5c0", + "name": "vae", + "type": "VaeField", "fieldKind": "input", "label": "" }, - "positive_conditioning": { - "id": "02fc400a-110d-470e-8411-f404f966a949", - "name": "positive_conditioning", - 
"type": "ConditioningField", + "tiled": { + "id": "2ccfd535-1a7b-4ecf-84db-9430a64fb3d7", + "name": "tiled", + "type": "boolean", "fieldKind": "input", - "label": "" + "label": "", + "value": false }, - "negative_conditioning": { - "id": "4bd3bdfa-fcf4-42be-8e47-1e314255798f", - "name": "negative_conditioning", - "type": "ConditioningField", + "fp32": { + "id": "64f07d5a-54a2-429c-8c5b-0c2a3a8e5cd5", + "name": "fp32", + "type": "boolean", "fieldKind": "input", - "label": "" - }, - "unet": { - "id": "7c2d58a8-b5f1-4e63-8ffd-8ada52c35832", - "name": "unet", - "type": "UNetField", - "fieldKind": "input", - "label": "" + "label": "", + "value": false } }, "outputs": { - "latents": { - "id": "6a6fa492-de26-4e95-b1d9-a322fe37eb13", - "name": "latents", - "type": "LatentsField", + "image": { + "id": "9b281eaa-6504-407d-a5ca-1e5e8020a4bf", + "name": "image", + "type": "ImageField", "fieldKind": "output" }, "width": { - "id": "a9790729-7d6c-4418-903d-4da961fccf56", + "id": "98e545f3-b53b-490d-b94d-bed9418ccc75", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "fa74efe5-7330-4a3c-b256-c82a544585b4", + "id": "4a74bd43-d7f7-4c7f-bb3b-d09bb2992c46", "name": "height", "type": "integer", "fieldKind": "output" @@ -626,13 +502,161 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": false, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 558, + "height": 267, "position": { - "x": 1650, - "y": -250 + "x": 2112.5626808057173, + "y": -174.24042139280238 + } + }, + { + "id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", + "type": "invocation", + "data": { + "id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", + "type": "denoise_latents", + "inputs": { + "positive_conditioning": { + "id": "29b73dfa-a06e-4b4a-a844-515b9eb93a81", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "a81e6f5b-f4de-4919-b483-b6e2f067465a", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "noise": { + "id": "4ba06bb7-eb45-4fb9-9984-31001b545587", + "name": "noise", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "36ee8a45-ca69-44bc-9bc3-aa881e6045c0", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "2a2024e0-a736-46ec-933c-c1c1ebe96943", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "be219d5e-41b7-430a-8fb5-bc21a31ad219", + "name": "denoising_start", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "3adfb7ae-c9f7-4a40-b6e0-4c2050bd1a99", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "14423e0d-7215-4ee0-b065-f9e95eaa8d7d", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "unet": { + "id": "e73bbf98-6489-492b-b83c-faed215febac", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + }, + "control": { + "id": "dab351b3-0c86-4ea5-9782-4e8edbfb0607", + "name": "control", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "192daea0-a90a-43cc-a2ee-0114a8e90318", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", 
+ "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "ee386a55-d4c7-48c1-ac57-7bc4e3aada7a", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "3a922c6a-3d8c-4c9e-b3ec-2f4d81cda077", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "cd7ce032-835f-495f-8b45-d57272f33132", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "6260b84f-8361-470a-98d8-5b22a45c2d8c", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "aede0ecf-25b6-46be-aa30-b77f79715deb", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "519abf62-d475-48ef-ab8f-66136bc0e499", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.4.0" + }, + "width": 320, + "height": 646, + "position": { + "x": 1642.955772577545, + "y": -230.2485847594651 } } ], @@ -686,50 +710,42 @@ { "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "sourceHandle": "vae", - "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", + "target": "63e91020-83b2-4f35-b174-ad9692aabb48", "targetHandle": "vae", - "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22vae-dbcd2f98-d809-48c8-bf64-2635f88a2fe9vae", - "type": "default" - }, - { - "source": "87ee6243-fb0d-4f77-ad5f-56591659339e", - "sourceHandle": "latents", - "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "targetHandle": "latents", - "id": "reactflow__edge-87ee6243-fb0d-4f77-ad5f-56591659339elatents-dbcd2f98-d809-48c8-bf64-2635f88a2fe9latents", - "type": "default" - }, - { - "source": "faf965a4-7530-427b-b1f3-4ba6505c2a08", - "sourceHandle": "conditioning", - "target": "87ee6243-fb0d-4f77-ad5f-56591659339e", - "targetHandle": "positive_conditioning", - "id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-87ee6243-fb0d-4f77-ad5f-56591659339epositive_conditioning", - "type": "default" - }, - { - "source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", - "sourceHandle": "conditioning", - "target": "87ee6243-fb0d-4f77-ad5f-56591659339e", - "targetHandle": "negative_conditioning", - "id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-87ee6243-fb0d-4f77-ad5f-56591659339enegative_conditioning", + "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22vae-63e91020-83b2-4f35-b174-ad9692aabb48vae", "type": "default" }, { "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "sourceHandle": "unet", - "target": "87ee6243-fb0d-4f77-ad5f-56591659339e", + "target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", "targetHandle": "unet", - "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-87ee6243-fb0d-4f77-ad5f-56591659339eunet", + "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbunet", + "type": "default" + }, + { + "source": "faf965a4-7530-427b-b1f3-4ba6505c2a08", + "sourceHandle": "conditioning", + "target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", + "targetHandle": "positive_conditioning", + "id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbpositive_conditioning", + "type": "default" + }, + { + "source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", + "sourceHandle": "conditioning", + 
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", + "targetHandle": "negative_conditioning", + "id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnegative_conditioning", "type": "default" }, { "source": "55705012-79b9-4aac-9f26-c0b10309785b", "sourceHandle": "noise", - "target": "87ee6243-fb0d-4f77-ad5f-56591659339e", + "target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", "targetHandle": "noise", - "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-87ee6243-fb0d-4f77-ad5f-56591659339enoise", + "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnoise", "type": "default" } ] -} +} \ No newline at end of file diff --git a/docs/workflows/SDXL_w_Refiner_Text_to_Image.json b/docs/workflows/SDXL_w_Refiner_Text_to_Image.json index 22ffb8262d..f70d974702 100644 --- a/docs/workflows/SDXL_w_Refiner_Text_to_Image.json +++ b/docs/workflows/SDXL_w_Refiner_Text_to_Image.json @@ -11,10 +11,6 @@ "nodeId": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "fieldName": "model" }, - { - "nodeId": "65b56526-ef0a-4c1f-adda-1017c925b063", - "fieldName": "steps" - }, { "nodeId": "06a30867-1e9d-461f-bd58-14a63cc997dd", "fieldName": "scheduler" @@ -23,10 +19,6 @@ "nodeId": "62bdf243-d98f-4508-b6b5-c3af00ef49f0", "fieldName": "model" }, - { - "nodeId": "a9352523-613a-43e3-b97f-dade7ec317e5", - "fieldName": "steps" - }, { "nodeId": "b2b35add-929d-4538-aecb-02c661768b29", "fieldName": "value" @@ -48,142 +40,10 @@ "version": "1.0.0" }, "nodes": [ - { - "id": "a9352523-613a-43e3-b97f-dade7ec317e5", - "type": "invocation", - "data": { - "version": "1.0.0", - "id": "a9352523-613a-43e3-b97f-dade7ec317e5", - "type": "denoise_latents", - "inputs": { - "noise": { - "id": "962fb1ba-341c-441c-940b-1543caafab29", - "name": "noise", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "steps": { - "id": "2b76247b-cc60-4ef0-8a51-290700590805", - "name": "steps", - "type": "integer", - "fieldKind": "input", - "label": "Refiner Steps", - "value": 36 - }, - "cfg_scale": { - "id": "f13c5cf5-6198-4183-9b47-0a44c5666a2a", - "name": "cfg_scale", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 7.5 - }, - "denoising_start": { - "id": "397bb49d-7d00-465b-a918-456910d7fedb", - "name": "denoising_start", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0.8 - }, - "denoising_end": { - "id": "dac6aa2a-d074-4e86-af0c-def573dd69ac", - "name": "denoising_end", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 1 - }, - "scheduler": { - "id": "34f9f11c-f2fc-48b2-b015-ededbf2d000f", - "name": "scheduler", - "type": "Scheduler", - "fieldKind": "input", - "label": "", - "value": "euler" - }, - "control": { - "id": "80c69321-e712-453b-b8a8-b4e03d37844c", - "name": "control", - "type": "ControlField", - "fieldKind": "input", - "label": "" - }, - "latents": { - "id": "8122d26c-ad2f-4f65-93d5-9ebb426bdba4", - "name": "latents", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "denoise_mask": { - "id": "5dc048d6-28c3-4db4-9e8b-652006616c17", - "name": "denoise_mask", - "type": "DenoiseMaskField", - "fieldKind": "input", - "label": "" - }, - "positive_conditioning": { - "id": "dd5cab6f-6dbd-4791-a21a-ef0544f26f8f", - "name": "positive_conditioning", - "type": "ConditioningField", - "fieldKind": "input", - "label": "" - }, - "negative_conditioning": { - "id": "5efedcb9-3286-426a-ad57-f77b2d7d1898", - "name": "negative_conditioning", - "type": 
"ConditioningField", - "fieldKind": "input", - "label": "" - }, - "unet": { - "id": "3ebc07f0-4cd7-4f4d-a5b3-a8ce13383305", - "name": "unet", - "type": "UNetField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "latents": { - "id": "ca9c565a-1dda-428c-9fdf-7c51eb7fa9c5", - "name": "latents", - "type": "LatentsField", - "fieldKind": "output" - }, - "width": { - "id": "d91b7cbd-fe5c-4c92-923e-241d1a63648c", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "af9ee999-a666-42a8-8e5c-d04518c4aa8e", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true - }, - "width": 320, - "height": 558, - "position": { - "x": 1650, - "y": -150 - } - }, { "id": "b2b35add-929d-4538-aecb-02c661768b29", "type": "invocation", "data": { - "version": "1.0.0", "id": "b2b35add-929d-4538-aecb-02c661768b29", "type": "string", "inputs": { @@ -208,7 +68,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -221,7 +83,6 @@ "id": "8d54b9db-3662-43af-8369-9a277e063f3b", "type": "invocation", "data": { - "version": "1.0.0", "id": "8d54b9db-3662-43af-8369-9a277e063f3b", "type": "string", "inputs": { @@ -246,7 +107,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -259,7 +122,6 @@ "id": "f1a6a160-4c36-4902-8eeb-8b1c23e81bc8", "type": "invocation", "data": { - "version": "1.0.0", "id": "f1a6a160-4c36-4902-8eeb-8b1c23e81bc8", "type": "string", "inputs": { @@ -284,7 +146,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -297,7 +161,6 @@ "id": "fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1", "type": "invocation", "data": { - "version": "1.0.0", "id": "fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1", "type": "sdxl_refiner_compel_prompt", "inputs": { @@ -369,10 +232,12 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 520, + "height": 547, "position": { "x": 1625, "y": -925 @@ -382,7 +247,6 @@ "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", "type": "invocation", "data": { - "version": "1.0.0", "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", "type": "sdxl_compel_prompt", "inputs": { @@ -477,10 +341,12 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 764, + "height": 793, "position": { "x": 900, "y": -925 @@ -490,7 +356,6 @@ "id": "55705012-79b9-4aac-9f26-c0b10309785b", "type": "invocation", "data": { - "version": "1.0.0", "id": "55705012-79b9-4aac-9f26-c0b10309785b", "type": "noise", "inputs": { @@ -551,7 +416,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -560,83 +427,10 @@ "y": -200 } }, - { - "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "type": "invocation", - "data": { - "version": "1.0.0", - "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "type": "l2i", - "inputs": { - "tiled": { - "id": 
"24f5bc7b-f6a1-425d-8ab1-f50b4db5d0df", - "name": "tiled", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "fp32": { - "id": "b146d873-ffb9-4767-986a-5360504841a2", - "name": "fp32", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": true - }, - "latents": { - "id": "65441abd-7713-4b00-9d8d-3771404002e8", - "name": "latents", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "vae": { - "id": "a478b833-6e13-4611-9a10-842c89603c74", - "name": "vae", - "type": "VaeField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "image": { - "id": "c87ae925-f858-417a-8940-8708ba9b4b53", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "4bcb8512-b5a1-45f1-9e52-6e92849f9d6c", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "23e41c00-a354-48e8-8f59-5875679c27ab", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": true, - "isIntermediate": false - }, - "width": 320, - "height": 266, - "position": { - "x": 2075, - "y": -400 - } - }, { "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "type": "invocation", "data": { - "version": "1.0.0", "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "type": "rand_int", "inputs": { @@ -669,7 +463,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": false, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -682,7 +478,6 @@ "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "type": "invocation", "data": { - "version": "1.0.0", "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "type": "sdxl_model_loader", "inputs": { @@ -729,7 +524,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -742,7 +539,6 @@ "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", "type": "invocation", "data": { - "version": "1.0.0", "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", "type": "sdxl_compel_prompt", "inputs": { @@ -837,10 +633,12 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 764, + "height": 793, "position": { "x": 550, "y": -925 @@ -850,7 +648,6 @@ "id": "f0e06b70-9f53-44e3-8f5f-63d813b6b579", "type": "invocation", "data": { - "version": "1.0.0", "id": "f0e06b70-9f53-44e3-8f5f-63d813b6b579", "type": "sdxl_refiner_compel_prompt", "inputs": { @@ -922,10 +719,12 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 520, + "height": 547, "position": { "x": 1275, "y": -925 @@ -935,7 +734,6 @@ "id": "62bdf243-d98f-4508-b6b5-c3af00ef49f0", "type": "invocation", "data": { - "version": "1.0.0", "id": "62bdf243-d98f-4508-b6b5-c3af00ef49f0", "type": "sdxl_refiner_model_loader", "inputs": { @@ -976,7 +774,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -989,7 +789,6 @@ "id": "5639e3bc-b769-4ae5-9262-72db703c5a7b", "type": "invocation", "data": { - "version": "1.0.0", "id": "5639e3bc-b769-4ae5-9262-72db703c5a7b", "type": "string", "inputs": { @@ -1014,7 +813,9 @@ "isOpen": 
false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -1023,142 +824,10 @@ "y": 25 } }, - { - "id": "65b56526-ef0a-4c1f-adda-1017c925b063", - "type": "invocation", - "data": { - "version": "1.0.0", - "id": "65b56526-ef0a-4c1f-adda-1017c925b063", - "type": "denoise_latents", - "inputs": { - "noise": { - "id": "962fb1ba-341c-441c-940b-1543caafab29", - "name": "noise", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "steps": { - "id": "2b76247b-cc60-4ef0-8a51-290700590805", - "name": "steps", - "type": "integer", - "fieldKind": "input", - "label": "", - "value": 36 - }, - "cfg_scale": { - "id": "f13c5cf5-6198-4183-9b47-0a44c5666a2a", - "name": "cfg_scale", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 7.5 - }, - "denoising_start": { - "id": "397bb49d-7d00-465b-a918-456910d7fedb", - "name": "denoising_start", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0 - }, - "denoising_end": { - "id": "dac6aa2a-d074-4e86-af0c-def573dd69ac", - "name": "denoising_end", - "type": "float", - "fieldKind": "input", - "label": "", - "value": 0.8 - }, - "scheduler": { - "id": "34f9f11c-f2fc-48b2-b015-ededbf2d000f", - "name": "scheduler", - "type": "Scheduler", - "fieldKind": "input", - "label": "", - "value": "euler" - }, - "control": { - "id": "80c69321-e712-453b-b8a8-b4e03d37844c", - "name": "control", - "type": "ControlField", - "fieldKind": "input", - "label": "" - }, - "latents": { - "id": "8122d26c-ad2f-4f65-93d5-9ebb426bdba4", - "name": "latents", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "denoise_mask": { - "id": "5dc048d6-28c3-4db4-9e8b-652006616c17", - "name": "denoise_mask", - "type": "DenoiseMaskField", - "fieldKind": "input", - "label": "" - }, - "positive_conditioning": { - "id": "dd5cab6f-6dbd-4791-a21a-ef0544f26f8f", - "name": "positive_conditioning", - "type": "ConditioningField", - "fieldKind": "input", - "label": "" - }, - "negative_conditioning": { - "id": "5efedcb9-3286-426a-ad57-f77b2d7d1898", - "name": "negative_conditioning", - "type": "ConditioningField", - "fieldKind": "input", - "label": "" - }, - "unet": { - "id": "3ebc07f0-4cd7-4f4d-a5b3-a8ce13383305", - "name": "unet", - "type": "UNetField", - "fieldKind": "input", - "label": "" - } - }, - "outputs": { - "latents": { - "id": "ca9c565a-1dda-428c-9fdf-7c51eb7fa9c5", - "name": "latents", - "type": "LatentsField", - "fieldKind": "output" - }, - "width": { - "id": "d91b7cbd-fe5c-4c92-923e-241d1a63648c", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "af9ee999-a666-42a8-8e5c-d04518c4aa8e", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": false, - "isIntermediate": true - }, - "width": 320, - "height": 558, - "position": { - "x": 1275, - "y": -150 - } - }, { "id": "06a30867-1e9d-461f-bd58-14a63cc997dd", "type": "invocation", "data": { - "version": "1.0.0", "id": "06a30867-1e9d-461f-bd58-14a63cc997dd", "type": "scheduler", "inputs": { @@ -1183,7 +852,9 @@ "isOpen": false, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, "height": 32, @@ -1191,6 +862,378 @@ "x": 700, "y": 125 } + }, + { + "id": "84df8f00-ea7e-499f-ab86-d019ddea5393", + "type": "invocation", + "data": { + "id": 
"84df8f00-ea7e-499f-ab86-d019ddea5393", + "type": "denoise_latents", + "inputs": { + "positive_conditioning": { + "id": "73b2ebc2-4a56-4809-b8ab-b78fde786961", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "04d1bfbb-6cdc-4c16-8e08-290ba86ca8ba", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "noise": { + "id": "39ea4659-ea69-415f-85c0-a06f94d53e14", + "name": "noise", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "dfd3c295-adae-499a-8c94-3c6c6d9ece0e", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "2ae0c196-8c94-4ea8-a9fc-1be06938a0c3", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "3d085ec1-14de-4eef-9853-2edf5d81daac", + "name": "denoising_start", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "1a820924-15ca-4ba5-b981-6b588e486a5b", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "d0d19fab-5001-4c5d-b664-031df1a65311", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "unet": { + "id": "efbdecd1-5c07-420c-bd58-52de43fcde4c", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + }, + "control": { + "id": "e1a457c4-5546-4c02-83e1-092776b27cd1", + "name": "control", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "d4082d78-7f17-4f87-af05-5a76129737ba", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "4841595c-f81b-440a-9377-fe89b26b42ac", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "60bbfc7e-6641-4354-b678-12029c580aa9", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "b2876171-e4c5-45cf-a352-852047c902fc", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "62705a29-cc3a-4154-8f62-a8f821daf861", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "eb2e2312-1e64-4008-a64f-6783d49dde29", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "7bd8d012-edcf-4def-98eb-7ebdd724c7c5", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.4.0" + }, + "width": 320, + "height": 646, + "position": { + "x": 1269.2683722842958, + "y": -119.4839111990423 + } + }, + { + "id": "3d40eda5-ff7b-4dff-8d2e-4f44742faa1b", + "type": "invocation", + "data": { + "id": "3d40eda5-ff7b-4dff-8d2e-4f44742faa1b", + "type": "denoise_latents", + "inputs": { + "positive_conditioning": { + "id": "a9c932a9-6164-4333-bade-3909c9a3ce59", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "a57fced5-aca6-40c9-8197-ce4f01433111", + "name": 
"negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "noise": { + "id": "b4cbec14-c24e-4ec2-bda3-8fc19c089717", + "name": "noise", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "steps": { + "id": "24dd36f7-fdf1-40c9-945c-216471b44a2f", + "name": "steps", + "type": "integer", + "fieldKind": "input", + "label": "", + "value": 10 + }, + "cfg_scale": { + "id": "5f3a7f0c-5088-49e9-b490-75822d0c20cc", + "name": "cfg_scale", + "type": "FloatPolymorphic", + "fieldKind": "input", + "label": "", + "value": 7.5 + }, + "denoising_start": { + "id": "b326ffde-625c-49c5-b5e1-90b79df80979", + "name": "denoising_start", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 0 + }, + "denoising_end": { + "id": "d4a458ef-5576-4c5d-8e6d-ee04c9c7c4dc", + "name": "denoising_end", + "type": "float", + "fieldKind": "input", + "label": "", + "value": 1 + }, + "scheduler": { + "id": "95aba2d0-c470-44f2-a25c-a192600be6da", + "name": "scheduler", + "type": "Scheduler", + "fieldKind": "input", + "label": "", + "value": "euler" + }, + "unet": { + "id": "68a8636e-3b2f-4a95-bd05-d86a01edb74a", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + }, + "control": { + "id": "508e68a6-1cfc-4121-baed-b829b2886474", + "name": "control", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "e976e72f-8bd1-44d4-ad75-8410db221e3f", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "38e15c99-ff72-443a-bddc-440fab9ccefc", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "f6d628e8-05ca-4ee3-a5a4-35323ebeb853", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "denoise_mask": { + "id": "e2385456-d127-4793-98ca-d93b4aee3481", + "name": "denoise_mask", + "type": "DenoiseMaskField", + "fieldKind": "input", + "label": "" + } + }, + "outputs": { + "latents": { + "id": "a61efc39-ba21-468b-ae58-5922337cf399", + "name": "latents", + "type": "LatentsField", + "fieldKind": "output" + }, + "width": { + "id": "9093043b-808b-4ac6-ab18-7d721a7e39d7", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "5ff472a4-ee22-4988-bcf7-7d6116a37e5a", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": true, + "useCache": true, + "version": "1.4.0" + }, + "width": 320, + "height": 646, + "position": { + "x": 1672.552348276784, + "y": -118.3156091718022 + } + }, + { + "id": "17eb4b88-bdd8-4984-affa-26586b146866", + "type": "invocation", + "data": { + "id": "17eb4b88-bdd8-4984-affa-26586b146866", + "type": "l2i", + "inputs": { + "metadata": { + "id": "04f49f70-8ee6-43e3-ac63-af02e5b34204", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "d27c4313-01db-45cb-b9f4-6a827d0d766a", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", + "label": "" + }, + "vae": { + "id": "3751b0f6-69f3-4f95-a7f9-476b2d31e9f9", + "name": "vae", + "type": "VaeField", + "fieldKind": "input", + "label": "" + }, + "tiled": { + "id": "07e5e79a-b452-4beb-b26f-715da2387ac7", + "name": "tiled", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + }, + "fp32": { + "id": 
"afe954f6-ecaf-4eac-98ee-23f4d0eb7a6b", + "name": "fp32", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + "id": "98515f37-9fe7-420e-839b-6e349d9407df", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "5203f61f-02db-423c-9c85-80aa20816dea", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "499e6152-c604-4dad-84c3-8c5b26a39919", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": false, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 267, + "position": { + "x": 2045.2934900771834, + "y": -362.2916292593367 + } } ], "edges": [ @@ -1296,102 +1339,6 @@ "id": "reactflow__edge-8d54b9db-3662-43af-8369-9a277e063f3bvalue-fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1style", "type": "default" }, - { - "source": "faf965a4-7530-427b-b1f3-4ba6505c2a08", - "sourceHandle": "conditioning", - "target": "65b56526-ef0a-4c1f-adda-1017c925b063", - "targetHandle": "positive_conditioning", - "id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-65b56526-ef0a-4c1f-adda-1017c925b063positive_conditioning", - "type": "default" - }, - { - "source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", - "sourceHandle": "conditioning", - "target": "65b56526-ef0a-4c1f-adda-1017c925b063", - "targetHandle": "negative_conditioning", - "id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-65b56526-ef0a-4c1f-adda-1017c925b063negative_conditioning", - "type": "default" - }, - { - "source": "55705012-79b9-4aac-9f26-c0b10309785b", - "sourceHandle": "noise", - "target": "65b56526-ef0a-4c1f-adda-1017c925b063", - "targetHandle": "noise", - "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-65b56526-ef0a-4c1f-adda-1017c925b063noise", - "type": "default" - }, - { - "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", - "sourceHandle": "unet", - "target": "65b56526-ef0a-4c1f-adda-1017c925b063", - "targetHandle": "unet", - "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-65b56526-ef0a-4c1f-adda-1017c925b063unet", - "type": "default" - }, - { - "source": "fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1", - "sourceHandle": "conditioning", - "target": "a9352523-613a-43e3-b97f-dade7ec317e5", - "targetHandle": "negative_conditioning", - "id": "reactflow__edge-fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1conditioning-a9352523-613a-43e3-b97f-dade7ec317e5negative_conditioning", - "type": "default" - }, - { - "source": "f0e06b70-9f53-44e3-8f5f-63d813b6b579", - "sourceHandle": "conditioning", - "target": "a9352523-613a-43e3-b97f-dade7ec317e5", - "targetHandle": "positive_conditioning", - "id": "reactflow__edge-f0e06b70-9f53-44e3-8f5f-63d813b6b579conditioning-a9352523-613a-43e3-b97f-dade7ec317e5positive_conditioning", - "type": "default" - }, - { - "source": "a9352523-613a-43e3-b97f-dade7ec317e5", - "sourceHandle": "latents", - "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "targetHandle": "latents", - "id": "reactflow__edge-a9352523-613a-43e3-b97f-dade7ec317e5latents-dbcd2f98-d809-48c8-bf64-2635f88a2fe9latents", - "type": "default" - }, - { - "source": "62bdf243-d98f-4508-b6b5-c3af00ef49f0", - "sourceHandle": "vae", - "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "targetHandle": "vae", - "id": "reactflow__edge-62bdf243-d98f-4508-b6b5-c3af00ef49f0vae-dbcd2f98-d809-48c8-bf64-2635f88a2fe9vae", - "type": "default" - }, - { - 
"source": "62bdf243-d98f-4508-b6b5-c3af00ef49f0", - "sourceHandle": "unet", - "target": "a9352523-613a-43e3-b97f-dade7ec317e5", - "targetHandle": "unet", - "id": "reactflow__edge-62bdf243-d98f-4508-b6b5-c3af00ef49f0unet-a9352523-613a-43e3-b97f-dade7ec317e5unet", - "type": "default" - }, - { - "source": "65b56526-ef0a-4c1f-adda-1017c925b063", - "sourceHandle": "latents", - "target": "a9352523-613a-43e3-b97f-dade7ec317e5", - "targetHandle": "latents", - "id": "reactflow__edge-65b56526-ef0a-4c1f-adda-1017c925b063latents-a9352523-613a-43e3-b97f-dade7ec317e5latents", - "type": "default" - }, - { - "source": "06a30867-1e9d-461f-bd58-14a63cc997dd", - "sourceHandle": "scheduler", - "target": "65b56526-ef0a-4c1f-adda-1017c925b063", - "targetHandle": "scheduler", - "id": "reactflow__edge-06a30867-1e9d-461f-bd58-14a63cc997ddscheduler-65b56526-ef0a-4c1f-adda-1017c925b063scheduler", - "type": "default" - }, - { - "source": "06a30867-1e9d-461f-bd58-14a63cc997dd", - "sourceHandle": "scheduler", - "target": "a9352523-613a-43e3-b97f-dade7ec317e5", - "targetHandle": "scheduler", - "id": "reactflow__edge-06a30867-1e9d-461f-bd58-14a63cc997ddscheduler-a9352523-613a-43e3-b97f-dade7ec317e5scheduler", - "type": "default" - }, { "source": "8d54b9db-3662-43af-8369-9a277e063f3b", "sourceHandle": "value", @@ -1399,6 +1346,94 @@ "targetHandle": "style", "id": "reactflow__edge-8d54b9db-3662-43af-8369-9a277e063f3bvalue-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204style", "type": "default" + }, + { + "source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", + "sourceHandle": "conditioning", + "target": "84df8f00-ea7e-499f-ab86-d019ddea5393", + "targetHandle": "negative_conditioning", + "id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-84df8f00-ea7e-499f-ab86-d019ddea5393negative_conditioning", + "type": "default" + }, + { + "source": "faf965a4-7530-427b-b1f3-4ba6505c2a08", + "sourceHandle": "conditioning", + "target": "84df8f00-ea7e-499f-ab86-d019ddea5393", + "targetHandle": "positive_conditioning", + "id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-84df8f00-ea7e-499f-ab86-d019ddea5393positive_conditioning", + "type": "default" + }, + { + "source": "55705012-79b9-4aac-9f26-c0b10309785b", + "sourceHandle": "noise", + "target": "84df8f00-ea7e-499f-ab86-d019ddea5393", + "targetHandle": "noise", + "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-84df8f00-ea7e-499f-ab86-d019ddea5393noise", + "type": "default" + }, + { + "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", + "sourceHandle": "unet", + "target": "84df8f00-ea7e-499f-ab86-d019ddea5393", + "targetHandle": "unet", + "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-84df8f00-ea7e-499f-ab86-d019ddea5393unet", + "type": "default" + }, + { + "source": "06a30867-1e9d-461f-bd58-14a63cc997dd", + "sourceHandle": "scheduler", + "target": "84df8f00-ea7e-499f-ab86-d019ddea5393", + "targetHandle": "scheduler", + "id": "reactflow__edge-06a30867-1e9d-461f-bd58-14a63cc997ddscheduler-84df8f00-ea7e-499f-ab86-d019ddea5393scheduler", + "type": "default" + }, + { + "source": "84df8f00-ea7e-499f-ab86-d019ddea5393", + "sourceHandle": "latents", + "target": "3d40eda5-ff7b-4dff-8d2e-4f44742faa1b", + "targetHandle": "latents", + "id": "reactflow__edge-84df8f00-ea7e-499f-ab86-d019ddea5393latents-3d40eda5-ff7b-4dff-8d2e-4f44742faa1blatents", + "type": "default" + }, + { + "source": "62bdf243-d98f-4508-b6b5-c3af00ef49f0", + "sourceHandle": "unet", + "target": "3d40eda5-ff7b-4dff-8d2e-4f44742faa1b", + "targetHandle": "unet", + "id": 
"reactflow__edge-62bdf243-d98f-4508-b6b5-c3af00ef49f0unet-3d40eda5-ff7b-4dff-8d2e-4f44742faa1bunet", + "type": "default" + }, + { + "source": "f0e06b70-9f53-44e3-8f5f-63d813b6b579", + "sourceHandle": "conditioning", + "target": "3d40eda5-ff7b-4dff-8d2e-4f44742faa1b", + "targetHandle": "positive_conditioning", + "id": "reactflow__edge-f0e06b70-9f53-44e3-8f5f-63d813b6b579conditioning-3d40eda5-ff7b-4dff-8d2e-4f44742faa1bpositive_conditioning", + "type": "default" + }, + { + "source": "fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1", + "sourceHandle": "conditioning", + "target": "3d40eda5-ff7b-4dff-8d2e-4f44742faa1b", + "targetHandle": "negative_conditioning", + "id": "reactflow__edge-fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1conditioning-3d40eda5-ff7b-4dff-8d2e-4f44742faa1bnegative_conditioning", + "type": "default" + }, + { + "source": "3d40eda5-ff7b-4dff-8d2e-4f44742faa1b", + "sourceHandle": "latents", + "target": "17eb4b88-bdd8-4984-affa-26586b146866", + "targetHandle": "latents", + "id": "reactflow__edge-3d40eda5-ff7b-4dff-8d2e-4f44742faa1blatents-17eb4b88-bdd8-4984-affa-26586b146866latents", + "type": "default" + }, + { + "source": "62bdf243-d98f-4508-b6b5-c3af00ef49f0", + "sourceHandle": "vae", + "target": "17eb4b88-bdd8-4984-affa-26586b146866", + "targetHandle": "vae", + "id": "reactflow__edge-62bdf243-d98f-4508-b6b5-c3af00ef49f0vae-17eb4b88-bdd8-4984-affa-26586b146866vae", + "type": "default" } ] -} +} \ No newline at end of file diff --git a/docs/workflows/Text_to_Image.json b/docs/workflows/Text_to_Image.json index 7239a2247f..a49ce7bf93 100644 --- a/docs/workflows/Text_to_Image.json +++ b/docs/workflows/Text_to_Image.json @@ -18,10 +18,6 @@ { "nodeId": "93dc02a4-d05b-48ed-b99c-c9b616af3402", "fieldName": "prompt" - }, - { - "nodeId": "75899702-fa44-46d2-b2d5-3e17f234c3e7", - "fieldName": "steps" } ], "meta": { @@ -32,7 +28,6 @@ "id": "93dc02a4-d05b-48ed-b99c-c9b616af3402", "type": "invocation", "data": { - "version": "1.0.0", "id": "93dc02a4-d05b-48ed-b99c-c9b616af3402", "type": "compel", "inputs": { @@ -64,20 +59,21 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 235, + "height": 261, "position": { - "x": 1400, - "y": -75 + "x": 995.7263915923627, + "y": 239.67783573351227 } }, { "id": "55705012-79b9-4aac-9f26-c0b10309785b", "type": "invocation", "data": { - "version": "1.0.0", "id": "55705012-79b9-4aac-9f26-c0b10309785b", "type": "noise", "inputs": { @@ -138,92 +134,21 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 364, + "height": 389, "position": { - "x": 1000, - "y": 350 - } - }, - { - "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "type": "invocation", - "data": { - "version": "1.0.0", - "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "type": "l2i", - "inputs": { - "tiled": { - "id": "24f5bc7b-f6a1-425d-8ab1-f50b4db5d0df", - "name": "tiled", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "fp32": { - "id": "b146d873-ffb9-4767-986a-5360504841a2", - "name": "fp32", - "type": "boolean", - "fieldKind": "input", - "label": "", - "value": false - }, - "latents": { - "id": "65441abd-7713-4b00-9d8d-3771404002e8", - "name": "latents", - "type": "LatentsField", - "fieldKind": "input", - "label": "" - }, - "vae": { - "id": "a478b833-6e13-4611-9a10-842c89603c74", - "name": "vae", - "type": "VaeField", - 
"fieldKind": "input", - "label": "" - } - }, - "outputs": { - "image": { - "id": "c87ae925-f858-417a-8940-8708ba9b4b53", - "name": "image", - "type": "ImageField", - "fieldKind": "output" - }, - "width": { - "id": "4bcb8512-b5a1-45f1-9e52-6e92849f9d6c", - "name": "width", - "type": "integer", - "fieldKind": "output" - }, - "height": { - "id": "23e41c00-a354-48e8-8f59-5875679c27ab", - "name": "height", - "type": "integer", - "fieldKind": "output" - } - }, - "label": "", - "isOpen": true, - "notes": "", - "embedWorkflow": true, - "isIntermediate": false - }, - "width": 320, - "height": 266, - "position": { - "x": 1800, - "y": 200 + "x": 993.4442117555518, + "y": 605.6757415334787 } }, { "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", "type": "invocation", "data": { - "version": "1.0.0", "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", "type": "main_model_loader", "inputs": { @@ -261,23 +186,24 @@ } }, "label": "", - "isOpen": false, + "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 32, + "height": 226, "position": { - "x": 1000, - "y": 200 + "x": 163.04436745878343, + "y": 254.63156870373479 } }, { "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c", "type": "invocation", "data": { - "version": "1.0.0", "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c", "type": "compel", "inputs": { @@ -309,20 +235,21 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.0.0" }, "width": 320, - "height": 235, + "height": 261, "position": { - "x": 1000, - "y": -75 + "x": 595.7263915923627, + "y": 239.67783573351227 } }, { "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "type": "invocation", "data": { - "version": "1.0.0", "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "type": "rand_int", "inputs": { @@ -352,51 +279,66 @@ } }, "label": "Random Seed", - "isOpen": false, + "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": false, + "version": "1.0.0" }, "width": 320, - "height": 32, + "height": 218, "position": { - "x": 1000, - "y": 275 + "x": 541.094822888628, + "y": 694.5704476446829 } }, { - "id": "75899702-fa44-46d2-b2d5-3e17f234c3e7", + "id": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "type": "invocation", "data": { - "version": "1.0.0", - "id": "75899702-fa44-46d2-b2d5-3e17f234c3e7", + "id": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "type": "denoise_latents", "inputs": { + "positive_conditioning": { + "id": "90b7f4f8-ada7-4028-8100-d2e54f192052", + "name": "positive_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, + "negative_conditioning": { + "id": "9393779e-796c-4f64-b740-902a1177bf53", + "name": "negative_conditioning", + "type": "ConditioningField", + "fieldKind": "input", + "label": "" + }, "noise": { - "id": "8b18f3eb-40d2-45c1-9a9d-28d6af0dce2b", + "id": "8e17f1e5-4f98-40b1-b7f4-86aeeb4554c1", "name": "noise", "type": "LatentsField", "fieldKind": "input", "label": "" }, "steps": { - "id": "0be4373c-46f3-441c-80a7-a4bb6ceb498c", + "id": "9b63302d-6bd2-42c9-ac13-9b1afb51af88", "name": "steps", "type": "integer", "fieldKind": "input", "label": "", - "value": 36 + "value": 10 }, "cfg_scale": { - "id": "107267ce-4666-4cd7-94b3-7476b7973ae9", + "id": "87dd04d3-870e-49e1-98bf-af003a810109", "name": "cfg_scale", - "type": "float", + "type": "FloatPolymorphic", "fieldKind": "input", "label": "", "value": 7.5 }, 
"denoising_start": { - "id": "d2ce9f0f-5fc2-48b2-b917-53442941e9a1", + "id": "f369d80f-4931-4740-9bcd-9f0620719fab", "name": "denoising_start", "type": "float", "fieldKind": "input", @@ -404,7 +346,7 @@ "value": 0 }, "denoising_end": { - "id": "8ad51505-b8d0-422a-beb8-96fc6fc6b65f", + "id": "747d10e5-6f02-445c-994c-0604d814de8c", "name": "denoising_end", "type": "float", "fieldKind": "input", @@ -412,71 +354,71 @@ "value": 1 }, "scheduler": { - "id": "53092874-a43b-4623-91a2-76e62fdb1f2e", + "id": "1de84a4e-3a24-4ec8-862b-16ce49633b9b", "name": "scheduler", "type": "Scheduler", "fieldKind": "input", "label": "", "value": "euler" }, + "unet": { + "id": "ffa6fef4-3ce2-4bdb-9296-9a834849489b", + "name": "unet", + "type": "UNetField", + "fieldKind": "input", + "label": "" + }, "control": { - "id": "7abe57cc-469d-437e-ad72-a18efa28215f", + "id": "077b64cb-34be-4fcc-83f2-e399807a02bd", "name": "control", - "type": "ControlField", + "type": "ControlPolymorphic", + "fieldKind": "input", + "label": "" + }, + "ip_adapter": { + "id": "1d6948f7-3a65-4a65-a20c-768b287251aa", + "name": "ip_adapter", + "type": "IPAdapterPolymorphic", + "fieldKind": "input", + "label": "" + }, + "t2i_adapter": { + "id": "75e67b09-952f-4083-aaf4-6b804d690412", + "name": "t2i_adapter", + "type": "T2IAdapterPolymorphic", "fieldKind": "input", "label": "" }, "latents": { - "id": "add8bbe5-14d0-42d4-a867-9c65ab8dd129", + "id": "334d4ba3-5a99-4195-82c5-86fb3f4f7d43", "name": "latents", "type": "LatentsField", "fieldKind": "input", "label": "" }, "denoise_mask": { - "id": "f373a190-0fc8-45b7-ae62-c4aa8e9687e1", + "id": "0d3dbdbf-b014-4e95-8b18-ff2ff9cb0bfa", "name": "denoise_mask", "type": "DenoiseMaskField", "fieldKind": "input", "label": "" - }, - "positive_conditioning": { - "id": "c7160303-8a23-4f15-9197-855d48802a7f", - "name": "positive_conditioning", - "type": "ConditioningField", - "fieldKind": "input", - "label": "" - }, - "negative_conditioning": { - "id": "fd750efa-1dfc-4d0b-accb-828e905ba320", - "name": "negative_conditioning", - "type": "ConditioningField", - "fieldKind": "input", - "label": "" - }, - "unet": { - "id": "af1f41ba-ce2a-4314-8d7f-494bb5800381", - "name": "unet", - "type": "UNetField", - "fieldKind": "input", - "label": "" } }, "outputs": { "latents": { - "id": "8508d04d-f999-4a44-94d0-388ab1401d27", + "id": "70fa5bbc-0c38-41bb-861a-74d6d78d2f38", "name": "latents", "type": "LatentsField", "fieldKind": "output" }, "width": { - "id": "93dc8287-0a2a-4320-83a4-5e994b7ba23e", + "id": "98ee0e6c-82aa-4e8f-8be5-dc5f00ee47f0", "name": "width", "type": "integer", "fieldKind": "output" }, "height": { - "id": "d9862f5c-0ab5-46fa-8c29-5059bb581d96", + "id": "e8cb184a-5e1a-47c8-9695-4b8979564f5d", "name": "height", "type": "integer", "fieldKind": "output" @@ -486,13 +428,95 @@ "isOpen": true, "notes": "", "embedWorkflow": false, - "isIntermediate": true + "isIntermediate": true, + "useCache": true, + "version": "1.4.0" }, "width": 320, - "height": 558, + "height": 646, "position": { - "x": 1400, - "y": 200 + "x": 1476.5794704734735, + "y": 256.80174342731783 + } + }, + { + "id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f", + "type": "invocation", + "data": { + "id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f", + "type": "l2i", + "inputs": { + "metadata": { + "id": "ab375f12-0042-4410-9182-29e30db82c85", + "name": "metadata", + "type": "MetadataField", + "fieldKind": "input", + "label": "" + }, + "latents": { + "id": "3a7e7efd-bff5-47d7-9d48-615127afee78", + "name": "latents", + "type": "LatentsField", + "fieldKind": "input", 
+ "label": "" + }, + "vae": { + "id": "a1f5f7a1-0795-4d58-b036-7820c0b0ef2b", + "name": "vae", + "type": "VaeField", + "fieldKind": "input", + "label": "" + }, + "tiled": { + "id": "da52059a-0cee-4668-942f-519aa794d739", + "name": "tiled", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + }, + "fp32": { + "id": "c4841df3-b24e-4140-be3b-ccd454c2522c", + "name": "fp32", + "type": "boolean", + "fieldKind": "input", + "label": "", + "value": false + } + }, + "outputs": { + "image": { + "id": "72d667d0-cf85-459d-abf2-28bd8b823fe7", + "name": "image", + "type": "ImageField", + "fieldKind": "output" + }, + "width": { + "id": "c8c907d8-1066-49d1-b9a6-83bdcd53addc", + "name": "width", + "type": "integer", + "fieldKind": "output" + }, + "height": { + "id": "230f359c-b4ea-436c-b372-332d7dcdca85", + "name": "height", + "type": "integer", + "fieldKind": "output" + } + }, + "label": "", + "isOpen": true, + "notes": "", + "embedWorkflow": false, + "isIntermediate": false, + "useCache": true, + "version": "1.0.0" + }, + "width": 320, + "height": 267, + "position": { + "x": 2037.9648469717395, + "y": 426.10844427600136 } } ], @@ -522,52 +546,52 @@ "type": "default" }, { - "source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", - "sourceHandle": "vae", - "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "targetHandle": "vae", - "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-dbcd2f98-d809-48c8-bf64-2635f88a2fe9vae", - "type": "default" - }, - { - "source": "75899702-fa44-46d2-b2d5-3e17f234c3e7", - "sourceHandle": "latents", - "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9", - "targetHandle": "latents", - "id": "reactflow__edge-75899702-fa44-46d2-b2d5-3e17f234c3e7latents-dbcd2f98-d809-48c8-bf64-2635f88a2fe9latents", + "source": "55705012-79b9-4aac-9f26-c0b10309785b", + "sourceHandle": "noise", + "target": "eea2702a-19fb-45b5-9d75-56b4211ec03c", + "targetHandle": "noise", + "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-eea2702a-19fb-45b5-9d75-56b4211ec03cnoise", "type": "default" }, { "source": "7d8bf987-284f-413a-b2fd-d825445a5d6c", "sourceHandle": "conditioning", - "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7", + "target": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "targetHandle": "positive_conditioning", - "id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-75899702-fa44-46d2-b2d5-3e17f234c3e7positive_conditioning", + "id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cpositive_conditioning", "type": "default" }, { "source": "93dc02a4-d05b-48ed-b99c-c9b616af3402", "sourceHandle": "conditioning", - "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7", + "target": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "targetHandle": "negative_conditioning", - "id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-75899702-fa44-46d2-b2d5-3e17f234c3e7negative_conditioning", + "id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cnegative_conditioning", "type": "default" }, { "source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", "sourceHandle": "unet", - "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7", + "target": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "targetHandle": "unet", - "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-75899702-fa44-46d2-b2d5-3e17f234c3e7unet", + "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-eea2702a-19fb-45b5-9d75-56b4211ec03cunet", "type": "default" }, { - "source": 
"55705012-79b9-4aac-9f26-c0b10309785b", - "sourceHandle": "noise", - "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7", - "targetHandle": "noise", - "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-75899702-fa44-46d2-b2d5-3e17f234c3e7noise", + "source": "eea2702a-19fb-45b5-9d75-56b4211ec03c", + "sourceHandle": "latents", + "target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f", + "targetHandle": "latents", + "id": "reactflow__edge-eea2702a-19fb-45b5-9d75-56b4211ec03clatents-58c957f5-0d01-41fc-a803-b2bbf0413d4flatents", + "type": "default" + }, + { + "source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", + "sourceHandle": "vae", + "target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f", + "targetHandle": "vae", + "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-58c957f5-0d01-41fc-a803-b2bbf0413d4fvae", "type": "default" } ] -} +} \ No newline at end of file From 63548c5ea7761053ec7bbf98ab4a106c68c4a21f Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Tue, 31 Oct 2023 16:26:53 +1100 Subject: [PATCH 169/202] Update community node installation instructions --- docs/features/CONTROLNET.md | 1 - docs/nodes/communityNodes.md | 10 +++++++--- docs/nodes/contributingNodes.md | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md index f55194207c..dc773f1ec9 100644 --- a/docs/features/CONTROLNET.md +++ b/docs/features/CONTROLNET.md @@ -150,7 +150,6 @@ Start/End - 0 represents the start of the generation, 1 represents the end. The Additionally, each section can be expanded with the "Show Advanced" button in order to manipulate settings for the image pre-processor that adjusts your uploaded image before using it in during the generation process. -**Note:** T2I-Adapter models and ControlNet models cannot currently be used together. ## IP-Adapter diff --git a/docs/nodes/communityNodes.md b/docs/nodes/communityNodes.md index f394a7c9bd..d24873b24c 100644 --- a/docs/nodes/communityNodes.md +++ b/docs/nodes/communityNodes.md @@ -4,7 +4,11 @@ These are nodes that have been developed by the community, for the community. If If you'd like to submit a node for the community, please refer to the [node creation overview](contributingNodes.md). -To download a node, simply download the `.py` node file from the link and add it to the `invokeai/app/invocations` folder in your Invoke AI install location. If you used the automated installation, this can be found inside the `.venv` folder. Along with the node, an example node graph should be provided to help you get started with the node. +To use a node, add the node to the `nodes` folder found in your InvokeAI install location. + +The suggested method is to use `git clone` to clone the repository the node is found in. This allows for easy updates of the node in the future. + +If you'd prefer, you can also just download the `.py` file from the linked repository and add it to the `nodes` folder. To use a community workflow, download the the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor. @@ -329,9 +333,9 @@ See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/edit/main/READ **Description:** This node allows you to do super cool things with InvokeAI. 
-**Node Link:** https://github.com/invoke-ai/InvokeAI/fake_node.py +**Node Link:** https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/app/invocations/prompt.py -**Example Node Graph:** https://github.com/invoke-ai/InvokeAI/fake_node_graph.json +**Example Workflow:** https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Prompt_from_File.json **Output Examples** diff --git a/docs/nodes/contributingNodes.md b/docs/nodes/contributingNodes.md index c58a56e4e6..a2fd066eb8 100644 --- a/docs/nodes/contributingNodes.md +++ b/docs/nodes/contributingNodes.md @@ -4,7 +4,7 @@ To learn about the specifics of creating a new node, please visit our [Node crea Once you’ve created a node and confirmed that it behaves as expected locally, follow these steps: -- Make sure the node is contained in a new Python (.py) file. Preferrably, the node is in a repo with a README detaling the nodes usage & examples to help others more easily use your node. +- Make sure the node is contained in a new Python (.py) file. Preferably, the node is in a repo with a README detailing the nodes usage & examples to help others more easily use your node. Including the term "InvokeAI Node" in your repository's README can also help other users find it more easily. - Submit a pull request with a link to your node(s) repo in GitHub against the `main` branch to add the node to the [Community Nodes](communityNodes.md) list - Make sure you are following the template below and have provided all relevant details about the node and what it does. Example output images and workflows are very helpful for other users looking to use your node. - A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you may be asked for permission to include it in the core project. From ed81d6d533366c84d65e2f2868ed72b066c4ebd3 Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Tue, 31 Oct 2023 16:29:29 +1100 Subject: [PATCH 170/202] Update contributingNodes.md --- docs/nodes/contributingNodes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/nodes/contributingNodes.md b/docs/nodes/contributingNodes.md index a2fd066eb8..7a30c8aeb0 100644 --- a/docs/nodes/contributingNodes.md +++ b/docs/nodes/contributingNodes.md @@ -4,7 +4,7 @@ To learn about the specifics of creating a new node, please visit our [Node crea Once you’ve created a node and confirmed that it behaves as expected locally, follow these steps: -- Make sure the node is contained in a new Python (.py) file. Preferably, the node is in a repo with a README detailing the nodes usage & examples to help others more easily use your node. Including the term "InvokeAI Node" in your repository's README can also help other users find it more easily. +- Make sure the node is contained in a new Python (.py) file. Preferably, the node is in a repo with a README detailing the nodes usage & examples to help others more easily use your node. Including the tag "invokeai-node" in your repository's README can also help other users find it more easily. - Submit a pull request with a link to your node(s) repo in GitHub against the `main` branch to add the node to the [Community Nodes](communityNodes.md) list - Make sure you are following the template below and have provided all relevant details about the node and what it does. Example output images and workflows are very helpful for other users looking to use your node. - A maintainer will review the pull request and node. 
If the node is aligned with the direction of the project, you may be asked for permission to include it in the core project. From a79c86b901e20f136b91e69214fc247a88d59d3a Mon Sep 17 00:00:00 2001 From: Alexander Eichhorn Date: Tue, 31 Oct 2023 15:00:38 +0100 Subject: [PATCH 171/202] translationBot(ui): update translation (German) Currently translated at 51.7% (630 of 1217 strings) Co-authored-by: Alexander Eichhorn Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/de.json | 179 ++++++++++++++++++- 1 file changed, 170 insertions(+), 9 deletions(-) diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index b483d61928..c06ad56492 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -86,7 +86,10 @@ "batch": "Batch-Manager", "advanced": "Erweitert", "langBrPortuguese": "Portugiesisch (Brasilien)", - "unifiedCanvas": "Einheitliche Leinwand" + "unifiedCanvas": "Einheitliche Leinwand", + "openInNewTab": "In einem neuem Tab öffnen", + "statusProcessing": "wird bearbeitet", + "linear": "Linear" }, "gallery": { "generations": "Erzeugungen", @@ -101,7 +104,15 @@ "singleColumnLayout": "Einspaltiges Layout", "allImagesLoaded": "Alle Bilder geladen", "loadMore": "Mehr laden", - "noImagesInGallery": "Keine Bilder in der Galerie" + "noImagesInGallery": "Keine Bilder in der Galerie", + "loading": "Lade", + "preparingDownload": "bereite Download vor", + "preparingDownloadFailed": "Problem beim Download vorbereiten", + "deleteImage": "Lösche Bild", + "images": "Bilder", + "copy": "Kopieren", + "download": "Runterladen", + "setCurrentImage": "Setze aktuelle Bild" }, "hotkeys": { "keyboardShortcuts": "Tastenkürzel", @@ -110,7 +121,8 @@ "galleryHotkeys": "Galerie Tastenkürzel", "unifiedCanvasHotkeys": "Unified Canvas Tastenkürzel", "invoke": { - "desc": "Ein Bild erzeugen" + "desc": "Ein Bild erzeugen", + "title": "Invoke" }, "cancel": { "title": "Abbrechen", @@ -307,6 +319,10 @@ "acceptStagingImage": { "title": "Staging-Bild akzeptieren", "desc": "Akzeptieren Sie das aktuelle Bild des Staging-Bereichs" + }, + "nodesHotkeys": "Knoten Tastenkürzel", + "addNodes": { + "title": "Knotenpunkt hinzufügen" } }, "modelManager": { @@ -356,7 +372,7 @@ "deleteModel": "Model löschen", "deleteConfig": "Konfiguration löschen", "deleteMsg1": "Möchten Sie diesen Model-Eintrag wirklich aus InvokeAI löschen?", - "deleteMsg2": "Dadurch wird die Modellprüfpunktdatei nicht von Ihrer Festplatte gelöscht. Sie können sie bei Bedarf erneut hinzufügen.", + "deleteMsg2": "Dadurch WIRD das Modell von der Festplatte gelöscht WENN es im InvokeAI Root Ordner liegt. Wenn es in einem anderem Ordner liegt wird das Modell NICHT von der Festplatte gelöscht.", "customConfig": "Benutzerdefinierte Konfiguration", "invokeRoot": "InvokeAI Ordner", "formMessageDiffusersVAELocationDesc": "Falls nicht angegeben, sucht InvokeAI nach der VAE-Datei innerhalb des oben angegebenen Modell Speicherortes.", @@ -402,7 +418,17 @@ "v2_768": "v2 (768px)", "none": "Nix", "repoIDValidationMsg": "Online Repo Ihres Modells", - "vaeRepoIDValidationMsg": "Online Repo Ihrer VAE" + "vaeRepoIDValidationMsg": "Online Repo Ihrer VAE", + "importModels": "Importiere Modelle", + "merge": "Zusammenführen", + "addDiffuserModel": "Diffusers hinzufügen", + "advanced": "Erweitert", + "closeAdvanced": "Schließe Erweitert", + "convertingModelBegin": "Konvertiere Modell. 
Bitte warten.", + "customConfigFileLocation": "Benutzerdefinierte Konfiguration Datei Speicherort", + "baseModel": "Basis Modell", + "convertToDiffusers": "Konvertiere zu Diffusers", + "diffusersModels": "Diffusers" }, "parameters": { "images": "Bilder", @@ -422,7 +448,7 @@ "type": "Art", "strength": "Stärke", "upscaling": "Hochskalierung", - "upscale": "Hochskalieren", + "upscale": "Hochskalieren (Shift + U)", "upscaleImage": "Bild hochskalieren", "scale": "Maßstab", "otherOptions": "Andere Optionen", @@ -478,7 +504,7 @@ "resetWebUI": "Web-Oberfläche zurücksetzen", "resetWebUIDesc1": "Das Zurücksetzen der Web-Oberfläche setzt nur den lokalen Cache des Browsers mit Ihren Bildern und gespeicherten Einstellungen zurück. Es werden keine Bilder von der Festplatte gelöscht.", "resetWebUIDesc2": "Wenn die Bilder nicht in der Galerie angezeigt werden oder etwas anderes nicht funktioniert, versuchen Sie bitte, die Einstellungen zurückzusetzen, bevor Sie einen Fehler auf GitHub melden.", - "resetComplete": "Die Web-Oberfläche wurde zurückgesetzt. Aktualisieren Sie die Seite, um sie neu zu laden.", + "resetComplete": "Die Web-Oberfläche wurde zurückgesetzt.", "models": "Modelle", "useSlidersForAll": "Schieberegler für alle Optionen verwenden" }, @@ -490,7 +516,7 @@ "imageCopied": "Bild kopiert", "imageLinkCopied": "Bildlink kopiert", "imageNotLoaded": "Kein Bild geladen", - "imageNotLoadedDesc": "Kein Bild gefunden, das an das Bild zu Bild-Modul gesendet werden kann", + "imageNotLoadedDesc": "Konnte kein Bild finden", "imageSavedToGallery": "Bild in die Galerie gespeichert", "canvasMerged": "Leinwand zusammengeführt", "sentToImageToImage": "Gesendet an Bild zu Bild", @@ -585,7 +611,10 @@ "betaClear": "Löschen", "betaDarkenOutside": "Außen abdunkeln", "betaLimitToBox": "Begrenzung auf das Feld", - "betaPreserveMasked": "Maskiertes bewahren" + "betaPreserveMasked": "Maskiertes bewahren", + "antialiasing": "Kantenglättung", + "showResultsOn": "Zeige Ergebnisse (An)", + "showResultsOff": "Zeige Ergebnisse (Aus)" }, "accessibility": { "modelSelect": "Model Auswahl", @@ -627,5 +656,137 @@ "changeBoard": "Ordner wechseln", "loading": "Laden...", "clearSearch": "Suche leeren" + }, + "controlnet": { + "showAdvanced": "Zeige Erweitert", + "contentShuffleDescription": "Mischt den Inhalt von einem Bild", + "addT2IAdapter": "$t(common.t2iAdapter) hinzufügen", + "importImageFromCanvas": "Importieren Bild von Zeichenfläche", + "lineartDescription": "Konvertiere Bild zu Lineart", + "importMaskFromCanvas": "Importiere Maske von Zeichenfläche", + "hed": "HED", + "hideAdvanced": "Verstecke Erweitert", + "contentShuffle": "Inhalt mischen", + "controlNetEnabledT2IDisabled": "$t(common.controlNet) ist aktiv, $t(common.t2iAdapter) ist deaktiviert", + "ipAdapterModel": "Adapter Modell", + "beginEndStepPercent": "Start / Ende Step Prozent", + "duplicate": "Kopieren", + "f": "F", + "h": "H", + "depthMidasDescription": "Tiefenmap erstellen mit Midas", + "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))", + "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) ist aktiv, $t(common.controlNet) ist deaktiviert", + "weight": "Breite", + "selectModel": "Wähle ein Modell", + "depthMidas": "Tiefe (Midas)", + "w": "W", + "addControlNet": "$t(common.controlNet) hinzufügen", + "none": "Kein", + "incompatibleBaseModel": "Inkompatibles Basismodell:", + "enableControlnet": "Aktiviere ControlNet", + "detectResolution": "Auflösung erkennen", + "controlNetT2IMutexDesc": "$t(common.controlNet) und 
$t(common.t2iAdapter) zur gleichen Zeit wird nicht unterstützt.", + "ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))", + "fill": "Füllen", + "addIPAdapter": "$t(common.ipAdapter) hinzufügen", + "colorMapDescription": "Erstelle eine Farbkarte von diesem Bild", + "t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))", + "imageResolution": "Bild Auflösung", + "depthZoe": "Tiefe (Zoe)", + "colorMap": "Farbe", + "lowThreshold": "Niedrige Schwelle", + "highThreshold": "Hohe Schwelle", + "toggleControlNet": "Schalten ControlNet um", + "delete": "Löschen", + "controlAdapter_one": "Control Adapter", + "controlAdapter_other": "Control Adapters", + "colorMapTileSize": "Tile Größe", + "depthZoeDescription": "Tiefenmap erstellen mit Zoe", + "setControlImageDimensions": "Setze Control Bild Auflösung auf Breite/Höhe", + "handAndFace": "Hand und Gesicht", + "enableIPAdapter": "Aktiviere IP Adapter" + }, + "queue": { + "status": "Status", + "cancelTooltip": "Aktuellen Aufgabe abbrechen", + "queueEmpty": "Warteschlange leer", + "in_progress": "In Arbeit", + "queueFront": "An den Anfang der Warteschlange tun", + "completed": "Fertig", + "queueBack": "In die Warteschlange", + "clearFailed": "Probleme beim leeren der Warteschlange", + "clearSucceeded": "Warteschlange geleert", + "pause": "Pause", + "cancelSucceeded": "Auftrag abgebrochen", + "queue": "Warteschlange", + "batch": "Stapel", + "pending": "Ausstehend", + "clear": "Leeren", + "prune": "Leeren", + "total": "Gesamt", + "canceled": "Abgebrochen", + "clearTooltip": "Abbrechen und alle Aufträge leeren", + "current": "Aktuell", + "failed": "Fehler", + "cancelItem": "Abbruch Auftrag", + "next": "Nächste", + "cancel": "Abbruch", + "session": "Sitzung", + "queueTotal": "{{total}} Gesamt", + "resume": "Wieder aufnehmen", + "item": "Auftrag" + }, + "metadata": { + "negativePrompt": "Negativ Beschreibung", + "metadata": "Meta-Data", + "strength": "Bild zu Bild stärke", + "imageDetails": "Bild Details", + "model": "Modell", + "noImageDetails": "Keine Bild Details gefunden", + "cfgScale": "CFG-Skala", + "fit": "Bild zu Bild passen", + "height": "Höhe", + "noMetaData": "Keine Meta-Data gefunden", + "width": "Breite", + "createdBy": "Erstellt von", + "steps": "Schritte" + }, + "popovers": { + "noiseUseCPU": { + "heading": "Nutze Prozessor rauschen" + }, + "paramModel": { + "heading": "Modell" + }, + "paramIterations": { + "heading": "Iterationen" + }, + "paramCFGScale": { + "heading": "CFG-Skala" + }, + "paramSteps": { + "heading": "Schritte" + }, + "lora": { + "heading": "LoRA Gewichte" + }, + "infillMethod": { + "heading": "Füllmethode" + }, + "paramVAE": { + "heading": "VAE" + } + }, + "ui": { + "lockRatio": "Verhältnis sperren", + "hideProgressImages": "Verstecke Prozess Bild", + "showProgressImages": "Zeige Prozess Bild" + }, + "invocationCache": { + "disable": "Deaktivieren", + "misses": "Cache Nötig", + "hits": "Cache Treffer", + "enable": "Aktivieren", + "clear": "Leeren" } } From 94055ae54a644cd1d7026cd1fae9b575629501f2 Mon Sep 17 00:00:00 2001 From: nemuruibai Date: Tue, 31 Oct 2023 15:00:38 +0100 Subject: [PATCH 172/202] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 99.8% (1215 of 1217 strings) Co-authored-by: nemuruibai Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/zh_CN.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 3f896076d4..729d7652e3 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -866,7 +866,7 @@ "version": "版本", "validateConnections": "验证连接和节点图", "inputMayOnlyHaveOneConnection": "输入仅能有一个连接", - "notes": "节点", + "notes": "注释", "nodeOutputs": "节点输出", "currentImageDescription": "在节点编辑器中显示当前图像", "validateConnectionsHelp": "防止建立无效连接和调用无效节点图", @@ -892,11 +892,11 @@ "currentImage": "当前图像", "workflowName": "名称", "cannotConnectInputToInput": "无法将输入连接到输入", - "workflowNotes": "节点", + "workflowNotes": "注释", "cannotConnectOutputToOutput": "无法将输出连接到输出", "connectionWouldCreateCycle": "连接将创建一个循环", "cannotConnectToSelf": "无法连接自己", - "notesDescription": "添加有关您的工作流的节点", + "notesDescription": "添加有关您的工作流的注释", "unknownField": "未知", "colorCodeEdges": "边缘颜色编码", "unknownNode": "未知节点", From 584b5130381094d8a56bcc9a62d71a273d8988c9 Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Wed, 1 Nov 2023 08:55:06 +1100 Subject: [PATCH 173/202] Remove LowRA from Initial Models --- invokeai/configs/INITIAL_MODELS.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/invokeai/configs/INITIAL_MODELS.yaml b/invokeai/configs/INITIAL_MODELS.yaml index b6883ea915..67fcad4055 100644 --- a/invokeai/configs/INITIAL_MODELS.yaml +++ b/invokeai/configs/INITIAL_MODELS.yaml @@ -117,9 +117,6 @@ sd-1/embedding/EasyNegative: recommended: True sd-1/embedding/ahx-beta-453407d: repo_id: sd-concepts-library/ahx-beta-453407d -sd-1/lora/LowRA: - path: https://civitai.com/api/download/models/63006 - recommended: True sd-1/lora/Ink scenery: path: https://civitai.com/api/download/models/83390 sd-1/ip_adapter/ip_adapter_sd15: From 6c66adcd90313bd0861db4239ed4b22db9eca496 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 31 Oct 2023 21:55:46 -0500 Subject: [PATCH 174/202] fix(ui): show collapse labels only if not default value --- .../components/ParamDynamicPromptsCollapse.tsx | 8 +++----- .../Parameters/Advanced/ParamAdvancedCollapse.tsx | 4 +--- .../Parameters/HighResFix/ParamHrfCollapse.tsx | 12 ++++++------ 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx index f34235bab2..3ce6cdc99e 100644 --- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx +++ b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx @@ -16,15 +16,13 @@ const ParamDynamicPromptsCollapse = () => { () => createSelector(stateSelector, ({ dynamicPrompts }) => { const count = dynamicPrompts.prompts.length; - if (count === 1) { - return t('dynamicPrompts.promptsWithCount_one', { - count, - }); - } else { + if (count > 1) { return t('dynamicPrompts.promptsWithCount_other', { count, }); } + + return; }), [t] ); diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse.tsx index 85b6eaa903..718b38cfba 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse.tsx 
@@ -28,9 +28,7 @@ export default function ParamAdvancedCollapse() { const activeLabel = useMemo(() => { const activeLabel: string[] = []; - if (shouldUseCpuNoise) { - activeLabel.push(t('parameters.cpuNoise')); - } else { + if (!shouldUseCpuNoise) { activeLabel.push(t('parameters.gpuNoise')); } diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/HighResFix/ParamHrfCollapse.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/HighResFix/ParamHrfCollapse.tsx index ef0db1af6d..63709f23aa 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/HighResFix/ParamHrfCollapse.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/HighResFix/ParamHrfCollapse.tsx @@ -4,12 +4,13 @@ import { RootState, stateSelector } from 'app/store/store'; import { useAppSelector } from 'app/store/storeHooks'; import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; import IAICollapse from 'common/components/IAICollapse'; +import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import ParamHrfHeight from './ParamHrfHeight'; import ParamHrfStrength from './ParamHrfStrength'; import ParamHrfToggle from './ParamHrfToggle'; import ParamHrfWidth from './ParamHrfWidth'; -import ParamHrfHeight from './ParamHrfHeight'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; const selector = createSelector( stateSelector, @@ -22,15 +23,14 @@ const selector = createSelector( ); export default function ParamHrfCollapse() { + const { t } = useTranslation(); const isHRFFeatureEnabled = useFeatureStatus('hrf').isFeatureEnabled; const { hrfEnabled } = useAppSelector(selector); const activeLabel = useMemo(() => { if (hrfEnabled) { - return 'On'; - } else { - return 'Off'; + return t('common.on'); } - }, [hrfEnabled]); + }, [t, hrfEnabled]); if (!isHRFFeatureEnabled) { return null; From 7f4ce518b7cbb90175fce0b91741c8c119ffd258 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 19 Oct 2023 15:18:32 -0400 Subject: [PATCH 175/202] auto-format lora.py --- invokeai/backend/model_management/lora.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py index bb44455c88..fc2b764a43 100644 --- a/invokeai/backend/model_management/lora.py +++ b/invokeai/backend/model_management/lora.py @@ -196,7 +196,9 @@ class ModelPatcher: if model_embeddings.weight.data[token_id].shape != embedding.shape: raise ValueError( - f"Cannot load embedding for {trigger}. It was trained on a model with token dimension {embedding.shape[0]}, but the current model has token dimension {model_embeddings.weight.data[token_id].shape[0]}." + f"Cannot load embedding for {trigger}. It was trained on a model with token dimension" + f" {embedding.shape[0]}, but the current model has token dimension" + f" {model_embeddings.weight.data[token_id].shape[0]}." ) model_embeddings.weight.data[token_id] = embedding.to( @@ -257,7 +259,8 @@ class TextualInversionModel: if "string_to_param" in state_dict: if len(state_dict["string_to_param"]) > 1: print( - f'Warn: Embedding "{file_path.name}" contains multiple tokens, which is not supported. The first token will be used.' + f'Warn: Embedding "{file_path.name}" contains multiple tokens, which is not supported. The first' + " token will be used." 
) result.embedding = next(iter(state_dict["string_to_param"].values())) @@ -470,7 +473,9 @@ class ONNXModelPatcher: if embeddings[token_id].shape != embedding.shape: raise ValueError( - f"Cannot load embedding for {trigger}. It was trained on a model with token dimension {embedding.shape[0]}, but the current model has token dimension {embeddings[token_id].shape[0]}." + f"Cannot load embedding for {trigger}. It was trained on a model with token dimension" + f" {embedding.shape[0]}, but the current model has token dimension" + f" {embeddings[token_id].shape[0]}." ) embeddings[token_id] = embedding From 2ba5b44ec49ad8a28d3bc7bca69f96889c3e9638 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 19 Oct 2023 15:20:13 -0400 Subject: [PATCH 176/202] Remove unused _lora_forward_hook(...). --- invokeai/backend/model_management/lora.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py index fc2b764a43..e4f5aeb98e 100644 --- a/invokeai/backend/model_management/lora.py +++ b/invokeai/backend/model_management/lora.py @@ -54,24 +54,6 @@ class ModelPatcher: return (module_key, module) - @staticmethod - def _lora_forward_hook( - applied_loras: List[Tuple[LoRAModel, float]], - layer_name: str, - ): - def lora_forward(module, input_h, output): - if len(applied_loras) == 0: - return output - - for lora, weight in applied_loras: - layer = lora.layers.get(layer_name, None) - if layer is None: - continue - output += layer.forward(module, input_h, weight) - return output - - return lora_forward - @classmethod @contextmanager def apply_lora_unet( From 545c811bf10184150cc6567d927c8dab59cc29cd Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Tue, 31 Oct 2023 15:15:03 -0400 Subject: [PATCH 177/202] Remove device and dtype members from LoRAModelRaw, they can too easily get out-of-sync with the underlying layer states. --- .../backend/model_management/models/lora.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/invokeai/backend/model_management/models/lora.py b/invokeai/backend/model_management/models/lora.py index b6f321d60b..6ececa7df1 100644 --- a/invokeai/backend/model_management/models/lora.py +++ b/invokeai/backend/model_management/models/lora.py @@ -440,33 +440,19 @@ class IA3Layer(LoRALayerBase): class LoRAModelRaw: # (torch.nn.Module): _name: str layers: Dict[str, LoRALayer] - _device: torch.device - _dtype: torch.dtype def __init__( self, name: str, layers: Dict[str, LoRALayer], - device: torch.device, - dtype: torch.dtype, ): self._name = name - self._device = device or torch.cpu - self._dtype = dtype or torch.float32 self.layers = layers @property def name(self): return self._name - @property - def device(self): - return self._device - - @property - def dtype(self): - return self._dtype - def to( self, device: Optional[torch.device] = None, @@ -475,8 +461,6 @@ class LoRAModelRaw: # (torch.nn.Module): # TODO: try revert if exception? for key, layer in self.layers.items(): layer.to(device=device, dtype=dtype) - self._device = device - self._dtype = dtype def calc_size(self) -> int: model_size = 0 @@ -557,8 +541,6 @@ class LoRAModelRaw: # (torch.nn.Module): file_path = Path(file_path) model = cls( - device=device, - dtype=dtype, name=file_path.stem, # TODO: layers=dict(), ) From 379d68f595afda20d2496f6b6f129b9bde9050e9 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Tue, 31 Oct 2023 15:39:54 -0400 Subject: [PATCH 178/202] Patch LoRA on device when model is already on device. 
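The context managers in compel.py and latent.py are reordered below so that the
LoRA patch is applied after the model has been moved to its execution device,
which makes the per-layer weight math run on that device (usually CUDA) instead
of on the CPU copy. A minimal sketch of the core idea, assuming a CUDA-capable
setup; this is illustrative only, and the helper name is made up rather than
code from this patch:

    import torch

    @torch.no_grad()
    def patch_weight_on_device(module: torch.nn.Linear, delta: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        """Apply a LoRA-style delta to a module weight, doing the math on the module's own device."""
        # Keep a CPU copy of the original weight so the patch can be undone later.
        original = module.weight.detach().to(device="cpu", copy=True)
        device, dtype = module.weight.device, module.weight.dtype
        # Move the delta to the module's device first, compute in float32, then cast back.
        module.weight += (delta.to(device=device, dtype=torch.float32) * scale).to(dtype=dtype)
        return original

If the module is already on CUDA when this runs, the add happens on the GPU. If
the LoRA were applied before the model is moved, the same work would run on the
CPU and the patched weights would have to be transferred afterwards.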
--- invokeai/app/invocations/compel.py | 6 ++++-- invokeai/app/invocations/latent.py | 3 ++- invokeai/backend/model_management/lora.py | 26 +++++++++++++++++------ 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index b3ebc92320..3a7d5e9e4d 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -108,13 +108,14 @@ class CompelInvocation(BaseInvocation): print(f'Warn: trigger: "{trigger}" not found') with ( - ModelPatcher.apply_lora_text_encoder(text_encoder_info.context.model, _lora_loader()), ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as ( tokenizer, ti_manager, ), ModelPatcher.apply_clip_skip(text_encoder_info.context.model, self.clip.skipped_layers), text_encoder_info as text_encoder, + # Apply the LoRA after text_encoder has been moved to its target device for faster patching. + ModelPatcher.apply_lora_text_encoder(text_encoder, _lora_loader()), ): compel = Compel( tokenizer=tokenizer, @@ -229,13 +230,14 @@ class SDXLPromptInvocationBase: print(f'Warn: trigger: "{trigger}" not found') with ( - ModelPatcher.apply_lora(text_encoder_info.context.model, _lora_loader(), lora_prefix), ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as ( tokenizer, ti_manager, ), ModelPatcher.apply_clip_skip(text_encoder_info.context.model, clip_field.skipped_layers), text_encoder_info as text_encoder, + # Apply the LoRA after text_encoder has been moved to its target device for faster patching. + ModelPatcher.apply_lora(text_encoder, _lora_loader(), lora_prefix), ): compel = Compel( tokenizer=tokenizer, diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index a537972c0b..56c13e6816 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -710,9 +710,10 @@ class DenoiseLatentsInvocation(BaseInvocation): ) with ( ExitStack() as exit_stack, - ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()), set_seamless(unet_info.context.model, self.unet.seamless_axes), unet_info as unet, + # Apply the LoRA after unet has been moved to its target device for faster patching. + ModelPatcher.apply_lora_unet(unet, _lora_loader()), ): latents = latents.to(device=unet.device, dtype=unet.dtype) if noise is not None: diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py index e4f5aeb98e..eb6c50bf0d 100644 --- a/invokeai/backend/model_management/lora.py +++ b/invokeai/backend/model_management/lora.py @@ -112,20 +112,34 @@ class ModelPatcher: continue module_key, module = cls._resolve_lora_key(model, layer_key, prefix) - if module_key not in original_weights: - original_weights[module_key] = module.weight.detach().to(device="cpu", copy=True) - # enable autocast to calc fp16 loras on cpu - # with torch.autocast(device_type="cpu"): + # All of the LoRA weight calculations will be done on the same device as the module weight. + # (Performance will be best if this is a CUDA device.) + device = module.weight.device + dtype = module.weight.dtype + + if module_key not in original_weights: + original_weights[module_key] = module.weight.to(device="cpu") + + # We intentionally move to the device first, then cast. Experimentally, this was found to + # be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the + # same thing in a single call to '.to(...)'. 
+ tmp_weight = module.weight.to(device=device, copy=True).to(dtype=torch.float32) + + # We intentionally move to the target device first, then cast. Experimentally, this was found to + # be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the + # same thing in a single call to '.to(...)'. + layer.to(device=device) layer.to(dtype=torch.float32) layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0 - layer_weight = layer.get_weight(original_weights[module_key]) * lora_weight * layer_scale + layer_weight = layer.get_weight(tmp_weight) * (lora_weight * layer_scale) + layer.to(device="cpu") if module.weight.shape != layer_weight.shape: # TODO: debug on lycoris layer_weight = layer_weight.reshape(module.weight.shape) - module.weight += layer_weight.to(device=module.weight.device, dtype=module.weight.dtype) + module.weight = torch.nn.Parameter((tmp_weight + layer_weight).to(dtype=dtype)) yield # wait for context manager exit From 61b17c475ae256b991efd795e55d06625d978320 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 1 Nov 2023 09:34:52 -0400 Subject: [PATCH 179/202] Add TODO note about improving _resolve_lora_key(...). --- invokeai/backend/model_management/lora.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py index eb6c50bf0d..1eaaa22adc 100644 --- a/invokeai/backend/model_management/lora.py +++ b/invokeai/backend/model_management/lora.py @@ -111,6 +111,13 @@ class ModelPatcher: if not layer_key.startswith(prefix): continue + # TODO(ryand): A non-negligible amount of time is currently spent resolving LoRA keys. This + # should be improved in the following ways: + # 1. The key mapping could be more-efficiently pre-computed. This would save time every time a + # LoRA model is applied. + # 2. From an API perspective, there's no reason that the `ModelPatcher` should be aware of the + # intricacies of Stable Diffusion key resolution. It should just expect the input LoRA + # weights to have valid keys. module_key, module = cls._resolve_lora_key(model, layer_key, prefix) # All of the LoRA weight calculations will be done on the same device as the module weight. From e92b84955c65fe4a17edf69edc8aa3f19f92e3f5 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 1 Nov 2023 10:52:51 -0400 Subject: [PATCH 180/202] Add minimal unit tests for ModelPatcher.apply_lora(...) --- tests/backend/model_management/test_lora.py | 102 ++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 tests/backend/model_management/test_lora.py diff --git a/tests/backend/model_management/test_lora.py b/tests/backend/model_management/test_lora.py new file mode 100644 index 0000000000..14bcc87c89 --- /dev/null +++ b/tests/backend/model_management/test_lora.py @@ -0,0 +1,102 @@ +# test that if the model's device changes while the lora is applied, the weights can still be restored + +# test that LoRA patching works on both CPU and CUDA + +import pytest +import torch + +from invokeai.backend.model_management.lora import ModelPatcher +from invokeai.backend.model_management.models.lora import LoRALayer, LoRAModelRaw + + +@pytest.mark.parametrize( + "device", + [ + "cpu", + pytest.param("cuda", marks=pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA device")), + ], +) +@torch.no_grad() +def test_apply_lora(device): + """Test the basic behavior of ModelPatcher.apply_lora(...). 
Check that patching and unpatching produce the correct + result, and that model/LoRA tensors are moved between devices as expected. + """ + + linear_in_features = 4 + linear_out_features = 8 + lora_dim = 2 + model = torch.nn.ModuleDict( + {"linear_layer_1": torch.nn.Linear(linear_in_features, linear_out_features, device=device, dtype=torch.float16)} + ) + + lora_layers = { + "linear_layer_1": LoRALayer( + layer_key="linear_layer_1", + values={ + "lora_down.weight": torch.ones((lora_dim, linear_in_features), device="cpu", dtype=torch.float16), + "lora_up.weight": torch.ones((linear_out_features, lora_dim), device="cpu", dtype=torch.float16), + }, + ) + } + lora = LoRAModelRaw("lora_name", lora_layers) + + lora_weight = 0.5 + orig_linear_weight = model["linear_layer_1"].weight.data.detach().clone() + expected_patched_linear_weight = orig_linear_weight + (lora_dim * lora_weight) + + with ModelPatcher.apply_lora(model, [(lora, lora_weight)], prefix=""): + # After patching, all LoRA layer weights should have been moved back to the cpu. + assert lora_layers["linear_layer_1"].up.device.type == "cpu" + assert lora_layers["linear_layer_1"].down.device.type == "cpu" + + # After patching, the patched model should still be on its original device. + assert model["linear_layer_1"].weight.data.device.type == device + + torch.testing.assert_close(model["linear_layer_1"].weight.data, expected_patched_linear_weight) + + # After unpatching, the original model weights should have been restored on the original device. + assert model["linear_layer_1"].weight.data.device.type == device + torch.testing.assert_close(model["linear_layer_1"].weight.data, orig_linear_weight) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA device") +@torch.no_grad() +def test_apply_lora_change_device(): + """Test that if LoRA patching is applied on the CPU, and then the patched model is moved to the GPU, unpatching + still behaves correctly. + """ + linear_in_features = 4 + linear_out_features = 8 + lora_dim = 2 + # Initialize the model on the CPU. + model = torch.nn.ModuleDict( + {"linear_layer_1": torch.nn.Linear(linear_in_features, linear_out_features, device="cpu", dtype=torch.float16)} + ) + + lora_layers = { + "linear_layer_1": LoRALayer( + layer_key="linear_layer_1", + values={ + "lora_down.weight": torch.ones((lora_dim, linear_in_features), device="cpu", dtype=torch.float16), + "lora_up.weight": torch.ones((linear_out_features, lora_dim), device="cpu", dtype=torch.float16), + }, + ) + } + lora = LoRAModelRaw("lora_name", lora_layers) + + orig_linear_weight = model["linear_layer_1"].weight.data.detach().clone() + + with ModelPatcher.apply_lora(model, [(lora, 0.5)], prefix=""): + # After patching, all LoRA layer weights should have been moved back to the cpu. + assert lora_layers["linear_layer_1"].up.device.type == "cpu" + assert lora_layers["linear_layer_1"].down.device.type == "cpu" + + # After patching, the patched model should still be on the CPU. + assert model["linear_layer_1"].weight.data.device.type == "cpu" + + # Move the model to the GPU. + assert model.to("cuda") + + # After unpatching, the original model weights should have been restored on the GPU. 
+ assert model["linear_layer_1"].weight.data.device.type == "cuda" + torch.testing.assert_close(model["linear_layer_1"].weight.data, orig_linear_weight, check_device=False) From fa7f6a6a10f01b496bbb384462918c85279bc404 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 1 Nov 2023 11:45:39 -0400 Subject: [PATCH 181/202] Further tidying of LoRA patching. Revert some changes that didn't end up being important under the constraint that calculations are done on the same device as the model. --- invokeai/backend/model_management/lora.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py index 1eaaa22adc..5002f278cc 100644 --- a/invokeai/backend/model_management/lora.py +++ b/invokeai/backend/model_management/lora.py @@ -126,27 +126,25 @@ class ModelPatcher: dtype = module.weight.dtype if module_key not in original_weights: - original_weights[module_key] = module.weight.to(device="cpu") + original_weights[module_key] = module.weight.detach().to(device="cpu", copy=True) - # We intentionally move to the device first, then cast. Experimentally, this was found to - # be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the - # same thing in a single call to '.to(...)'. - tmp_weight = module.weight.to(device=device, copy=True).to(dtype=torch.float32) + layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0 # We intentionally move to the target device first, then cast. Experimentally, this was found to # be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the # same thing in a single call to '.to(...)'. layer.to(device=device) layer.to(dtype=torch.float32) - layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0 - layer_weight = layer.get_weight(tmp_weight) * (lora_weight * layer_scale) + # TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA + # devices here. Experimentally, it was found to be very slow on CPU. More investigation needed. + layer_weight = layer.get_weight(module.weight) * (lora_weight * layer_scale) layer.to(device="cpu") if module.weight.shape != layer_weight.shape: # TODO: debug on lycoris layer_weight = layer_weight.reshape(module.weight.shape) - module.weight = torch.nn.Parameter((tmp_weight + layer_weight).to(dtype=dtype)) + module.weight += layer_weight.to(dtype=dtype) yield # wait for context manager exit From bac2a757e8f77d875cd3356f58a52dc0ea3ad518 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 2 Nov 2023 15:49:44 -0400 Subject: [PATCH 182/202] Replace deepcopy with a pickle roundtrip in apply_ti(...) to improve speed. --- invokeai/backend/model_management/lora.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py index 5002f278cc..2a0e465e03 100644 --- a/invokeai/backend/model_management/lora.py +++ b/invokeai/backend/model_management/lora.py @@ -1,6 +1,6 @@ from __future__ import annotations -import copy +import pickle from contextlib import contextmanager from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union @@ -165,7 +165,13 @@ class ModelPatcher: new_tokens_added = None try: - ti_tokenizer = copy.deepcopy(tokenizer) + # HACK: The CLIPTokenizer API does not include a way to remove tokens after calling add_tokens(...). 
As a + # workaround, we create a full copy of `tokenizer` so that its original behavior can be restored after + # exiting this `apply_ti(...)` context manager. + # + # In a previous implementation, the deep copy was obtained with `ti_tokenizer = copy.deepcopy(tokenizer)`, + # but a pickle roundtrip was found to be much faster (1 sec vs. 0.05 secs). + ti_tokenizer = pickle.loads(pickle.dumps(tokenizer)) ti_manager = TextualInversionManager(ti_tokenizer) init_tokens_count = text_encoder.resize_token_embeddings(None).num_embeddings @@ -439,7 +445,13 @@ class ONNXModelPatcher: orig_embeddings = None try: - ti_tokenizer = copy.deepcopy(tokenizer) + # HACK: The CLIPTokenizer API does not include a way to remove tokens after calling add_tokens(...). As a + # workaround, we create a full copy of `tokenizer` so that its original behavior can be restored after + # exiting this `apply_ti(...)` context manager. + # + # In a previous implementation, the deep copy was obtained with `ti_tokenizer = copy.deepcopy(tokenizer)`, + # but a pickle roundtrip was found to be much faster (1 sec vs. 0.05 secs). + ti_tokenizer = pickle.loads(pickle.dumps(tokenizer)) ti_manager = TextualInversionManager(ti_tokenizer) def _get_trigger(ti_name, index): From 8ff49109a8881d1306b146a5249c8eccbd402aad Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 2 Nov 2023 11:55:24 -0400 Subject: [PATCH 183/202] Update get_pretty_snapshot_diff(...) to handle None-snapshots. --- invokeai/backend/model_management/memory_snapshot.py | 5 ++++- .../backend/model_management/test_memory_snapshot.py | 11 +++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/invokeai/backend/model_management/memory_snapshot.py b/invokeai/backend/model_management/memory_snapshot.py index 01f1328114..fe54af191c 100644 --- a/invokeai/backend/model_management/memory_snapshot.py +++ b/invokeai/backend/model_management/memory_snapshot.py @@ -64,7 +64,7 @@ class MemorySnapshot: return cls(process_ram, vram, malloc_info) -def get_pretty_snapshot_diff(snapshot_1: MemorySnapshot, snapshot_2: MemorySnapshot) -> str: +def get_pretty_snapshot_diff(snapshot_1: Optional[MemorySnapshot], snapshot_2: Optional[MemorySnapshot]) -> str: """Get a pretty string describing the difference between two `MemorySnapshot`s.""" def get_msg_line(prefix: str, val1: int, val2: int): @@ -73,6 +73,9 @@ def get_pretty_snapshot_diff(snapshot_1: MemorySnapshot, snapshot_2: MemorySnaps msg = "" + if snapshot_1 is None or snapshot_2 is None: + return msg + msg += get_msg_line("Process RAM", snapshot_1.process_ram, snapshot_2.process_ram) if snapshot_1.malloc_info is not None and snapshot_2.malloc_info is not None: diff --git a/tests/backend/model_management/test_memory_snapshot.py b/tests/backend/model_management/test_memory_snapshot.py index 80aed7b7ba..dcbb173e96 100644 --- a/tests/backend/model_management/test_memory_snapshot.py +++ b/tests/backend/model_management/test_memory_snapshot.py @@ -17,6 +17,7 @@ snapshots = [ MemorySnapshot(process_ram=1.0, vram=2.0, malloc_info=None), MemorySnapshot(process_ram=1.0, vram=None, malloc_info=Struct_mallinfo2()), MemorySnapshot(process_ram=1.0, vram=None, malloc_info=None), + None, ] @@ -26,10 +27,12 @@ def test_get_pretty_snapshot_diff(snapshot_1, snapshot_2): """Test that get_pretty_snapshot_diff() works with various combinations of missing MemorySnapshot fields.""" msg = get_pretty_snapshot_diff(snapshot_1, snapshot_2) - expected_lines = 1 - if snapshot_1.vram is not None and snapshot_2.vram is not None: + expected_lines = 0 + 
if snapshot_1 is not None and snapshot_2 is not None: expected_lines += 1 - if snapshot_1.malloc_info is not None and snapshot_2.malloc_info is not None: - expected_lines += 5 + if snapshot_1.vram is not None and snapshot_2.vram is not None: + expected_lines += 1 + if snapshot_1.malloc_info is not None and snapshot_2.malloc_info is not None: + expected_lines += 5 assert len(msg.splitlines()) == expected_lines From 267e709ba2d963f722f1571afaf5188ef5770ad1 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 2 Nov 2023 11:56:38 -0400 Subject: [PATCH 184/202] (minor) Fix int literal typing error. --- tests/backend/model_management/test_memory_snapshot.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/backend/model_management/test_memory_snapshot.py b/tests/backend/model_management/test_memory_snapshot.py index dcbb173e96..216cd62171 100644 --- a/tests/backend/model_management/test_memory_snapshot.py +++ b/tests/backend/model_management/test_memory_snapshot.py @@ -13,10 +13,10 @@ def test_memory_snapshot_capture(): snapshots = [ - MemorySnapshot(process_ram=1.0, vram=2.0, malloc_info=Struct_mallinfo2()), - MemorySnapshot(process_ram=1.0, vram=2.0, malloc_info=None), - MemorySnapshot(process_ram=1.0, vram=None, malloc_info=Struct_mallinfo2()), - MemorySnapshot(process_ram=1.0, vram=None, malloc_info=None), + MemorySnapshot(process_ram=1, vram=2, malloc_info=Struct_mallinfo2()), + MemorySnapshot(process_ram=1, vram=2, malloc_info=None), + MemorySnapshot(process_ram=1, vram=None, malloc_info=Struct_mallinfo2()), + MemorySnapshot(process_ram=1, vram=None, malloc_info=None), None, ] From 3781e56e573e9d12f3c3bb5beae4abcf8609f6b9 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 2 Nov 2023 12:00:07 -0400 Subject: [PATCH 185/202] Add log_memory_usage param to ModelCache. --- .../backend/model_management/model_cache.py | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index 2a6079b2c4..baf232df66 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -117,6 +117,7 @@ class ModelCache(object): lazy_offloading: bool = True, sha_chunksize: int = 16777216, logger: types.ModuleType = logger, + log_memory_usage: bool = False, ): """ :param max_cache_size: Maximum size of the RAM cache [6.0 GB] @@ -126,6 +127,10 @@ class ModelCache(object): :param lazy_offloading: Keep model in VRAM until another model needs to be loaded :param sequential_offload: Conserve VRAM by loading and unloading each stage of the pipeline sequentially :param sha_chunksize: Chunksize to use when calculating sha256 model hash + :param log_memory_usage: If True, a memory snapshot will be captured before and after every model cache + operation, and the result will be logged (at debug level). There is a time cost to capturing the memory + snapshots, so it is recommended to disable this feature unless you are actively inspecting the model cache's + behaviour. 
""" self.model_infos: Dict[str, ModelBase] = dict() # allow lazy offloading only when vram cache enabled @@ -137,6 +142,7 @@ class ModelCache(object): self.storage_device: torch.device = storage_device self.sha_chunksize = sha_chunksize self.logger = logger + self._log_memory_usage = log_memory_usage # used for stats collection self.stats = None @@ -144,6 +150,11 @@ class ModelCache(object): self._cached_models = dict() self._cache_stack = list() + def _capture_memory_snapshot(self) -> Optional[MemorySnapshot]: + if self._log_memory_usage: + return MemorySnapshot.capture() + return None + def get_key( self, model_path: str, @@ -223,10 +234,10 @@ class ModelCache(object): # Load the model from disk and capture a memory snapshot before/after. start_load_time = time.time() - snapshot_before = MemorySnapshot.capture() + snapshot_before = self._capture_memory_snapshot() with skip_torch_weight_init(): model = model_info.get_model(child_type=submodel, torch_dtype=self.precision) - snapshot_after = MemorySnapshot.capture() + snapshot_after = self._capture_memory_snapshot() end_load_time = time.time() self_reported_model_size_after_load = model_info.get_size(submodel) @@ -275,9 +286,9 @@ class ModelCache(object): return start_model_to_time = time.time() - snapshot_before = MemorySnapshot.capture() + snapshot_before = self._capture_memory_snapshot() cache_entry.model.to(target_device) - snapshot_after = MemorySnapshot.capture() + snapshot_after = self._capture_memory_snapshot() end_model_to_time = time.time() self.logger.debug( f"Moved model '{key}' from {source_device} to" @@ -286,7 +297,12 @@ class ModelCache(object): f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}" ) - if snapshot_before.vram is not None and snapshot_after.vram is not None: + if ( + snapshot_before is not None + and snapshot_after is not None + and snapshot_before.vram is not None + and snapshot_after.vram is not None + ): vram_change = abs(snapshot_before.vram - snapshot_after.vram) # If the estimated model size does not match the change in VRAM, log a warning. From 4a683cc669992bb23ff43bea48799aa879e2d3cf Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 2 Nov 2023 12:07:48 -0400 Subject: [PATCH 186/202] Add a app config parameter to control the ModelCache logging behavior. 
--- invokeai/app/services/config/config_default.py | 3 +++ invokeai/backend/model_management/model_manager.py | 7 +++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py index a877c465d2..23436a9172 100644 --- a/invokeai/app/services/config/config_default.py +++ b/invokeai/app/services/config/config_default.py @@ -45,6 +45,7 @@ InvokeAI: ram: 13.5 vram: 0.25 lazy_offload: true + log_memory_usage: false Device: device: auto precision: auto @@ -261,6 +262,8 @@ class InvokeAIAppConfig(InvokeAISettings): ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, ) + log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature unless you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache) + # DEVICE device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 9390c8ce54..e63b559970 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -351,6 +351,7 @@ class ModelManager(object): precision=precision, sequential_offload=sequential_offload, logger=logger, + log_memory_usage=self.app_config.log_memory_usage, ) self._read_models(config) @@ -933,8 +934,7 @@ class ModelManager(object): """ Returns the preamble for the config file. """ - return textwrap.dedent( - """ + return textwrap.dedent(""" # This file describes the alternative machine learning models # available to InvokeAI script. # @@ -942,8 +942,7 @@ class ModelManager(object): # model requires a model config file, a weights file, # and the width and height of the images it # was trained on. - """ - ) + """) def scan_models_directory( self, From 6e7a3f05467c45a5418405ac4ff01412cd8b6c2d Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 2 Nov 2023 13:31:10 -0400 Subject: [PATCH 187/202] (minor) Fix static checks and typo. 
--- invokeai/app/services/config/config_default.py | 3 +-- invokeai/backend/model_management/model_manager.py | 6 ++++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py index 23436a9172..f0e9dbcda4 100644 --- a/invokeai/app/services/config/config_default.py +++ b/invokeai/app/services/config/config_default.py @@ -262,8 +262,7 @@ class InvokeAIAppConfig(InvokeAISettings): ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, ) - log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature unless you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache) - + log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache) # DEVICE device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index e63b559970..da4239fa07 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -934,7 +934,8 @@ class ModelManager(object): """ Returns the preamble for the config file. """ - return textwrap.dedent(""" + return textwrap.dedent( + """ # This file describes the alternative machine learning models # available to InvokeAI script. # @@ -942,7 +943,8 @@ class ModelManager(object): # model requires a model config file, a weights file, # and the width and height of the images it # was trained on. - """) + """ + ) def scan_models_directory( self, From e391f3c9a80224646ed6f8ad5d80d970fbf52ce7 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 2 Nov 2023 11:03:16 -0400 Subject: [PATCH 188/202] Skip torch.nn.Embedding.reset_parameters(...) when loading a text encoder model. 
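torch.nn.Embedding is added to the set of layers covered by skip_torch_weight_init(),
so the random embedding initialization is skipped when the weights are about to be
overwritten by checkpoint weights anyway. A small usage sketch; the layer sizes are
just illustrative (roughly CLIP text encoder shaped):

    import torch
    from invokeai.backend.model_management.model_load_optimizations import skip_torch_weight_init

    # Inside the context manager, Linear / Conv / Embedding construction skips the
    # usual reset_parameters() call, which is wasted work if a checkpoint load follows.
    with skip_torch_weight_init():
        token_embedding = torch.nn.Embedding(num_embeddings=49408, embedding_dim=768)

    # Outside the context manager, the original reset_parameters() behaviour is restored.
    token_embedding_initialized = torch.nn.Embedding(num_embeddings=49408, embedding_dim=768)

The test update below also guards its bias assertions with hasattr(), since
Embedding layers have no bias.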
--- .../backend/model_management/model_load_optimizations.py | 2 +- .../model_management/test_model_load_optimization.py | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/invokeai/backend/model_management/model_load_optimizations.py b/invokeai/backend/model_management/model_load_optimizations.py index f835079213..8dc8a8793e 100644 --- a/invokeai/backend/model_management/model_load_optimizations.py +++ b/invokeai/backend/model_management/model_load_optimizations.py @@ -17,7 +17,7 @@ def skip_torch_weight_init(): completely unnecessary if the intent is to load checkpoint weights from disk for the layer. This context manager monkey-patches common torch layers to skip the weight initialization step. """ - torch_modules = [torch.nn.Linear, torch.nn.modules.conv._ConvNd] + torch_modules = [torch.nn.Linear, torch.nn.modules.conv._ConvNd, torch.nn.Embedding] saved_functions = [m.reset_parameters for m in torch_modules] try: diff --git a/tests/backend/model_management/test_model_load_optimization.py b/tests/backend/model_management/test_model_load_optimization.py index 43f007e972..a4fe1dd597 100644 --- a/tests/backend/model_management/test_model_load_optimization.py +++ b/tests/backend/model_management/test_model_load_optimization.py @@ -11,6 +11,7 @@ from invokeai.backend.model_management.model_load_optimizations import _no_op, s (torch.nn.Conv1d, {"in_channels": 10, "out_channels": 20, "kernel_size": 3}), (torch.nn.Conv2d, {"in_channels": 10, "out_channels": 20, "kernel_size": 3}), (torch.nn.Conv3d, {"in_channels": 10, "out_channels": 20, "kernel_size": 3}), + (torch.nn.Embedding, {"num_embeddings": 10, "embedding_dim": 10}), ], ) def test_skip_torch_weight_init_linear(torch_module, layer_args): @@ -36,12 +37,14 @@ def test_skip_torch_weight_init_linear(torch_module, layer_args): # Check that reset_parameters is skipped while `skip_torch_weight_init()` is active. assert reset_params_fn_during == _no_op assert not torch.allclose(layer_before.weight, layer_during.weight) - assert not torch.allclose(layer_before.bias, layer_during.bias) + if hasattr(layer_before, "bias"): + assert not torch.allclose(layer_before.bias, layer_during.bias) # Check that the original behavior is restored after `skip_torch_weight_init()` ends. 
assert reset_params_fn_before is reset_params_fn_after assert torch.allclose(layer_before.weight, layer_after.weight) - assert torch.allclose(layer_before.bias, layer_after.bias) + if hasattr(layer_before, "bias"): + assert torch.allclose(layer_before.bias, layer_after.bias) def test_skip_torch_weight_init_restores_base_class_behavior(): From 3d32ce2b58dd81462d8f71d73b67c8e3cb7fc2d0 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 31 Oct 2023 22:19:39 -0500 Subject: [PATCH 189/202] fix(ui): hide refiner collapse if refiner not installed --- .../features/sdxl/components/ParamSDXLRefinerCollapse.tsx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx b/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx index 6ac3034834..395ef302ed 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx @@ -14,6 +14,7 @@ import ParamSDXLRefinerStart from './SDXLRefiner/ParamSDXLRefinerStart'; import ParamSDXLRefinerSteps from './SDXLRefiner/ParamSDXLRefinerSteps'; import ParamUseSDXLRefiner from './SDXLRefiner/ParamUseSDXLRefiner'; import { useTranslation } from 'react-i18next'; +import { useIsRefinerAvailable } from 'services/api/hooks/useIsRefinerAvailable'; const selector = createSelector( stateSelector, @@ -31,6 +32,11 @@ const selector = createSelector( const ParamSDXLRefinerCollapse = () => { const { activeLabel, shouldUseSliders } = useAppSelector(selector); const { t } = useTranslation(); + const isRefinerAvailable = useIsRefinerAvailable(); + + if (!isRefinerAvailable) { + return null; + } return ( From 5b420653f97ede5d96f3bb09f4bdb16eedf0bd10 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 1 Nov 2023 20:27:28 -0500 Subject: [PATCH 190/202] feat(ui): show placeholder in refiner collapse instead of hiding it, if no refiner models installed --- invokeai/frontend/web/public/locales/en.json | 4 +++- .../src/features/lora/components/ParamLoraSelect.tsx | 5 +++-- .../sdxl/components/ParamSDXLRefinerCollapse.tsx | 12 ++++++++++-- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 04d817428c..564ed174a8 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -722,7 +722,9 @@ "noMatchingModels": "No matching Models", "noModelsAvailable": "No models available", "selectLoRA": "Select a LoRA", - "selectModel": "Select a Model" + "selectModel": "Select a Model", + "noLoRAsInstalled": "No LoRAs installed", + "noRefinerModelsInstalled": "No SDXL Refiner models installed" }, "nodes": { "addNode": "Add Node", diff --git a/invokeai/frontend/web/src/features/lora/components/ParamLoraSelect.tsx b/invokeai/frontend/web/src/features/lora/components/ParamLoraSelect.tsx index ef90e14656..6048e4a159 100644 --- a/invokeai/frontend/web/src/features/lora/components/ParamLoraSelect.tsx +++ b/invokeai/frontend/web/src/features/lora/components/ParamLoraSelect.tsx @@ -10,6 +10,7 @@ import { loraAdded } from 'features/lora/store/loraSlice'; import { MODEL_TYPE_MAP } from 'features/parameters/types/constants'; import { forEach } from 'lodash-es'; import { memo, useCallback, useMemo } from 'react'; +import { 
useTranslation } from 'react-i18next'; import { useGetLoRAModelsQuery } from 'services/api/endpoints/models'; const selector = createSelector( @@ -24,7 +25,7 @@ const ParamLoRASelect = () => { const dispatch = useAppDispatch(); const { loras } = useAppSelector(selector); const { data: loraModels } = useGetLoRAModelsQuery(); - + const { t } = useTranslation(); const currentMainModel = useAppSelector( (state: RootState) => state.generation.model ); @@ -79,7 +80,7 @@ const ParamLoRASelect = () => { return ( - No LoRAs Loaded + {t('models.noLoRAsInstalled')} ); diff --git a/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx b/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx index 395ef302ed..ac33891a6c 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx @@ -1,4 +1,4 @@ -import { Flex } from '@chakra-ui/react'; +import { Flex, Text } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { stateSelector } from 'app/store/store'; import { useAppSelector } from 'app/store/storeHooks'; @@ -35,7 +35,15 @@ const ParamSDXLRefinerCollapse = () => { const isRefinerAvailable = useIsRefinerAvailable(); if (!isRefinerAvailable) { - return null; + return ( + + + + {t('models.noRefinerModelsInstalled')} + + + + ); } return ( From 43b300498f9802493fa61cbe4d17e9e7105813c7 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 3 Nov 2023 11:08:09 -0400 Subject: [PATCH 191/202] Remove explicit gc.collect() after transferring models from device to CPU. I'm not sure why this was there in the first place, but it was taking a significant amount of time (up to ~1sec in my tests). --- invokeai/backend/model_management/model_cache.py | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index baf232df66..7a162e3f99 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -507,7 +507,6 @@ class ModelCache(object): vram_in_use = torch.cuda.memory_allocated() self.logger.debug(f"{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB") - gc.collect() torch.cuda.empty_cache() if choose_torch_device() == torch.device("mps"): mps.empty_cache() From 875231ed3d1a32f1141aaf38d33f491c3ce69273 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 3 Nov 2023 15:14:04 -0400 Subject: [PATCH 192/202] Add reminder to clean up our model cache clearing logic. --- invokeai/backend/model_management/model_cache.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index 7a162e3f99..1c3ad06e8e 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -444,6 +444,10 @@ class ModelCache(object): refs = sys.getrefcount(cache_entry.model) + # HACK: This is a workaround for a memory-management issue that we haven't tracked down yet. 
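For anyone who wants to reproduce the measurement, the cost is easy to see with a
quick stdlib-only check in a running process (numbers will vary a lot with how many
objects the process is tracking):

    import gc
    import time

    # A full gc.collect() walks the tracked container objects in every generation,
    # so it can be slow in a large process even when nothing ends up being freed.
    start = time.time()
    unreachable = gc.collect()
    print(f"gc.collect() found {unreachable} unreachable objects in {time.time() - start:.3f}s")

The implicit, generational collector still runs as usual; only the explicit full
collection on every model offload is dropped.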
We are directly + # going against the advice in the Python docs by using `gc.get_referrers(...)` in this way: + # https://docs.python.org/3/library/gc.html#gc.get_referrers + # manualy clear local variable references of just finished function calls # for some reason python don't want to collect it even by gc.collect() immidiately if refs > 2: From 8488ab01347488d9a41b360aaad1658c2dab4b7a Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 3 Nov 2023 15:19:45 -0400 Subject: [PATCH 193/202] Reduce frequency that we call gc.collect() in the model cache. --- .../backend/model_management/model_cache.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index 1c3ad06e8e..2385fd9bec 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -438,6 +438,7 @@ class ModelCache(object): self.logger.debug(f"Before unloading: cached_models={len(self._cached_models)}") pos = 0 + models_cleared = 0 while current_size + bytes_needed > maximum_size and pos < len(self._cache_stack): model_key = self._cache_stack[pos] cache_entry = self._cached_models[model_key] @@ -482,6 +483,7 @@ class ModelCache(object): f"Unloading model {model_key} to free {(model_size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)" ) current_size -= cache_entry.size + models_cleared += 1 if self.stats: self.stats.cleared += 1 del self._cache_stack[pos] @@ -491,7 +493,20 @@ class ModelCache(object): else: pos += 1 - gc.collect() + if models_cleared < 0: + # There would likely be some 'garbage' to be collected regardless of whether a model was cleared or not, but + # there is a significant time cost to calling `gc.collect()`, so we want to use it sparingly. (The time cost + # is high even if no garbage gets collected.) + # + # Calling gc.collect(...) when a model is cleared seems like a good middle-ground: + # - If models had to be cleared, it's a signal that we are close to our memory limit. + # - If models were cleared, there's a good chance that there's a significant amount of garbage to be + # collected. + # + # Keep in mind that gc is only responsible for handling reference cycles. Most objects should be cleaned up + # immediately when their reference count hits 0. + gc.collect() + torch.cuda.empty_cache() if choose_torch_device() == torch.device("mps"): mps.empty_cache() From fb3d0c4b124ecf1e6fb46121f4c8a1ee2cee6cf6 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Fri, 3 Nov 2023 15:21:27 -0400 Subject: [PATCH 194/202] Fix bug in model cache reference count checking. 
--- invokeai/backend/model_management/model_cache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index 2385fd9bec..0b2a8b8df7 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -474,11 +474,11 @@ class ModelCache(object): f" refs: {refs}" ) - # 2 refs: + # Expected refs: # 1 from cache_entry # 1 from getrefcount function # 1 from onnx runtime object - if not cache_entry.locked and refs <= 3 if "onnx" in model_key else 2: + if not cache_entry.locked and refs <= (3 if "onnx" in model_key else 2): self.logger.debug( f"Unloading model {model_key} to free {(model_size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)" ) From aa02ebf8f5c2f8a90737c5a9607fccb5bca00437 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Sat, 4 Nov 2023 08:52:10 -0400 Subject: [PATCH 195/202] Fix model cache gc.collect() condition. --- invokeai/backend/model_management/model_cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index 0b2a8b8df7..83af789219 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -493,7 +493,7 @@ class ModelCache(object): else: pos += 1 - if models_cleared < 0: + if models_cleared > 0: # There would likely be some 'garbage' to be collected regardless of whether a model was cleared or not, but # there is a significant time cost to calling `gc.collect()`, so we want to use it sparingly. (The time cost # is high even if no garbage gets collected.) From 5a821384d3b5390a2a62a6121c3be4306766bf14 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 4 Nov 2023 11:12:29 -0400 Subject: [PATCH 196/202] fix model-not-found error --- invokeai/frontend/merge/merge_diffusers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/merge/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py index 51602b75d1..d515c5b4ee 100644 --- a/invokeai/frontend/merge/merge_diffusers.py +++ b/invokeai/frontend/merge/merge_diffusers.py @@ -274,9 +274,10 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): else: interp = self.interpolations[self.merge_method.value[0]] + bases = ["sd-1", "sd-2", "sdxl"] args = dict( model_names=models, - base_model=tuple(BaseModelType)[self.base_select.value[0]], + base_model=BaseModelType(bases[self.base_select.value[0]]), alpha=self.alpha.value, interp=interp, force=self.force.value, From 6334c4adf52729076f15da2bfdbd9572e00c7269 Mon Sep 17 00:00:00 2001 From: Alexander Eichhorn Date: Sat, 4 Nov 2023 09:53:33 +0100 Subject: [PATCH 197/202] translationBot(ui): update translation (German) Currently translated at 53.8% (657 of 1219 strings) Co-authored-by: Alexander Eichhorn Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/de.json | 39 +++++++++++++++++--- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index c06ad56492..4f91f84b35 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -83,13 +83,14 @@ "darkMode": "Dunkler Modus", "loading": "Lade", "random": "Zufall", - "batch": "Batch-Manager", + "batch": 
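The old condition relied on operator precedence that does not do what it looks like:
a conditional expression binds more loosely than both the comparison and the "and",
so for non-onnx models the whole "not cache_entry.locked and refs <= ..." test
collapsed to the constant 2 and was always truthy. A reduced illustration, with
made-up values:

    refs = 10
    model_key = "sd-1/main/some-model"

    # Old form: parses as "(refs <= 3) if 'onnx' in model_key else 2".
    # For a non-onnx key this is just the integer 2, which is always truthy.
    old = refs <= 3 if "onnx" in model_key else 2

    # New form: the parentheses make the threshold conditional, as intended.
    new = refs <= (3 if "onnx" in model_key else 2)

    print(old, new)  # 2 False

With the parentheses in place, a model with outstanding references is no longer
treated as eligible for unloading just because the truthy fallback value was taken.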
"Stapel-Manager", "advanced": "Erweitert", "langBrPortuguese": "Portugiesisch (Brasilien)", "unifiedCanvas": "Einheitliche Leinwand", "openInNewTab": "In einem neuem Tab öffnen", "statusProcessing": "wird bearbeitet", - "linear": "Linear" + "linear": "Linear", + "imagePrompt": "Bild Prompt" }, "gallery": { "generations": "Erzeugungen", @@ -655,7 +656,8 @@ "downloadBoard": "Ordner runterladen", "changeBoard": "Ordner wechseln", "loading": "Laden...", - "clearSearch": "Suche leeren" + "clearSearch": "Suche leeren", + "bottomMessage": "Durch das Löschen dieses Ordners und seiner Bilder werden alle Funktionen zurückgesetzt, die sie derzeit verwenden." }, "controlnet": { "showAdvanced": "Zeige Erweitert", @@ -704,7 +706,17 @@ "depthZoeDescription": "Tiefenmap erstellen mit Zoe", "setControlImageDimensions": "Setze Control Bild Auflösung auf Breite/Höhe", "handAndFace": "Hand und Gesicht", - "enableIPAdapter": "Aktiviere IP Adapter" + "enableIPAdapter": "Aktiviere IP Adapter", + "resize": "Größe ändern", + "resetControlImage": "Zurücksetzen vom Referenz Bild", + "balanced": "Ausgewogen", + "prompt": "Prompt", + "resizeMode": "Größenänderungsmodus", + "processor": "Prozessor", + "saveControlImage": "Speichere Referenz Bild", + "safe": "Speichern", + "ipAdapterImageFallback": "Kein IP Adapter Bild ausgewählt", + "resetIPAdapterImage": "Zurücksetzen vom IP Adapter Bild" }, "queue": { "status": "Status", @@ -734,7 +746,19 @@ "session": "Sitzung", "queueTotal": "{{total}} Gesamt", "resume": "Wieder aufnehmen", - "item": "Auftrag" + "item": "Auftrag", + "notReady": "Warteschlange noch nicht bereit", + "batchValues": "Stapel Werte", + "queueCountPrediction": "{{predicted}} zur Warteschlange hinzufügen", + "queuedCount": "{{pending}} wartenden Elemente", + "clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.", + "completedIn": "Fertig in", + "cancelBatchSucceeded": "Stapel abgebrochen", + "cancelBatch": "Stapel stoppen", + "enqueueing": "Stapel in der Warteschlange", + "queueMaxExceeded": "Maximum von {{max_queue_size}} Elementen erreicht, würde {{skip}} Elemente überspringen", + "cancelBatchFailed": "Problem beim Abbruch vom Stapel", + "clearQueueAlertDialog2": "bist du sicher die Warteschlange zu leeren?" 
}, "metadata": { "negativePrompt": "Negativ Beschreibung", @@ -788,5 +812,10 @@ "hits": "Cache Treffer", "enable": "Aktivieren", "clear": "Leeren" + }, + "embedding": { + "noMatchingEmbedding": "Keine passenden Embeddings", + "addEmbedding": "Embedding hinzufügen", + "incompatibleModel": "Inkompatibles Basismodell:" } } From 4e6b579526b6fc368fac4284e9ffbf700543ef61 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Sat, 4 Nov 2023 09:53:33 +0100 Subject: [PATCH 198/202] translationBot(ui): update translation (Italian) Currently translated at 97.6% (1190 of 1219 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 49b258c95d..4815bc2482 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -1192,7 +1192,9 @@ "noLoRAsAvailable": "Nessun LoRA disponibile", "noModelsAvailable": "Nessun modello disponibile", "selectModel": "Seleziona un modello", - "selectLoRA": "Seleziona un LoRA" + "selectLoRA": "Seleziona un LoRA", + "noRefinerModelsInstalled": "Nessun modello SDXL Refiner installato", + "noLoRAsInstalled": "Nessun LoRA installato" }, "invocationCache": { "disable": "Disabilita", From 7f650d00de9563da2b5c55b3ac184ed44fa96e77 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Sun, 5 Nov 2023 14:42:41 +0100 Subject: [PATCH 199/202] translationBot(ui): update translation (Italian) Currently translated at 97.7% (1191 of 1219 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 4815bc2482..c358f415b2 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -1025,7 +1025,8 @@ "imageFieldDescription": "Le immagini possono essere passate tra i nodi.", "unableToParseEdge": "Impossibile analizzare il bordo", "latentsCollectionDescription": "Le immagini latenti possono essere passate tra i nodi.", - "imageCollection": "Raccolta Immagini" + "imageCollection": "Raccolta Immagini", + "loRAModelField": "LoRA" }, "boards": { "autoAddBoard": "Aggiungi automaticamente bacheca", From 546aaedbe4b3cf85d370496a43c550287576341e Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> Date: Mon, 6 Nov 2023 08:15:57 -0500 Subject: [PATCH 200/202] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index c0d91cf330..89f4ea2b45 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,7 @@ dependencies = [ "datasets", # When bumping diffusers beyond 0.21, make sure to address this: # https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513 - "diffusers[torch]~=0.21.0", + "diffusers[torch]~=0.22.0", "dnspython~=2.4.0", "dynamicprompts", "easing-functions", From f8f1740668edd4c5e6b8549422b075e59d944bbf Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> 
Date: Sat, 4 Nov 2023 23:51:09 -0400 Subject: [PATCH 201/202] Set Defaults to 1 --- invokeai/app/invocations/math.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index 3ed325802e..ad676e824c 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -182,8 +182,8 @@ class IntegerMathInvocation(BaseInvocation): operation: INTEGER_OPERATIONS = InputField( default="ADD", description="The operation to perform", ui_choice_labels=INTEGER_OPERATIONS_LABELS ) - a: int = InputField(default=0, description=FieldDescriptions.num_1) - b: int = InputField(default=0, description=FieldDescriptions.num_2) + a: int = InputField(default=1, description=FieldDescriptions.num_1) + b: int = InputField(default=1, description=FieldDescriptions.num_2) @field_validator("b") def no_unrepresentable_results(cls, v: int, info: ValidationInfo): @@ -256,8 +256,8 @@ class FloatMathInvocation(BaseInvocation): operation: FLOAT_OPERATIONS = InputField( default="ADD", description="The operation to perform", ui_choice_labels=FLOAT_OPERATIONS_LABELS ) - a: float = InputField(default=0, description=FieldDescriptions.num_1) - b: float = InputField(default=0, description=FieldDescriptions.num_2) + a: float = InputField(default=1, description=FieldDescriptions.num_1) + b: float = InputField(default=1, description=FieldDescriptions.num_2) @field_validator("b") def no_unrepresentable_results(cls, v: float, info: ValidationInfo): From 76b3f8956b9a72365601fe39c264f1dc70d16464 Mon Sep 17 00:00:00 2001 From: "Wilson E. Alvarez" Date: Mon, 30 Oct 2023 11:58:40 -0400 Subject: [PATCH 202/202] Fix ROCm support in Docker container --- docker/.env.sample | 4 ++-- docker/Dockerfile | 6 +++--- docker/docker-compose.yml | 4 ++++ docker/run.sh | 2 +- pyproject.toml | 4 ++-- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/docker/.env.sample b/docker/.env.sample index c0a56402fc..98ad307c04 100644 --- a/docker/.env.sample +++ b/docker/.env.sample @@ -11,5 +11,5 @@ INVOKEAI_ROOT= # HUGGING_FACE_HUB_TOKEN= ## optional variables specific to the docker setup. 
-# GPU_DRIVER=cuda -# CONTAINER_UID=1000 \ No newline at end of file +# GPU_DRIVER=cuda # or rocm +# CONTAINER_UID=1000 diff --git a/docker/Dockerfile b/docker/Dockerfile index 73852ec66e..6aa6a43a1a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai ENV VIRTUAL_ENV=/opt/venv/invokeai ENV PATH="$VIRTUAL_ENV/bin:$PATH" -ARG TORCH_VERSION=2.0.1 -ARG TORCHVISION_VERSION=0.15.2 +ARG TORCH_VERSION=2.1.0 +ARG TORCHVISION_VERSION=0.16 ARG GPU_DRIVER=cuda ARG TARGETPLATFORM="linux/amd64" # unused but available @@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \ if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \ elif [ "$GPU_DRIVER" = "rocm" ]; then \ - extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \ + extra_index_url_arg="--index-url https://download.pytorch.org/whl/rocm5.6"; \ else \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \ fi &&\ diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 85deac428e..f7e92d6bf5 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -15,6 +15,10 @@ services: - driver: nvidia count: 1 capabilities: [gpu] + # For AMD support, comment out the deploy section above and uncomment the devices section below: + #devices: + # - /dev/kfd:/dev/kfd + # - /dev/dri:/dev/dri build: context: .. dockerfile: docker/Dockerfile diff --git a/docker/run.sh b/docker/run.sh index 0306c4ddab..4b595b06df 100755 --- a/docker/run.sh +++ b/docker/run.sh @@ -7,5 +7,5 @@ set -e SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}") cd "$SCRIPTDIR" || exit 1 -docker compose up --build -d +docker compose up -d docker compose logs -f diff --git a/pyproject.toml b/pyproject.toml index 89f4ea2b45..7a71c3c7c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -80,8 +80,8 @@ dependencies = [ "semver~=3.0.1", "send2trash", "test-tube~=0.7.5", - "torch~=2.0.1", - "torchvision~=0.15.2", + "torch~=2.1.0", + "torchvision~=0.16", "torchmetrics~=0.11.0", "torchsde~=0.2.5", "transformers~=4.31.0",
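
For reference, a minimal end-to-end sketch of exercising the ROCm path added in [PATCH 202/202]. It assumes docker/.env.sample, docker-compose.yml, the Dockerfile, and run.sh exactly as patched above; the sed edit and the explicit --build-arg are illustrative conveniences, not steps prescribed by the patch (the compose file may already forward GPU_DRIVER from .env).

    # Sketch: running the container on an AMD GPU after applying PATCH 202/202.
    # The sed call is illustrative only — editing docker/.env by hand works too.
    cd docker
    cp .env.sample .env
    sed -i 's/^# GPU_DRIVER=cuda.*/GPU_DRIVER=rocm/' .env

    # In docker-compose.yml, comment out the nvidia "deploy:" reservation and
    # uncomment the AMD device mappings added by the patch:
    #   devices:
    #     - /dev/kfd:/dev/kfd
    #     - /dev/dri:/dev/dri

    # Build against the ROCm 5.6 wheel index selected in the Dockerfile,
    # passing GPU_DRIVER explicitly in case the compose file does not forward it.
    docker compose build --build-arg GPU_DRIVER=rocm

    # run.sh now starts the stack without rebuilding ("docker compose up -d").
    ./run.sh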