Merge branch 'main' into docs/ti/add-using-troubleshooting

blessedcoolant committed 2023-02-19 16:51:50 +13:00 (committed by GitHub)
commit d7180afe9d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
26 changed files with 196183 additions and 1600 deletions


@@ -26,3 +26,12 @@ dist-ssr
 # build stats
 stats.html
 
+# Yarn - https://yarnpkg.com/getting-started/qa#which-files-should-be-gitignored
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/sdks
+!.yarn/versions
+

File diff suppressed because one or more lines are too long


@@ -0,0 +1,5 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+yarn-path ".yarn/releases/yarn-1.22.19.cjs"


@@ -0,0 +1 @@
+yarnPath: .yarn/releases/yarn-1.22.19.cjs


@@ -7,7 +7,7 @@ The UI is in `invokeai/frontend`.
 Install [node](https://nodejs.org/en/download/) (includes npm) and
 [yarn](https://yarnpkg.com/getting-started/install).
 
-From `invokeai/frontend/` run `yarn install` to get everything set up.
+From `invokeai/frontend/` run `yarn install --immutable` to get everything set up.
 
 ## Dev

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -5,7 +5,7 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>InvokeAI - A Stable Diffusion Toolkit</title>
     <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
-    <script type="module" crossorigin src="./assets/index-1e76002e.js"></script>
+    <script type="module" crossorigin src="./assets/index-fff3415a.js"></script>
     <link rel="stylesheet" href="./assets/index-14cb2922.css">
   </head>


@@ -66,7 +66,7 @@
   "hotkeys": {
     "keyboardShortcuts": "مفاتيح الأزرار المختصرة",
    "appHotkeys": "مفاتيح التطبيق",
-    "GeneralHotkeys": "مفاتيح عامة",
+    "generalHotkeys": "مفاتيح عامة",
    "galleryHotkeys": "مفاتيح المعرض",
    "unifiedCanvasHotkeys": "مفاتيح اللوحةالموحدة ",
    "invoke": {


@@ -390,7 +390,10 @@
   "modelMergeHeaderHelp1": "You can merge upto three different models to create a blend that suits your needs.",
   "modelMergeHeaderHelp2": "Only Diffusers are available for merging. If you want to merge a checkpoint model, please convert it to Diffusers first.",
   "modelMergeAlphaHelp": "Alpha controls blend strength for the models. Lower alpha values lead to lower influence of the second model.",
-  "modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above."
+  "modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above.",
+  "inverseSigmoid": "Inverse Sigmoid",
+  "sigmoid": "Sigmoid",
+  "weightedSum": "Weighted Sum"
 },
 "parameters": {
   "general": "General",
@@ -401,6 +404,7 @@
   "height": "Height",
   "sampler": "Sampler",
   "seed": "Seed",
+  "imageToImage": "Image to Image",
   "randomizeSeed": "Randomize Seed",
   "shuffle": "Shuffle",
   "noiseThreshold": "Noise Threshold",
@@ -438,7 +442,12 @@
   "img2imgStrength": "Image To Image Strength",
   "toggleLoopback": "Toggle Loopback",
   "invoke": "Invoke",
-  "cancel": "Cancel",
+  "cancel": {
+    "immediate": "Cancel immediately",
+    "schedule": "Cancel after current iteration",
+    "isScheduled": "Canceling",
+    "setType": "Set cancel type"
+  },
   "promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
   "negativePrompts": "Negative Prompts",
   "sendTo": "Send to",
@@ -465,8 +474,8 @@
   "confirmOnDelete": "Confirm On Delete",
   "displayHelpIcons": "Display Help Icons",
   "useCanvasBeta": "Use Canvas Beta Layout",
+  "useSlidersForAll": "Use Sliders For All Options",
   "enableImageDebugging": "Enable Image Debugging",
-  "useSlidersForAll": "Use Sliders For All Options",
   "resetWebUI": "Reset Web UI",
   "resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
   "resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",
@@ -508,7 +517,7 @@
 "feature": {
   "prompt": "This is the prompt field. Prompt includes generation objects and stylistic terms. You can add weight (token importance) in the prompt as well, but CLI commands and parameters will not work.",
   "gallery": "Gallery displays generations from the outputs folder as they're created. Settings are stored within files and accesed by context menu.",
-  "other": "These options will enable alternative processing modes for Invoke. 'Seamless tiling' will create repeating patterns in the output. 'High resolution' is generation in two steps with img2img: use this setting when you want a larger and more coherent image without artifacts. It will take longer that usual txt2img.",
+  "other": "These options will enable alternative processing modes for Invoke. 'Seamless tiling' will create repeating patterns in the output. 'High resolution' is generation in two steps with img2img: use this setting when you want a larger and more coherent image without artifacts. It will take longer than usual txt2img.",
   "seed": "Seed value affects the initial noise from which the image is formed. You can use the already existing seeds from previous images. 'Noise Threshold' is used to mitigate artifacts at high CFG values (try the 0-10 range), and Perlin to add Perlin noise during generation: both serve to add variation to your outputs.",
   "variations": "Try a variation with a value between 0.1 and 1.0 to change the result for a given seed. Interesting variations of the seed are between 0.1 and 0.3.",
   "upscale": "Use ESRGAN to enlarge the image immediately after generation.",


@@ -66,7 +66,7 @@
   "hotkeys": {
     "keyboardShortcuts": "Raccourcis clavier",
     "appHotkeys": "Raccourcis de l'application",
-    "GeneralHotkeys": "Raccourcis généraux",
+    "generalHotkeys": "Raccourcis généraux",
     "galleryHotkeys": "Raccourcis de la galerie",
     "unifiedCanvasHotkeys": "Raccourcis du Canvas unifié",
     "invoke": {


@@ -15,11 +15,11 @@
   "langItalian": "Italiano",
   "nodesDesc": "Attualmente è in fase di sviluppo un sistema basato su nodi per la generazione di immagini. Resta sintonizzato per gli aggiornamenti su questa fantastica funzionalità.",
   "postProcessing": "Post-elaborazione",
-  "postProcessDesc1": "Invoke AI offre un'ampia varietà di funzionalità di post-elaborazione. Ampiamento Immagine e Restaura i Volti sono già disponibili nell'interfaccia Web. È possibile accedervi dal menu 'Opzioni avanzate' delle schede 'Testo a Immagine' e 'Immagine a Immagine'. È inoltre possibile elaborare le immagini direttamente, utilizzando i pulsanti di azione dell'immagine sopra la visualizzazione dell'immagine corrente o nel visualizzatore.",
+  "postProcessDesc1": "Invoke AI offre un'ampia varietà di funzionalità di post-elaborazione. Ampliamento Immagine e Restaura Volti sono già disponibili nell'interfaccia Web. È possibile accedervi dal menu 'Opzioni avanzate' delle schede 'Testo a Immagine' e 'Immagine a Immagine'. È inoltre possibile elaborare le immagini direttamente, utilizzando i pulsanti di azione dell'immagine sopra la visualizzazione dell'immagine corrente o nel visualizzatore.",
   "postProcessDesc2": "Presto verrà rilasciata un'interfaccia utente dedicata per facilitare flussi di lavoro di post-elaborazione più avanzati.",
   "postProcessDesc3": "L'interfaccia da riga di comando di 'Invoke AI' offre varie altre funzionalità tra cui Embiggen.",
   "training": "Addestramento",
-  "trainingDesc1": "Un flusso di lavoro dedicato per addestrare i tuoi incorporamenti e checkpoint utilizzando Inversione Testuale e Dreambooth dall'interfaccia web.",
+  "trainingDesc1": "Un flusso di lavoro dedicato per addestrare i tuoi Incorporamenti e Checkpoint utilizzando Inversione Testuale e Dreambooth dall'interfaccia web.",
   "trainingDesc2": "InvokeAI supporta già l'addestramento di incorporamenti personalizzati utilizzando l'inversione testuale utilizzando lo script principale.",
   "upload": "Caricamento",
   "close": "Chiudi",
@@ -45,7 +45,25 @@
   "statusUpscaling": "Ampliamento",
   "statusUpscalingESRGAN": "Ampliamento (ESRGAN)",
   "statusLoadingModel": "Caricamento del modello",
-  "statusModelChanged": "Modello cambiato"
+  "statusModelChanged": "Modello cambiato",
+  "githubLabel": "GitHub",
+  "discordLabel": "Discord",
+  "langArabic": "Arabo",
+  "langEnglish": "Inglese",
+  "langFrench": "Francese",
+  "langGerman": "Tedesco",
+  "langJapanese": "Giapponese",
+  "langPolish": "Polacco",
+  "langBrPortuguese": "Portoghese Basiliano",
+  "langRussian": "Russo",
+  "langUkranian": "Ucraino",
+  "langSpanish": "Spagnolo",
+  "statusMergingModels": "Fusione Modelli",
+  "statusMergedModels": "Modelli fusi",
+  "langSimplifiedChinese": "Cinese semplificato",
+  "langDutch": "Olandese",
+  "statusModelConverted": "Modello Convertito",
+  "statusConvertingModel": "Conversione Modello"
 },
 "gallery": {
   "generations": "Generazioni",
@@ -70,7 +88,7 @@
   "galleryHotkeys": "Tasti di scelta rapida della galleria",
   "unifiedCanvasHotkeys": "Tasti di scelta rapida Tela Unificata",
   "invoke": {
-    "title": "Invoca",
+    "title": "Invoke",
     "desc": "Genera un'immagine"
   },
   "cancel": {
@@ -335,7 +353,47 @@
   "formMessageDiffusersModelLocation": "Ubicazione modelli diffusori",
   "formMessageDiffusersModelLocationDesc": "Inseriscine almeno uno.",
   "formMessageDiffusersVAELocation": "Ubicazione file VAE",
-  "formMessageDiffusersVAELocationDesc": "Se non fornito, InvokeAI cercherà il file VAE all'interno dell'ubicazione del modello sopra indicata."
+  "formMessageDiffusersVAELocationDesc": "Se non fornito, InvokeAI cercherà il file VAE all'interno dell'ubicazione del modello sopra indicata.",
+  "convert": "Converti",
+  "convertToDiffusers": "Converti in Diffusori",
+  "convertToDiffusersHelpText2": "Questo processo sostituirà la voce in Gestione Modelli con la versione Diffusori dello stesso modello.",
+  "convertToDiffusersHelpText4": "Questo è un processo una tantum. Potrebbero essere necessari circa 30-60 secondi a seconda delle specifiche del tuo computer.",
+  "convertToDiffusersHelpText5": "Assicurati di avere spazio su disco sufficiente. I modelli generalmente variano tra 4 GB e 7 GB di dimensioni.",
+  "convertToDiffusersHelpText6": "Vuoi convertire questo modello?",
+  "convertToDiffusersSaveLocation": "Ubicazione salvataggio",
+  "v2": "v2",
+  "inpainting": "v1 Inpainting",
+  "customConfig": "Configurazione personalizzata",
+  "statusConverting": "Conversione in corso",
+  "modelConverted": "Modello convertito",
+  "sameFolder": "Stessa cartella",
+  "invokeRoot": "Cartella InvokeAI",
+  "merge": "Fondere",
+  "modelsMerged": "Modelli fusi",
+  "mergeModels": "Fondi Modelli",
+  "modelOne": "Modello 1",
+  "modelTwo": "Modello 2",
+  "mergedModelName": "Nome del modello fuso",
+  "alpha": "Alpha",
+  "interpolationType": "Tipo di interpolazione",
+  "mergedModelCustomSaveLocation": "Percorso personalizzato",
+  "invokeAIFolder": "Cartella Invoke AI",
+  "ignoreMismatch": "Ignora le discrepanze tra i modelli selezionati",
+  "modelMergeHeaderHelp2": "Solo i diffusori sono disponibili per l'unione. Se desideri unire un modello Checkpoint, convertilo prima in Diffusori.",
+  "modelMergeInterpAddDifferenceHelp": "In questa modalità, il Modello 3 viene prima sottratto dal Modello 2. La versione risultante viene unita al Modello 1 con il tasso Alpha impostato sopra.",
+  "mergedModelSaveLocation": "Ubicazione salvataggio",
+  "convertToDiffusersHelpText1": "Questo modello verrà convertito nel formato 🧨 Diffusore.",
+  "custom": "Personalizzata",
+  "convertToDiffusersHelpText3": "Il tuo file checkpoint sul disco NON verrà comunque cancellato o modificato. Se lo desideri, puoi aggiungerlo di nuovo in Gestione Modelli.",
+  "v1": "v1",
+  "pathToCustomConfig": "Percorso alla configurazione personalizzata",
+  "modelThree": "Modello 3",
+  "modelMergeHeaderHelp1": "Puoi unire fino a tre diversi modelli per creare una miscela adatta alle tue esigenze.",
+  "modelMergeAlphaHelp": "Il valore Alpha controlla la forza di miscelazione dei modelli. Valori Alpha più bassi attenuano l'influenza del secondo modello.",
+  "customSaveLocation": "Ubicazione salvataggio personalizzata",
+  "weightedSum": "Somma pesata",
+  "sigmoid": "Sigmoide",
+  "inverseSigmoid": "Sigmoide inverso"
 },
 "parameters": {
   "images": "Immagini",
@@ -352,7 +410,7 @@
   "variations": "Variazioni",
   "variationAmount": "Quantità di variazione",
   "seedWeights": "Pesi dei semi",
-  "faceRestoration": "Restaura volti",
+  "faceRestoration": "Restauro volti",
   "restoreFaces": "Restaura volti",
   "type": "Tipo",
   "strength": "Forza",
@@ -396,7 +454,12 @@
   "info": "Informazioni",
   "deleteImage": "Elimina immagine",
   "initialImage": "Immagine iniziale",
-  "showOptionsPanel": "Mostra pannello opzioni"
+  "showOptionsPanel": "Mostra pannello opzioni",
+  "general": "Generale",
+  "denoisingStrength": "Forza riduzione rumore",
+  "copyImage": "Copia immagine",
+  "hiresStrength": "Forza Alta Risoluzione",
+  "negativePrompts": "Prompt Negativi"
 },
 "settings": {
   "models": "Modelli",
@@ -409,7 +472,8 @@
   "resetWebUI": "Reimposta l'interfaccia utente Web",
   "resetWebUIDesc1": "Il ripristino dell'interfaccia utente Web reimposta solo la cache locale del browser delle immagini e le impostazioni memorizzate. Non cancella alcuna immagine dal disco.",
   "resetWebUIDesc2": "Se le immagini non vengono visualizzate nella galleria o qualcos'altro non funziona, prova a reimpostare prima di segnalare un problema su GitHub.",
-  "resetComplete": "L'interfaccia utente Web è stata reimpostata. Aggiorna la pagina per ricaricarla."
+  "resetComplete": "L'interfaccia utente Web è stata reimpostata. Aggiorna la pagina per ricaricarla.",
+  "useSlidersForAll": "Usa i cursori per tutte le opzioni"
 },
 "toast": {
   "tempFoldersEmptied": "Cartella temporanea svuotata",
@@ -447,7 +511,7 @@
 "feature": {
   "prompt": "Questo è il campo del prompt. Il prompt include oggetti di generazione e termini stilistici. Puoi anche aggiungere il peso (importanza del token) nel prompt, ma i comandi e i parametri dell'interfaccia a linea di comando non funzioneranno.",
   "gallery": "Galleria visualizza le generazioni dalla cartella degli output man mano che vengono create. Le impostazioni sono memorizzate all'interno di file e accessibili dal menu contestuale.",
-  "other": "Queste opzioni abiliteranno modalità di elaborazione alternative per Invoke. 'Piastrella senza cuciture' creerà modelli ripetuti nell'output. 'Ottimizzzazione Alta risoluzione' è la generazione in due passaggi con 'Immagine a Immagine': usa questa impostazione quando vuoi un'immagine più grande e più coerente senza artefatti. Ci vorrà più tempo del solito 'Testo a Immagine'.",
+  "other": "Queste opzioni abiliteranno modalità di elaborazione alternative per Invoke. 'Piastrella senza cuciture' creerà modelli ripetuti nell'output. 'Ottimizzazione Alta risoluzione' è la generazione in due passaggi con 'Immagine a Immagine': usa questa impostazione quando vuoi un'immagine più grande e più coerente senza artefatti. Ci vorrà più tempo del solito 'Testo a Immagine'.",
   "seed": "Il valore del Seme influenza il rumore iniziale da cui è formata l'immagine. Puoi usare i semi già esistenti dalle immagini precedenti. 'Soglia del rumore' viene utilizzato per mitigare gli artefatti a valori CFG elevati (provare l'intervallo 0-10) e Perlin per aggiungere il rumore Perlin durante la generazione: entrambi servono per aggiungere variazioni ai risultati.",
   "variations": "Prova una variazione con un valore compreso tra 0.1 e 1.0 per modificare il risultato per un dato seme. Variazioni interessanti del seme sono comprese tra 0.1 e 0.3.",
   "upscale": "Utilizza ESRGAN per ingrandire l'immagine subito dopo la generazione.",
@@ -515,6 +579,6 @@
   "betaClear": "Svuota",
   "betaDarkenOutside": "Oscura all'esterno",
   "betaLimitToBox": "Limita al rettangolo",
-  "betaPreserveMasked": "Conserva quanto mascheato"
+  "betaPreserveMasked": "Conserva quanto mascherato"
   }
 }


@@ -160,7 +160,7 @@
   "title": "Увеличить размер миниатюр галереи",
   "desc": "Увеличивает размер миниатюр галереи"
 },
-"reduceGalleryThumbSize": {
+"decreaseGalleryThumbSize": {
   "title": "Уменьшает размер миниатюр галереи",
   "desc": "Уменьшает размер миниатюр галереи"
 },
@@ -172,7 +172,7 @@
   "title": "Выбрать ластик",
   "desc": "Выбирает ластик для холста"
 },
-"reduceBrushSize": {
+"decreaseBrushSize": {
   "title": "Уменьшить размер кисти",
   "desc": "Уменьшает размер кисти/ластика холста"
 },
@@ -180,7 +180,7 @@
   "title": "Увеличить размер кисти",
   "desc": "Увеличивает размер кисти/ластика холста"
 },
-"reduceBrushOpacity": {
+"decreaseBrushOpacity": {
   "title": "Уменьшить непрозрачность кисти",
   "desc": "Уменьшает непрозрачность кисти холста"
 },
@@ -494,7 +494,7 @@
   "cursorPosition": "Положение курсора",
   "previous": "Предыдущее",
   "next": "Следующее",
-  "принять": "Принять",
+  "accept": "Принять",
   "showHide": "Показать/Скрыть",
   "discardAll": "Отменить все",
   "betaClear": "Очистить",


@@ -160,7 +160,7 @@
   "title": "Збільшити розмір мініатюр галереї",
   "desc": "Збільшує розмір мініатюр галереї"
 },
-"reduceGalleryThumbSize": {
+"decreaseGalleryThumbSize": {
   "title": "Зменшує розмір мініатюр галереї",
   "desc": "Зменшує розмір мініатюр галереї"
 },
@@ -172,7 +172,7 @@
   "title": "Вибрати ластик",
   "desc": "Вибирає ластик для полотна"
 },
-"reduceBrushSize": {
+"decreaseBrushSize": {
   "title": "Зменшити розмір пензля",
   "desc": "Зменшує розмір пензля/ластика полотна"
 },
@@ -180,7 +180,7 @@
   "title": "Збільшити розмір пензля",
   "desc": "Збільшує розмір пензля/ластика полотна"
 },
-"reduceBrushOpacity": {
+"decreaseBrushOpacity": {
   "title": "Зменшити непрозорість пензля",
   "desc": "Зменшує непрозорість пензля полотна"
 },
@@ -354,7 +354,6 @@
   "seamBlur": "Розмиття шву",
   "seamStrength": "Сила шву",
   "seamSteps": "Кроки шву",
-  "inpaintReplace": "Inpaint-заміна",
   "scaleBeforeProcessing": "Масштабувати",
   "scaledWidth": "Масштаб Ш",
   "scaledHeight": "Масштаб В",
@@ -495,7 +494,7 @@
   "cursorPosition": "Розташування курсора",
   "previous": "Попереднє",
   "next": "Наступне",
-  "принять": "Приняти",
+  "accept": "Приняти",
   "showHide": "Показати/Сховати",
   "discardAll": "Відмінити все",
   "betaClear": "Очистити",


@@ -442,7 +442,12 @@
   "img2imgStrength": "Image To Image Strength",
   "toggleLoopback": "Toggle Loopback",
   "invoke": "Invoke",
-  "cancel": "Cancel",
+  "cancel": {
+    "immediate": "Cancel immediately",
+    "schedule": "Cancel after current iteration",
+    "isScheduled": "Canceling",
+    "setType": "Set cancel type"
+  },
   "promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
   "negativePrompts": "Negative Prompts",
   "sendTo": "Send to",


@@ -48,6 +48,7 @@ const systemBlacklist = [
   'totalIterations',
   'totalSteps',
   'openModel',
+  'cancelOptions.cancelAfter',
 ].map((blacklistItem) => `system.${blacklistItem}`);
 
 const galleryBlacklist = [


@@ -0,0 +1,102 @@
+import {
+  Menu,
+  MenuButton,
+  MenuItem,
+  MenuList,
+  type MenuProps,
+  type MenuButtonProps,
+  type MenuListProps,
+  type MenuItemProps,
+} from '@chakra-ui/react';
+import { MouseEventHandler, ReactNode } from 'react';
+import { MdArrowDropDown, MdArrowDropUp } from 'react-icons/md';
+import IAIButton from './IAIButton';
+import IAIIconButton from './IAIIconButton';
+
+interface IAIMenuItem {
+  item: ReactNode | string;
+  onClick: MouseEventHandler<HTMLButtonElement> | undefined;
+}
+
+interface IAIMenuProps {
+  menuType?: 'icon' | 'regular';
+  buttonText?: string;
+  iconTooltip?: string;
+  menuItems: IAIMenuItem[];
+  menuProps?: MenuProps;
+  menuButtonProps?: MenuButtonProps;
+  menuListProps?: MenuListProps;
+  menuItemProps?: MenuItemProps;
+}
+
+export default function IAISimpleMenu(props: IAIMenuProps) {
+  const {
+    menuType = 'icon',
+    iconTooltip,
+    buttonText,
+    menuItems,
+    menuProps,
+    menuButtonProps,
+    menuListProps,
+    menuItemProps,
+  } = props;
+
+  const renderMenuItems = () => {
+    const menuItemsToRender: ReactNode[] = [];
+    menuItems.forEach((menuItem, index) => {
+      menuItemsToRender.push(
+        <MenuItem
+          key={index}
+          onClick={menuItem.onClick}
+          fontSize="0.9rem"
+          color="var(--text-color-secondary)"
+          backgroundColor="var(--background-color-secondary)"
+          _focus={{
+            color: 'var(--text-color)',
+            backgroundColor: 'var(--border-color)',
+          }}
+          {...menuItemProps}
+        >
+          {menuItem.item}
+        </MenuItem>
+      );
+    });
+    return menuItemsToRender;
+  };
+
+  return (
+    <Menu {...menuProps}>
+      {({ isOpen }) => (
+        <>
+          <MenuButton
+            as={menuType === 'icon' ? IAIIconButton : IAIButton}
+            tooltip={iconTooltip}
+            icon={isOpen ? <MdArrowDropUp /> : <MdArrowDropDown />}
+            padding={menuType === 'regular' ? '0 0.5rem' : 0}
+            backgroundColor="var(--btn-base-color)"
+            _hover={{
+              backgroundColor: 'var(--btn-base-color-hover)',
+            }}
+            minWidth="1rem"
+            minHeight="1rem"
+            fontSize="1.5rem"
+            {...menuButtonProps}
+          >
+            {menuType === 'regular' && buttonText}
+          </MenuButton>
+          <MenuList
+            zIndex={15}
+            padding={0}
+            borderRadius="0.5rem"
+            backgroundColor="var(--background-color-secondary)"
+            color="var(--text-color-secondary)"
+            borderColor="var(--border-color)"
+            {...menuListProps}
+          >
+            {renderMenuItems()}
+          </MenuList>
+        </>
+      )}
+    </Menu>
+  );
+}
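
For reference, a minimal usage sketch of the new component (not part of this commit; the consumer component and its menu items below are hypothetical, but the props follow the `IAIMenuProps` interface above):

```tsx
import IAISimpleMenu from 'common/components/IAISimpleMenu';

// Hypothetical consumer: a text-button menu with two actions.
export default function ExportMenu() {
  return (
    <IAISimpleMenu
      menuType="regular"
      buttonText="Export"
      menuItems={[
        { item: 'Export as PNG', onClick: () => console.log('png') },
        { item: 'Export as JPEG', onClick: () => console.log('jpeg') },
      ]}
    />
  );
}
```

With `menuType="icon"` (the default) the button renders as an `IAIIconButton` with a drop-arrow icon instead of text, which is how the cancel-type menu below uses it.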


@@ -5,12 +5,20 @@ import IAIIconButton, {
   IAIIconButtonProps,
 } from 'common/components/IAIIconButton';
 import { systemSelector } from 'features/system/store/systemSelectors';
-import { SystemState } from 'features/system/store/systemSlice';
+import {
+  SystemState,
+  setCancelAfter,
+  setCancelType,
+} from 'features/system/store/systemSlice';
 import { isEqual } from 'lodash';
+import { useEffect, useCallback } from 'react';
+import { ButtonSpinner, ButtonGroup } from '@chakra-ui/react';
 import { useHotkeys } from 'react-hotkeys-hook';
 import { useTranslation } from 'react-i18next';
-import { MdCancel } from 'react-icons/md';
+import { MdCancel, MdCancelScheduleSend } from 'react-icons/md';
+import IAISimpleMenu from 'common/components/IAISimpleMenu';
 
 const cancelButtonSelector = createSelector(
   systemSelector,
@@ -19,6 +27,10 @@ const cancelButtonSelector = createSelector(
     isProcessing: system.isProcessing,
     isConnected: system.isConnected,
     isCancelable: system.isCancelable,
+    currentIteration: system.currentIteration,
+    totalIterations: system.totalIterations,
+    cancelType: system.cancelOptions.cancelType,
+    cancelAfter: system.cancelOptions.cancelAfter,
   };
 },
 {
@@ -31,14 +43,26 @@ const cancelButtonSelector = createSelector(
 export default function CancelButton(
   props: Omit<IAIIconButtonProps, 'aria-label'>
 ) {
-  const { ...rest } = props;
   const dispatch = useAppDispatch();
-  const { isProcessing, isConnected, isCancelable } =
-    useAppSelector(cancelButtonSelector);
-  const handleClickCancel = () => dispatch(cancelProcessing());
+  const { ...rest } = props;
+  const {
+    isProcessing,
+    isConnected,
+    isCancelable,
+    currentIteration,
+    totalIterations,
+    cancelType,
+    cancelAfter,
+  } = useAppSelector(cancelButtonSelector);
+
+  const handleClickCancel = useCallback(() => {
+    dispatch(cancelProcessing());
+    dispatch(setCancelAfter(null));
+  }, [dispatch]);
+
   const { t } = useTranslation();
+
+  const isCancelScheduled = cancelAfter === null ? false : true;
+
   useHotkeys(
     'shift+x',
     () => {
@@ -49,15 +73,82 @@ export default function CancelButton(
     [isConnected, isProcessing, isCancelable]
   );
 
+  useEffect(() => {
+    if (cancelAfter !== null && cancelAfter < currentIteration) {
+      handleClickCancel();
+    }
+  }, [cancelAfter, currentIteration, handleClickCancel]);
+
+  const cancelMenuItems = [
+    {
+      item: t('parameters.cancel.immediate'),
+      onClick: () => dispatch(setCancelType('immediate')),
+    },
+    {
+      item: t('parameters.cancel.schedule'),
+      onClick: () => dispatch(setCancelType('scheduled')),
+    },
+  ];
+
   return (
+    <ButtonGroup isAttached variant="link">
+      {cancelType === 'immediate' ? (
         <IAIIconButton
           icon={<MdCancel />}
-          tooltip={t('parameters.cancel')}
-          aria-label={t('parameters.cancel')}
+          tooltip={t('parameters.cancel.immediate')}
+          aria-label={t('parameters.cancel.immediate')}
           isDisabled={!isConnected || !isProcessing || !isCancelable}
           onClick={handleClickCancel}
-          styleClass="cancel-btn"
+          className="cancel-btn"
           {...rest}
         />
+      ) : (
+        <IAIIconButton
+          icon={
+            isCancelScheduled ? (
+              <ButtonSpinner color="var(--text-color)" />
+            ) : (
+              <MdCancelScheduleSend />
+            )
+          }
+          tooltip={
+            isCancelScheduled
+              ? t('parameters.cancel.isScheduled')
+              : t('parameters.cancel.schedule')
+          }
+          aria-label={
+            isCancelScheduled
+              ? t('parameters.cancel.isScheduled')
+              : t('parameters.cancel.schedule')
+          }
+          isDisabled={
+            !isConnected ||
+            !isProcessing ||
+            !isCancelable ||
+            currentIteration === totalIterations
+          }
+          onClick={() => {
+            // If a cancel request has already been made, and the user clicks again before the next iteration has been processed, stop the request.
+            if (isCancelScheduled) dispatch(setCancelAfter(null));
+            else dispatch(setCancelAfter(currentIteration));
+          }}
+          className="cancel-btn"
+          {...rest}
+        />
+      )}
+      <IAISimpleMenu
+        menuItems={cancelMenuItems}
+        iconTooltip={t('parameters.cancel.setType')}
+        menuButtonProps={{
+          backgroundColor: 'var(--destructive-color)',
+          color: 'var(--text-color)',
+          minWidth: '1.5rem',
+          minHeight: '1.5rem',
+          _hover: {
+            backgroundColor: 'var(--destructive-color-hover)',
+          },
+        }}
+      />
+    </ButtonGroup>
   );
 }


@@ -1,20 +1,21 @@
-import { Box, Flex, Text } from '@chakra-ui/react';
-import { createSelector } from '@reduxjs/toolkit';
+import { Box, Flex, Spinner, Text } from '@chakra-ui/react';
 import IAIInput from 'common/components/IAIInput';
-import { useMemo, useState, useTransition } from 'react';
+import IAIButton from 'common/components/IAIButton';
 import AddModel from './AddModel';
 import ModelListItem from './ModelListItem';
-import MergeModels from './MergeModels';
 import { useAppSelector } from 'app/storeHooks';
 import { useTranslation } from 'react-i18next';
-import IAIButton from 'common/components/IAIButton';
+import { createSelector } from '@reduxjs/toolkit';
 import { systemSelector } from 'features/system/store/systemSelectors';
 import type { SystemState } from 'features/system/store/systemSlice';
 import { isEqual, map } from 'lodash';
+import React, { useMemo, useState, useTransition } from 'react';
 import type { ChangeEvent, ReactNode } from 'react';
+import MergeModels from './MergeModels';
 
 const modelListSelector = createSelector(
   systemSelector,
@@ -58,6 +59,16 @@ function ModelFilterButton({
 const ModelList = () => {
   const models = useAppSelector(modelListSelector);
 
+  const [renderModelList, setRenderModelList] = React.useState<boolean>(false);
+
+  React.useEffect(() => {
+    const timer = setTimeout(() => {
+      setRenderModelList(true);
+    }, 200);
+
+    return () => clearTimeout(timer);
+  }, []);
+
   const [searchText, setSearchText] = useState<string>('');
   const [isSelectedFilter, setIsSelectedFilter] = useState<
     'all' | 'ckpt' | 'diffusers'
@@ -217,7 +228,19 @@ const ModelList = () => {
           isActive={isSelectedFilter === 'diffusers'}
         />
       </Flex>
-      {renderModelListItems}
+
+      {renderModelList ? (
+        renderModelListItems
+      ) : (
+        <Flex
+          width="100%"
+          minHeight="30rem"
+          justifyContent="center"
+          alignItems="center"
+        >
+          <Spinner />
+        </Flex>
+      )}
     </Flex>
   </Flex>
 );


@@ -23,6 +23,8 @@ export type ReadinessPayload = {
 export type InProgressImageType = 'none' | 'full-res' | 'latents';
 
+export type CancelType = 'immediate' | 'scheduled';
+
 export interface SystemState
   extends InvokeAI.SystemStatus,
     InvokeAI.SystemConfig {
@@ -50,6 +52,10 @@ export interface SystemState
   searchFolder: string | null;
   foundModels: InvokeAI.FoundModel[] | null;
   openModel: string | null;
+  cancelOptions: {
+    cancelType: CancelType;
+    cancelAfter: number | null;
+  };
 }
 
 const initialSystemState: SystemState = {
@@ -88,6 +94,10 @@
   searchFolder: null,
   foundModels: null,
   openModel: null,
+  cancelOptions: {
+    cancelType: 'immediate',
+    cancelAfter: null,
+  },
 };
 
 export const systemSlice = createSlice({
@@ -255,6 +265,12 @@ export const systemSlice = createSlice({
     setOpenModel: (state, action: PayloadAction<string | null>) => {
       state.openModel = action.payload;
     },
+    setCancelType: (state, action: PayloadAction<CancelType>) => {
+      state.cancelOptions.cancelType = action.payload;
+    },
+    setCancelAfter: (state, action: PayloadAction<number | null>) => {
+      state.cancelOptions.cancelAfter = action.payload;
+    },
   },
 });
@@ -288,6 +304,8 @@ export const {
   setSearchFolder,
   setFoundModels,
   setOpenModel,
+  setCancelType,
+  setCancelAfter,
 } = systemSlice.actions;
 
 export default systemSlice.reducer;
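
As a quick orientation (not part of this commit), here is a sketch of how the new actions drive the scheduled-cancel flow. The standalone one-slice store below is hypothetical, but the action names and the `cancelOptions` shape match the slice above:

```ts
import { configureStore } from '@reduxjs/toolkit';
import systemReducer, {
  setCancelType,
  setCancelAfter,
} from 'features/system/store/systemSlice';

// Hypothetical standalone store, for illustration only.
const store = configureStore({ reducer: { system: systemReducer } });

// Put the cancel button into scheduled mode...
store.dispatch(setCancelType('scheduled'));
// ...and request a cancel once the current iteration (say, 2) finishes.
// CancelButton's useEffect fires cancelProcessing() as soon as
// cancelAfter < currentIteration, then resets it via setCancelAfter(null).
store.dispatch(setCancelAfter(2));

console.log(store.getState().system.cancelOptions);
// => { cancelType: 'scheduled', cancelAfter: 2 }
```

Note that `cancelOptions.cancelAfter` is blacklisted from persistence earlier in this commit, so a scheduled cancel does not survive a page reload.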

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -751,6 +751,9 @@ class Args(object):
 !fix applies upscaling/facefixing to a previously-generated image.
 invoke> !fix 0000045.4829112.png -G1 -U4 -ft codeformer
 
+*embeddings*
+invoke> !triggers -- return all trigger phrases contained in loaded embedding files
+
 *History manipulation*
 !fetch retrieves the command used to generate an earlier image. Provide
 a directory wildcard and the name of a file to write and all the commands


@@ -60,7 +60,7 @@ COMMANDS = (
     '--text_mask','-tm',
     '!fix','!fetch','!replay','!history','!search','!clear',
     '!models','!switch','!import_model','!optimize_model','!convert_model','!edit_model','!del_model',
-    '!mask',
+    '!mask','!triggers',
 )
 MODEL_COMMANDS = (
     '!switch',


@@ -1,11 +1,12 @@
 import os
 import traceback
-from typing import Optional
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Optional, Union
 
 import torch
-from dataclasses import dataclass
 from picklescan.scanner import scan_file_path
-from transformers import CLIPTokenizer, CLIPTextModel
+from transformers import CLIPTextModel, CLIPTokenizer
 
 from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
@@ -21,11 +22,14 @@ class TextualInversion:
     def embedding_vector_length(self) -> int:
         return self.embedding.shape[0]
 
-class TextualInversionManager():
-    def __init__(self,
+
+class TextualInversionManager:
+    def __init__(
+        self,
         tokenizer: CLIPTokenizer,
         text_encoder: CLIPTextModel,
-        full_precision: bool=True):
+        full_precision: bool = True,
+    ):
         self.tokenizer = tokenizer
         self.text_encoder = text_encoder
         self.full_precision = full_precision
@@ -38,47 +42,70 @@ class TextualInversionManager():
             if concept_name in self.hf_concepts_library.concepts_loaded:
                 continue
             trigger = self.hf_concepts_library.concept_to_trigger(concept_name)
-            if self.has_textual_inversion_for_trigger_string(trigger) \
-               or self.has_textual_inversion_for_trigger_string(concept_name) \
-               or self.has_textual_inversion_for_trigger_string(f'<{concept_name}>'): # in case a token with literal angle brackets encountered
-                print(f'>> Loaded local embedding for trigger {concept_name}')
+            if (
+                self.has_textual_inversion_for_trigger_string(trigger)
+                or self.has_textual_inversion_for_trigger_string(concept_name)
+                or self.has_textual_inversion_for_trigger_string(f"<{concept_name}>")
+            ):  # in case a token with literal angle brackets encountered
+                print(f">> Loaded local embedding for trigger {concept_name}")
                 continue
             bin_file = self.hf_concepts_library.get_concept_model_path(concept_name)
             if not bin_file:
                 continue
-            print(f'>> Loaded remote embedding for trigger {concept_name}')
+            print(f">> Loaded remote embedding for trigger {concept_name}")
             self.load_textual_inversion(bin_file)
             self.hf_concepts_library.concepts_loaded[concept_name] = True
 
     def get_all_trigger_strings(self) -> list[str]:
         return [ti.trigger_string for ti in self.textual_inversions]
 
-    def load_textual_inversion(self, ckpt_path, defer_injecting_tokens: bool=False):
-        if str(ckpt_path).endswith('.DS_Store'):
+    def load_textual_inversion(self, ckpt_path: Union[str,Path], defer_injecting_tokens: bool = False):
+        ckpt_path = Path(ckpt_path)
+        if str(ckpt_path).endswith(".DS_Store"):
             return
         try:
-            scan_result = scan_file_path(ckpt_path)
+            scan_result = scan_file_path(str(ckpt_path))
             if scan_result.infected_files == 1:
-                print(f'\n### Security Issues Found in Model: {scan_result.issues_count}')
-                print('### For your safety, InvokeAI will not load this embed.')
+                print(
+                    f"\n### Security Issues Found in Model: {scan_result.issues_count}"
+                )
+                print("### For your safety, InvokeAI will not load this embed.")
                 return
         except Exception:
-            print(f"### WARNING::: Invalid or corrupt embeddings found. Ignoring: {ckpt_path}")
+            print(
+                f"### {ckpt_path.parents[0].name}/{ckpt_path.name} is damaged or corrupt."
+            )
+            return
+
+        embedding_info = self._parse_embedding(str(ckpt_path))
+
+        if (
+            self.text_encoder.get_input_embeddings().weight.data[0].shape[0]
+            != embedding_info["embedding"].shape[0]
+        ):
+            print(
+                f"** Notice: {ckpt_path.parents[0].name}/{ckpt_path.name} was trained on a model with a different token dimension. It can't be used with this model."
+            )
             return
 
-        embedding_info = self._parse_embedding(ckpt_path)
         if embedding_info:
             try:
-                self._add_textual_inversion(embedding_info['name'],
-                                            embedding_info['embedding'],
-                                            defer_injecting_tokens=defer_injecting_tokens)
+                self._add_textual_inversion(
+                    embedding_info["name"],
+                    embedding_info["embedding"],
+                    defer_injecting_tokens=defer_injecting_tokens,
+                )
             except ValueError as e:
                 print(f' | Ignoring incompatible embedding {embedding_info["name"]}')
-                print(f' | The error was {str(e)}')
+                print(f" | The error was {str(e)}")
         else:
-            print(f'>> Failed to load embedding located at {ckpt_path}. Unsupported file.')
+            print(
+                f">> Failed to load embedding located at {str(ckpt_path)}. Unsupported file."
+            )
 
-    def _add_textual_inversion(self, trigger_str, embedding, defer_injecting_tokens=False) -> TextualInversion:
+    def _add_textual_inversion(
+        self, trigger_str, embedding, defer_injecting_tokens=False
+    ) -> TextualInversion:
         """
         Add a textual inversion to be recognised.
         :param trigger_str: The trigger text in the prompt that activates this textual inversion. If unknown to the embedder's tokenizer, will be added.
@@ -86,46 +113,59 @@ class TextualInversionManager():
         :return: The token id for the added embedding, either existing or newly-added.
         """
         if trigger_str in [ti.trigger_string for ti in self.textual_inversions]:
-            print(f">> TextualInversionManager refusing to overwrite already-loaded token '{trigger_str}'")
+            print(
+                f">> TextualInversionManager refusing to overwrite already-loaded token '{trigger_str}'"
+            )
             return
         if not self.full_precision:
             embedding = embedding.half()
         if len(embedding.shape) == 1:
             embedding = embedding.unsqueeze(0)
         elif len(embedding.shape) > 2:
-            raise ValueError(f"TextualInversionManager cannot add {trigger_str} because the embedding shape {embedding.shape} is incorrect. The embedding must have shape [token_dim] or [V, token_dim] where V is vector length and token_dim is 768 for SD1 or 1280 for SD2.")
+            raise ValueError(
+                f"TextualInversionManager cannot add {trigger_str} because the embedding shape {embedding.shape} is incorrect. The embedding must have shape [token_dim] or [V, token_dim] where V is vector length and token_dim is 768 for SD1 or 1280 for SD2."
+            )
 
         try:
-            ti = TextualInversion(
-                trigger_string=trigger_str,
-                embedding=embedding
-            )
+            ti = TextualInversion(trigger_string=trigger_str, embedding=embedding)
             if not defer_injecting_tokens:
                 self._inject_tokens_and_assign_embeddings(ti)
             self.textual_inversions.append(ti)
             return ti
         except ValueError as e:
-            if str(e).startswith('Warning'):
+            if str(e).startswith("Warning"):
                 print(f">> {str(e)}")
             else:
                 traceback.print_exc()
-                print(f">> TextualInversionManager was unable to add a textual inversion with trigger string {trigger_str}.")
+                print(
+                    f">> TextualInversionManager was unable to add a textual inversion with trigger string {trigger_str}."
+                )
                 raise
 
     def _inject_tokens_and_assign_embeddings(self, ti: TextualInversion) -> int:
         if ti.trigger_token_id is not None:
-            raise ValueError(f"Tokens already injected for textual inversion with trigger '{ti.trigger_string}'")
+            raise ValueError(
+                f"Tokens already injected for textual inversion with trigger '{ti.trigger_string}'"
+            )
 
-        trigger_token_id = self._get_or_create_token_id_and_assign_embedding(ti.trigger_string, ti.embedding[0])
+        trigger_token_id = self._get_or_create_token_id_and_assign_embedding(
+            ti.trigger_string, ti.embedding[0]
+        )
 
         if ti.embedding_vector_length > 1:
             # for embeddings with vector length > 1
-            pad_token_strings = [ti.trigger_string + "-!pad-" + str(pad_index) for pad_index in range(1, ti.embedding_vector_length)]
+            pad_token_strings = [
+                ti.trigger_string + "-!pad-" + str(pad_index)
+                for pad_index in range(1, ti.embedding_vector_length)
+            ]
             # todo: batched UI for faster loading when vector length >2
-            pad_token_ids = [self._get_or_create_token_id_and_assign_embedding(pad_token_str, ti.embedding[1 + i]) \
-                             for (i, pad_token_str) in enumerate(pad_token_strings)]
+            pad_token_ids = [
+                self._get_or_create_token_id_and_assign_embedding(
+                    pad_token_str, ti.embedding[1 + i]
+                )
+                for (i, pad_token_str) in enumerate(pad_token_strings)
+            ]
         else:
             pad_token_ids = []
@@ -133,7 +173,6 @@ class TextualInversionManager():
         ti.pad_token_ids = pad_token_ids
         return ti.trigger_token_id
 
-
     def has_textual_inversion_for_trigger_string(self, trigger_string: str) -> bool:
         try:
             ti = self.get_textual_inversion_for_trigger_string(trigger_string)
@@ -141,32 +180,43 @@ class TextualInversionManager():
         except StopIteration:
             return False
 
-    def get_textual_inversion_for_trigger_string(self, trigger_string: str) -> TextualInversion:
-        return next(ti for ti in self.textual_inversions if ti.trigger_string == trigger_string)
+    def get_textual_inversion_for_trigger_string(
+        self, trigger_string: str
+    ) -> TextualInversion:
+        return next(
+            ti for ti in self.textual_inversions if ti.trigger_string == trigger_string
+        )
 
     def get_textual_inversion_for_token_id(self, token_id: int) -> TextualInversion:
-        return next(ti for ti in self.textual_inversions if ti.trigger_token_id == token_id)
+        return next(
+            ti for ti in self.textual_inversions if ti.trigger_token_id == token_id
+        )
 
-    def create_deferred_token_ids_for_any_trigger_terms(self, prompt_string: str) -> list[int]:
+    def create_deferred_token_ids_for_any_trigger_terms(
+        self, prompt_string: str
+    ) -> list[int]:
         injected_token_ids = []
         for ti in self.textual_inversions:
             if ti.trigger_token_id is None and ti.trigger_string in prompt_string:
                 if ti.embedding_vector_length > 1:
-                    print(f">> Preparing tokens for textual inversion {ti.trigger_string}...")
+                    print(
+                        f">> Preparing tokens for textual inversion {ti.trigger_string}..."
+                    )
                 try:
                     self._inject_tokens_and_assign_embeddings(ti)
                 except ValueError as e:
-                    print(f' | Ignoring incompatible embedding trigger {ti.trigger_string}')
-                    print(f' | The error was {str(e)}')
+                    print(
+                        f" | Ignoring incompatible embedding trigger {ti.trigger_string}"
+                    )
+                    print(f" | The error was {str(e)}")
                     continue
                 injected_token_ids.append(ti.trigger_token_id)
                 injected_token_ids.extend(ti.pad_token_ids)
         return injected_token_ids
 
-    def expand_textual_inversion_token_ids_if_necessary(self, prompt_token_ids: list[int]) -> list[int]:
+    def expand_textual_inversion_token_ids_if_necessary(
+        self, prompt_token_ids: list[int]
+    ) -> list[int]:
         """
         Insert padding tokens as necessary into the passed-in list of token ids to match any textual inversions it includes.
@@ -181,20 +231,31 @@
             raise ValueError("prompt_token_ids must not start with bos_token_id")
         if prompt_token_ids[-1] == self.tokenizer.eos_token_id:
             raise ValueError("prompt_token_ids must not end with eos_token_id")
-        textual_inversion_trigger_token_ids = [ti.trigger_token_id for ti in self.textual_inversions]
+        textual_inversion_trigger_token_ids = [
+            ti.trigger_token_id for ti in self.textual_inversions
+        ]
         prompt_token_ids = prompt_token_ids.copy()
         for i, token_id in reversed(list(enumerate(prompt_token_ids))):
             if token_id in textual_inversion_trigger_token_ids:
-                textual_inversion = next(ti for ti in self.textual_inversions if ti.trigger_token_id == token_id)
+                textual_inversion = next(
+                    ti
+                    for ti in self.textual_inversions
+                    if ti.trigger_token_id == token_id
+                )
                 for pad_idx in range(0, textual_inversion.embedding_vector_length - 1):
-                    prompt_token_ids.insert(i+pad_idx+1, textual_inversion.pad_token_ids[pad_idx])
+                    prompt_token_ids.insert(
+                        i + pad_idx + 1, textual_inversion.pad_token_ids[pad_idx]
+                    )
 
         return prompt_token_ids
 
-    def _get_or_create_token_id_and_assign_embedding(self, token_str: str, embedding: torch.Tensor) -> int:
+    def _get_or_create_token_id_and_assign_embedding(
+        self, token_str: str, embedding: torch.Tensor
+    ) -> int:
         if len(embedding.shape) != 1:
-            raise ValueError("Embedding has incorrect shape - must be [token_dim] where token_dim is 768 for SD1 or 1280 for SD2")
+            raise ValueError(
+                "Embedding has incorrect shape - must be [token_dim] where token_dim is 768 for SD1 or 1280 for SD2"
+            )
         existing_token_id = self.tokenizer.convert_tokens_to_ids(token_str)
         if existing_token_id == self.tokenizer.unk_token_id:
             num_tokens_added = self.tokenizer.add_tokens(token_str)
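
To make the padding rule of `expand_textual_inversion_token_ids_if_necessary` above concrete, here is a small standalone sketch of the same reverse-walk expansion (TypeScript for illustration, with hypothetical token ids; the commit's actual implementation is the Python method above):

```ts
interface TextualInversionEntry {
  triggerTokenId: number;
  padTokenIds: number[]; // one id per extra embedding vector
}

// Walk the prompt in reverse so earlier indices stay valid
// while pad tokens are spliced in after each trigger token.
function expandTokenIds(
  promptTokenIds: number[],
  inversions: TextualInversionEntry[]
): number[] {
  const out = promptTokenIds.slice();
  for (let i = out.length - 1; i >= 0; i--) {
    const ti = inversions.find((t) => t.triggerTokenId === out[i]);
    if (ti) out.splice(i + 1, 0, ...ti.padTokenIds);
  }
  return out;
}

// A 3-vector embedding triggered by id 49408 carries two pad ids:
console.log(
  expandTokenIds(
    [320, 49408, 525],
    [{ triggerTokenId: 49408, padTokenIds: [49409, 49410] }]
  )
);
// => [320, 49408, 49409, 49410, 525]
```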
@@ -207,66 +268,78 @@
             token_id = self.tokenizer.convert_tokens_to_ids(token_str)
             if token_id == self.tokenizer.unk_token_id:
                 raise RuntimeError(f"Unable to find token id for token '{token_str}'")
-        if self.text_encoder.get_input_embeddings().weight.data[token_id].shape != embedding.shape:
-            raise ValueError(f"Warning. Cannot load embedding for {token_str}. It was trained on a model with token dimension {embedding.shape[0]}, but the current model has token dimension {self.text_encoder.get_input_embeddings().weight.data[token_id].shape[0]}.")
+        if (
+            self.text_encoder.get_input_embeddings().weight.data[token_id].shape
+            != embedding.shape
+        ):
+            raise ValueError(
+                f"Warning. Cannot load embedding for {token_str}. It was trained on a model with token dimension {embedding.shape[0]}, but the current model has token dimension {self.text_encoder.get_input_embeddings().weight.data[token_id].shape[0]}."
+            )
         self.text_encoder.get_input_embeddings().weight.data[token_id] = embedding
 
         return token_id
 
     def _parse_embedding(self, embedding_file: str):
-        file_type = embedding_file.split('.')[-1]
-        if file_type == 'pt':
+        file_type = embedding_file.split(".")[-1]
+        if file_type == "pt":
             return self._parse_embedding_pt(embedding_file)
-        elif file_type == 'bin':
+        elif file_type == "bin":
             return self._parse_embedding_bin(embedding_file)
         else:
-            print(f'>> Not a recognized embedding file: {embedding_file}')
+            print(f">> Not a recognized embedding file: {embedding_file}")
 
     def _parse_embedding_pt(self, embedding_file):
-        embedding_ckpt = torch.load(embedding_file, map_location='cpu')
+        embedding_ckpt = torch.load(embedding_file, map_location="cpu")
         embedding_info = {}
 
         # Check if valid embedding file
-        if 'string_to_token' and 'string_to_param' in embedding_ckpt:
+        if "string_to_token" and "string_to_param" in embedding_ckpt:
             # Catch variants that do not have the expected keys or values.
             try:
-                embedding_info['name'] = embedding_ckpt['name'] or os.path.basename(os.path.splitext(embedding_file)[0])
+                embedding_info["name"] = embedding_ckpt["name"] or os.path.basename(
+                    os.path.splitext(embedding_file)[0]
+                )
 
                 # Check num of embeddings and warn user only the first will be used
-                embedding_info['num_of_embeddings'] = len(embedding_ckpt["string_to_token"])
-                if embedding_info['num_of_embeddings'] > 1:
-                    print('>> More than 1 embedding found. Will use the first one')
+                embedding_info["num_of_embeddings"] = len(
+                    embedding_ckpt["string_to_token"]
+                )
+                if embedding_info["num_of_embeddings"] > 1:
+                    print(">> More than 1 embedding found. Will use the first one")
 
-                embedding = list(embedding_ckpt['string_to_param'].values())[0]
+                embedding = list(embedding_ckpt["string_to_param"].values())[0]
             except (AttributeError, KeyError):
                 return self._handle_broken_pt_variants(embedding_ckpt, embedding_file)
 
-            embedding_info['embedding'] = embedding
-            embedding_info['num_vectors_per_token'] = embedding.size()[0]
-            embedding_info['token_dim'] = embedding.size()[1]
+            embedding_info["embedding"] = embedding
+            embedding_info["num_vectors_per_token"] = embedding.size()[0]
+            embedding_info["token_dim"] = embedding.size()[1]
 
             try:
-                embedding_info['trained_steps'] = embedding_ckpt['step']
-                embedding_info['trained_model_name'] = embedding_ckpt['sd_checkpoint_name']
-                embedding_info['trained_model_checksum'] = embedding_ckpt['sd_checkpoint']
+                embedding_info["trained_steps"] = embedding_ckpt["step"]
+                embedding_info["trained_model_name"] = embedding_ckpt[
+                    "sd_checkpoint_name"
+                ]
+                embedding_info["trained_model_checksum"] = embedding_ckpt[
+                    "sd_checkpoint"
+                ]
             except AttributeError:
                 print(">> No Training Details Found. Passing ...")
 
         # .pt files found at https://cyberes.github.io/stable-diffusion-textual-inversion-models/
        # They are actually .bin files
         elif len(embedding_ckpt.keys()) == 1:
-            print('>> Detected .bin file masquerading as .pt file')
+            print(">> Detected .bin file masquerading as .pt file")
             embedding_info = self._parse_embedding_bin(embedding_file)
 
         else:
-            print('>> Invalid embedding format')
+            print(">> Invalid embedding format")
             embedding_info = None
 
         return embedding_info
 
     def _parse_embedding_bin(self, embedding_file):
-        embedding_ckpt = torch.load(embedding_file, map_location='cpu')
+        embedding_ckpt = torch.load(embedding_file, map_location="cpu")
         embedding_info = {}
 
         if list(embedding_ckpt.keys()) == 0:
@@ -274,27 +347,45 @@
             embedding_info = None
         else:
             for token in list(embedding_ckpt.keys()):
-                embedding_info['name'] = token or os.path.basename(os.path.splitext(embedding_file)[0])
-                embedding_info['embedding'] = embedding_ckpt[token]
-                embedding_info['num_vectors_per_token'] = 1 # All Concepts seem to default to 1
-                embedding_info['token_dim'] = embedding_info['embedding'].size()[0]
+                embedding_info["name"] = token or os.path.basename(
+                    os.path.splitext(embedding_file)[0]
+                )
+                embedding_info["embedding"] = embedding_ckpt[token]
+                embedding_info[
+                    "num_vectors_per_token"
+                ] = 1  # All Concepts seem to default to 1
+                embedding_info["token_dim"] = embedding_info["embedding"].size()[0]
 
         return embedding_info
 
-    def _handle_broken_pt_variants(self, embedding_ckpt:dict, embedding_file:str)->dict:
-        '''
+    def _handle_broken_pt_variants(
+        self, embedding_ckpt: dict, embedding_file: str
+    ) -> dict:
+        """
         This handles the broken .pt file variants. We only know of one at present.
-        '''
+        """
         embedding_info = {}
-        if isinstance(list(embedding_ckpt['string_to_token'].values())[0],torch.Tensor):
-            print('>> Detected .pt file variant 1') # example at https://github.com/invoke-ai/InvokeAI/issues/1829
-            for token in list(embedding_ckpt['string_to_token'].keys()):
-                embedding_info['name'] = token if token != '*' else os.path.basename(os.path.splitext(embedding_file)[0])
-                embedding_info['embedding'] = embedding_ckpt['string_to_param'].state_dict()[token]
-                embedding_info['num_vectors_per_token'] = embedding_info['embedding'].shape[0]
-                embedding_info['token_dim'] = embedding_info['embedding'].size()[0]
+        if isinstance(
+            list(embedding_ckpt["string_to_token"].values())[0], torch.Tensor
+        ):
+            print(
+                ">> Detected .pt file variant 1"
+            )  # example at https://github.com/invoke-ai/InvokeAI/issues/1829
+            for token in list(embedding_ckpt["string_to_token"].keys()):
+                embedding_info["name"] = (
+                    token
+                    if token != "*"
+                    else os.path.basename(os.path.splitext(embedding_file)[0])
+                )
+                embedding_info["embedding"] = embedding_ckpt[
+                    "string_to_param"
+                ].state_dict()[token]
+                embedding_info["num_vectors_per_token"] = embedding_info[
+                    "embedding"
+                ].shape[0]
+                embedding_info["token_dim"] = embedding_info["embedding"].size()[0]
         else:
-            print('>> Invalid embedding format')
+            print(">> Invalid embedding format")
             embedding_info = None
 
         return embedding_info