Merge remote-tracking branch 'upstream/main' into refactor_use_compel

This commit is contained in:
Damian Stewart 2023-02-20 23:34:38 +01:00
commit 6420b81a5d
92 changed files with 2642 additions and 3717 deletions

59
.github/CODEOWNERS vendored
View File

@ -1,50 +1,51 @@
# continuous integration # continuous integration
/.github/workflows/ @mauwii /.github/workflows/ @mauwii @lstein @blessedcoolant
# documentation # documentation
/docs/ @lstein @mauwii @tildebyte /docs/ @lstein @mauwii @tildebyte @blessedcoolant
mkdocs.yml @lstein @mauwii mkdocs.yml @lstein @mauwii @blessedcoolant
# installation and configuration # installation and configuration
/pyproject.toml @mauwii @lstein @ebr /pyproject.toml @mauwii @lstein @ebr @blessedcoolant
/docker/ @mauwii /docker/ @mauwii @lstein @blessedcoolant
/scripts/ @ebr @lstein /scripts/ @ebr @lstein @blessedcoolant
/installer/ @ebr @lstein @tildebyte /installer/ @ebr @lstein @tildebyte @blessedcoolant
ldm/invoke/config @lstein @ebr ldm/invoke/config @lstein @ebr @blessedcoolant
invokeai/assets @lstein @ebr invokeai/assets @lstein @ebr @blessedcoolant
invokeai/configs @lstein @ebr invokeai/configs @lstein @ebr @blessedcoolant
/ldm/invoke/_version.py @lstein @blessedcoolant /ldm/invoke/_version.py @lstein @blessedcoolant
# web ui # web ui
/invokeai/frontend @blessedcoolant @psychedelicious /invokeai/frontend @blessedcoolant @psychedelicious @lstein
/invokeai/backend @blessedcoolant @psychedelicious /invokeai/backend @blessedcoolant @psychedelicious @lstein
# generation and model management # generation and model management
/ldm/*.py @lstein /ldm/*.py @lstein @blessedcoolant
/ldm/generate.py @lstein @keturn /ldm/generate.py @lstein @keturn @blessedcoolant
/ldm/invoke/args.py @lstein @blessedcoolant /ldm/invoke/args.py @lstein @blessedcoolant
/ldm/invoke/ckpt* @lstein /ldm/invoke/ckpt* @lstein @blessedcoolant
/ldm/invoke/ckpt_generator @lstein /ldm/invoke/ckpt_generator @lstein @blessedcoolant
/ldm/invoke/CLI.py @lstein /ldm/invoke/CLI.py @lstein @blessedcoolant
/ldm/invoke/config @lstein @ebr @mauwii /ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
/ldm/invoke/generator @keturn @damian0815 /ldm/invoke/generator @keturn @damian0815 @blessedcoolant
/ldm/invoke/globals.py @lstein @blessedcoolant /ldm/invoke/globals.py @lstein @blessedcoolant
/ldm/invoke/merge_diffusers.py @lstein /ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
/ldm/invoke/model_manager.py @lstein @blessedcoolant /ldm/invoke/model_manager.py @lstein @blessedcoolant
/ldm/invoke/txt2mask.py @lstein /ldm/invoke/txt2mask.py @lstein @blessedcoolant
/ldm/invoke/patchmatch.py @Kyle0654 /ldm/invoke/patchmatch.py @Kyle0654 @blessedcoolant @lstein
/ldm/invoke/restoration @lstein @blessedcoolant /ldm/invoke/restoration @lstein @blessedcoolant
# attention, textual inversion, model configuration # attention, textual inversion, model configuration
/ldm/models @damian0815 @keturn /ldm/models @damian0815 @keturn @lstein @blessedcoolant
/ldm/modules @damian0815 @keturn /ldm/modules @damian0815 @keturn @lstein @blessedcoolant
# Nodes # Nodes
apps/ @Kyle0654 apps/ @Kyle0654 @lstein @blessedcoolant
# legacy REST API # legacy REST API
# is CapableWeb still engaged? # is CapableWeb still engaged?
/ldm/invoke/pngwriter.py @CapableWeb /ldm/invoke/pngwriter.py @CapableWeb @lstein @blessedcoolant
/ldm/invoke/server_legacy.py @CapableWeb /ldm/invoke/server_legacy.py @CapableWeb @lstein @blessedcoolant
/scripts/legacy_api.py @CapableWeb /scripts/legacy_api.py @CapableWeb @lstein @blessedcoolant
/tests/legacy_tests.sh @CapableWeb /tests/legacy_tests.sh @CapableWeb @lstein @blessedcoolant

View File

@ -10,7 +10,7 @@
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link] [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github [CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain [CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
@ -28,6 +28,8 @@
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main [latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github [latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases [latest release link]: https://github.com/invoke-ai/InvokeAI/releases
[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
[translation status link]: https://hosted.weblate.org/engage/invokeai/
</div> </div>
@ -257,6 +259,8 @@ cleanup, testing, or code reviews, is very much encouraged to do so.
To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board. To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
If you'd like to help with localization, please register on [Weblate][translation status link]. If you want add a new language, please let us know which language and we will add it to the Weblate project.
If you are unfamiliar with how If you are unfamiliar with how
to contribute to GitHub projects, here is a to contribute to GitHub projects, here is a
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**. [Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.

View File

@ -214,6 +214,8 @@ Here are the invoke> command that apply to txt2img:
| `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series a riffs on a starting image. See [Variations](./VARIATIONS.md). | | `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series a riffs on a starting image. See [Variations](./VARIATIONS.md). |
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. | | `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory | | `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
| `--h_symmetry_time_pct <float>` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
| `--v_symmetry_time_pct <float>` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
!!! note !!! note

View File

@ -40,7 +40,7 @@ for adj in adjectives:
print(f'a {adj} day -A{samp} -C{cg}') print(f'a {adj} day -A{samp} -C{cg}')
``` ```
It's output looks like this (abbreviated): Its output looks like this (abbreviated):
```bash ```bash
a sunny day -Aklms -C7.5 a sunny day -Aklms -C7.5

View File

@ -1,19 +0,0 @@
<!-- HTML for static distribution bundle build -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Swagger UI</title>
<link rel="stylesheet" type="text/css" href="swagger-ui/swagger-ui.css" />
<link rel="stylesheet" type="text/css" href="swagger-ui/index.css" />
<link rel="icon" type="image/png" href="swagger-ui/favicon-32x32.png" sizes="32x32" />
<link rel="icon" type="image/png" href="swagger-ui/favicon-16x16.png" sizes="16x16" />
</head>
<body>
<div id="swagger-ui"></div>
<script src="swagger-ui/swagger-ui-bundle.js" charset="UTF-8"> </script>
<script src="swagger-ui/swagger-ui-standalone-preset.js" charset="UTF-8"> </script>
<script src="swagger-ui/swagger-initializer.js" charset="UTF-8"> </script>
</body>
</html>

View File

@ -1,73 +0,0 @@
openapi: 3.0.3
info:
title: Stable Diffusion
description: |-
TODO: Description Here
Some useful links:
- [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)
license:
name: MIT License
url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
version: 1.0.0
servers:
- url: http://localhost:9090/api
tags:
- name: images
description: Retrieve and manage generated images
paths:
/images/{imageId}:
get:
tags:
- images
summary: Get image by ID
description: Returns a single image
operationId: getImageById
parameters:
- name: imageId
in: path
description: ID of image to return
required: true
schema:
type: string
responses:
'200':
description: successful operation
content:
image/png:
schema:
type: string
format: binary
'404':
description: Image not found
/intermediates/{intermediateId}/{step}:
get:
tags:
- images
summary: Get intermediate image by ID
description: Returns a single intermediate image
operationId: getIntermediateById
parameters:
- name: intermediateId
in: path
description: ID of intermediate to return
required: true
schema:
type: string
- name: step
in: path
description: The generation step of the intermediate
required: true
schema:
type: string
responses:
'200':
description: successful operation
content:
image/png:
schema:
type: string
format: binary
'404':
description: Intermediate not found

Binary file not shown.

Before

Width:  |  Height:  |  Size: 665 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 628 B

View File

@ -1,16 +0,0 @@
html {
box-sizing: border-box;
overflow: -moz-scrollbars-vertical;
overflow-y: scroll;
}
*,
*:before,
*:after {
box-sizing: inherit;
}
body {
margin: 0;
background: #fafafa;
}

View File

@ -1,79 +0,0 @@
<!doctype html>
<html lang="en-US">
<head>
<title>Swagger UI: OAuth2 Redirect</title>
</head>
<body>
<script>
'use strict';
function run () {
var oauth2 = window.opener.swaggerUIRedirectOauth2;
var sentState = oauth2.state;
var redirectUrl = oauth2.redirectUrl;
var isValid, qp, arr;
if (/code|token|error/.test(window.location.hash)) {
qp = window.location.hash.substring(1).replace('?', '&');
} else {
qp = location.search.substring(1);
}
arr = qp.split("&");
arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
qp = qp ? JSON.parse('{' + arr.join() + '}',
function (key, value) {
return key === "" ? value : decodeURIComponent(value);
}
) : {};
isValid = qp.state === sentState;
if ((
oauth2.auth.schema.get("flow") === "accessCode" ||
oauth2.auth.schema.get("flow") === "authorizationCode" ||
oauth2.auth.schema.get("flow") === "authorization_code"
) && !oauth2.auth.code) {
if (!isValid) {
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "warning",
message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
});
}
if (qp.code) {
delete oauth2.state;
oauth2.auth.code = qp.code;
oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
} else {
let oauthErrorMsg;
if (qp.error) {
oauthErrorMsg = "["+qp.error+"]: " +
(qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
(qp.error_uri ? "More info: "+qp.error_uri : "");
}
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "error",
message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
});
}
} else {
oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
}
window.close();
}
if (document.readyState !== 'loading') {
run();
} else {
document.addEventListener('DOMContentLoaded', function () {
run();
});
}
</script>
</body>
</html>

View File

@ -1,20 +0,0 @@
window.onload = function() {
//<editor-fold desc="Changeable Configuration Block">
// the following lines will be replaced by docker/configurator, when it runs in a docker-container
window.ui = SwaggerUIBundle({
url: "openapi3_0.yaml",
dom_id: '#swagger-ui',
deepLinking: true,
presets: [
SwaggerUIBundle.presets.apis,
SwaggerUIStandalonePreset
],
plugins: [
SwaggerUIBundle.plugins.DownloadUrl
],
layout: "StandaloneLayout"
});
//</editor-fold>
};

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -12,9 +12,10 @@ echo 2. browser-based UI
echo 3. run textual inversion training echo 3. run textual inversion training
echo 4. merge models (diffusers type only) echo 4. merge models (diffusers type only)
echo 5. re-run the configure script to download new models echo 5. re-run the configure script to download new models
echo 6. open the developer console echo 6. update InvokeAI
echo 7. command-line help echo 7. open the developer console
set /P restore="Please enter 1, 2, 3, 4, 5, 6 or 7: [2] " echo 8. command-line help
set /P restore="Please enter 1, 2, 3, 4, 5, 6, 7 or 8: [2] "
if not defined restore set restore=2 if not defined restore set restore=2
IF /I "%restore%" == "1" ( IF /I "%restore%" == "1" (
echo Starting the InvokeAI command-line.. echo Starting the InvokeAI command-line..
@ -32,6 +33,9 @@ IF /I "%restore%" == "1" (
echo Running invokeai-configure... echo Running invokeai-configure...
python .venv\Scripts\invokeai-configure.exe %* python .venv\Scripts\invokeai-configure.exe %*
) ELSE IF /I "%restore%" == "6" ( ) ELSE IF /I "%restore%" == "6" (
echo Running invokeai-update...
python .venv\Scripts\invokeai-update.exe %*
) ELSE IF /I "%restore%" == "7" (
echo Developer Console echo Developer Console
echo Python command is: echo Python command is:
where python where python
@ -43,7 +47,7 @@ IF /I "%restore%" == "1" (
echo ************************* echo *************************
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment *** echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
call cmd /k call cmd /k
) ELSE IF /I "%restore%" == "7" ( ) ELSE IF /I "%restore%" == "8" (
echo Displaying command line help... echo Displaying command line help...
python .venv\Scripts\invokeai.exe --help %* python .venv\Scripts\invokeai.exe --help %*
pause pause

View File

@ -30,11 +30,12 @@ if [ "$0" != "bash" ]; then
echo "2. browser-based UI" echo "2. browser-based UI"
echo "3. run textual inversion training" echo "3. run textual inversion training"
echo "4. merge models (diffusers type only)" echo "4. merge models (diffusers type only)"
echo "5. open the developer console" echo "5. re-run the configure script to download new models"
echo "6. re-run the configure script to download new models" echo "6. update InvokeAI"
echo "7. command-line help " echo "7. open the developer console"
echo "8. command-line help"
echo "" echo ""
read -p "Please enter 1, 2, 3, 4, 5, 6 or 7: [2] " yn read -p "Please enter 1, 2, 3, 4, 5, 6, 7 or 8: [2] " yn
choice=${yn:='2'} choice=${yn:='2'}
case $choice in case $choice in
1) 1)
@ -54,14 +55,19 @@ if [ "$0" != "bash" ]; then
exec invokeai-merge --gui $@ exec invokeai-merge --gui $@
;; ;;
5) 5)
echo "Configuration:"
exec invokeai-configure --root ${INVOKEAI_ROOT}
;;
6)
echo "Update:"
exec invokeai-update
;;
7)
echo "Developer Console:" echo "Developer Console:"
file_name=$(basename "${BASH_SOURCE[0]}") file_name=$(basename "${BASH_SOURCE[0]}")
bash --init-file "$file_name" bash --init-file "$file_name"
;; ;;
6) 8)
exec invokeai-configure --root ${INVOKEAI_ROOT}
;;
7)
exec invokeai --help exec invokeai --help
;; ;;
*) *)

View File

@ -1,4 +1,4 @@
#!/usr/bin/env sh #!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh" . "$(dirname -- "$0")/_/husky.sh"
cd invokeai/frontend/ && npx run lint cd invokeai/frontend/ && npm run lint-staged

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -5,7 +5,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title> <title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" /> <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-fff3415a.js"></script> <script type="module" crossorigin src="./assets/index-762ec810.js"></script>
<link rel="stylesheet" href="./assets/index-14cb2922.css"> <link rel="stylesheet" href="./assets/index-14cb2922.css">
</head> </head>

View File

@ -380,7 +380,6 @@
"img2imgStrength": "قوة صورة إلى صورة", "img2imgStrength": "قوة صورة إلى صورة",
"toggleLoopback": "تبديل الإعادة", "toggleLoopback": "تبديل الإعادة",
"invoke": "إطلاق", "invoke": "إطلاق",
"cancel": "إلغاء",
"promptPlaceholder": "اكتب المحث هنا. [العلامات السلبية], (زيادة الوزن) ++, (نقص الوزن)--, التبديل و الخلط متاحة (انظر الوثائق)", "promptPlaceholder": "اكتب المحث هنا. [العلامات السلبية], (زيادة الوزن) ++, (نقص الوزن)--, التبديل و الخلط متاحة (انظر الوثائق)",
"sendTo": "أرسل إلى", "sendTo": "أرسل إلى",
"sendToImg2Img": "أرسل إلى صورة إلى صورة", "sendToImg2Img": "أرسل إلى صورة إلى صورة",

View File

@ -357,7 +357,6 @@
"img2imgStrength": "Bild-zu-Bild-Stärke", "img2imgStrength": "Bild-zu-Bild-Stärke",
"toggleLoopback": "Toggle Loopback", "toggleLoopback": "Toggle Loopback",
"invoke": "Invoke", "invoke": "Invoke",
"cancel": "Abbrechen",
"promptPlaceholder": "Prompt hier eingeben. [negative Token], (mehr Gewicht)++, (geringeres Gewicht)--, Tausch und Überblendung sind verfügbar (siehe Dokumente)", "promptPlaceholder": "Prompt hier eingeben. [negative Token], (mehr Gewicht)++, (geringeres Gewicht)--, Tausch und Überblendung sind verfügbar (siehe Dokumente)",
"sendTo": "Senden an", "sendTo": "Senden an",
"sendToImg2Img": "Senden an Bild zu Bild", "sendToImg2Img": "Senden an Bild zu Bild",

View File

@ -441,6 +441,9 @@
"infillScalingHeader": "Infill and Scaling", "infillScalingHeader": "Infill and Scaling",
"img2imgStrength": "Image To Image Strength", "img2imgStrength": "Image To Image Strength",
"toggleLoopback": "Toggle Loopback", "toggleLoopback": "Toggle Loopback",
"symmetry": "Symmetry",
"hSymmetryStep": "H Symmetry Step",
"vSymmetryStep": "V Symmetry Step",
"invoke": "Invoke", "invoke": "Invoke",
"cancel": { "cancel": {
"immediate": "Cancel immediately", "immediate": "Cancel immediately",

View File

@ -365,7 +365,6 @@
"img2imgStrength": "Peso de Imagen a Imagen", "img2imgStrength": "Peso de Imagen a Imagen",
"toggleLoopback": "Alternar Retroalimentación", "toggleLoopback": "Alternar Retroalimentación",
"invoke": "Invocar", "invoke": "Invocar",
"cancel": "Cancelar",
"promptPlaceholder": "Ingrese la entrada aquí. [símbolos negativos], (subir peso)++, (bajar peso)--, también disponible alternado y mezclado (ver documentación)", "promptPlaceholder": "Ingrese la entrada aquí. [símbolos negativos], (subir peso)++, (bajar peso)--, también disponible alternado y mezclado (ver documentación)",
"sendTo": "Enviar a", "sendTo": "Enviar a",
"sendToImg2Img": "Enviar a Imagen a Imagen", "sendToImg2Img": "Enviar a Imagen a Imagen",

View File

@ -15,8 +15,8 @@
"langFrench": "Français", "langFrench": "Français",
"nodesDesc": "Un système basé sur les nœuds pour la génération d'images est actuellement en développement. Restez à l'écoute pour des mises à jour à ce sujet.", "nodesDesc": "Un système basé sur les nœuds pour la génération d'images est actuellement en développement. Restez à l'écoute pour des mises à jour à ce sujet.",
"postProcessing": "Post-traitement", "postProcessing": "Post-traitement",
"postProcessDesc1": "Invoke AI offre une grande variété de fonctionnalités de post-traitement. Le redimensionnement d'images et la restauration de visages sont déjà disponibles dans la WebUI. Vous pouvez y accéder à partir du menu Options avancées des onglets Texte en image et Image en image. Vous pouvez également traiter les images directement en utilisant les boutons d'action d'image ci-dessus l'affichage d'image actuel ou dans le visualiseur.", "postProcessDesc1": "Invoke AI offre une grande variété de fonctionnalités de post-traitement. Le redimensionnement d'images et la restauration de visages sont déjà disponibles dans la WebUI. Vous pouvez y accéder à partir du menu 'Options avancées' des onglets 'Texte vers image' et 'Image vers image'. Vous pouvez également traiter les images directement en utilisant les boutons d'action d'image au-dessus de l'affichage d'image actuel ou dans le visualiseur.",
"postProcessDesc2": "Une interface utilisateur dédiée sera bientôt disponible pour faciliter les workflows de post-traitement plus avancés.", "postProcessDesc2": "Une interface dédiée sera bientôt disponible pour faciliter les workflows de post-traitement plus avancés.",
"postProcessDesc3": "L'interface en ligne de commande d'Invoke AI offre diverses autres fonctionnalités, notamment Embiggen.", "postProcessDesc3": "L'interface en ligne de commande d'Invoke AI offre diverses autres fonctionnalités, notamment Embiggen.",
"training": "Formation", "training": "Formation",
"trainingDesc1": "Un workflow dédié pour former vos propres embeddings et checkpoints en utilisant Textual Inversion et Dreambooth depuis l'interface web.", "trainingDesc1": "Un workflow dédié pour former vos propres embeddings et checkpoints en utilisant Textual Inversion et Dreambooth depuis l'interface web.",
@ -25,27 +25,27 @@
"close": "Fermer", "close": "Fermer",
"load": "Charger", "load": "Charger",
"back": "Retour", "back": "Retour",
"statusConnected": "Connecté", "statusConnected": "En ligne",
"statusDisconnected": "Déconnecté", "statusDisconnected": "Hors ligne",
"statusError": "Erreur", "statusError": "Erreur",
"statusPreparing": "Préparation", "statusPreparing": "Préparation",
"statusProcessingCanceled": "Traitement Annulé", "statusProcessingCanceled": "Traitement annulé",
"statusProcessingComplete": "Traitement Terminé", "statusProcessingComplete": "Traitement terminé",
"statusGenerating": "Génération", "statusGenerating": "Génération",
"statusGeneratingTextToImage": "Génération Texte vers Image", "statusGeneratingTextToImage": "Génération Texte vers Image",
"statusGeneratingImageToImage": "Génération Image vers Image", "statusGeneratingImageToImage": "Génération Image vers Image",
"statusGeneratingInpainting": "Génération de Réparation", "statusGeneratingInpainting": "Génération de réparation",
"statusGeneratingOutpainting": "Génération de Completion", "statusGeneratingOutpainting": "Génération de complétion",
"statusGenerationComplete": "Génération Terminée", "statusGenerationComplete": "Génération terminée",
"statusIterationComplete": "Itération Terminée", "statusIterationComplete": "Itération terminée",
"statusSavingImage": "Sauvegarde de l'Image", "statusSavingImage": "Sauvegarde de l'image",
"statusRestoringFaces": "Restauration des Visages", "statusRestoringFaces": "Restauration des visages",
"statusRestoringFacesGFPGAN": "Restauration des Visages (GFPGAN)", "statusRestoringFacesGFPGAN": "Restauration des visages (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restauration des Visages (CodeFormer)", "statusRestoringFacesCodeFormer": "Restauration des visages (CodeFormer)",
"statusUpscaling": "Mise à Échelle", "statusUpscaling": "Mise à échelle",
"statusUpscalingESRGAN": "Mise à Échelle (ESRGAN)", "statusUpscalingESRGAN": "Mise à échelle (ESRGAN)",
"statusLoadingModel": "Chargement du Modèle", "statusLoadingModel": "Chargement du modèle",
"statusModelChanged": "Modèle Changé" "statusModelChanged": "Modèle changé"
}, },
"gallery": { "gallery": {
"generations": "Générations", "generations": "Générations",
@ -68,7 +68,7 @@
"appHotkeys": "Raccourcis de l'application", "appHotkeys": "Raccourcis de l'application",
"generalHotkeys": "Raccourcis généraux", "generalHotkeys": "Raccourcis généraux",
"galleryHotkeys": "Raccourcis de la galerie", "galleryHotkeys": "Raccourcis de la galerie",
"unifiedCanvasHotkeys": "Raccourcis du Canvas unifié", "unifiedCanvasHotkeys": "Raccourcis du canvas unifié",
"invoke": { "invoke": {
"title": "Invoquer", "title": "Invoquer",
"desc": "Générer une image" "desc": "Générer une image"
@ -78,36 +78,36 @@
"desc": "Annuler la génération d'image" "desc": "Annuler la génération d'image"
}, },
"focusPrompt": { "focusPrompt": {
"title": "Prompt de Focus", "title": "Prompt de focus",
"desc": "Mettre en focus la zone de saisie de la commande" "desc": "Mettre en focus la zone de saisie de la commande"
}, },
"toggleOptions": { "toggleOptions": {
"title": "Basculer Options", "title": "Affichage des options",
"desc": "Ouvrir et fermer le panneau d'options" "desc": "Afficher et masquer le panneau d'options"
}, },
"pinOptions": { "pinOptions": {
"title": "Epingler Options", "title": "Epinglage des options",
"desc": "Epingler le panneau d'options" "desc": "Epingler le panneau d'options"
}, },
"toggleViewer": { "toggleViewer": {
"title": "Basculer Visionneuse", "title": "Affichage de la visionneuse",
"desc": "Ouvrir et fermer la visionneuse d'image" "desc": "Afficher et masquer la visionneuse d'image"
}, },
"toggleGallery": { "toggleGallery": {
"title": "Basculer Galerie", "title": "Affichage de la galerie",
"desc": "Ouvrir et fermer le tiroir de galerie" "desc": "Afficher et masquer la galerie"
}, },
"maximizeWorkSpace": { "maximizeWorkSpace": {
"title": "Maximiser Espace de travail", "title": "Maximiser la zone de travail",
"desc": "Fermer les panneaux et maximiser la zone de travail" "desc": "Fermer les panneaux et maximiser la zone de travail"
}, },
"changeTabs": { "changeTabs": {
"title": "Changer d'onglets", "title": "Changer d'onglet",
"desc": "Passer à un autre espace de travail" "desc": "Passer à un autre espace de travail"
}, },
"consoleToggle": { "consoleToggle": {
"title": "Bascule de la console", "title": "Affichage de la console",
"desc": "Ouvrir et fermer la console" "desc": "Afficher et masquer la console"
}, },
"setPrompt": { "setPrompt": {
"title": "Définir le prompt", "title": "Définir le prompt",
@ -122,7 +122,7 @@
"desc": "Utiliser tous les paramètres de l'image actuelle" "desc": "Utiliser tous les paramètres de l'image actuelle"
}, },
"restoreFaces": { "restoreFaces": {
"title": "Restaurer les faces", "title": "Restaurer les visages",
"desc": "Restaurer l'image actuelle" "desc": "Restaurer l'image actuelle"
}, },
"upscale": { "upscale": {
@ -155,7 +155,7 @@
}, },
"toggleGalleryPin": { "toggleGalleryPin": {
"title": "Activer/désactiver l'épinglage de la galerie", "title": "Activer/désactiver l'épinglage de la galerie",
"desc": "Épingle ou dépingle la galerie à l'interface utilisateur" "desc": "Épingle ou dépingle la galerie à l'interface"
}, },
"increaseGalleryThumbSize": { "increaseGalleryThumbSize": {
"title": "Augmenter la taille des miniatures de la galerie", "title": "Augmenter la taille des miniatures de la galerie",
@ -330,7 +330,7 @@
"delete": "Supprimer", "delete": "Supprimer",
"deleteModel": "Supprimer le modèle", "deleteModel": "Supprimer le modèle",
"deleteConfig": "Supprimer la configuration", "deleteConfig": "Supprimer la configuration",
"deleteMsg1": "Êtes-vous sûr de vouloir supprimer cette entrée de modèle dans InvokeAI?", "deleteMsg1": "Voulez-vous vraiment supprimer cette entrée de modèle dans InvokeAI ?",
"deleteMsg2": "Cela n'effacera pas le fichier de point de contrôle du modèle de votre disque. Vous pouvez les réajouter si vous le souhaitez.", "deleteMsg2": "Cela n'effacera pas le fichier de point de contrôle du modèle de votre disque. Vous pouvez les réajouter si vous le souhaitez.",
"formMessageDiffusersModelLocation": "Emplacement du modèle de diffuseurs", "formMessageDiffusersModelLocation": "Emplacement du modèle de diffuseurs",
"formMessageDiffusersModelLocationDesc": "Veuillez en entrer au moins un.", "formMessageDiffusersModelLocationDesc": "Veuillez en entrer au moins un.",
@ -380,7 +380,6 @@
"img2imgStrength": "Force de l'Image à l'Image", "img2imgStrength": "Force de l'Image à l'Image",
"toggleLoopback": "Activer/Désactiver la Boucle", "toggleLoopback": "Activer/Désactiver la Boucle",
"invoke": "Invoker", "invoke": "Invoker",
"cancel": "Annuler",
"promptPlaceholder": "Tapez le prompt ici. [tokens négatifs], (poids positif)++, (poids négatif)--, swap et blend sont disponibles (voir les docs)", "promptPlaceholder": "Tapez le prompt ici. [tokens négatifs], (poids positif)++, (poids négatif)--, swap et blend sont disponibles (voir les docs)",
"sendTo": "Envoyer à", "sendTo": "Envoyer à",
"sendToImg2Img": "Envoyer à Image à Image", "sendToImg2Img": "Envoyer à Image à Image",
@ -448,11 +447,11 @@
"feature": { "feature": {
"prompt": "Ceci est le champ prompt. Le prompt inclut des objets de génération et des termes stylistiques. Vous pouvez également ajouter un poids (importance du jeton) dans le prompt, mais les commandes CLI et les paramètres ne fonctionneront pas.", "prompt": "Ceci est le champ prompt. Le prompt inclut des objets de génération et des termes stylistiques. Vous pouvez également ajouter un poids (importance du jeton) dans le prompt, mais les commandes CLI et les paramètres ne fonctionneront pas.",
"gallery": "La galerie affiche les générations à partir du dossier de sortie à mesure qu'elles sont créées. Les paramètres sont stockés dans des fichiers et accessibles via le menu contextuel.", "gallery": "La galerie affiche les générations à partir du dossier de sortie à mesure qu'elles sont créées. Les paramètres sont stockés dans des fichiers et accessibles via le menu contextuel.",
"other": "Ces options activent des modes de traitement alternatifs pour Invoke. 'Tuilage seamless' créera des motifs répétitifs dans la sortie. 'Haute résolution' est la génération en deux étapes avec img2img: utilisez ce paramètre lorsque vous souhaitez une image plus grande et plus cohérente sans artefacts. Cela prendra plus de temps que d'habitude txt2img.", "other": "Ces options activent des modes de traitement alternatifs pour Invoke. 'Tuilage seamless' créera des motifs répétitifs dans la sortie. 'Haute résolution' est la génération en deux étapes avec img2img : utilisez ce paramètre lorsque vous souhaitez une image plus grande et plus cohérente sans artefacts. Cela prendra plus de temps que d'habitude txt2img.",
"seed": "La valeur de grain affecte le bruit initial à partir duquel l'image est formée. Vous pouvez utiliser les graines déjà existantes provenant d'images précédentes. 'Seuil de bruit' est utilisé pour atténuer les artefacts à des valeurs CFG élevées (essayez la plage de 0 à 10), et Perlin pour ajouter du bruit Perlin pendant la génération: les deux servent à ajouter de la variété à vos sorties.", "seed": "La valeur de grain affecte le bruit initial à partir duquel l'image est formée. Vous pouvez utiliser les graines déjà existantes provenant d'images précédentes. 'Seuil de bruit' est utilisé pour atténuer les artefacts à des valeurs CFG élevées (essayez la plage de 0 à 10), et Perlin pour ajouter du bruit Perlin pendant la génération : les deux servent à ajouter de la variété à vos sorties.",
"variations": "Essayez une variation avec une valeur comprise entre 0,1 et 1,0 pour changer le résultat pour une graine donnée. Des variations intéressantes de la graine sont entre 0,1 et 0,3.", "variations": "Essayez une variation avec une valeur comprise entre 0,1 et 1,0 pour changer le résultat pour une graine donnée. Des variations intéressantes de la graine sont entre 0,1 et 0,3.",
"upscale": "Utilisez ESRGAN pour agrandir l'image immédiatement après la génération.", "upscale": "Utilisez ESRGAN pour agrandir l'image immédiatement après la génération.",
"faceCorrection": "Correction de visage avec GFPGAN ou Codeformer: l'algorithme détecte les visages dans l'image et corrige tout défaut. La valeur élevée changera plus l'image, ce qui donnera des visages plus attirants. Codeformer avec une fidélité plus élevée préserve l'image originale au prix d'une correction de visage plus forte.", "faceCorrection": "Correction de visage avec GFPGAN ou Codeformer : l'algorithme détecte les visages dans l'image et corrige tout défaut. La valeur élevée changera plus l'image, ce qui donnera des visages plus attirants. Codeformer avec une fidélité plus élevée préserve l'image originale au prix d'une correction de visage plus forte.",
"imageToImage": "Image to Image charge n'importe quelle image en tant qu'initiale, qui est ensuite utilisée pour générer une nouvelle avec le prompt. Plus la valeur est élevée, plus l'image de résultat changera. Des valeurs de 0,0 à 1,0 sont possibles, la plage recommandée est de 0,25 à 0,75", "imageToImage": "Image to Image charge n'importe quelle image en tant qu'initiale, qui est ensuite utilisée pour générer une nouvelle avec le prompt. Plus la valeur est élevée, plus l'image de résultat changera. Des valeurs de 0,0 à 1,0 sont possibles, la plage recommandée est de 0,25 à 0,75",
"boundingBox": "La boîte englobante est la même que les paramètres Largeur et Hauteur pour Texte à Image ou Image à Image. Seulement la zone dans la boîte sera traitée.", "boundingBox": "La boîte englobante est la même que les paramètres Largeur et Hauteur pour Texte à Image ou Image à Image. Seulement la zone dans la boîte sera traitée.",
"seamCorrection": "Contrôle la gestion des coutures visibles qui se produisent entre les images générées sur la toile.", "seamCorrection": "Contrôle la gestion des coutures visibles qui se produisent entre les images générées sur la toile.",
@ -495,11 +494,11 @@
"clearCanvasHistory": "Effacer l'historique du canvas", "clearCanvasHistory": "Effacer l'historique du canvas",
"clearHistory": "Effacer l'historique", "clearHistory": "Effacer l'historique",
"clearCanvasHistoryMessage": "Effacer l'historique du canvas laisse votre canvas actuel intact, mais efface de manière irréversible l'historique annuler et refaire.", "clearCanvasHistoryMessage": "Effacer l'historique du canvas laisse votre canvas actuel intact, mais efface de manière irréversible l'historique annuler et refaire.",
"clearCanvasHistoryConfirm": "Êtes-vous sûr de vouloir effacer l'historique du canvas?", "clearCanvasHistoryConfirm": "Voulez-vous vraiment effacer l'historique du canvas ?",
"emptyTempImageFolder": "Vider le dossier d'images temporaires", "emptyTempImageFolder": "Vider le dossier d'images temporaires",
"emptyFolder": "Vider le dossier", "emptyFolder": "Vider le dossier",
"emptyTempImagesFolderMessage": "Vider le dossier d'images temporaires réinitialise également complètement le canvas unifié. Cela inclut tout l'historique annuler/refaire, les images dans la zone de mise en attente et la couche de base du canvas.", "emptyTempImagesFolderMessage": "Vider le dossier d'images temporaires réinitialise également complètement le canvas unifié. Cela inclut tout l'historique annuler/refaire, les images dans la zone de mise en attente et la couche de base du canvas.",
"emptyTempImagesFolderConfirm": "Êtes-vous sûr de vouloir vider le dossier temporaire?", "emptyTempImagesFolderConfirm": "Voulez-vous vraiment vider le dossier temporaire ?",
"activeLayer": "Calque actif", "activeLayer": "Calque actif",
"canvasScale": "Échelle du canevas", "canvasScale": "Échelle du canevas",
"boundingBox": "Boîte englobante", "boundingBox": "Boîte englobante",

View File

@ -438,7 +438,6 @@
"img2imgStrength": "Forza da Immagine a Immagine", "img2imgStrength": "Forza da Immagine a Immagine",
"toggleLoopback": "Attiva/disattiva elaborazione ricorsiva", "toggleLoopback": "Attiva/disattiva elaborazione ricorsiva",
"invoke": "Invoke", "invoke": "Invoke",
"cancel": "Annulla",
"promptPlaceholder": "Digita qui il prompt usando termini in lingua inglese. [token negativi], (aumenta il peso)++, (diminuisci il peso)--, scambia e fondi sono disponibili (consulta la documentazione)", "promptPlaceholder": "Digita qui il prompt usando termini in lingua inglese. [token negativi], (aumenta il peso)++, (diminuisci il peso)--, scambia e fondi sono disponibili (consulta la documentazione)",
"sendTo": "Invia a", "sendTo": "Invia a",
"sendToImg2Img": "Invia a da Immagine a Immagine", "sendToImg2Img": "Invia a da Immagine a Immagine",
@ -459,7 +458,14 @@
"denoisingStrength": "Forza riduzione rumore", "denoisingStrength": "Forza riduzione rumore",
"copyImage": "Copia immagine", "copyImage": "Copia immagine",
"hiresStrength": "Forza Alta Risoluzione", "hiresStrength": "Forza Alta Risoluzione",
"negativePrompts": "Prompt Negativi" "negativePrompts": "Prompt Negativi",
"imageToImage": "Immagine a Immagine",
"cancel": {
"schedule": "Annulla dopo l'iterazione corrente",
"isScheduled": "Annullamento",
"setType": "Imposta il tipo di annullamento",
"immediate": "Annulla immediatamente"
}
}, },
"settings": { "settings": {
"models": "Modelli", "models": "Modelli",

View File

@ -304,7 +304,6 @@
"scaledHeight": "高さのスケール", "scaledHeight": "高さのスケール",
"boundingBoxHeader": "バウンディングボックス", "boundingBoxHeader": "バウンディングボックス",
"img2imgStrength": "Image To Imageの強度", "img2imgStrength": "Image To Imageの強度",
"cancel": "キャンセル",
"sendTo": "転送", "sendTo": "転送",
"sendToImg2Img": "Image to Imageに転送", "sendToImg2Img": "Image to Imageに転送",
"sendToUnifiedCanvas": "Unified Canvasに転送", "sendToUnifiedCanvas": "Unified Canvasに転送",

View File

@ -364,7 +364,6 @@
"img2imgStrength": "Sterkte Afbeelding naar afbeelding", "img2imgStrength": "Sterkte Afbeelding naar afbeelding",
"toggleLoopback": "Zet recursieve verwerking aan/uit", "toggleLoopback": "Zet recursieve verwerking aan/uit",
"invoke": "Genereer", "invoke": "Genereer",
"cancel": "Annuleer",
"promptPlaceholder": "Voer invoertekst hier in. [negatieve trefwoorden], (verhoogdgewicht)++, (verlaagdgewicht)--, swap (wisselen) en blend (mengen) zijn beschikbaar (zie documentatie)", "promptPlaceholder": "Voer invoertekst hier in. [negatieve trefwoorden], (verhoogdgewicht)++, (verlaagdgewicht)--, swap (wisselen) en blend (mengen) zijn beschikbaar (zie documentatie)",
"sendTo": "Stuur naar", "sendTo": "Stuur naar",
"sendToImg2Img": "Stuur naar Afbeelding naar afbeelding", "sendToImg2Img": "Stuur naar Afbeelding naar afbeelding",

View File

@ -269,7 +269,6 @@
"desc": "Akceptuje aktualnie wybrany obraz tymczasowy" "desc": "Akceptuje aktualnie wybrany obraz tymczasowy"
} }
}, },
"modelManager": {},
"parameters": { "parameters": {
"images": "L. obrazów", "images": "L. obrazów",
"steps": "L. kroków", "steps": "L. kroków",
@ -313,7 +312,6 @@
"img2imgStrength": "Wpływ sugestii na obraz", "img2imgStrength": "Wpływ sugestii na obraz",
"toggleLoopback": "Wł/wył sprzężenie zwrotne", "toggleLoopback": "Wł/wył sprzężenie zwrotne",
"invoke": "Wywołaj", "invoke": "Wywołaj",
"cancel": "Anuluj",
"promptPlaceholder": "W tym miejscu wprowadź swoje sugestie. [negatywne sugestie], (wzmocnienie), (osłabienie)--, po więcej opcji (np. swap lub blend) zajrzyj do dokumentacji", "promptPlaceholder": "W tym miejscu wprowadź swoje sugestie. [negatywne sugestie], (wzmocnienie), (osłabienie)--, po więcej opcji (np. swap lub blend) zajrzyj do dokumentacji",
"sendTo": "Wyślij do", "sendTo": "Wyślij do",
"sendToImg2Img": "Użyj w trybie \"Obraz na obraz\"", "sendToImg2Img": "Użyj w trybie \"Obraz na obraz\"",

View File

@ -362,7 +362,6 @@
"img2imgStrength": "Força de Imagem Para Imagem", "img2imgStrength": "Força de Imagem Para Imagem",
"toggleLoopback": "Ativar Loopback", "toggleLoopback": "Ativar Loopback",
"invoke": "Invoke", "invoke": "Invoke",
"cancel": "Cancelar",
"promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)", "promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)",
"sendTo": "Mandar para", "sendTo": "Mandar para",
"sendToImg2Img": "Mandar para Imagem Para Imagem", "sendToImg2Img": "Mandar para Imagem Para Imagem",
@ -425,7 +424,6 @@
"initialImageNotSet": "Imagem Inicial Não Definida", "initialImageNotSet": "Imagem Inicial Não Definida",
"initialImageNotSetDesc": "Não foi possível carregar imagem incial" "initialImageNotSetDesc": "Não foi possível carregar imagem incial"
}, },
"tooltip": {},
"unifiedCanvas": { "unifiedCanvas": {
"layer": "Camada", "layer": "Camada",
"base": "Base", "base": "Base",

View File

@ -365,7 +365,6 @@
"img2imgStrength": "Сила обработки img2img", "img2imgStrength": "Сила обработки img2img",
"toggleLoopback": "Зациклить обработку", "toggleLoopback": "Зациклить обработку",
"invoke": "Вызвать", "invoke": "Вызвать",
"cancel": "Отменить",
"promptPlaceholder": "Введите запрос здесь (на английском). [исключенные токены], (более значимые)++, (менее значимые)--, swap и blend тоже доступны (смотрите Github)", "promptPlaceholder": "Введите запрос здесь (на английском). [исключенные токены], (более значимые)++, (менее значимые)--, swap и blend тоже доступны (смотрите Github)",
"sendTo": "Отправить", "sendTo": "Отправить",
"sendToImg2Img": "Отправить в img2img", "sendToImg2Img": "Отправить в img2img",

View File

@ -365,7 +365,6 @@
"img2imgStrength": "Сила обробки img2img", "img2imgStrength": "Сила обробки img2img",
"toggleLoopback": "Зациклити обробку", "toggleLoopback": "Зациклити обробку",
"invoke": "Викликати", "invoke": "Викликати",
"cancel": "Скасувати",
"promptPlaceholder": "Введіть запит тут (англійською). [видалені токени], (більш вагомі)++, (менш вагомі)--, swap и blend також доступні (дивіться Github)", "promptPlaceholder": "Введіть запит тут (англійською). [видалені токени], (більш вагомі)++, (менш вагомі)--, swap и blend також доступні (дивіться Github)",
"sendTo": "Надіслати", "sendTo": "Надіслати",
"sendToImg2Img": "Надіслати у img2img", "sendToImg2Img": "Надіслати у img2img",

View File

@ -362,7 +362,6 @@
"img2imgStrength": "图像到图像强度", "img2imgStrength": "图像到图像强度",
"toggleLoopback": "切换环回", "toggleLoopback": "切换环回",
"invoke": "Invoke", "invoke": "Invoke",
"cancel": "取消",
"promptPlaceholder": "在这里输入提示。可以使用[反提示]、(加权)++、(减权)--、交换和混合(见文档)", "promptPlaceholder": "在这里输入提示。可以使用[反提示]、(加权)++、(减权)--、交换和混合(见文档)",
"sendTo": "发送到", "sendTo": "发送到",
"sendToImg2Img": "发送到图像到图像", "sendToImg2Img": "发送到图像到图像",
@ -425,7 +424,6 @@
"initialImageNotSet": "初始图像未设定", "initialImageNotSet": "初始图像未设定",
"initialImageNotSetDesc": "无法加载初始图像" "initialImageNotSetDesc": "无法加载初始图像"
}, },
"tooltip": {},
"unifiedCanvas": { "unifiedCanvas": {
"layer": "图层", "layer": "图层",
"base": "基础层", "base": "基础层",

View File

@ -1 +1,41 @@
export {};
declare module 'redux-socket.io-middleware'; declare module 'redux-socket.io-middleware';
declare global {
/* eslint-disable @typescript-eslint/no-explicit-any */
interface Array<T> {
/**
* Returns the value of the last element in the array where predicate is true, and undefined
* otherwise.
* @param predicate findLast calls predicate once for each element of the array, in descending
* order, until it finds one where predicate returns true. If such an element is found, findLast
* immediately returns that element value. Otherwise, findLast returns undefined.
* @param thisArg If provided, it will be used as the this value for each invocation of
* predicate. If it is not provided, undefined is used instead.
*/
findLast<S extends T>(
predicate: (value: T, index: number, array: T[]) => value is S,
thisArg?: any
): S | undefined;
findLast(
predicate: (value: T, index: number, array: T[]) => unknown,
thisArg?: any
): T | undefined;
/**
* Returns the index of the last element in the array where predicate is true, and -1
* otherwise.
* @param predicate findLastIndex calls predicate once for each element of the array, in descending
* order, until it finds one where predicate returns true. If such an element is found,
* findLastIndex immediately returns that element index. Otherwise, findLastIndex returns -1.
* @param thisArg If provided, it will be used as the this value for each invocation of
* predicate. If it is not provided, undefined is used instead.
*/
findLastIndex(
predicate: (value: T, index: number, array: T[]) => unknown,
thisArg?: any
): number;
}
/* eslint-enable @typescript-eslint/no-explicit-any */
}

View File

@ -15,72 +15,70 @@
"postinstall": "patch-package" "postinstall": "patch-package"
}, },
"dependencies": { "dependencies": {
"@chakra-ui/icons": "^2.0.10", "@chakra-ui/icons": "^2.0.17",
"@chakra-ui/react": "^2.3.1", "@chakra-ui/react": "^2.5.1",
"@emotion/cache": "^11.10.5", "@emotion/cache": "^11.10.5",
"@emotion/react": "^11.10.4", "@emotion/react": "^11.10.6",
"@emotion/styled": "^11.10.4", "@emotion/styled": "^11.10.6",
"@radix-ui/react-context-menu": "^2.0.1", "@radix-ui/react-context-menu": "^2.1.1",
"@radix-ui/react-slider": "^1.1.0", "@radix-ui/react-slider": "^1.1.0",
"@radix-ui/react-tooltip": "^1.0.2", "@radix-ui/react-tooltip": "^1.0.3",
"@reduxjs/toolkit": "^1.8.5", "@reduxjs/toolkit": "^1.9.2",
"@types/uuid": "^8.3.4", "@types/uuid": "^9.0.0",
"@vitejs/plugin-react-swc": "^3.1.0", "@vitejs/plugin-react-swc": "^3.2.0",
"add": "^2.0.6", "add": "^2.0.6",
"dateformat": "^5.0.3", "dateformat": "^5.0.3",
"formik": "^2.2.9", "formik": "^2.2.9",
"framer-motion": "^7.2.1", "framer-motion": "^9.0.4",
"i18next": "^22.4.5", "i18next": "^22.4.10",
"i18next-browser-languagedetector": "^7.0.1", "i18next-browser-languagedetector": "^7.0.1",
"i18next-http-backend": "^2.1.0", "i18next-http-backend": "^2.1.1",
"konva": "^8.3.13", "konva": "^8.4.2",
"lodash": "^4.17.21", "lodash": "^4.17.21",
"re-resizable": "^6.9.9", "re-resizable": "^6.9.9",
"react": "^18.2.0", "react": "^18.2.0",
"react-colorful": "^5.6.1", "react-colorful": "^5.6.1",
"react-dom": "^18.2.0", "react-dom": "^18.2.0",
"react-dropzone": "^14.2.2", "react-dropzone": "^14.2.3",
"react-hotkeys-hook": "4.0.2", "react-hotkeys-hook": "4.3.5",
"react-i18next": "^12.1.1", "react-i18next": "^12.1.5",
"react-icons": "^4.4.0", "react-icons": "^4.7.1",
"react-konva": "^18.2.3", "react-konva": "^18.2.4",
"react-konva-utils": "^0.3.0", "react-konva-utils": "^0.3.2",
"react-redux": "^8.0.2", "react-redux": "^8.0.5",
"react-transition-group": "^4.4.5", "react-transition-group": "^4.4.5",
"react-zoom-pan-pinch": "^2.1.3", "react-zoom-pan-pinch": "^2.6.1",
"redux-deep-persist": "^1.0.6", "redux-deep-persist": "^1.0.7",
"redux-persist": "^6.0.0", "redux-persist": "^6.0.0",
"socket.io": "^4.5.2", "socket.io": "^4.6.0",
"socket.io-client": "^4.5.2", "socket.io-client": "^4.6.0",
"use-image": "^1.1.0", "use-image": "^1.1.0",
"uuid": "^9.0.0", "uuid": "^9.0.0",
"yarn": "^1.22.19" "yarn": "^1.22.19"
}, },
"devDependencies": { "devDependencies": {
"@types/dateformat": "^5.0.0", "@types/dateformat": "^5.0.0",
"@types/react": "^18.0.17", "@types/react": "^18.0.28",
"@types/react-dom": "^18.0.6", "@types/react-dom": "^18.0.11",
"@types/react-transition-group": "^4.4.5", "@types/react-transition-group": "^4.4.5",
"@typescript-eslint/eslint-plugin": "^5.36.2", "@typescript-eslint/eslint-plugin": "^5.52.0",
"@typescript-eslint/parser": "^5.36.2", "@typescript-eslint/parser": "^5.52.0",
"babel-plugin-transform-imports": "^2.0.0", "babel-plugin-transform-imports": "^2.0.0",
"eslint": "^8.23.0", "eslint": "^8.34.0",
"eslint-config-prettier": "^8.6.0", "eslint-config-prettier": "^8.6.0",
"eslint-plugin-prettier": "^4.2.1", "eslint-plugin-prettier": "^4.2.1",
"eslint-plugin-react": "^7.32.2", "eslint-plugin-react": "^7.32.2",
"eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-hooks": "^4.6.0",
"husky": "^8.0.3", "husky": "^8.0.3",
"lint-staged": "^13.1.0", "lint-staged": "^13.1.2",
"madge": "^5.0.1", "madge": "^6.0.0",
"patch-package": "^6.5.0", "patch-package": "^6.5.1",
"postinstall-postinstall": "^2.1.0", "postinstall-postinstall": "^2.1.0",
"prettier": "^2.8.3", "prettier": "^2.8.4",
"rollup-plugin-visualizer": "^5.9.0", "rollup-plugin-visualizer": "^5.9.0",
"sass": "^1.55.0", "sass": "^1.58.3",
"terser": "^5.16.1", "terser": "^5.16.4",
"tsc-watch": "^5.0.3", "vite": "^4.1.2",
"typescript": "^5.0.0-beta",
"vite": "^4.1.1",
"vite-plugin-eslint": "^1.8.1", "vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^4.0.5" "vite-tsconfig-paths": "^4.0.5"
}, },
@ -95,9 +93,9 @@
} }
}, },
"lint-staged": { "lint-staged": {
"**/*.{js,jsx,ts,tsx,cjs}": [ "**/*.{js,jsx,ts,tsx,cjs,json,html,scss}": [
"npx prettier --write", "npm run prettier",
"npx eslint --fix" "npm run lint"
] ]
} }
} }

View File

@ -441,6 +441,9 @@
"infillScalingHeader": "Infill and Scaling", "infillScalingHeader": "Infill and Scaling",
"img2imgStrength": "Image To Image Strength", "img2imgStrength": "Image To Image Strength",
"toggleLoopback": "Toggle Loopback", "toggleLoopback": "Toggle Loopback",
"symmetry": "Symmetry",
"hSymmetryStep": "H Symmetry Step",
"vSymmetryStep": "V Symmetry Step",
"invoke": "Invoke", "invoke": "Invoke",
"cancel": { "cancel": {
"immediate": "Cancel immediately", "immediate": "Cancel immediately",

View File

@ -3,10 +3,10 @@ import {
MenuButton, MenuButton,
MenuItem, MenuItem,
MenuList, MenuList,
type MenuProps, MenuProps,
type MenuButtonProps, MenuButtonProps,
type MenuListProps, MenuListProps,
type MenuItemProps, MenuItemProps,
} from '@chakra-ui/react'; } from '@chakra-ui/react';
import { MouseEventHandler, ReactNode } from 'react'; import { MouseEventHandler, ReactNode } from 'react';
import { MdArrowDropDown, MdArrowDropUp } from 'react-icons/md'; import { MdArrowDropDown, MdArrowDropUp } from 'react-icons/md';

View File

@ -65,6 +65,8 @@ export type BackendGenerationParameters = {
with_variations?: Array<Array<number>>; with_variations?: Array<Array<number>>;
variation_amount?: number; variation_amount?: number;
enable_image_debugging?: boolean; enable_image_debugging?: boolean;
h_symmetry_time_pct?: number;
v_symmetry_time_pct?: number;
}; };
export type BackendEsrGanParameters = { export type BackendEsrGanParameters = {
@ -141,6 +143,9 @@ export const frontendToBackendParameters = (
tileSize, tileSize,
variationAmount, variationAmount,
width, width,
shouldUseSymmetry,
horizontalSymmetryTimePercentage,
verticalSymmetryTimePercentage,
} = generationState; } = generationState;
const { const {
@ -170,9 +175,6 @@ export const frontendToBackendParameters = (
let esrganParameters: false | BackendEsrGanParameters = false; let esrganParameters: false | BackendEsrGanParameters = false;
let facetoolParameters: false | BackendFacetoolParameters = false; let facetoolParameters: false | BackendFacetoolParameters = false;
// Multiplying it by 10000 so the Slider can have values between 0 and 1 which makes more sense
generationParameters.threshold = threshold * 1000;
if (negativePrompt !== '') { if (negativePrompt !== '') {
generationParameters.prompt = `${prompt} [${negativePrompt}]`; generationParameters.prompt = `${prompt} [${negativePrompt}]`;
} }
@ -181,6 +183,23 @@ export const frontendToBackendParameters = (
? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX) ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX)
: seed; : seed;
// Symmetry Settings
if (shouldUseSymmetry) {
if (horizontalSymmetryTimePercentage > 0) {
generationParameters.h_symmetry_time_pct = Math.max(
0,
Math.min(1, horizontalSymmetryTimePercentage / steps)
);
}
if (horizontalSymmetryTimePercentage > 0) {
generationParameters.v_symmetry_time_pct = Math.max(
0,
Math.min(1, verticalSymmetryTimePercentage / steps)
);
}
}
// txt2img exclusive parameters // txt2img exclusive parameters
if (generationMode === 'txt2img') { if (generationMode === 'txt2img') {
generationParameters.hires_fix = hiresFix; generationParameters.hires_fix = hiresFix;

View File

@ -1,12 +1,12 @@
import { FACETOOL_TYPES } from 'app/constants'; import { FACETOOL_TYPES } from 'app/constants';
import { type RootState } from 'app/store'; import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks'; import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISelect from 'common/components/IAISelect'; import IAISelect from 'common/components/IAISelect';
import { import {
type FacetoolType, FacetoolType,
setFacetoolType, setFacetoolType,
} from 'features/parameters/store/postprocessingSlice'; } from 'features/parameters/store/postprocessingSlice';
import { type ChangeEvent } from 'react'; import { ChangeEvent } from 'react';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
export default function FaceRestoreType() { export default function FaceRestoreType() {

View File

@ -0,0 +1,55 @@
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISlider from 'common/components/IAISlider';
import {
setHorizontalSymmetryTimePercentage,
setVerticalSymmetryTimePercentage,
} from 'features/parameters/store/generationSlice';
import { useTranslation } from 'react-i18next';
export default function SymmetrySettings() {
const horizontalSymmetryTimePercentage = useAppSelector(
(state: RootState) => state.generation.horizontalSymmetryTimePercentage
);
const verticalSymmetryTimePercentage = useAppSelector(
(state: RootState) => state.generation.verticalSymmetryTimePercentage
);
const steps = useAppSelector((state: RootState) => state.generation.steps);
const dispatch = useAppDispatch();
const { t } = useTranslation();
return (
<>
<IAISlider
label={t('parameters.hSymmetryStep')}
value={horizontalSymmetryTimePercentage}
onChange={(v) => dispatch(setHorizontalSymmetryTimePercentage(v))}
min={0}
max={steps}
step={1}
withInput
withSliderMarks
withReset
handleReset={() => dispatch(setHorizontalSymmetryTimePercentage(0))}
sliderMarkRightOffset={-6}
></IAISlider>
<IAISlider
label={t('parameters.vSymmetryStep')}
value={verticalSymmetryTimePercentage}
onChange={(v) => dispatch(setVerticalSymmetryTimePercentage(v))}
min={0}
max={steps}
step={1}
withInput
withSliderMarks
withReset
handleReset={() => dispatch(setVerticalSymmetryTimePercentage(0))}
sliderMarkRightOffset={-6}
></IAISlider>
</>
);
}

View File

@ -0,0 +1,19 @@
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISwitch from 'common/components/IAISwitch';
import { setShouldUseSymmetry } from 'features/parameters/store/generationSlice';
export default function SymmetryToggle() {
const shouldUseSymmetry = useAppSelector(
(state: RootState) => state.generation.shouldUseSymmetry
);
const dispatch = useAppDispatch();
return (
<IAISwitch
isChecked={shouldUseSymmetry}
onChange={(e) => dispatch(setShouldUseSymmetry(e.target.checked))}
/>
);
}

View File

@ -15,15 +15,15 @@ export default function Threshold() {
<IAISlider <IAISlider
label={t('parameters.noiseThreshold')} label={t('parameters.noiseThreshold')}
min={0} min={0}
max={1} max={20}
step={0.005} step={0.1}
onChange={(v) => dispatch(setThreshold(v))} onChange={(v) => dispatch(setThreshold(v))}
handleReset={() => dispatch(setThreshold(0))} handleReset={() => dispatch(setThreshold(0))}
value={threshold} value={threshold}
withInput withInput
withReset withReset
withSliderMarks withSliderMarks
inputWidth="6rem" sliderMarkRightOffset={-4}
/> />
); );
} }

View File

@ -4,7 +4,7 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISelect from 'common/components/IAISelect'; import IAISelect from 'common/components/IAISelect';
import { import {
setUpscalingLevel, setUpscalingLevel,
type UpscalingLevel, UpscalingLevel,
} from 'features/parameters/store/postprocessingSlice'; } from 'features/parameters/store/postprocessingSlice';
import type { ChangeEvent } from 'react'; import type { ChangeEvent } from 'react';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';

View File

@ -1,5 +1,5 @@
import { Flex } from '@chakra-ui/react'; import { Flex } from '@chakra-ui/react';
import { type RootState } from 'app/store'; import { RootState } from 'app/store';
import { useAppSelector } from 'app/storeHooks'; import { useAppSelector } from 'app/storeHooks';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
import ParametersAccordion from '../ParametersAccordion'; import ParametersAccordion from '../ParametersAccordion';

View File

@ -40,11 +40,15 @@ const cancelButtonSelector = createSelector(
} }
); );
interface CancelButtonProps {
btnGroupWidth?: string | number;
}
export default function CancelButton( export default function CancelButton(
props: Omit<IAIIconButtonProps, 'aria-label'> props: CancelButtonProps & Omit<IAIIconButtonProps, 'aria-label'>
) { ) {
const dispatch = useAppDispatch(); const dispatch = useAppDispatch();
const { ...rest } = props; const { btnGroupWidth = 'auto', ...rest } = props;
const { const {
isProcessing, isProcessing,
isConnected, isConnected,
@ -91,7 +95,12 @@ export default function CancelButton(
]; ];
return ( return (
<ButtonGroup isAttached variant="link"> <ButtonGroup
isAttached
variant="link"
minHeight="2.5rem"
width={btnGroupWidth}
>
{cancelType === 'immediate' ? ( {cancelType === 'immediate' ? (
<IAIIconButton <IAIIconButton
icon={<MdCancel />} icon={<MdCancel />}

View File

@ -32,6 +32,9 @@ export interface GenerationState {
tileSize: number; tileSize: number;
variationAmount: number; variationAmount: number;
width: number; width: number;
shouldUseSymmetry: boolean;
horizontalSymmetryTimePercentage: number;
verticalSymmetryTimePercentage: number;
} }
const initialGenerationState: GenerationState = { const initialGenerationState: GenerationState = {
@ -60,6 +63,9 @@ const initialGenerationState: GenerationState = {
tileSize: 32, tileSize: 32,
variationAmount: 0.1, variationAmount: 0.1,
width: 512, width: 512,
shouldUseSymmetry: false,
horizontalSymmetryTimePercentage: 0,
verticalSymmetryTimePercentage: 0,
}; };
const initialState: GenerationState = initialGenerationState; const initialState: GenerationState = initialGenerationState;
@ -325,6 +331,21 @@ export const generationSlice = createSlice({
setInfillMethod: (state, action: PayloadAction<string>) => { setInfillMethod: (state, action: PayloadAction<string>) => {
state.infillMethod = action.payload; state.infillMethod = action.payload;
}, },
setShouldUseSymmetry: (state, action: PayloadAction<boolean>) => {
state.shouldUseSymmetry = action.payload;
},
setHorizontalSymmetryTimePercentage: (
state,
action: PayloadAction<number>
) => {
state.horizontalSymmetryTimePercentage = action.payload;
},
setVerticalSymmetryTimePercentage: (
state,
action: PayloadAction<number>
) => {
state.verticalSymmetryTimePercentage = action.payload;
},
}, },
}); });
@ -362,6 +383,9 @@ export const {
setTileSize, setTileSize,
setVariationAmount, setVariationAmount,
setWidth, setWidth,
setShouldUseSymmetry,
setHorizontalSymmetryTimePercentage,
setVerticalSymmetryTimePercentage,
} = generationSlice.actions; } = generationSlice.actions;
export default generationSlice.reducer; export default generationSlice.reducer;

View File

@ -12,7 +12,7 @@ import {
useDisclosure, useDisclosure,
} from '@chakra-ui/react'; } from '@chakra-ui/react';
import { mergeDiffusersModels } from 'app/socketio/actions'; import { mergeDiffusersModels } from 'app/socketio/actions';
import { type RootState } from 'app/store'; import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks'; import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAIButton from 'common/components/IAIButton'; import IAIButton from 'common/components/IAIButton';
import IAIInput from 'common/components/IAIInput'; import IAIInput from 'common/components/IAIInput';

View File

@ -14,7 +14,7 @@ import {
} from '@chakra-ui/react'; } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit'; import { createSelector } from '@reduxjs/toolkit';
import { IN_PROGRESS_IMAGE_TYPES } from 'app/constants'; import { IN_PROGRESS_IMAGE_TYPES } from 'app/constants';
import { type RootState } from 'app/store'; import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks'; import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAINumberInput from 'common/components/IAINumberInput'; import IAINumberInput from 'common/components/IAINumberInput';
import IAISelect from 'common/components/IAISelect'; import IAISelect from 'common/components/IAISelect';
@ -27,14 +27,14 @@ import {
setShouldConfirmOnDelete, setShouldConfirmOnDelete,
setShouldDisplayGuides, setShouldDisplayGuides,
setShouldDisplayInProgressType, setShouldDisplayInProgressType,
type SystemState, SystemState,
} from 'features/system/store/systemSlice'; } from 'features/system/store/systemSlice';
import { uiSelector } from 'features/ui/store/uiSelectors'; import { uiSelector } from 'features/ui/store/uiSelectors';
import { import {
setShouldUseCanvasBetaLayout, setShouldUseCanvasBetaLayout,
setShouldUseSliders, setShouldUseSliders,
} from 'features/ui/store/uiSlice'; } from 'features/ui/store/uiSlice';
import { type UIState } from 'features/ui/store/uiTypes'; import { UIState } from 'features/ui/store/uiTypes';
import { isEqual, map } from 'lodash'; import { isEqual, map } from 'lodash';
import { persistor } from 'persistor'; import { persistor } from 'persistor';
import { ChangeEvent, cloneElement, ReactElement } from 'react'; import { ChangeEvent, cloneElement, ReactElement } from 'react';

View File

@ -69,7 +69,7 @@ const initialSystemState: SystemState = {
isESRGANAvailable: true, isESRGANAvailable: true,
socketId: '', socketId: '',
shouldConfirmOnDelete: true, shouldConfirmOnDelete: true,
openAccordions: [], openAccordions: [0],
currentStep: 0, currentStep: 0,
totalSteps: 0, totalSteps: 0,
currentIteration: 0, currentIteration: 0,

View File

@ -2,20 +2,13 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton'; import IAIIconButton from 'common/components/IAIIconButton';
import { setDoesCanvasNeedScaling } from 'features/canvas/store/canvasSlice'; import { setDoesCanvasNeedScaling } from 'features/canvas/store/canvasSlice';
import { setShouldShowGallery } from 'features/gallery/store/gallerySlice'; import { setShouldShowGallery } from 'features/gallery/store/gallerySlice';
import { setShouldShowParametersPanel } from 'features/ui/store/uiSlice';
import { useHotkeys } from 'react-hotkeys-hook';
import { MdPhotoLibrary } from 'react-icons/md'; import { MdPhotoLibrary } from 'react-icons/md';
import { floatingSelector } from './FloatingParametersPanelButtons'; import { floatingSelector } from './FloatingParametersPanelButtons';
const FloatingGalleryButton = () => { const FloatingGalleryButton = () => {
const dispatch = useAppDispatch(); const dispatch = useAppDispatch();
const { const { shouldShowGalleryButton, shouldPinGallery } =
shouldShowGallery, useAppSelector(floatingSelector);
shouldShowGalleryButton,
shouldPinGallery,
shouldShowParametersPanel,
shouldPinParametersPanel,
} = useAppSelector(floatingSelector);
const handleShowGallery = () => { const handleShowGallery = () => {
dispatch(setShouldShowGallery(true)); dispatch(setShouldShowGallery(true));
@ -24,22 +17,6 @@ const FloatingGalleryButton = () => {
} }
}; };
useHotkeys(
'f',
() => {
if (shouldShowGallery || shouldShowParametersPanel) {
dispatch(setShouldShowParametersPanel(false));
dispatch(setShouldShowGallery(false));
} else {
dispatch(setShouldShowParametersPanel(true));
dispatch(setShouldShowGallery(true));
}
if (shouldPinGallery || shouldPinParametersPanel)
setTimeout(() => dispatch(setDoesCanvasNeedScaling(true)), 400);
},
[shouldShowGallery, shouldShowParametersPanel]
);
return shouldShowGalleryButton ? ( return shouldShowGalleryButton ? (
<IAIIconButton <IAIIconButton
tooltip="Show Gallery (G)" tooltip="Show Gallery (G)"

View File

@ -3,10 +3,7 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton'; import IAIIconButton from 'common/components/IAIIconButton';
import { setDoesCanvasNeedScaling } from 'features/canvas/store/canvasSlice'; import { setDoesCanvasNeedScaling } from 'features/canvas/store/canvasSlice';
import { gallerySelector } from 'features/gallery/store/gallerySelectors'; import { gallerySelector } from 'features/gallery/store/gallerySelectors';
import { import { GalleryState } from 'features/gallery/store/gallerySlice';
GalleryState,
setShouldShowGallery,
} from 'features/gallery/store/gallerySlice';
import CancelButton from 'features/parameters/components/ProcessButtons/CancelButton'; import CancelButton from 'features/parameters/components/ProcessButtons/CancelButton';
import InvokeButton from 'features/parameters/components/ProcessButtons/InvokeButton'; import InvokeButton from 'features/parameters/components/ProcessButtons/InvokeButton';
import { import {
@ -16,7 +13,6 @@ import {
import { setShouldShowParametersPanel } from 'features/ui/store/uiSlice'; import { setShouldShowParametersPanel } from 'features/ui/store/uiSlice';
import { isEqual } from 'lodash'; import { isEqual } from 'lodash';
import { useHotkeys } from 'react-hotkeys-hook';
import { FaSlidersH } from 'react-icons/fa'; import { FaSlidersH } from 'react-icons/fa';
export const floatingSelector = createSelector( export const floatingSelector = createSelector(
@ -67,12 +63,9 @@ export const floatingSelector = createSelector(
const FloatingParametersPanelButtons = () => { const FloatingParametersPanelButtons = () => {
const dispatch = useAppDispatch(); const dispatch = useAppDispatch();
const { const {
shouldShowParametersPanel,
shouldShowParametersPanelButton, shouldShowParametersPanelButton,
shouldShowProcessButtons, shouldShowProcessButtons,
shouldPinParametersPanel, shouldPinParametersPanel,
shouldShowGallery,
shouldPinGallery,
} = useAppSelector(floatingSelector); } = useAppSelector(floatingSelector);
const handleShowOptionsPanel = () => { const handleShowOptionsPanel = () => {
@ -82,22 +75,6 @@ const FloatingParametersPanelButtons = () => {
} }
}; };
useHotkeys(
'f',
() => {
if (shouldShowGallery || shouldShowParametersPanel) {
dispatch(setShouldShowParametersPanel(false));
dispatch(setShouldShowGallery(false));
} else {
dispatch(setShouldShowParametersPanel(true));
dispatch(setShouldShowGallery(true));
}
if (shouldPinGallery || shouldPinParametersPanel)
setTimeout(() => dispatch(setDoesCanvasNeedScaling(true)), 400);
},
[shouldShowGallery, shouldShowParametersPanel]
);
return shouldShowParametersPanelButton ? ( return shouldShowParametersPanelButton ? (
<div className="show-hide-button-options"> <div className="show-hide-button-options">
<IAIIconButton <IAIIconButton

View File

@ -3,6 +3,8 @@ import { Feature } from 'app/features';
import FaceRestoreSettings from 'features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings'; import FaceRestoreSettings from 'features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings';
import FaceRestoreToggle from 'features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreToggle'; import FaceRestoreToggle from 'features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreToggle';
import ImageToImageOutputSettings from 'features/parameters/components/AdvancedParameters/Output/ImageToImageOutputSettings'; import ImageToImageOutputSettings from 'features/parameters/components/AdvancedParameters/Output/ImageToImageOutputSettings';
import SymmetrySettings from 'features/parameters/components/AdvancedParameters/Output/SymmetrySettings';
import SymmetryToggle from 'features/parameters/components/AdvancedParameters/Output/SymmetryToggle';
import SeedSettings from 'features/parameters/components/AdvancedParameters/Seed/SeedSettings'; import SeedSettings from 'features/parameters/components/AdvancedParameters/Seed/SeedSettings';
import UpscaleSettings from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings'; import UpscaleSettings from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings';
import UpscaleToggle from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle'; import UpscaleToggle from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle';
@ -44,6 +46,11 @@ export default function ImageToImagePanel() {
content: <UpscaleSettings />, content: <UpscaleSettings />,
additionalHeaderComponents: <UpscaleToggle />, additionalHeaderComponents: <UpscaleToggle />,
}, },
symmetry: {
header: `${t('parameters.symmetry')}`,
content: <SymmetrySettings />,
additionalHeaderComponents: <SymmetryToggle />,
},
other: { other: {
header: `${t('parameters.otherOptions')}`, header: `${t('parameters.otherOptions')}`,
feature: Feature.OTHER, feature: Feature.OTHER,

View File

@ -11,14 +11,20 @@ import PostprocessingIcon from 'common/icons/PostprocessingIcon';
import TextToImageIcon from 'common/icons/TextToImageIcon'; import TextToImageIcon from 'common/icons/TextToImageIcon';
import TrainingIcon from 'common/icons/TrainingIcon'; import TrainingIcon from 'common/icons/TrainingIcon';
import UnifiedCanvasIcon from 'common/icons/UnifiedCanvasIcon'; import UnifiedCanvasIcon from 'common/icons/UnifiedCanvasIcon';
import { setDoesCanvasNeedScaling } from 'features/canvas/store/canvasSlice';
import { setShouldShowGallery } from 'features/gallery/store/gallerySlice';
import Lightbox from 'features/lightbox/components/Lightbox'; import Lightbox from 'features/lightbox/components/Lightbox';
import { setIsLightboxOpen } from 'features/lightbox/store/lightboxSlice'; import { setIsLightboxOpen } from 'features/lightbox/store/lightboxSlice';
import { InvokeTabName } from 'features/ui/store/tabMap'; import { InvokeTabName } from 'features/ui/store/tabMap';
import { setActiveTab } from 'features/ui/store/uiSlice'; import {
setActiveTab,
setShouldShowParametersPanel,
} from 'features/ui/store/uiSlice';
import i18n from 'i18n'; import i18n from 'i18n';
import { ReactElement } from 'react'; import { ReactElement } from 'react';
import { useHotkeys } from 'react-hotkeys-hook'; import { useHotkeys } from 'react-hotkeys-hook';
import { activeTabIndexSelector } from '../store/uiSelectors'; import { activeTabIndexSelector } from '../store/uiSelectors';
import { floatingSelector } from './FloatingParametersPanelButtons';
import ImageToImageWorkarea from './ImageToImage'; import ImageToImageWorkarea from './ImageToImage';
import TextToImageWorkarea from './TextToImage'; import TextToImageWorkarea from './TextToImage';
import UnifiedCanvasWorkarea from './UnifiedCanvas/UnifiedCanvasWorkarea'; import UnifiedCanvasWorkarea from './UnifiedCanvas/UnifiedCanvasWorkarea';
@ -73,10 +79,18 @@ function updateTabTranslations() {
export default function InvokeTabs() { export default function InvokeTabs() {
const activeTab = useAppSelector(activeTabIndexSelector); const activeTab = useAppSelector(activeTabIndexSelector);
const isLightBoxOpen = useAppSelector( const isLightBoxOpen = useAppSelector(
(state: RootState) => state.lightbox.isLightboxOpen (state: RootState) => state.lightbox.isLightboxOpen
); );
const {
shouldShowGallery,
shouldShowParametersPanel,
shouldPinGallery,
shouldPinParametersPanel,
} = useAppSelector(floatingSelector);
useUpdateTranslations(updateTabTranslations); useUpdateTranslations(updateTabTranslations);
const dispatch = useAppDispatch(); const dispatch = useAppDispatch();
@ -114,6 +128,22 @@ export default function InvokeTabs() {
[isLightBoxOpen] [isLightBoxOpen]
); );
useHotkeys(
'f',
() => {
if (shouldShowGallery || shouldShowParametersPanel) {
dispatch(setShouldShowParametersPanel(false));
dispatch(setShouldShowGallery(false));
} else {
dispatch(setShouldShowParametersPanel(true));
dispatch(setShouldShowGallery(true));
}
if (shouldPinGallery || shouldPinParametersPanel)
setTimeout(() => dispatch(setDoesCanvasNeedScaling(true)), 400);
},
[shouldShowGallery, shouldShowParametersPanel]
);
const renderTabs = () => { const renderTabs = () => {
const tabsToRender: ReactElement[] = []; const tabsToRender: ReactElement[] = [];
Object.keys(tabDict).forEach((key) => { Object.keys(tabDict).forEach((key) => {

View File

@ -3,6 +3,8 @@ import { Feature } from 'app/features';
import FaceRestoreSettings from 'features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings'; import FaceRestoreSettings from 'features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings';
import FaceRestoreToggle from 'features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreToggle'; import FaceRestoreToggle from 'features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreToggle';
import OutputSettings from 'features/parameters/components/AdvancedParameters/Output/OutputSettings'; import OutputSettings from 'features/parameters/components/AdvancedParameters/Output/OutputSettings';
import SymmetrySettings from 'features/parameters/components/AdvancedParameters/Output/SymmetrySettings';
import SymmetryToggle from 'features/parameters/components/AdvancedParameters/Output/SymmetryToggle';
import SeedSettings from 'features/parameters/components/AdvancedParameters/Seed/SeedSettings'; import SeedSettings from 'features/parameters/components/AdvancedParameters/Seed/SeedSettings';
import UpscaleSettings from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings'; import UpscaleSettings from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings';
import UpscaleToggle from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle'; import UpscaleToggle from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle';
@ -43,6 +45,11 @@ export default function TextToImagePanel() {
content: <UpscaleSettings />, content: <UpscaleSettings />,
additionalHeaderComponents: <UpscaleToggle />, additionalHeaderComponents: <UpscaleToggle />,
}, },
symmetry: {
header: `${t('parameters.symmetry')}`,
content: <SymmetrySettings />,
additionalHeaderComponents: <SymmetryToggle />,
},
other: { other: {
header: `${t('parameters.otherOptions')}`, header: `${t('parameters.otherOptions')}`,
feature: Feature.OTHER, feature: Feature.OTHER,

View File

@ -38,7 +38,7 @@ export default function UnifiedCanvasProcessingButtons() {
<InvokeButton iconButton /> <InvokeButton iconButton />
</Flex> </Flex>
<Flex> <Flex>
<CancelButton width="100%" height="40px" /> <CancelButton width="100%" height="40px" btnGroupWidth="100%" />
</Flex> </Flex>
</Flex> </Flex>
); );

View File

@ -5,6 +5,8 @@ import BoundingBoxSettings from 'features/parameters/components/AdvancedParamete
import InfillAndScalingSettings from 'features/parameters/components/AdvancedParameters/Canvas/InfillAndScalingSettings'; import InfillAndScalingSettings from 'features/parameters/components/AdvancedParameters/Canvas/InfillAndScalingSettings';
import SeamCorrectionSettings from 'features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamCorrectionSettings'; import SeamCorrectionSettings from 'features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamCorrectionSettings';
import ImageToImageStrength from 'features/parameters/components/AdvancedParameters/ImageToImage/ImageToImageStrength'; import ImageToImageStrength from 'features/parameters/components/AdvancedParameters/ImageToImage/ImageToImageStrength';
import SymmetrySettings from 'features/parameters/components/AdvancedParameters/Output/SymmetrySettings';
import SymmetryToggle from 'features/parameters/components/AdvancedParameters/Output/SymmetryToggle';
import SeedSettings from 'features/parameters/components/AdvancedParameters/Seed/SeedSettings'; import SeedSettings from 'features/parameters/components/AdvancedParameters/Seed/SeedSettings';
import GenerateVariationsToggle from 'features/parameters/components/AdvancedParameters/Variations/GenerateVariations'; import GenerateVariationsToggle from 'features/parameters/components/AdvancedParameters/Variations/GenerateVariations';
import VariationsSettings from 'features/parameters/components/AdvancedParameters/Variations/VariationsSettings'; import VariationsSettings from 'features/parameters/components/AdvancedParameters/Variations/VariationsSettings';
@ -46,6 +48,11 @@ export default function UnifiedCanvasPanel() {
content: <VariationsSettings />, content: <VariationsSettings />,
additionalHeaderComponents: <GenerateVariationsToggle />, additionalHeaderComponents: <GenerateVariationsToggle />,
}, },
symmetry: {
header: `${t('parameters.symmetry')}`,
content: <SymmetrySettings />,
additionalHeaderComponents: <SymmetryToggle />,
},
}; };
const unifiedCanvasImg2ImgAccordion = { const unifiedCanvasImg2ImgAccordion = {

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

View File

@ -320,6 +320,8 @@ class Generate:
variation_amount=0.0, variation_amount=0.0,
threshold=0.0, threshold=0.0,
perlin=0.0, perlin=0.0,
h_symmetry_time_pct = None,
v_symmetry_time_pct = None,
karras_max=None, karras_max=None,
outdir=None, outdir=None,
# these are specific to img2img and inpaint # these are specific to img2img and inpaint
@ -390,6 +392,8 @@ class Generate:
variation_amount // optional 0-1 value to slerp from -S noise to random noise (allows variations on an image) variation_amount // optional 0-1 value to slerp from -S noise to random noise (allows variations on an image)
threshold // optional value >=0 to add thresholding to latent values for k-diffusion samplers (0 disables) threshold // optional value >=0 to add thresholding to latent values for k-diffusion samplers (0 disables)
perlin // optional 0-1 value to add a percentage of perlin noise to the initial noise perlin // optional 0-1 value to add a percentage of perlin noise to the initial noise
h_symmetry_time_pct // optional 0-1 value that indicates the time at which horizontal symmetry is applied
v_symmetry_time_pct // optional 0-1 value that indicates the time at which vertical symmetry is applied
embiggen // scale factor relative to the size of the --init_img (-I), followed by ESRGAN upscaling strength (0-1.0), followed by minimum amount of overlap between tiles as a decimal ratio (0 - 1.0) or number of pixels embiggen // scale factor relative to the size of the --init_img (-I), followed by ESRGAN upscaling strength (0-1.0), followed by minimum amount of overlap between tiles as a decimal ratio (0 - 1.0) or number of pixels
embiggen_tiles // list of tiles by number in order to process and replace onto the image e.g. `0 2 4` embiggen_tiles // list of tiles by number in order to process and replace onto the image e.g. `0 2 4`
embiggen_strength // strength for embiggen. 0.0 preserves image exactly, 1.0 replaces it completely embiggen_strength // strength for embiggen. 0.0 preserves image exactly, 1.0 replaces it completely
@ -561,6 +565,8 @@ class Generate:
strength=strength, strength=strength,
threshold=threshold, threshold=threshold,
perlin=perlin, perlin=perlin,
h_symmetry_time_pct=h_symmetry_time_pct,
v_symmetry_time_pct=v_symmetry_time_pct,
embiggen=embiggen, embiggen=embiggen,
embiggen_tiles=embiggen_tiles, embiggen_tiles=embiggen_tiles,
embiggen_strength=embiggen_strength, embiggen_strength=embiggen_strength,
@ -958,6 +964,7 @@ class Generate:
seed_everything(random.randrange(0, np.iinfo(np.uint32).max)) seed_everything(random.randrange(0, np.iinfo(np.uint32).max))
if self.embedding_path is not None: if self.embedding_path is not None:
print(f'>> Loading embeddings from {self.embedding_path}')
for root, _, files in os.walk(self.embedding_path): for root, _, files in os.walk(self.embedding_path):
for name in files: for name in files:
ti_path = os.path.join(root, name) ti_path = os.path.join(root, name)
@ -965,7 +972,7 @@ class Generate:
ti_path, defer_injecting_tokens=True ti_path, defer_injecting_tokens=True
) )
print( print(
f'>> Textual inversions available: {", ".join(self.model.textual_inversion_manager.get_all_trigger_strings())}' f'>> Textual inversion triggers: {", ".join(self.model.textual_inversion_manager.get_all_trigger_strings())}'
) )
self.model_name = model_name self.model_name = model_name

View File

@ -17,16 +17,17 @@ if sys.platform == "darwin":
import pyparsing # type: ignore import pyparsing # type: ignore
import ldm.invoke import ldm.invoke
from ldm.generate import Generate from ..generate import Generate
from ldm.invoke.args import (Args, dream_cmd_from_png, metadata_dumps, from .args import (Args, dream_cmd_from_png, metadata_dumps,
metadata_from_png) metadata_from_png)
from ldm.invoke.globals import Globals from .generator.diffusers_pipeline import PipelineIntermediateState
from ldm.invoke.image_util import make_grid from .globals import Globals
from ldm.invoke.log import write_log from .image_util import make_grid
from ldm.invoke.model_manager import ModelManager from .log import write_log
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata, write_metadata from .model_manager import ModelManager
from ldm.invoke.readline import Completer, get_completer from .pngwriter import PngWriter, retrieve_metadata, write_metadata
from ldm.util import url_attachment_name from .readline import Completer, get_completer
from ..util import url_attachment_name
# global used in multiple functions (fix) # global used in multiple functions (fix)
infile = None infile = None
@ -1263,10 +1264,13 @@ def make_step_callback(gen, opt, prefix):
os.makedirs(destination, exist_ok=True) os.makedirs(destination, exist_ok=True)
print(f">> Intermediate images will be written into {destination}") print(f">> Intermediate images will be written into {destination}")
def callback(img, step): def callback(state: PipelineIntermediateState):
latents = state.latents
step = state.step
if step % opt.save_intermediates == 0 or step == opt.steps - 1: if step % opt.save_intermediates == 0 or step == opt.steps - 1:
filename = os.path.join(destination, f"{step:04}.png") filename = os.path.join(destination, f"{step:04}.png")
image = gen.sample_to_image(img) image = gen.sample_to_lowres_estimated_image(latents)
image = image.resize((image.size[0]*8,image.size[1]*8))
image.save(filename, "PNG") image.save(filename, "PNG")
return callback return callback
@ -1388,3 +1392,7 @@ def check_internet() -> bool:
return True return True
except: except:
return False return False
if __name__ == '__main__':
main()

View File

@ -1 +1 @@
__version__='2.3.0' __version__='2.3.1+a0'

View File

@ -272,6 +272,10 @@ class Args(object):
switches.append('--seamless') switches.append('--seamless')
if a['hires_fix']: if a['hires_fix']:
switches.append('--hires_fix') switches.append('--hires_fix')
if a['h_symmetry_time_pct']:
switches.append(f'--h_symmetry_time_pct {a["h_symmetry_time_pct"]}')
if a['v_symmetry_time_pct']:
switches.append(f'--v_symmetry_time_pct {a["v_symmetry_time_pct"]}')
# img2img generations have parameters relevant only to them and have special handling # img2img generations have parameters relevant only to them and have special handling
if a['init_img'] and len(a['init_img'])>0: if a['init_img'] and len(a['init_img'])>0:
@ -845,6 +849,18 @@ class Args(object):
type=float, type=float,
help='Perlin noise scale (0.0 - 1.0) - add perlin noise to the initialization instead of the usual gaussian noise.', help='Perlin noise scale (0.0 - 1.0) - add perlin noise to the initialization instead of the usual gaussian noise.',
) )
render_group.add_argument(
'--h_symmetry_time_pct',
default=None,
type=float,
help='Horizontal symmetry point (0.0 - 1.0) - apply horizontal symmetry at this point in image generation.',
)
render_group.add_argument(
'--v_symmetry_time_pct',
default=None,
type=float,
help='Vertical symmetry point (0.0 - 1.0) - apply vertical symmetry at this point in image generation.',
)
render_group.add_argument( render_group.add_argument(
'--fnformat', '--fnformat',
default='{prefix}.{seed}.png', default='{prefix}.{seed}.png',
@ -1151,7 +1167,8 @@ def metadata_dumps(opt,
# remove any image keys not mentioned in RFC #266 # remove any image keys not mentioned in RFC #266
rfc266_img_fields = ['type','postprocessing','sampler','prompt','seed','variations','steps', rfc266_img_fields = ['type','postprocessing','sampler','prompt','seed','variations','steps',
'cfg_scale','threshold','perlin','step_number','width','height','extra','strength','seamless' 'cfg_scale','threshold','perlin','step_number','width','height','extra','strength','seamless'
'init_img','init_mask','facetool','facetool_strength','upscale'] 'init_img','init_mask','facetool','facetool_strength','upscale','h_symmetry_time_pct',
'v_symmetry_time_pct']
rfc_dict ={} rfc_dict ={}
for item in image_dict.items(): for item in image_dict.items():

View File

@ -0,0 +1,102 @@
'''
Minimalist updater script. Prompts user for the tag or branch to update to and runs
pip install <path_to_git_source>.
'''
import platform
import requests
import subprocess
from rich import box, print
from rich.console import Console, group
from rich.panel import Panel
from rich.prompt import Prompt
from rich.style import Style
from rich.text import Text
from rich.live import Live
from rich.table import Table
from ldm.invoke import __version__
INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases"
OS = platform.uname().system
ARCH = platform.uname().machine
ORANGE_ON_DARK_GREY = Style(bgcolor="grey23", color="orange1")
if OS == "Windows":
# Windows terminals look better without a background colour
console = Console(style=Style(color="grey74"))
else:
console = Console(style=Style(color="grey74", bgcolor="grey23"))
def get_versions()->dict:
return requests.get(url=INVOKE_AI_REL).json()
def welcome(versions: dict):
@group()
def text():
yield f'InvokeAI Version: [bold yellow]{__version__}'
yield ''
yield 'This script will update InvokeAI to the latest release, or to a development version of your choice.'
yield ''
yield '[bold yellow]Options:'
yield f'''[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic])
[2] Update to the bleeding-edge development version ([italic]main[/italic])
[3] Manually enter the tag or branch name you wish to update'''
console.rule()
console.print(
Panel(
title="[bold wheat1]InvokeAI Updater",
renderable=text(),
box=box.DOUBLE,
expand=True,
padding=(1, 2),
style=ORANGE_ON_DARK_GREY,
subtitle=f"[bold grey39]{OS}-{ARCH}",
)
)
# console.rule is used instead of console.line to maintain dark background
# on terminals where light background is the default
console.rule(characters=" ")
def main():
versions = get_versions()
welcome(versions)
tag = None
choice = Prompt.ask(Text.from_markup(('[grey74 on grey23]Choice:')),choices=['1','2','3'],default='1')
if choice=='1':
tag = versions[0]['tag_name']
elif choice=='2':
tag = 'main'
elif choice=='3':
tag = Prompt.ask('[grey74 on grey23]Enter an InvokeAI tag or branch name')
console.print(Panel(f':crossed_fingers: Upgrading to [yellow]{tag}[/yellow]', box=box.MINIMAL, style=ORANGE_ON_DARK_GREY))
cmd = f'pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517'
progress = Table.grid(expand=True)
progress_panel = Panel(progress, box=box.MINIMAL, style=ORANGE_ON_DARK_GREY)
with subprocess.Popen(['bash', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
progress.add_column()
with Live(progress_panel, console=console, vertical_overflow='visible'):
while proc.poll() is None:
for l in iter(proc.stdout.readline, b''):
progress.add_row(l.decode().strip(), style=ORANGE_ON_DARK_GREY)
if proc.returncode == 0:
console.rule(f':heavy_check_mark: Upgrade successful')
else:
console.rule(f':exclamation: [bold red]Upgrade failed[/red bold]')
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass

View File

@ -64,6 +64,7 @@ class Generator:
def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None, def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None,
image_callback=None, step_callback=None, threshold=0.0, perlin=0.0, image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
h_symmetry_time_pct=None, v_symmetry_time_pct=None,
safety_checker:dict=None, safety_checker:dict=None,
free_gpu_mem: bool=False, free_gpu_mem: bool=False,
**kwargs): **kwargs):
@ -81,6 +82,8 @@ class Generator:
step_callback = step_callback, step_callback = step_callback,
threshold = threshold, threshold = threshold,
perlin = perlin, perlin = perlin,
h_symmetry_time_pct = h_symmetry_time_pct,
v_symmetry_time_pct = v_symmetry_time_pct,
attention_maps_callback = attention_maps_callback, attention_maps_callback = attention_maps_callback,
**kwargs **kwargs
) )

View File

@ -16,8 +16,8 @@ class Img2Img(Generator):
self.init_latent = None # by get_noise() self.init_latent = None # by get_noise()
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
conditioning,init_image,strength,step_callback=None,threshold=0.0,perlin=0.0, conditioning,init_image,strength,step_callback=None,threshold=0.0,warmup=0.2,perlin=0.0,
attention_maps_callback=None, h_symmetry_time_pct=None,v_symmetry_time_pct=None,attention_maps_callback=None,
**kwargs): **kwargs):
""" """
Returns a function returning an image derived from the prompt and the initial image Returns a function returning an image derived from the prompt and the initial image
@ -33,8 +33,13 @@ class Img2Img(Generator):
conditioning_data = ( conditioning_data = (
ConditioningData( ConditioningData(
uc, c, cfg_scale, extra_conditioning_info, uc, c, cfg_scale, extra_conditioning_info,
postprocessing_settings = PostprocessingSettings(threshold, warmup=0.2) if threshold else None) postprocessing_settings=PostprocessingSettings(
.add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) threshold=threshold,
warmup=warmup,
h_symmetry_time_pct=h_symmetry_time_pct,
v_symmetry_time_pct=v_symmetry_time_pct
)
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta))
def make_image(x_T): def make_image(x_T):

View File

@ -15,8 +15,8 @@ class Txt2Img(Generator):
@torch.no_grad() @torch.no_grad()
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
conditioning,width,height,step_callback=None,threshold=0.0,perlin=0.0, conditioning,width,height,step_callback=None,threshold=0.0,warmup=0.2,perlin=0.0,
attention_maps_callback=None, h_symmetry_time_pct=None,v_symmetry_time_pct=None,attention_maps_callback=None,
**kwargs): **kwargs):
""" """
Returns a function returning an image derived from the prompt and the initial image Returns a function returning an image derived from the prompt and the initial image
@ -33,8 +33,13 @@ class Txt2Img(Generator):
conditioning_data = ( conditioning_data = (
ConditioningData( ConditioningData(
uc, c, cfg_scale, extra_conditioning_info, uc, c, cfg_scale, extra_conditioning_info,
postprocessing_settings = PostprocessingSettings(threshold, warmup=0.2) if threshold else None) postprocessing_settings=PostprocessingSettings(
.add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) threshold=threshold,
warmup=warmup,
h_symmetry_time_pct=h_symmetry_time_pct,
v_symmetry_time_pct=v_symmetry_time_pct
)
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta))
def make_image(x_T) -> PIL.Image.Image: def make_image(x_T) -> PIL.Image.Image:
pipeline_output = pipeline.image_from_embeddings( pipeline_output = pipeline.image_from_embeddings(
@ -44,8 +49,10 @@ class Txt2Img(Generator):
conditioning_data=conditioning_data, conditioning_data=conditioning_data,
callback=step_callback, callback=step_callback,
) )
if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None:
attention_maps_callback(pipeline_output.attention_map_saver) attention_maps_callback(pipeline_output.attention_map_saver)
return pipeline.numpy_to_pil(pipeline_output.images)[0] return pipeline.numpy_to_pil(pipeline_output.images)[0]
return make_image return make_image

View File

@ -21,12 +21,14 @@ class Txt2Img2Img(Generator):
def get_make_image(self, prompt:str, sampler, steps:int, cfg_scale:float, ddim_eta, def get_make_image(self, prompt:str, sampler, steps:int, cfg_scale:float, ddim_eta,
conditioning, width:int, height:int, strength:float, conditioning, width:int, height:int, strength:float,
step_callback:Optional[Callable]=None, threshold=0.0, **kwargs): step_callback:Optional[Callable]=None, threshold=0.0, warmup=0.2, perlin=0.0,
h_symmetry_time_pct=None, v_symmetry_time_pct=None, attention_maps_callback=None, **kwargs):
""" """
Returns a function returning an image derived from the prompt and the initial image Returns a function returning an image derived from the prompt and the initial image
Return value depends on the seed at the time you call it Return value depends on the seed at the time you call it
kwargs are 'width' and 'height' kwargs are 'width' and 'height'
""" """
self.perlin = perlin
# noinspection PyTypeChecker # noinspection PyTypeChecker
pipeline: StableDiffusionGeneratorPipeline = self.model pipeline: StableDiffusionGeneratorPipeline = self.model
@ -36,8 +38,13 @@ class Txt2Img2Img(Generator):
conditioning_data = ( conditioning_data = (
ConditioningData( ConditioningData(
uc, c, cfg_scale, extra_conditioning_info, uc, c, cfg_scale, extra_conditioning_info,
postprocessing_settings = PostprocessingSettings(threshold=threshold, warmup=0.2) if threshold else None) postprocessing_settings = PostprocessingSettings(
.add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) threshold=threshold,
warmup=0.2,
h_symmetry_time_pct=h_symmetry_time_pct,
v_symmetry_time_pct=v_symmetry_time_pct
)
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta))
def make_image(x_T): def make_image(x_T):
@ -69,19 +76,28 @@ class Txt2Img2Img(Generator):
if clear_cuda_cache is not None: if clear_cuda_cache is not None:
clear_cuda_cache() clear_cuda_cache()
second_pass_noise = self.get_noise_like(resized_latents) second_pass_noise = self.get_noise_like(resized_latents, override_perlin=True)
# Clear symmetry for the second pass
from dataclasses import replace
new_postprocessing_settings = replace(conditioning_data.postprocessing_settings, h_symmetry_time_pct=None)
new_postprocessing_settings = replace(new_postprocessing_settings, v_symmetry_time_pct=None)
new_conditioning_data = replace(conditioning_data, postprocessing_settings=new_postprocessing_settings)
verbosity = get_verbosity() verbosity = get_verbosity()
set_verbosity_error() set_verbosity_error()
pipeline_output = pipeline.img2img_from_latents_and_embeddings( pipeline_output = pipeline.img2img_from_latents_and_embeddings(
resized_latents, resized_latents,
num_inference_steps=steps, num_inference_steps=steps,
conditioning_data=conditioning_data, conditioning_data=new_conditioning_data,
strength=strength, strength=strength,
noise=second_pass_noise, noise=second_pass_noise,
callback=step_callback) callback=step_callback)
set_verbosity(verbosity) set_verbosity(verbosity)
if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None:
attention_maps_callback(pipeline_output.attention_map_saver)
return pipeline.numpy_to_pil(pipeline_output.images)[0] return pipeline.numpy_to_pil(pipeline_output.images)[0]
@ -95,13 +111,13 @@ class Txt2Img2Img(Generator):
return make_image return make_image
def get_noise_like(self, like: torch.Tensor): def get_noise_like(self, like: torch.Tensor, override_perlin: bool=False):
device = like.device device = like.device
if device.type == 'mps': if device.type == 'mps':
x = torch.randn_like(like, device='cpu', dtype=self.torch_dtype()).to(device) x = torch.randn_like(like, device='cpu', dtype=self.torch_dtype()).to(device)
else: else:
x = torch.randn_like(like, device=device, dtype=self.torch_dtype()) x = torch.randn_like(like, device=device, dtype=self.torch_dtype())
if self.perlin > 0.0: if self.perlin > 0.0 and override_perlin == False:
shape = like.shape shape = like.shape
x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2]) x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2])
return x return x
@ -139,6 +155,9 @@ class Txt2Img2Img(Generator):
shape = (1, channels, shape = (1, channels,
scaled_height // self.downsampling_factor, scaled_width // self.downsampling_factor) scaled_height // self.downsampling_factor, scaled_width // self.downsampling_factor)
if self.use_mps_noise or device.type == 'mps': if self.use_mps_noise or device.type == 'mps':
return torch.randn(shape, dtype=self.torch_dtype(), device='cpu').to(device) tensor = torch.empty(size=shape, device='cpu')
tensor = self.get_noise_like(like=tensor).to(device)
else: else:
return torch.randn(shape, dtype=self.torch_dtype(), device=device) tensor = torch.empty(size=shape, device=device)
tensor = self.get_noise_like(like=tensor)
return tensor

View File

@ -323,7 +323,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
if selected_model3 > 0: if selected_model3 > 0:
self.merge_method.values = ['add_difference ( A+(B-C) )'] self.merge_method.values = ['add_difference ( A+(B-C) )']
self.merged_model_name.value += f"+{models[selected_model3]}" self.merged_model_name.value += f"+{models[selected_model3 -1]}" # In model3 there is one more element in the list (None). So we have to subtract one.
else: else:
self.merge_method.values = self.interpolations self.merge_method.values = self.interpolations
self.merge_method.value = 0 self.merge_method.value = 0

View File

@ -58,6 +58,8 @@ COMMANDS = (
'--inpaint_replace','-r', '--inpaint_replace','-r',
'--png_compression','-z', '--png_compression','-z',
'--text_mask','-tm', '--text_mask','-tm',
'--h_symmetry_time_pct',
'--v_symmetry_time_pct',
'!fix','!fetch','!replay','!history','!search','!clear', '!fix','!fetch','!replay','!history','!search','!clear',
'!models','!switch','!import_model','!optimize_model','!convert_model','!edit_model','!del_model', '!models','!switch','!import_model','!optimize_model','!convert_model','!edit_model','!del_model',
'!mask','!triggers', '!mask','!triggers',

View File

@ -441,6 +441,7 @@ class TextualInversionDataset(Dataset):
self.image_paths = [ self.image_paths = [
os.path.join(self.data_root, file_path) os.path.join(self.data_root, file_path)
for file_path in os.listdir(self.data_root) for file_path in os.listdir(self.data_root)
if os.path.isfile(file_path) and file_path.endswith(('.png','.PNG','.jpg','.JPG','.jpeg','.JPEG','.gif','.GIF'))
] ]
self.num_images = len(self.image_paths) self.num_images = len(self.image_paths)

View File

@ -566,7 +566,9 @@ class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor):
# print(f"SwapCrossAttnContext for {attention_type} active") # print(f"SwapCrossAttnContext for {attention_type} active")
batch_size, sequence_length, _ = hidden_states.shape batch_size, sequence_length, _ = hidden_states.shape
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length) attention_mask = attn.prepare_attention_mask(
attention_mask=attention_mask, target_length=sequence_length,
batch_size=batch_size)
query = attn.to_q(hidden_states) query = attn.to_q(hidden_states)
dim = query.shape[-1] dim = query.shape[-1]

View File

@ -18,6 +18,8 @@ from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver
class PostprocessingSettings: class PostprocessingSettings:
threshold: float threshold: float
warmup: float warmup: float
h_symmetry_time_pct: Optional[float]
v_symmetry_time_pct: Optional[float]
class InvokeAIDiffuserComponent: class InvokeAIDiffuserComponent:
@ -30,7 +32,7 @@ class InvokeAIDiffuserComponent:
* Hybrid conditioning (used for inpainting) * Hybrid conditioning (used for inpainting)
''' '''
debug_thresholding = False debug_thresholding = False
last_percent_through = 0.0
@dataclass @dataclass
class ExtraConditioningInfo: class ExtraConditioningInfo:
@ -56,6 +58,7 @@ class InvokeAIDiffuserComponent:
self.is_running_diffusers = is_running_diffusers self.is_running_diffusers = is_running_diffusers
self.model_forward_callback = model_forward_callback self.model_forward_callback = model_forward_callback
self.cross_attention_control_context = None self.cross_attention_control_context = None
self.last_percent_through = 0.0
@contextmanager @contextmanager
def custom_attention_context(self, def custom_attention_context(self,
@ -164,6 +167,7 @@ class InvokeAIDiffuserComponent:
if postprocessing_settings is not None: if postprocessing_settings is not None:
percent_through = self.calculate_percent_through(sigma, step_index, total_step_count) percent_through = self.calculate_percent_through(sigma, step_index, total_step_count)
latents = self.apply_threshold(postprocessing_settings, latents, percent_through) latents = self.apply_threshold(postprocessing_settings, latents, percent_through)
latents = self.apply_symmetry(postprocessing_settings, latents, percent_through)
return latents return latents
def calculate_percent_through(self, sigma, step_index, total_step_count): def calculate_percent_through(self, sigma, step_index, total_step_count):
@ -292,8 +296,12 @@ class InvokeAIDiffuserComponent:
self, self,
postprocessing_settings: PostprocessingSettings, postprocessing_settings: PostprocessingSettings,
latents: torch.Tensor, latents: torch.Tensor,
percent_through percent_through: float
) -> torch.Tensor: ) -> torch.Tensor:
if postprocessing_settings.threshold is None or postprocessing_settings.threshold == 0.0:
return latents
threshold = postprocessing_settings.threshold threshold = postprocessing_settings.threshold
warmup = postprocessing_settings.warmup warmup = postprocessing_settings.warmup
@ -342,6 +350,56 @@ class InvokeAIDiffuserComponent:
return latents return latents
def apply_symmetry(
self,
postprocessing_settings: PostprocessingSettings,
latents: torch.Tensor,
percent_through: float
) -> torch.Tensor:
# Reset our last percent through if this is our first step.
if percent_through == 0.0:
self.last_percent_through = 0.0
if postprocessing_settings is None:
return latents
# Check for out of bounds
h_symmetry_time_pct = postprocessing_settings.h_symmetry_time_pct
if (h_symmetry_time_pct is not None and (h_symmetry_time_pct <= 0.0 or h_symmetry_time_pct > 1.0)):
h_symmetry_time_pct = None
v_symmetry_time_pct = postprocessing_settings.v_symmetry_time_pct
if (v_symmetry_time_pct is not None and (v_symmetry_time_pct <= 0.0 or v_symmetry_time_pct > 1.0)):
v_symmetry_time_pct = None
dev = latents.device.type
latents.to(device='cpu')
if (
h_symmetry_time_pct != None and
self.last_percent_through < h_symmetry_time_pct and
percent_through >= h_symmetry_time_pct
):
# Horizontal symmetry occurs on the 3rd dimension of the latent
width = latents.shape[3]
x_flipped = torch.flip(latents, dims=[3])
latents = torch.cat([latents[:, :, :, 0:int(width/2)], x_flipped[:, :, :, int(width/2):int(width)]], dim=3)
if (
v_symmetry_time_pct != None and
self.last_percent_through < v_symmetry_time_pct and
percent_through >= v_symmetry_time_pct
):
# Vertical symmetry occurs on the 2nd dimension of the latent
height = latents.shape[2]
y_flipped = torch.flip(latents, dims=[2])
latents = torch.cat([latents[:, :, 0:int(height / 2)], y_flipped[:, :, int(height / 2):int(height)]], dim=2)
self.last_percent_through = percent_through
return latents.to(device=dev)
def estimate_percent_through(self, step_index, sigma): def estimate_percent_through(self, step_index, sigma):
if step_index is not None and self.cross_attention_control_context is not None: if step_index is not None and self.cross_attention_control_context is not None:
# percent_through will never reach 1.0 (but this is intended) # percent_through will never reach 1.0 (but this is intended)

View File

@ -62,8 +62,13 @@ class TextualInversionManager(BaseTextualInversionManager):
def load_textual_inversion(self, ckpt_path: Union[str,Path], defer_injecting_tokens: bool = False): def load_textual_inversion(self, ckpt_path: Union[str,Path], defer_injecting_tokens: bool = False):
ckpt_path = Path(ckpt_path) ckpt_path = Path(ckpt_path)
if not ckpt_path.is_file():
return
if str(ckpt_path).endswith(".DS_Store"): if str(ckpt_path).endswith(".DS_Store"):
return return
try: try:
scan_result = scan_file_path(str(ckpt_path)) scan_result = scan_file_path(str(ckpt_path))
if scan_result.infected_files == 1: if scan_result.infected_files == 1:
@ -80,12 +85,15 @@ class TextualInversionManager(BaseTextualInversionManager):
embedding_info = self._parse_embedding(str(ckpt_path)) embedding_info = self._parse_embedding(str(ckpt_path))
if ( if embedding_info is None:
# We've already put out an error message about the bad embedding in _parse_embedding, so just return.
return
elif (
self.text_encoder.get_input_embeddings().weight.data[0].shape[0] self.text_encoder.get_input_embeddings().weight.data[0].shape[0]
!= embedding_info["embedding"].shape[0] != embedding_info['token_dim']
): ):
print( print(
f"** Notice: {ckpt_path.parents[0].name}/{ckpt_path.name} was trained on a model with a different token dimension. It can't be used with this model." f"** Notice: {ckpt_path.parents[0].name}/{ckpt_path.name} was trained on a model with an incompatible token dimension: {self.text_encoder.get_input_embeddings().weight.data[0].shape[0]} vs {embedding_info['token_dim']}."
) )
return return
@ -288,6 +296,7 @@ class TextualInversionManager(BaseTextualInversionManager):
return self._parse_embedding_bin(embedding_file) return self._parse_embedding_bin(embedding_file)
else: else:
print(f">> Not a recognized embedding file: {embedding_file}") print(f">> Not a recognized embedding file: {embedding_file}")
return None
def _parse_embedding_pt(self, embedding_file): def _parse_embedding_pt(self, embedding_file):
embedding_ckpt = torch.load(embedding_file, map_location="cpu") embedding_ckpt = torch.load(embedding_file, map_location="cpu")
@ -330,7 +339,6 @@ class TextualInversionManager(BaseTextualInversionManager):
# .pt files found at https://cyberes.github.io/stable-diffusion-textual-inversion-models/ # .pt files found at https://cyberes.github.io/stable-diffusion-textual-inversion-models/
# They are actually .bin files # They are actually .bin files
elif len(embedding_ckpt.keys()) == 1: elif len(embedding_ckpt.keys()) == 1:
print(">> Detected .bin file masquerading as .pt file")
embedding_info = self._parse_embedding_bin(embedding_file) embedding_info = self._parse_embedding_bin(embedding_file)
else: else:
@ -369,9 +377,6 @@ class TextualInversionManager(BaseTextualInversionManager):
if isinstance( if isinstance(
list(embedding_ckpt["string_to_token"].values())[0], torch.Tensor list(embedding_ckpt["string_to_token"].values())[0], torch.Tensor
): ):
print(
">> Detected .pt file variant 1"
) # example at https://github.com/invoke-ai/InvokeAI/issues/1829
for token in list(embedding_ckpt["string_to_token"].keys()): for token in list(embedding_ckpt["string_to_token"].keys()):
embedding_info["name"] = ( embedding_info["name"] = (
token token
@ -384,7 +389,7 @@ class TextualInversionManager(BaseTextualInversionManager):
embedding_info["num_vectors_per_token"] = embedding_info[ embedding_info["num_vectors_per_token"] = embedding_info[
"embedding" "embedding"
].shape[0] ].shape[0]
embedding_info["token_dim"] = embedding_info["embedding"].size()[0] embedding_info["token_dim"] = embedding_info["embedding"].size()[1]
else: else:
print(">> Invalid embedding format") print(">> Invalid embedding format")
embedding_info = None embedding_info = None

View File

@ -109,6 +109,7 @@ dependencies = [
"invokeai-configure" = "ldm.invoke.config.invokeai_configure:main" "invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
"invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging "invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging
"invokeai-ti" = "ldm.invoke.training.textual_inversion:main" "invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
"invokeai-update" = "ldm.invoke.config.invokeai_update:main"
[project.urls] [project.urls]
"Homepage" = "https://invoke-ai.github.io/InvokeAI/" "Homepage" = "https://invoke-ai.github.io/InvokeAI/"

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.1 KiB

View File

@ -1,179 +0,0 @@
:root {
--fields-dark:#DCDCDC;
--fields-light:#F5F5F5;
}
* {
font-family: 'Arial';
font-size: 100%;
}
body {
font-size: 1em;
}
textarea {
font-size: 0.95em;
}
header, form, #progress-section {
margin-left: auto;
margin-right: auto;
max-width: 1024px;
text-align: center;
}
fieldset {
border: none;
line-height: 2.2em;
}
fieldset > legend {
width: auto;
margin-left: 0;
margin-right: auto;
font-weight:bold;
}
select, input {
margin-right: 10px;
padding: 2px;
}
input:disabled {
cursor:auto;
}
input[type=submit] {
cursor: pointer;
background-color: #666;
color: white;
}
input[type=checkbox] {
cursor: pointer;
margin-right: 0px;
width: 20px;
height: 20px;
vertical-align: middle;
}
input#seed {
margin-right: 0px;
}
div {
padding: 10px 10px 10px 10px;
}
header {
margin-bottom: 16px;
}
header h1 {
margin-bottom: 0;
font-size: 2em;
}
#search-box {
display: flex;
}
#scaling-inprocess-message {
font-weight: bold;
font-style: italic;
display: none;
}
#prompt {
flex-grow: 1;
padding: 5px 10px 5px 10px;
border: 1px solid #999;
outline: none;
}
#submit {
padding: 5px 10px 5px 10px;
border: 1px solid #999;
}
#reset-all, #remove-image {
margin-top: 12px;
font-size: 0.8em;
background-color: pink;
border: 1px solid #999;
border-radius: 4px;
}
#results {
text-align: center;
margin: auto;
padding-top: 10px;
}
#results figure {
display: inline-block;
margin: 10px;
}
#results figcaption {
font-size: 0.8em;
padding: 3px;
color: #888;
cursor: pointer;
}
#results img {
border-radius: 5px;
object-fit: contain;
background-color: var(--fields-dark);
}
#fieldset-config {
line-height:2em;
}
input[type="number"] {
width: 60px;
}
#seed {
width: 150px;
}
button#reset-seed {
font-size: 1.7em;
background: #efefef;
border: 1px solid #999;
border-radius: 4px;
line-height: 0.8;
margin: 0 10px 0 0;
padding: 0 5px 3px;
vertical-align: middle;
}
label {
white-space: nowrap;
}
#progress-section {
display: none;
}
#progress-image {
width: 30vh;
height: 30vh;
object-fit: contain;
background-color: var(--fields-dark);
}
#cancel-button {
cursor: pointer;
color: red;
}
#txt2img {
background-color: var(--fields-dark);
}
#variations {
background-color: var(--fields-light);
}
#initimg {
background-color: var(--fields-dark);
}
#img2img {
background-color: var(--fields-light);
}
#initimg > :not(legend) {
background-color: var(--fields-light);
margin: .5em;
}
#postprocess, #initimg {
display:flex;
flex-wrap:wrap;
padding: 0;
margin-top: 1em;
background-color: var(--fields-dark);
}
#postprocess > fieldset, #initimg > * {
flex-grow: 1;
}
#postprocess > fieldset {
background-color: var(--fields-dark);
}
#progress-section {
background-color: var(--fields-light);
}
#no-results-message:not(:only-child) {
display: none;
}

View File

@ -1,187 +0,0 @@
<html lang="en">
<head>
<title>Stable Diffusion Dream Server</title>
<meta charset="utf-8">
<link rel="icon" type="image/x-icon" href="static/dream_web/favicon.ico" />
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<script src="config.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.0.1/socket.io.js"
integrity="sha512-q/dWJ3kcmjBLU4Qc47E4A9kTB4m3wuTY7vkFJDTZKjTs8jhyGQnaUrxa0Ytd0ssMZhbNua9hE+E7Qv1j+DyZwA=="
crossorigin="anonymous"></script>
<link rel="stylesheet" href="index.css">
<script src="index.js"></script>
</head>
<body>
<header>
<h1>Stable Diffusion Dream Server</h1>
<div id="about">
For news and support for this web service, visit our <a href="http://github.com/lstein/stable-diffusion">GitHub
site</a>
</div>
</header>
<main>
<!--
<div id="dropper" style="background-color:red;width:200px;height:200px;">
</div>
-->
<form id="generate-form" method="post" action="api/jobs">
<fieldset id="txt2img">
<legend>
<input type="checkbox" name="enable_generate" id="enable_generate" checked>
<label for="enable_generate">Generate</label>
</legend>
<div id="search-box">
<textarea rows="3" id="prompt" name="prompt"></textarea>
</div>
<label for="iterations">Images to generate:</label>
<input value="1" type="number" id="iterations" name="iterations" size="4">
<label for="steps">Steps:</label>
<input value="50" type="number" id="steps" name="steps">
<label for="cfg_scale">Cfg Scale:</label>
<input value="7.5" type="number" id="cfg_scale" name="cfg_scale" step="any">
<label for="sampler_name">Sampler:</label>
<select id="sampler_name" name="sampler_name" value="k_lms">
<option value="ddim">DDIM</option>
<option value="plms">PLMS</option>
<option value="k_lms" selected>KLMS</option>
<option value="k_dpm_2">KDPM_2</option>
<option value="k_dpm_2_a">KDPM_2A</option>
<option value="k_dpmpp_2">KDPMPP_2</option>
<option value="k_dpmpp_2_a">KDPMPP_2A</option>
<option value="k_euler">KEULER</option>
<option value="k_euler_a">KEULER_A</option>
<option value="k_heun">KHEUN</option>
</select>
<input type="checkbox" name="seamless" id="seamless">
<label for="seamless">Seamless circular tiling</label>
<br>
<label title="Set to multiple of 64" for="width">Width:</label>
<select id="width" name="width" value="512">
<option value="64">64</option>
<option value="128">128</option>
<option value="192">192</option>
<option value="256">256</option>
<option value="320">320</option>
<option value="384">384</option>
<option value="448">448</option>
<option value="512" selected>512</option>
<option value="576">576</option>
<option value="640">640</option>
<option value="704">704</option>
<option value="768">768</option>
<option value="832">832</option>
<option value="896">896</option>
<option value="960">960</option>
<option value="1024">1024</option>
</select>
<label title="Set to multiple of 64" for="height">Height:</label>
<select id="height" name="height" value="512">
<option value="64">64</option>
<option value="128">128</option>
<option value="192">192</option>
<option value="256">256</option>
<option value="320">320</option>
<option value="384">384</option>
<option value="448">448</option>
<option value="512" selected>512</option>
<option value="576">576</option>
<option value="640">640</option>
<option value="704">704</option>
<option value="768">768</option>
<option value="832">832</option>
<option value="896">896</option>
<option value="960">960</option>
<option value="1024">1024</option>
</select>
<label title="Set to 0 for random seed" for="seed">Seed:</label>
<input value="0" type="number" id="seed" name="seed">
<button type="button" id="reset-seed">&olarr;</button>
<input type="checkbox" name="progress_images" id="progress_images">
<label for="progress_images">Display in-progress images (slower)</label>
<div>
<label title="If > 0, adds thresholding to restrict values for k-diffusion samplers (0 disables)" for="threshold">Threshold:</label>
<input value="0" type="number" id="threshold" name="threshold" step="0.1" min="0">
<label title="Perlin: optional 0-1 value adds a percentage of perlin noise to the initial noise" for="perlin">Perlin:</label>
<input value="0" type="number" id="perlin" name="perlin" step="0.01" min="0" max="1">
<button type="button" id="reset-all">Reset to Defaults</button>
</div>
<div id="variations">
<label
title="If > 0, generates variations on the initial seed instead of random seeds per iteration. Must be between 0 and 1. Higher values will be more different."
for="variation_amount">Variation amount (0 to disable):</label>
<input value="0" type="number" id="variation_amount" name="variation_amount" step="0.01" min="0" max="1">
<label title="list of variations to apply, in the format `seed:weight,seed:weight,..."
for="with_variations">With variations (seed:weight,seed:weight,...):</label>
<input value="" type="text" id="with_variations" name="with_variations">
</div>
</fieldset>
<fieldset id="initimg">
<legend>
<input type="checkbox" name="enable_init_image" id="enable_init_image" checked>
<label for="enable_init_image">Enable init image</label>
</legend>
<div>
<label title="Upload an image to use img2img" for="initimg">Initial image:</label>
<input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
<button type="button" id="remove-image">Remove Image</button>
</div>
<fieldset id="img2img">
<legend>
<input type="checkbox" name="enable_img2img" id="enable_img2img" checked>
<label for="enable_img2img">Enable Img2Img</label>
</legend>
<label for="strength">Img2Img Strength:</label>
<input value="0.75" type="number" id="strength" name="strength" step="0.01" min="0" max="1">
<input type="checkbox" id="fit" name="fit" checked>
<label title="Rescale image to fit within requested width and height" for="fit">Fit to width/height:</label>
</fieldset>
</fieldset>
<div id="postprocess">
<fieldset id="gfpgan">
<legend>
<input type="checkbox" name="enable_gfpgan" id="enable_gfpgan">
<label for="enable_gfpgan">Enable gfpgan</label>
</legend>
<label title="Strength of the gfpgan (face fixing) algorithm." for="facetool_strength">GPFGAN Strength:</label>
<input value="0.8" min="0" max="1" type="number" id="facetool_strength" name="facetool_strength" step="0.05">
</fieldset>
<fieldset id="upscale">
<legend>
<input type="checkbox" name="enable_upscale" id="enable_upscale">
<label for="enable_upscale">Enable Upscaling</label>
</legend>
<label title="Upscaling to perform using ESRGAN." for="upscale_level">Upscaling Level:</label>
<select id="upscale_level" name="upscale_level" value="">
<option value="" selected>None</option>
<option value="2">2x</option>
<option value="4">4x</option>
</select>
<label title="Strength of the esrgan (upscaling) algorithm." for="upscale_strength">Upscale Strength:</label>
<input value="0.75" min="0" max="1" type="number" id="upscale_strength" name="upscale_strength" step="0.05">
</fieldset>
</div>
<input type="submit" id="submit" value="Generate">
</form>
<br>
<section id="progress-section">
<div id="progress-container">
<progress id="progress-bar" value="0" max="1"></progress>
<span id="cancel-button" title="Cancel">&#10006;</span>
<br>
<img id="progress-image" src='data:image/svg+xml,<svg xmlns="http://www.w3.org/2000/svg"/>'>
<div id="scaling-inprocess-message">
<i><span>Postprocessing...</span><span id="processing_cnt">1</span>/<span id="processing_total">3</span></i>
</div>
</div>
</section>
<div id="results">
</div>
</main>
</body>
</html>

View File

@ -1,396 +0,0 @@
const socket = io();
var priorResultsLoadState = {
page: 0,
pages: 1,
per_page: 10,
total: 20,
offset: 0, // number of items generated since last load
loading: false,
initialized: false
};
function loadPriorResults() {
// Fix next page by offset
let offsetPages = priorResultsLoadState.offset / priorResultsLoadState.per_page;
priorResultsLoadState.page += offsetPages;
priorResultsLoadState.pages += offsetPages;
priorResultsLoadState.total += priorResultsLoadState.offset;
priorResultsLoadState.offset = 0;
if (priorResultsLoadState.loading) {
return;
}
if (priorResultsLoadState.page >= priorResultsLoadState.pages) {
return; // Nothing more to load
}
// Load
priorResultsLoadState.loading = true
let url = new URL('/api/images', document.baseURI);
url.searchParams.append('page', priorResultsLoadState.initialized ? priorResultsLoadState.page + 1 : priorResultsLoadState.page);
url.searchParams.append('per_page', priorResultsLoadState.per_page);
fetch(url.href, {
method: 'GET',
headers: new Headers({'content-type': 'application/json'})
})
.then(response => response.json())
.then(data => {
priorResultsLoadState.page = data.page;
priorResultsLoadState.pages = data.pages;
priorResultsLoadState.per_page = data.per_page;
priorResultsLoadState.total = data.total;
data.items.forEach(function(dreamId, index) {
let src = 'api/images/' + dreamId;
fetch('/api/images/' + dreamId + '/metadata', {
method: 'GET',
headers: new Headers({'content-type': 'application/json'})
})
.then(response => response.json())
.then(metadata => {
let seed = metadata.seed || 0; // TODO: Parse old metadata
appendOutput(src, seed, metadata, true);
});
});
// Load until page is full
if (!priorResultsLoadState.initialized) {
if (document.body.scrollHeight <= window.innerHeight) {
loadPriorResults();
}
}
})
.finally(() => {
priorResultsLoadState.loading = false;
priorResultsLoadState.initialized = true;
});
}
function resetForm() {
var form = document.getElementById('generate-form');
form.querySelector('fieldset').removeAttribute('disabled');
}
function initProgress(totalSteps, showProgressImages) {
// TODO: Progress could theoretically come from multiple jobs at the same time (in the future)
let progressSectionEle = document.querySelector('#progress-section');
progressSectionEle.style.display = 'initial';
let progressEle = document.querySelector('#progress-bar');
progressEle.setAttribute('max', totalSteps);
let progressImageEle = document.querySelector('#progress-image');
progressImageEle.src = BLANK_IMAGE_URL;
progressImageEle.style.display = showProgressImages ? 'initial': 'none';
}
function setProgress(step, totalSteps, src) {
let progressEle = document.querySelector('#progress-bar');
progressEle.setAttribute('value', step);
if (src) {
let progressImageEle = document.querySelector('#progress-image');
progressImageEle.src = src;
}
}
function resetProgress(hide = true) {
if (hide) {
let progressSectionEle = document.querySelector('#progress-section');
progressSectionEle.style.display = 'none';
}
let progressEle = document.querySelector('#progress-bar');
progressEle.setAttribute('value', 0);
}
function toBase64(file) {
return new Promise((resolve, reject) => {
const r = new FileReader();
r.readAsDataURL(file);
r.onload = () => resolve(r.result);
r.onerror = (error) => reject(error);
});
}
function ondragdream(event) {
let dream = event.target.dataset.dream;
event.dataTransfer.setData("dream", dream);
}
function seedClick(event) {
// Get element
var image = event.target.closest('figure').querySelector('img');
var dream = JSON.parse(decodeURIComponent(image.dataset.dream));
let form = document.querySelector("#generate-form");
for (const [k, v] of new FormData(form)) {
if (k == 'initimg') { continue; }
let formElem = form.querySelector(`*[name=${k}]`);
formElem.value = dream[k] !== undefined ? dream[k] : formElem.defaultValue;
}
document.querySelector("#seed").value = dream.seed;
document.querySelector('#iterations').value = 1; // Reset to 1 iteration since we clicked a single image (not a full job)
// NOTE: leaving this manual for the user for now - it was very confusing with this behavior
// document.querySelector("#with_variations").value = variations || '';
// if (document.querySelector("#variation_amount").value <= 0) {
// document.querySelector("#variation_amount").value = 0.2;
// }
saveFields(document.querySelector("#generate-form"));
}
function appendOutput(src, seed, config, toEnd=false) {
let outputNode = document.createElement("figure");
let altText = seed.toString() + " | " + config.prompt;
// img needs width and height for lazy loading to work
// TODO: store the full config in a data attribute on the image?
const figureContents = `
<a href="${src}" target="_blank">
<img src="${src}"
alt="${altText}"
title="${altText}"
loading="lazy"
width="256"
height="256"
draggable="true"
ondragstart="ondragdream(event, this)"
data-dream="${encodeURIComponent(JSON.stringify(config))}"
data-dreamId="${encodeURIComponent(config.dreamId)}">
</a>
<figcaption onclick="seedClick(event, this)">${seed}</figcaption>
`;
outputNode.innerHTML = figureContents;
if (toEnd) {
document.querySelector("#results").append(outputNode);
} else {
document.querySelector("#results").prepend(outputNode);
}
document.querySelector("#no-results-message")?.remove();
}
function saveFields(form) {
for (const [k, v] of new FormData(form)) {
if (typeof v !== 'object') { // Don't save 'file' type
localStorage.setItem(k, v);
}
}
}
// Restore any previously saved values from localStorage back into the
// matching form fields (fields without a saved value are left alone).
function loadFields(form) {
  for (const [name] of new FormData(form)) {
    const saved = localStorage.getItem(name);
    if (saved === null) continue;
    form.querySelector(`*[name=${name}]`).value = saved;
  }
}
// Wipe all saved values and reset the form to its defaults, but keep the
// current prompt text so the user doesn't lose it.
function clearFields(form) {
  localStorage.clear();
  const keptPrompt = form.prompt.value;
  form.reset();
  form.prompt.value = keptPrompt;
}
// Empty inline SVG used as a placeholder for the progress image element.
const BLANK_IMAGE_URL = 'data:image/svg+xml,<svg xmlns="http://www.w3.org/2000/svg"/>';
// Validate the generate form, POST the request as JSON and join the job's
// socket room so progress events for it are received.
// form - the #generate-form element; its action/method drive the fetch.
async function generateSubmit(form) {
  // Convert file data to base64
  // TODO: Should probably upload files with formdata or something, and store them in the backend?
  let formData = Object.fromEntries(new FormData(form));
  if (!formData.enable_generate && !formData.enable_init_image) {
    // BUGFIX: these were implicit globals (throw in strict mode).
    const gen_label = document.querySelector("label[for=enable_generate]").innerHTML;
    const initimg_label = document.querySelector("label[for=enable_init_image]").innerHTML;
    alert(`Error: one of "${gen_label}" or "${initimg_label}" must be set`);
    // BUGFIX: previously fell through and submitted the invalid request anyway.
    return;
  }
  formData.initimg_name = formData.initimg.name;
  formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null;
  // Evaluate all checkboxes (unchecked boxes are absent from FormData)
  let checkboxes = form.querySelectorAll('input[type=checkbox]');
  checkboxes.forEach(function (checkbox) {
    if (checkbox.checked) {
      formData[checkbox.name] = 'true';
    }
  });
  let strength = formData.strength;
  // img2img only runs roughly strength * steps denoising steps
  let totalSteps = formData.initimg ? Math.floor(strength * formData.steps) : formData.steps;
  let showProgressImages = formData.progress_images;
  // Initialize the progress bar
  initProgress(totalSteps, showProgressImages);
  // POST, use response to listen for events
  fetch(form.action, {
    method: form.method,
    headers: new Headers({'content-type': 'application/json'}),
    body: JSON.stringify(formData),
  })
  .then(response => response.json())
  .then(data => {
    var jobId = data.jobId;
    socket.emit('join_room', { 'room': jobId });
  });
  // Disable the form until the job finishes or is canceled (see resetForm).
  form.querySelector('fieldset').setAttribute('disabled','');
}
// Toggle the enclosing <fieldset> of a legend checkbox: checked enables the
// section, unchecked disables it.
// event - a change event (or synthetic { target }) whose target is the checkbox.
function fieldSetEnableChecked(event) {
  // BUGFIX: `cb` and `fields` were implicit globals (throws in strict mode,
  // leaks state between calls otherwise).
  const cb = event.target;
  const fields = cb.closest('fieldset');
  fields.disabled = !cb.checked;
}
// Socket listeners
socket.on('job_started', (data) => {});

// A finished image arrived: add it to the gallery and clear the progress bar.
socket.on('dream_result', (data) => {
  const dreamId = data.dreamId;
  const dreamRequest = data.dreamRequest;
  const src = 'api/images/' + dreamId;
  // A new result shifts the paging offset used by loadPriorResults().
  priorResultsLoadState.offset += 1;
  appendOutput(src, dreamRequest.seed, dreamRequest);
  resetProgress(false);
});

// Per-step progress for generation and upscaling phases.
socket.on('dream_progress', (data) => {
  // TODO: it'd be nice if we could get a seed reported here, but the generator would need to be updated
  const step = data.step;
  const totalSteps = data.totalSteps;
  const dreamId = data.dreamId;
  const progressType = data.progressType;
  if (progressType === 'GENERATION') {
    const src = data.hasProgressImage
      ? 'api/intermediates/' + dreamId + '/' + step
      : null;
    setProgress(step, totalSteps, src);
  } else if (progressType === 'UPSCALING_STARTED') {
    // step and totalSteps are used for upscale count on this message
    document.getElementById("processing_cnt").textContent = step;
    document.getElementById("processing_total").textContent = totalSteps;
    document.getElementById("scaling-inprocess-message").style.display = "block";
  } else if (progressType === 'UPSCALING_DONE') {
    document.getElementById("scaling-inprocess-message").style.display = "none";
  }
});

socket.on('job_canceled', (data) => {
  resetForm();
  resetProgress();
});

// Job finished: leave its room and re-enable the UI.
socket.on('job_done', (data) => {
  // BUGFIX: jobId was an implicit global (throws in strict mode).
  const jobId = data.jobId;
  socket.emit('leave_room', { 'room': jobId });
  resetForm();
  resetProgress();
});
// Wire up all UI event handlers once the DOM is ready.
window.onload = async () => {
// Enter (without Shift) in the prompt box submits immediately.
document.querySelector("#prompt").addEventListener("keydown", (e) => {
if (e.key === "Enter" && !e.shiftKey) {
const form = e.target.form;
generateSubmit(form);
}
});
// Intercept normal form submission and go through generateSubmit instead.
document.querySelector("#generate-form").addEventListener('submit', (e) => {
e.preventDefault();
const form = e.target;
generateSubmit(form);
});
// Persist field values on every change so they survive reloads.
document.querySelector("#generate-form").addEventListener('change', (e) => {
saveFields(e.target.form);
});
document.querySelector("#reset-seed").addEventListener('click', (e) => {
document.querySelector("#seed").value = 0;
saveFields(e.target.form);
});
document.querySelector("#reset-all").addEventListener('click', (e) => {
clearFields(e.target.form);
});
document.querySelector("#remove-image").addEventListener('click', (e) => {
// NOTE(review): relies on the implicit global created from the element's
// id="initimg" — confirm this is intentional rather than a missing lookup.
initimg.value=null;
});
// Restore any saved field values from localStorage.
loadFields(document.querySelector("#generate-form"));
// Cancel button and Escape key both hit the cancel endpoint (best-effort).
document.querySelector('#cancel-button').addEventListener('click', () => {
fetch('/api/cancel').catch(e => {
console.error(e);
});
});
document.documentElement.addEventListener('keydown', (e) => {
if (e.key === "Escape")
fetch('/api/cancel').catch(err => {
console.error(err);
});
});
// Hide face-restoration controls when the server reports no GFPGAN model.
if (!config.gfpgan_model_exists) {
document.querySelector("#gfpgan").style.display = 'none';
}
// Infinite scroll: fetch older results when the page bottom is reached.
window.addEventListener("scroll", () => {
if ((window.innerHeight + window.pageYOffset) >= document.body.offsetHeight) {
loadPriorResults();
}
});
// Enable/disable forms by checkboxes
document.querySelectorAll("legend > input[type=checkbox]").forEach(function(cb) {
cb.addEventListener('change', fieldSetEnableChecked);
fieldSetEnableChecked({ target: cb})
});
// Load some of the previous results
loadPriorResults();
// Image drop/upload WIP
/*
let drop = document.getElementById('dropper');
function ondrop(event) {
let dreamData = event.dataTransfer.getData('dream');
if (dreamData) {
var dream = JSON.parse(decodeURIComponent(dreamData));
alert(dream.dreamId);
}
};
function ondragenter(event) {
event.preventDefault();
};
function ondragover(event) {
event.preventDefault();
};
function ondragleave(event) {
}
drop.addEventListener('drop', ondrop);
drop.addEventListener('dragenter', ondragenter);
drop.addEventListener('dragover', ondragover);
drop.addEventListener('dragleave', ondragleave);
*/
};

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.1 KiB

View File

@ -1,152 +0,0 @@
/* Stylesheet for the legacy Dream Server web UI. */
/* Base typography */
* {
font-family: 'Arial';
font-size: 100%;
}
body {
font-size: 1em;
}
textarea {
font-size: 0.95em;
}
/* Center the main columns at a fixed max width */
header, form, #progress-section {
margin-left: auto;
margin-right: auto;
max-width: 1024px;
text-align: center;
}
/* Form controls */
fieldset {
border: none;
line-height: 2.2em;
}
select, input {
margin-right: 10px;
padding: 2px;
}
input[type=submit] {
background-color: #666;
color: white;
}
input[type=checkbox] {
margin-right: 0px;
width: 20px;
height: 20px;
vertical-align: middle;
}
input#seed {
margin-right: 0px;
}
div {
padding: 10px 10px 10px 10px;
}
header {
margin-bottom: 16px;
}
header h1 {
margin-bottom: 0;
font-size: 2em;
}
/* Prompt box + submit button row */
#search-box {
display: flex;
}
/* Hidden by default; shown by JS while upscaling is in progress */
#scaling-inprocess-message {
font-weight: bold;
font-style: italic;
display: none;
}
#prompt {
flex-grow: 1;
padding: 5px 10px 5px 10px;
border: 1px solid #999;
outline: none;
}
#submit {
padding: 5px 10px 5px 10px;
border: 1px solid #999;
}
#reset-all, #remove-image {
margin-top: 12px;
font-size: 0.8em;
background-color: pink;
border: 1px solid #999;
border-radius: 4px;
}
/* Results gallery */
#results {
text-align: center;
margin: auto;
padding-top: 10px;
}
#results figure {
display: inline-block;
margin: 10px;
}
#results figcaption {
font-size: 0.8em;
padding: 3px;
color: #888;
cursor: pointer;
}
#results img {
border-radius: 5px;
object-fit: cover;
}
#fieldset-config {
line-height:2em;
background-color: #F0F0F0;
}
input[type="number"] {
width: 60px;
}
#seed {
width: 150px;
}
button#reset-seed {
font-size: 1.7em;
background: #efefef;
border: 1px solid #999;
border-radius: 4px;
line-height: 0.8;
margin: 0 10px 0 0;
padding: 0 5px 3px;
vertical-align: middle;
}
label {
white-space: nowrap;
}
/* Progress UI — hidden until a job starts (JS toggles display) */
#progress-section {
display: none;
}
#progress-image {
width: 30vh;
height: 30vh;
}
#cancel-button {
cursor: pointer;
color: red;
}
/* Background tints for the form sections */
#basic-parameters {
background-color: #EEEEEE;
}
#txt2img {
background-color: #DCDCDC;
}
#variations {
background-color: #EEEEEE;
}
#img2img {
background-color: #DCDCDC;
}
#gfpgan {
background-color: #EEEEEE;
}
/* NOTE(review): duplicate #progress-section selector — this later rule adds
   the background on top of the display:none rule above. */
#progress-section {
background-color: #F5F5F5;
}
.section-header {
text-align: left;
font-weight: bold;
padding: 0 0 0 0;
}
/* Hide the placeholder once any result figure exists alongside it */
#no-results-message:not(:only-child) {
display: none;
}

View File

@ -1,137 +0,0 @@
<html lang="en">
<head>
<title>Stable Diffusion Dream Server</title>
<meta charset="utf-8">
<link rel="icon" type="image/x-icon" href="static/legacy_web/favicon.ico" />
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="static/legacy_web/index.css">
<script src="config.js"></script>
<script src="static/legacy_web/index.js"></script>
</head>
<body>
<header>
<h1>Stable Diffusion Dream Server</h1>
<div id="about">
For news and support for this web service, visit our <a href="http://github.com/lstein/stable-diffusion">GitHub site</a>
</div>
</header>
<main>
<!-- Main generation form; submission is intercepted by index.js -->
<form id="generate-form" method="post" action="#">
<fieldset id="txt2img">
<div id="search-box">
<textarea rows="3" id="prompt" name="prompt"></textarea>
<input type="submit" id="submit" value="Generate">
</div>
</fieldset>
<fieldset id="fieldset-config">
<div class="section-header">Basic options</div>
<label for="iterations">Images to generate:</label>
<input value="1" type="number" id="iterations" name="iterations" size="4">
<label for="steps">Steps:</label>
<input value="50" type="number" id="steps" name="steps">
<label for="cfg_scale">Cfg Scale:</label>
<input value="7.5" type="number" id="cfg_scale" name="cfg_scale" step="any">
<label for="sampler_name">Sampler:</label>
<select id="sampler_name" name="sampler_name" value="k_lms">
<option value="ddim">DDIM</option>
<option value="plms">PLMS</option>
<option value="k_lms" selected>KLMS</option>
<option value="k_dpm_2">KDPM_2</option>
<option value="k_dpm_2_a">KDPM_2A</option>
<option value="k_dpmpp_2">KDPMPP_2</option>
<option value="k_dpmpp_2_a">KDPMPP_2A</option>
<option value="k_euler">KEULER</option>
<option value="k_euler_a">KEULER_A</option>
<option value="k_heun">KHEUN</option>
</select>
<input type="checkbox" name="seamless" id="seamless">
<label for="seamless">Seamless circular tiling</label>
<br>
<label title="Set to multiple of 64" for="width">Width:</label>
<select id="width" name="width" value="512">
<option value="64">64</option> <option value="128">128</option>
<option value="192">192</option> <option value="256">256</option>
<option value="320">320</option> <option value="384">384</option>
<option value="448">448</option> <option value="512" selected>512</option>
<option value="576">576</option> <option value="640">640</option>
<option value="704">704</option> <option value="768">768</option>
<option value="832">832</option> <option value="896">896</option>
<option value="960">960</option> <option value="1024">1024</option>
</select>
<label title="Set to multiple of 64" for="height">Height:</label>
<select id="height" name="height" value="512">
<option value="64">64</option> <option value="128">128</option>
<option value="192">192</option> <option value="256">256</option>
<option value="320">320</option> <option value="384">384</option>
<option value="448">448</option> <option value="512" selected>512</option>
<option value="576">576</option> <option value="640">640</option>
<option value="704">704</option> <option value="768">768</option>
<option value="832">832</option> <option value="896">896</option>
<option value="960">960</option> <option value="1024">1024</option>
</select>
<label title="Set to -1 for random seed" for="seed">Seed:</label>
<input value="-1" type="number" id="seed" name="seed">
<button type="button" id="reset-seed">&olarr;</button>
<input type="checkbox" name="progress_images" id="progress_images">
<label for="progress_images">Display in-progress images (slower)</label>
<div>
<label title="If > 0, adds thresholding to restrict values for k-diffusion samplers (0 disables)" for="threshold">Threshold:</label>
<input value="0" type="number" id="threshold" name="threshold" step="0.1" min="0">
<label title="Perlin: optional 0-1 value adds a percentage of perlin noise to the initial noise" for="perlin">Perlin:</label>
<input value="0" type="number" id="perlin" name="perlin" step="0.01" min="0" max="1">
<button type="button" id="reset-all">Reset to Defaults</button>
</div>
<span id="variations">
<label title="If > 0, generates variations on the initial seed instead of random seeds per iteration. Must be between 0 and 1. Higher values will be more different." for="variation_amount">Variation amount (0 to disable):</label>
<input value="0" type="number" id="variation_amount" name="variation_amount" step="0.01" min="0" max="1">
<label title="list of variations to apply, in the format `seed:weight,seed:weight,..." for="with_variations">With variations (seed:weight,seed:weight,...):</label>
<input value="" type="text" id="with_variations" name="with_variations">
</span>
</fieldset>
<fieldset id="img2img">
<div class="section-header">Image-to-image options</div>
<label title="Upload an image to use img2img" for="initimg">Initial image:</label>
<input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
<button type="button" id="remove-image">Remove Image</button>
<br>
<label for="strength">Img2Img Strength:</label>
<input value="0.75" type="number" id="strength" name="strength" step="0.01" min="0" max="1">
<input type="checkbox" id="fit" name="fit" checked>
<label title="Rescale image to fit within requested width and height" for="fit">Fit to width/height</label>
</fieldset>
<fieldset id="gfpgan">
<div class="section-header">Post-processing options</div>
<label title="Strength of the gfpgan (face fixing) algorithm." for="facetool_strength">GPFGAN Strength (0 to disable):</label>
<input value="0.0" min="0" max="1" type="number" id="facetool_strength" name="facetool_strength" step="0.1">
<label title="Upscaling to perform using ESRGAN." for="upscale_level">Upscaling Level</label>
<select id="upscale_level" name="upscale_level" value="">
<option value="" selected>None</option>
<option value="2">2x</option>
<option value="4">4x</option>
</select>
<label title="Strength of the esrgan (upscaling) algorithm." for="upscale_strength">Upscale Strength:</label>
<input value="0.75" min="0" max="1" type="number" id="upscale_strength" name="upscale_strength" step="0.05">
</fieldset>
</form>
<br>
<!-- Progress UI, shown by JS while a job runs -->
<section id="progress-section">
<div id="progress-container">
<progress id="progress-bar" value="0" max="1"></progress>
<span id="cancel-button" title="Cancel">&#10006;</span>
<br>
<img id="progress-image" src='data:image/svg+xml,<svg xmlns="http://www.w3.org/2000/svg"/>'>
<div id="scaling-inprocess-message">
<i><span>Postprocessing...</span><span id="processing_cnt">1/3</span></i>
</div>
<!-- BUGFIX: was a stray </span>; this div closes #progress-container -->
</div>
</section>
<div id="results">
<div id="no-results-message">
<i><p>No results...</p></i>
</div>
</div>
</main>
</body>
</html>

View File

@ -1,213 +0,0 @@
// Read a File/Blob and resolve with its contents as a base64 data: URL.
// Rejects with the FileReader error if reading fails.
function toBase64(file) {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => resolve(reader.result);
    reader.onerror = (error) => reject(error);
    reader.readAsDataURL(file);
  });
}
// Prepend a generated image to the #results gallery as a <figure> whose
// caption, when clicked, restores the generation settings into the form.
// src - image URL; seed - seed of this image; config - generation settings.
function appendOutput(src, seed, config) {
  const outputNode = document.createElement("figure");
  // Accumulate the variation chain: existing with_variations plus this
  // image's own seed:amount pair when variation_amount is active.
  let variations = config.with_variations;
  if (config.variation_amount > 0) {
    variations = (variations ? variations + ',' : '') + seed + ':' + config.variation_amount;
  }
  // When variations are in play the "base" seed is the request seed, not
  // the per-image seed.
  const usedVariations = config.with_variations || config.variation_amount > 0;
  const baseseed = usedVariations ? config.seed : seed;
  const altText = `${baseseed} | ${variations ? variations + ' | ' : ''}${config.prompt}`;
  // img needs width and height for lazy loading to work
  const figureContents = `
<a href="${src}" target="_blank">
<img src="${src}"
alt="${altText}"
title="${altText}"
loading="lazy"
width="256"
height="256">
</a>
<figcaption>${seed}</figcaption>
`;
  outputNode.innerHTML = figureContents;
  // Reload image config
  const figcaption = outputNode.querySelector('figcaption');
  figcaption.addEventListener('click', () => {
    const form = document.querySelector("#generate-form");
    for (const [name] of new FormData(form)) {
      if (name === 'initimg') continue; // never restore the file input
      form.querySelector(`*[name=${name}]`).value = config[name];
    }
    document.querySelector("#seed").value = baseseed;
    document.querySelector("#with_variations").value = variations || '';
    if (document.querySelector("#variation_amount").value <= 0) {
      document.querySelector("#variation_amount").value = 0.2;
    }
    saveFields(document.querySelector("#generate-form"));
  });
  document.querySelector("#results").prepend(outputNode);
}
// Save every non-file field of the form to localStorage under its name.
function saveFields(form) {
  for (const [name, value] of new FormData(form)) {
    // File inputs yield File objects; skip anything that isn't a primitive.
    if (typeof value === 'object') continue;
    localStorage.setItem(name, value);
  }
}
// Load saved values from localStorage back into matching form fields.
function loadFields(form) {
  for (const [name] of new FormData(form)) {
    const saved = localStorage.getItem(name);
    if (saved === null) continue;
    form.querySelector(`*[name=${name}]`).value = saved;
  }
}
// Clear saved values and reset the form to defaults, preserving the prompt.
function clearFields(form) {
  localStorage.clear();
  const keptPrompt = form.prompt.value;
  form.reset();
  form.prompt.value = keptPrompt;
}
// Empty inline SVG used as a placeholder for the progress image element.
const BLANK_IMAGE_URL = 'data:image/svg+xml,<svg xmlns="http://www.w3.org/2000/svg"/>';
// Submit the generate form and consume the server's newline-delimited JSON
// event stream, updating the progress UI and appending results as they arrive.
// form - the #generate-form element; its action/method drive the fetch.
async function generateSubmit(form) {
  const prompt = document.querySelector("#prompt").value;
  // Convert file data to base64
  let formData = Object.fromEntries(new FormData(form));
  formData.initimg_name = formData.initimg.name;
  formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null;
  let strength = formData.strength;
  // img2img only runs roughly strength * steps denoising steps
  let totalSteps = formData.initimg ? Math.floor(strength * formData.steps) : formData.steps;
  let progressSectionEle = document.querySelector('#progress-section');
  progressSectionEle.style.display = 'initial';
  let progressEle = document.querySelector('#progress-bar');
  progressEle.setAttribute('max', totalSteps);
  let progressImageEle = document.querySelector('#progress-image');
  progressImageEle.src = BLANK_IMAGE_URL;
  progressImageEle.style.display = {}.hasOwnProperty.call(formData, 'progress_images') ? 'initial': 'none';
  // Post as JSON, using Fetch streaming to get results
  fetch(form.action, {
    method: form.method,
    body: JSON.stringify(formData),
  }).then(async (response) => {
    const reader = response.body.getReader();
    // BUGFIX: the old code created a fresh TextDecoder per chunk (mangling
    // multi-byte UTF-8 split across chunks) and JSON.parsed possibly
    // incomplete lines. Use one streaming decoder and buffer partial lines.
    const decoder = new TextDecoder();
    let buffer = '';
    let noOutputs = true;
    // Dispatch one parsed server event to the UI.
    const handleEvent = (data) => {
      if (data.event === 'result') {
        noOutputs = false;
        appendOutput(data.url, data.seed, data.config);
        progressEle.setAttribute('value', 0);
        progressEle.setAttribute('max', totalSteps);
      } else if (data.event === 'upscaling-started') {
        document.getElementById("processing_cnt").textContent=data.processed_file_cnt;
        document.getElementById("scaling-inprocess-message").style.display = "block";
      } else if (data.event === 'upscaling-done') {
        document.getElementById("scaling-inprocess-message").style.display = "none";
      } else if (data.event === 'step') {
        progressEle.setAttribute('value', data.step);
        if (data.url) {
          progressImageEle.src = data.url;
        }
      } else if (data.event === 'canceled') {
        // avoid alerting as if this were an error case
        noOutputs = false;
      }
    };
    while (true) {
      const {value, done} = await reader.read();
      if (done) {
        // Flush any bytes still held by the decoder and any trailing line.
        buffer += decoder.decode();
        for (const line of buffer.split('\n').filter(e => e !== '')) {
          handleEvent(JSON.parse(line));
        }
        progressSectionEle.style.display = 'none';
        break;
      }
      buffer += decoder.decode(value, {stream: true});
      const lines = buffer.split('\n');
      buffer = lines.pop(); // keep the last (possibly partial) line
      for (const line of lines.filter(e => e !== '')) {
        handleEvent(JSON.parse(line));
      }
    }
    // Re-enable form, remove no-results-message
    form.querySelector('fieldset').removeAttribute('disabled');
    document.querySelector("#prompt").value = prompt;
    document.querySelector('progress').setAttribute('value', '0');
    if (noOutputs) {
      alert("Error occurred while generating.");
    }
  });
  // Disable form while generating
  form.querySelector('fieldset').setAttribute('disabled','');
  document.querySelector("#prompt").value = `Generating: "${prompt}"`;
}
// Fetch the server's run log and append each prior result to the gallery.
// Failures are logged to the console and otherwise ignored (best-effort).
async function fetchRunLog() {
  try {
    const response = await fetch('/run_log.json');
    const data = await response.json();
    data.run_log.forEach((item) => {
      appendOutput(item.url, item.seed, item);
    });
  } catch (e) {
    console.error(e);
  }
}
// Wire up all UI event handlers once the DOM is ready, then load prior results.
window.onload = async () => {
// Enter (without Shift) in the prompt box submits immediately.
document.querySelector("#prompt").addEventListener("keydown", (e) => {
if (e.key === "Enter" && !e.shiftKey) {
const form = e.target.form;
generateSubmit(form);
}
});
// Intercept normal form submission and go through generateSubmit instead.
document.querySelector("#generate-form").addEventListener('submit', (e) => {
e.preventDefault();
const form = e.target;
generateSubmit(form);
});
// Persist field values on every change so they survive reloads.
document.querySelector("#generate-form").addEventListener('change', (e) => {
saveFields(e.target.form);
});
// -1 requests a random seed server-side.
document.querySelector("#reset-seed").addEventListener('click', (e) => {
document.querySelector("#seed").value = -1;
saveFields(e.target.form);
});
document.querySelector("#reset-all").addEventListener('click', (e) => {
clearFields(e.target.form);
});
document.querySelector("#remove-image").addEventListener('click', (e) => {
// NOTE(review): relies on the implicit global created from the element's
// id="initimg" — confirm this is intentional rather than a missing lookup.
initimg.value=null;
});
// Restore any saved field values from localStorage.
loadFields(document.querySelector("#generate-form"));
// Cancel button and Escape key both hit the cancel endpoint (best-effort).
document.querySelector('#cancel-button').addEventListener('click', () => {
fetch('/cancel').catch(e => {
console.error(e);
});
});
document.documentElement.addEventListener('keydown', (e) => {
if (e.key === "Escape")
fetch('/cancel').catch(err => {
console.error(err);
});
});
// Hide face-restoration controls when the server reports no GFPGAN model.
if (!config.gfpgan_model_exists) {
document.querySelector("#gfpgan").style.display = 'none';
}
// Populate the gallery with previous results from the run log.
await fetchRunLog()
};