Merge branch 'main' into refactor/model-manager2/loader

This commit is contained in:
Lincoln Stein 2024-02-10 18:52:37 -05:00 committed by GitHub
commit 411ec1ed64
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
45 changed files with 1278 additions and 640 deletions

View File

@ -42,6 +42,7 @@ To use a community workflow, download the the `.json` node graph file and load i
+ [Oobabooga](#oobabooga) + [Oobabooga](#oobabooga)
+ [Prompt Tools](#prompt-tools) + [Prompt Tools](#prompt-tools)
+ [Remote Image](#remote-image) + [Remote Image](#remote-image)
+ [BriaAI Background Remove](#briaai-remove-background)
+ [Remove Background](#remove-background) + [Remove Background](#remove-background)
+ [Retroize](#retroize) + [Retroize](#retroize)
+ [Size Stepper Nodes](#size-stepper-nodes) + [Size Stepper Nodes](#size-stepper-nodes)
@ -434,6 +435,17 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
**Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image **Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image
--------------------------------
### BriaAI Remove Background
**Description**: Implements one click background removal with BriaAI's new version 1.4 model which seems to be be producing better results than any other previous background removal tool.
**Node Link:** https://github.com/blessedcoolant/invoke_bria_rmbg
**View**
<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_bria_rmbg/main/assets/preview.jpg" />
-------------------------------- --------------------------------
### Remove Background ### Remove Background

View File

@ -14,11 +14,19 @@ function is_bin_in_path {
} }
function git_show { function git_show {
git show -s --format='%h %s' $1 git show -s --format=oneline --abbrev-commit "$1" | cat
} }
if [[ -v "VIRTUAL_ENV" ]]; then
# we can't just call 'deactivate' because this function is not exported
# to the environment of this script from the bash process that runs the script
echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
exit -1
fi
cd "$(dirname "$0")" cd "$(dirname "$0")"
echo
echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}" echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}"
echo "The current working directory is $(pwd)" echo "The current working directory is $(pwd)"
read -p "If that looks right, press any key to proceed, or CTRL-C to exit..." read -p "If that looks right, press any key to proceed, or CTRL-C to exit..."
@ -32,13 +40,6 @@ if ! is_bin_in_path python && is_bin_in_path python3; then
} }
fi fi
if [[ -v "VIRTUAL_ENV" ]]; then
# we can't just call 'deactivate' because this function is not exported
# to the environment of this script from the bash process that runs the script
echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
exit -1
fi
VERSION=$( VERSION=$(
cd .. cd ..
python -c "from invokeai.version import __version__ as version; print(version)" python -c "from invokeai.version import __version__ as version; print(version)"
@ -47,38 +48,9 @@ PATCH=""
VERSION="v${VERSION}${PATCH}" VERSION="v${VERSION}${PATCH}"
echo -e "${BGREEN}HEAD${RESET}:" echo -e "${BGREEN}HEAD${RESET}:"
git_show git_show HEAD
echo echo
# ---------------------- FRONTEND ----------------------
pushd ../invokeai/frontend/web >/dev/null
echo
echo "Installing frontend dependencies..."
echo
pnpm i --frozen-lockfile
echo
echo "Building frontend..."
echo
pnpm build
popd
# ---------------------- BACKEND ----------------------
echo
echo "Building wheel..."
echo
# install the 'build' package in the user site packages, if needed
# could be improved by using a temporary venv, but it's tiny and harmless
if [[ $(python -c 'from importlib.util import find_spec; print(find_spec("build") is None)') == "True" ]]; then
pip install --user build
fi
rm -rf ../build
python -m build --wheel --outdir dist/ ../.
# ---------------------- # ----------------------
echo echo
@ -97,16 +69,13 @@ done
mkdir InvokeAI-Installer/lib mkdir InvokeAI-Installer/lib
cp lib/*.py InvokeAI-Installer/lib cp lib/*.py InvokeAI-Installer/lib
# Move the wheel
mv dist/*.whl InvokeAI-Installer/lib/
# Install scripts # Install scripts
# Mac/Linux # Mac/Linux
cp install.sh.in InvokeAI-Installer/install.sh cp install.sh.in InvokeAI-Installer/install.sh
chmod a+x InvokeAI-Installer/install.sh chmod a+x InvokeAI-Installer/install.sh
# Windows # Windows
perl -p -e "s/^set INVOKEAI_VERSION=.*/set INVOKEAI_VERSION=$VERSION/" install.bat.in >InvokeAI-Installer/install.bat cp install.bat.in InvokeAI-Installer/install.bat
cp WinLongPathsEnabled.reg InvokeAI-Installer/ cp WinLongPathsEnabled.reg InvokeAI-Installer/
# Zip everything up # Zip everything up

View File

@ -15,7 +15,6 @@ if "%1" == "use-cache" (
@rem Config @rem Config
@rem The version in the next line is replaced by an up to date release number @rem The version in the next line is replaced by an up to date release number
@rem when create_installer.sh is run. Change the release number there. @rem when create_installer.sh is run. Change the release number there.
set INVOKEAI_VERSION=latest
set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/ set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
set PYTHON_URL=https://www.python.org/downloads/windows/ set PYTHON_URL=https://www.python.org/downloads/windows/

View File

@ -11,7 +11,7 @@ import sys
import venv import venv
from pathlib import Path from pathlib import Path
from tempfile import TemporaryDirectory from tempfile import TemporaryDirectory
from typing import Union from typing import Optional, Tuple
SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100" SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100"
INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"] INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
@ -21,40 +21,20 @@ OS = platform.uname().system
ARCH = platform.uname().machine ARCH = platform.uname().machine
VERSION = "latest" VERSION = "latest"
### Feature flags
# Install the virtualenv into the runtime dir
FF_VENV_IN_RUNTIME = True
# Install the wheel packaged with the installer
FF_USE_LOCAL_WHEEL = True
class Installer: class Installer:
""" """
Deploys an InvokeAI installation into a given path Deploys an InvokeAI installation into a given path
""" """
reqs: list[str] = INSTALLER_REQS
def __init__(self) -> None: def __init__(self) -> None:
self.reqs = INSTALLER_REQS
self.preflight()
if os.getenv("VIRTUAL_ENV") is not None: if os.getenv("VIRTUAL_ENV") is not None:
print("A virtual environment is already activated. Please 'deactivate' before installation.") print("A virtual environment is already activated. Please 'deactivate' before installation.")
sys.exit(-1) sys.exit(-1)
self.bootstrap() self.bootstrap()
self.available_releases = get_github_releases()
def preflight(self) -> None:
"""
Preflight checks
"""
# TODO
# verify python version
# on macOS verify XCode tools are present
# verify libmesa, libglx on linux
# check that the system arch is not i386 (?)
# check that the system has a GPU, and the type of GPU
pass
def mktemp_venv(self) -> TemporaryDirectory: def mktemp_venv(self) -> TemporaryDirectory:
""" """
@ -78,12 +58,9 @@ class Installer:
return venv_dir return venv_dir
def bootstrap(self, verbose: bool = False) -> TemporaryDirectory: def bootstrap(self, verbose: bool = False) -> TemporaryDirectory | None:
""" """
Bootstrap the installer venv with packages required at install time Bootstrap the installer venv with packages required at install time
:return: path to the virtual environment directory that was bootstrapped
:rtype: TemporaryDirectory
""" """
print("Initializing the installer. This may take a minute - please wait...") print("Initializing the installer. This may take a minute - please wait...")
@ -95,39 +72,27 @@ class Installer:
cmd.extend(self.reqs) cmd.extend(self.reqs)
try: try:
res = subprocess.check_output(cmd).decode() # upgrade pip to the latest version to avoid a confusing message
res = upgrade_pip(Path(venv_dir.name))
if verbose: if verbose:
print(res) print(res)
# run the install prerequisites installation
res = subprocess.check_output(cmd).decode()
if verbose:
print(res)
return venv_dir return venv_dir
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
print(e) print(e)
def app_venv(self, path: str = None): def app_venv(self, venv_parent) -> Path:
""" """
Create a virtualenv for the InvokeAI installation Create a virtualenv for the InvokeAI installation
""" """
# explicit venv location venv_dir = venv_parent / ".venv"
# currently unused in normal operation
# useful for testing or special cases
if path is not None:
venv_dir = Path(path)
# experimental / testing
elif not FF_VENV_IN_RUNTIME:
if OS == "Windows":
venv_dir_parent = os.getenv("APPDATA", "~/AppData/Roaming")
elif OS == "Darwin":
# there is no environment variable on macOS to find this
# TODO: confirm this is working as expected
venv_dir_parent = "~/Library/Application Support"
elif OS == "Linux":
venv_dir_parent = os.getenv("XDG_DATA_DIR", "~/.local/share")
venv_dir = Path(venv_dir_parent).expanduser().resolve() / f"InvokeAI/{VERSION}/venv"
# stable / current
else:
venv_dir = self.dest / ".venv"
# Prefer to copy python executables # Prefer to copy python executables
# so that updates to system python don't break InvokeAI # so that updates to system python don't break InvokeAI
@ -141,7 +106,7 @@ class Installer:
return venv_dir return venv_dir
def install( def install(
self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None self, version=None, root: str = "~/invokeai", yes_to_all=False, find_links: Optional[Path] = None
) -> None: ) -> None:
""" """
Install the InvokeAI application into the given runtime path Install the InvokeAI application into the given runtime path
@ -158,15 +123,20 @@ class Installer:
import messages import messages
messages.welcome() messages.welcome(self.available_releases)
default_path = os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve() version = messages.choose_version(self.available_releases)
self.dest = default_path if yes_to_all else messages.dest_path(root)
auto_dest = Path(os.environ.get("INVOKEAI_ROOT", root)).expanduser().resolve()
destination = auto_dest if yes_to_all else messages.dest_path(root)
if destination is None:
print("Could not find or create the destination directory. Installation cancelled.")
sys.exit(0)
# create the venv for the app # create the venv for the app
self.venv = self.app_venv() self.venv = self.app_venv(venv_parent=destination)
self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version) self.instance = InvokeAiInstance(runtime=destination, venv=self.venv, version=version)
# install dependencies and the InvokeAI application # install dependencies and the InvokeAI application
(extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None) (extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
@ -190,7 +160,7 @@ class InvokeAiInstance:
A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported. A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
""" """
def __init__(self, runtime: Path, venv: Path, version: str) -> None: def __init__(self, runtime: Path, venv: Path, version: str = "stable") -> None:
self.runtime = runtime self.runtime = runtime
self.venv = venv self.venv = venv
self.pip = get_pip_from_venv(venv) self.pip = get_pip_from_venv(venv)
@ -199,6 +169,7 @@ class InvokeAiInstance:
set_sys_path(venv) set_sys_path(venv)
os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve()) os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve()) os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())
upgrade_pip(venv)
def get(self) -> tuple[Path, Path]: def get(self) -> tuple[Path, Path]:
""" """
@ -212,54 +183,7 @@ class InvokeAiInstance:
def install(self, extra_index_url=None, optional_modules=None, find_links=None): def install(self, extra_index_url=None, optional_modules=None, find_links=None):
""" """
Install this instance, including dependencies and the app itself Install the package from PyPi.
:param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
:type extra_index_url: str
"""
import messages
# install torch first to ensure the correct version gets installed.
# works with either source or wheel install with negligible impact on installation times.
messages.simple_banner("Installing PyTorch :fire:")
self.install_torch(extra_index_url, find_links)
messages.simple_banner("Installing the InvokeAI Application :art:")
self.install_app(extra_index_url, optional_modules, find_links)
def install_torch(self, extra_index_url=None, find_links=None):
"""
Install PyTorch
"""
from plumbum import FG, local
pip = local[self.pip]
(
pip[
"install",
"--require-virtualenv",
"numpy==1.26.3", # choose versions that won't be uninstalled during phase 2
"urllib3~=1.26.0",
"requests~=2.28.0",
"torch==2.1.2",
"torchmetrics==0.11.4",
"torchvision==0.16.2",
"--force-reinstall",
"--find-links" if find_links is not None else None,
find_links,
"--extra-index-url" if extra_index_url is not None else None,
extra_index_url,
]
& FG
)
def install_app(self, extra_index_url=None, optional_modules=None, find_links=None):
"""
Install the application with pip.
Supports installation from PyPi or from a local source directory.
:param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes. :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
:type extra_index_url: str :type extra_index_url: str
@ -271,53 +195,52 @@ class InvokeAiInstance:
:type find_links: Path :type find_links: Path
""" """
## this only applies to pypi installs; TODO actually use this import messages
if self.version == "pre":
# not currently used, but may be useful for "install most recent version" option
if self.version == "prerelease":
version = None version = None
pre = "--pre" pre_flag = "--pre"
elif self.version == "stable":
version = None
pre_flag = None
else: else:
version = self.version version = self.version
pre = None pre_flag = None
## TODO: only local wheel will be installed as of now; support for --version arg is TODO src = "invokeai"
if FF_USE_LOCAL_WHEEL: if optional_modules:
# if no wheel, try to do a source install before giving up src += optional_modules
try: if version:
src = str(next(Path(__file__).parent.glob("InvokeAI-*.whl"))) src += f"=={version}"
except StopIteration:
try:
src = Path(__file__).parents[1].expanduser().resolve()
# if the above directory contains one of these files, we'll do a source install
next(src.glob("pyproject.toml"))
next(src.glob("invokeai"))
except StopIteration:
print("Unable to find a wheel or perform a source install. Giving up.")
elif version == "source": messages.simple_banner("Installing the InvokeAI Application :art:")
# this makes an assumption about the location of the installer package in the source tree
src = Path(__file__).parents[1].expanduser().resolve()
else:
# will install from PyPi
src = f"invokeai=={version}" if version is not None else "invokeai"
from plumbum import FG, local from plumbum import FG, ProcessExecutionError, local # type: ignore
pip = local[self.pip] pip = local[self.pip]
( pipeline = pip[
pip[
"install", "install",
"--require-virtualenv", "--require-virtualenv",
"--force-reinstall",
"--use-pep517", "--use-pep517",
str(src) + (optional_modules if optional_modules else ""), str(src),
"--find-links" if find_links is not None else None, "--find-links" if find_links is not None else None,
find_links, find_links,
"--extra-index-url" if extra_index_url is not None else None, "--extra-index-url" if extra_index_url is not None else None,
extra_index_url, extra_index_url,
pre, pre_flag,
] ]
& FG
try:
_ = pipeline & FG
except ProcessExecutionError as e:
print(f"Error: {e}")
print(
"Could not install InvokeAI. Please try downloading the latest version of the installer and install again."
) )
sys.exit(1)
def configure(self): def configure(self):
""" """
@ -373,7 +296,6 @@ class InvokeAiInstance:
ext = "bat" if OS == "Windows" else "sh" ext = "bat" if OS == "Windows" else "sh"
# scripts = ['invoke', 'update']
scripts = ["invoke"] scripts = ["invoke"]
for script in scripts: for script in scripts:
@ -408,6 +330,23 @@ def get_pip_from_venv(venv_path: Path) -> str:
return str(venv_path.expanduser().resolve() / pip) return str(venv_path.expanduser().resolve() / pip)
def upgrade_pip(venv_path: Path) -> str | None:
"""
Upgrade the pip executable in the given virtual environment
"""
python = "Scripts\\python.exe" if OS == "Windows" else "bin/python"
python = str(venv_path.expanduser().resolve() / python)
try:
result = subprocess.check_output([python, "-m", "pip", "install", "--upgrade", "pip"]).decode()
except subprocess.CalledProcessError as e:
print(e)
result = None
return result
def set_sys_path(venv_path: Path) -> None: def set_sys_path(venv_path: Path) -> None:
""" """
Given a path to a virtual environment, set the sys.path, in a cross-platform fashion, Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
@ -431,7 +370,43 @@ def set_sys_path(venv_path: Path) -> None:
sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve())) sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve()))
def get_torch_source() -> (Union[str, None], str): def get_github_releases() -> tuple[list, list] | None:
"""
Query Github for published (pre-)release versions.
Return a tuple where the first element is a list of stable releases and the second element is a list of pre-releases.
Return None if the query fails for any reason.
"""
import requests
## get latest releases using github api
url = "https://api.github.com/repos/invoke-ai/InvokeAI/releases"
releases, pre_releases = [], []
try:
res = requests.get(url)
res.raise_for_status()
tag_info = res.json()
for tag in tag_info:
if not tag["prerelease"]:
releases.append(tag["tag_name"].lstrip("v"))
else:
pre_releases.append(tag["tag_name"].lstrip("v"))
except requests.HTTPError as e:
print(f"Error: {e}")
print("Could not fetch version information from GitHub. Please check your network connection and try again.")
return
except Exception as e:
print(f"Error: {e}")
print("An unexpected error occurred while trying to fetch version information from GitHub. Please try again.")
return
releases.sort(reverse=True)
pre_releases.sort(reverse=True)
return releases, pre_releases
def get_torch_source() -> Tuple[str | None, str | None]:
""" """
Determine the extra index URL for pip to use for torch installation. Determine the extra index URL for pip to use for torch installation.
This depends on the OS and the graphics accelerator in use. This depends on the OS and the graphics accelerator in use.
@ -446,23 +421,24 @@ def get_torch_source() -> (Union[str, None], str):
:rtype: list :rtype: list
""" """
from messages import graphical_accelerator from messages import select_gpu
# device can be one of: "cuda", "rocm", "cpu", "idk" # device can be one of: "cuda", "rocm", "cpu", "cuda_and_dml, autodetect"
device = graphical_accelerator() device = select_gpu()
url = None url = None
optional_modules = "[onnx]" optional_modules = "[onnx]"
if OS == "Linux": if OS == "Linux":
if device == "rocm": if device.value == "rocm":
url = "https://download.pytorch.org/whl/rocm5.6" url = "https://download.pytorch.org/whl/rocm5.6"
elif device == "cpu": elif device.value == "cpu":
url = "https://download.pytorch.org/whl/cpu" url = "https://download.pytorch.org/whl/cpu"
if device == "cuda": elif OS == "Windows":
if device.value == "cuda":
url = "https://download.pytorch.org/whl/cu121" url = "https://download.pytorch.org/whl/cu121"
optional_modules = "[xformers,onnx-cuda]" optional_modules = "[xformers,onnx-cuda]"
if device == "cuda_and_dml": if device.value == "cuda_and_dml":
url = "https://download.pytorch.org/whl/cu121" url = "https://download.pytorch.org/whl/cu121"
optional_modules = "[xformers,onnx-directml]" optional_modules = "[xformers,onnx-directml]"

View File

@ -5,10 +5,11 @@ Installer user interaction
import os import os
import platform import platform
from enum import Enum
from pathlib import Path from pathlib import Path
from prompt_toolkit import HTML, prompt from prompt_toolkit import HTML, prompt
from prompt_toolkit.completion import PathCompleter from prompt_toolkit.completion import FuzzyWordCompleter, PathCompleter
from prompt_toolkit.validation import Validator from prompt_toolkit.validation import Validator
from rich import box, print from rich import box, print
from rich.console import Console, Group, group from rich.console import Console, Group, group
@ -35,16 +36,26 @@ else:
console = Console(style=Style(color="grey74", bgcolor="grey19")) console = Console(style=Style(color="grey74", bgcolor="grey19"))
def welcome(): def welcome(available_releases: tuple | None = None) -> None:
@group() @group()
def text(): def text():
if (platform_specific := _platform_specific_help()) != "": if (platform_specific := _platform_specific_help()) is not None:
yield platform_specific yield platform_specific
yield "" yield ""
yield Text.from_markup( yield Text.from_markup(
"Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.", "Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.",
justify="center", justify="center",
) )
if available_releases is not None:
latest_stable = available_releases[0][0]
last_pre = available_releases[1][0]
yield ""
yield Text.from_markup(
f"[red3]🠶[/] Latest stable release (recommended): [b bright_white]{latest_stable}", justify="center"
)
yield Text.from_markup(
f"[red3]🠶[/] Last published pre-release version: [b bright_white]{last_pre}", justify="center"
)
console.rule() console.rule()
print( print(
@ -61,19 +72,31 @@ def welcome():
console.line() console.line()
def confirm_install(dest: Path) -> bool: def choose_version(available_releases: tuple | None = None) -> str:
if dest.exists(): """
print(f":exclamation: Directory {dest} already exists :exclamation:") Prompt the user to choose an Invoke version to install
dest_confirmed = Confirm.ask( """
":stop_sign: (re)install in this location?",
default=False, # short circuit if we couldn't get a version list
# still try to install the latest stable version
if available_releases is None:
return "stable"
console.print(":grey_question: [orange3]Please choose an Invoke version to install.")
choices = available_releases[0] + available_releases[1]
response = prompt(
message=f" <Enter> to install the recommended release ({choices[0]}). <Tab> or type to pick a version: ",
complete_while_typing=True,
completer=FuzzyWordCompleter(choices),
) )
else:
print(f"InvokeAI will be installed in {dest}") console.print(f" Version {choices[0] if response == "" else response} will be installed.")
dest_confirmed = Confirm.ask("Use this location?", default=True)
console.line() console.line()
return dest_confirmed return "stable" if response == "" else response
def user_wants_auto_configuration() -> bool: def user_wants_auto_configuration() -> bool:
@ -109,7 +132,23 @@ def user_wants_auto_configuration() -> bool:
return choice.lower().startswith("a") return choice.lower().startswith("a")
def dest_path(dest=None) -> Path: def confirm_install(dest: Path) -> bool:
if dest.exists():
print(f":stop_sign: Directory {dest} already exists!")
print(" Is this location correct?")
default = False
else:
print(f":file_folder: InvokeAI will be installed in {dest}")
default = True
dest_confirmed = Confirm.ask(" Please confirm:", default=default)
console.line()
return dest_confirmed
def dest_path(dest=None) -> Path | None:
""" """
Prompt the user for the destination path and create the path Prompt the user for the destination path and create the path
@ -124,25 +163,21 @@ def dest_path(dest=None) -> Path:
else: else:
dest = Path.cwd().expanduser().resolve() dest = Path.cwd().expanduser().resolve()
prev_dest = init_path = dest prev_dest = init_path = dest
dest_confirmed = False
dest_confirmed = confirm_install(dest)
while not dest_confirmed: while not dest_confirmed:
# if the given destination already exists, the starting point for browsing is its parent directory. browse_start = (dest or Path.cwd()).expanduser().resolve()
# the user may have made a typo, or otherwise wants to place the root dir next to an existing one.
# if the destination dir does NOT exist, then the user must have changed their mind about the selection.
# since we can't read their mind, start browsing at Path.cwd().
browse_start = (prev_dest.parent if prev_dest.exists() else Path.cwd()).expanduser().resolve()
path_completer = PathCompleter( path_completer = PathCompleter(
only_directories=True, only_directories=True,
expanduser=True, expanduser=True,
get_paths=lambda: [browse_start], # noqa: B023 get_paths=lambda: [str(browse_start)], # noqa: B023
# get_paths=lambda: [".."].extend(list(browse_start.iterdir())) # get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
) )
console.line() console.line()
console.print(f"[orange3]Please select the destination directory for the installation:[/] \\[{browse_start}]: ")
console.print(f":grey_question: [orange3]Please select the install destination:[/] \\[{browse_start}]: ")
selected = prompt( selected = prompt(
">>> ", ">>> ",
complete_in_thread=True, complete_in_thread=True,
@ -155,6 +190,7 @@ def dest_path(dest=None) -> Path:
) )
prev_dest = dest prev_dest = dest
dest = Path(selected) dest = Path(selected)
console.line() console.line()
dest_confirmed = confirm_install(dest.expanduser().resolve()) dest_confirmed = confirm_install(dest.expanduser().resolve())
@ -182,41 +218,45 @@ def dest_path(dest=None) -> Path:
console.rule("Goodbye!") console.rule("Goodbye!")
def graphical_accelerator(): class GpuType(Enum):
CUDA = "cuda"
CUDA_AND_DML = "cuda_and_dml"
ROCM = "rocm"
CPU = "cpu"
AUTODETECT = "autodetect"
def select_gpu() -> GpuType:
""" """
Prompt the user to select the graphical accelerator in their system Prompt the user to select the GPU driver
This does not validate user's choices (yet), but only offers choices
valid for the platform.
CUDA is the fallback.
We may be able to detect the GPU driver by shelling out to `modprobe` or `lspci`,
but this is not yet supported or reliable. Also, some users may have exotic preferences.
""" """
if ARCH == "arm64" and OS != "Darwin": if ARCH == "arm64" and OS != "Darwin":
print(f"Only CPU acceleration is available on {ARCH} architecture. Proceeding with that.") print(f"Only CPU acceleration is available on {ARCH} architecture. Proceeding with that.")
return "cpu" return GpuType.CPU
nvidia = ( nvidia = (
"an [gold1 b]NVIDIA[/] GPU (using CUDA™)", "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
"cuda", GpuType.CUDA,
) )
nvidia_with_dml = ( nvidia_with_dml = (
"an [gold1 b]NVIDIA[/] GPU (using CUDA™, and DirectML™ for ONNX) -- ALPHA", "an [gold1 b]NVIDIA[/] GPU (using CUDA™, and DirectML™ for ONNX) -- ALPHA",
"cuda_and_dml", GpuType.CUDA_AND_DML,
) )
amd = ( amd = (
"an [gold1 b]AMD[/] GPU (using ROCm™)", "an [gold1 b]AMD[/] GPU (using ROCm™)",
"rocm", GpuType.ROCM,
) )
cpu = ( cpu = (
"no compatible GPU, or specifically prefer to use the CPU", "Do not install any GPU support, use CPU for generation (slow)",
"cpu", GpuType.CPU,
) )
idk = ( autodetect = (
"I'm not sure what to choose", "I'm not sure what to choose",
"idk", GpuType.AUTODETECT,
) )
options = []
if OS == "Windows": if OS == "Windows":
options = [nvidia, nvidia_with_dml, cpu] options = [nvidia, nvidia_with_dml, cpu]
if OS == "Linux": if OS == "Linux":
@ -230,7 +270,7 @@ def graphical_accelerator():
return options[0][1] return options[0][1]
# "I don't know" is always added the last option # "I don't know" is always added the last option
options.append(idk) options.append(autodetect) # type: ignore
options = {str(i): opt for i, opt in enumerate(options, 1)} options = {str(i): opt for i, opt in enumerate(options, 1)}
@ -265,9 +305,9 @@ def graphical_accelerator():
), ),
) )
if options[choice][1] == "idk": if options[choice][1] is GpuType.AUTODETECT:
console.print( console.print(
"No problem. We will try to install a version that [i]should[/i] be compatible. :crossed_fingers:" "No problem. We will install CUDA support first :crossed_fingers: If Invoke does not detect a GPU, please re-run the installer and select one of the other GPU types."
) )
return options[choice][1] return options[choice][1]
@ -291,7 +331,7 @@ def windows_long_paths_registry() -> None:
""" """
with open(str(Path(__file__).parent / "WinLongPathsEnabled.reg"), "r", encoding="utf-16le") as code: with open(str(Path(__file__).parent / "WinLongPathsEnabled.reg"), "r", encoding="utf-16le") as code:
syntax = Syntax(code.read(), line_numbers=True) syntax = Syntax(code.read(), line_numbers=True, lexer="regedit")
console.print( console.print(
Panel( Panel(
@ -301,7 +341,7 @@ def windows_long_paths_registry() -> None:
"We will now apply a registry fix to enable long paths on Windows. InvokeAI needs this to function correctly. We are asking your permission to modify the Windows Registry on your behalf.", "We will now apply a registry fix to enable long paths on Windows. InvokeAI needs this to function correctly. We are asking your permission to modify the Windows Registry on your behalf.",
"", "",
"This is the change that will be applied:", "This is the change that will be applied:",
syntax, str(syntax),
] ]
) )
), ),
@ -340,7 +380,7 @@ def introduction() -> None:
console.line(2) console.line(2)
def _platform_specific_help() -> str: def _platform_specific_help() -> Text | None:
if OS == "Darwin": if OS == "Darwin":
text = Text.from_markup( text = Text.from_markup(
"""[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/].""" """[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/]."""
@ -354,5 +394,5 @@ def _platform_specific_help() -> str:
[deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]""" [deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]"""
) )
else: else:
text = "" return
return text return text

View File

@ -15,7 +15,7 @@ echo 4. Download and install models
echo 5. Change InvokeAI startup options echo 5. Change InvokeAI startup options
echo 6. Re-run the configure script to fix a broken install or to complete a major upgrade echo 6. Re-run the configure script to fix a broken install or to complete a major upgrade
echo 7. Open the developer console echo 7. Open the developer console
echo 8. Update InvokeAI echo 8. Update InvokeAI (DEPRECATED - please use the installer)
echo 9. Run the InvokeAI image database maintenance script echo 9. Run the InvokeAI image database maintenance script
echo 10. Command-line help echo 10. Command-line help
echo Q - Quit echo Q - Quit
@ -52,7 +52,9 @@ IF /I "%choice%" == "1" (
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment *** echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
call cmd /k call cmd /k
) ELSE IF /I "%choice%" == "8" ( ) ELSE IF /I "%choice%" == "8" (
echo Running invokeai-update... echo UPDATING FROM WITHIN THE APP IS BEING DEPRECATED.
echo Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.
timeout 4
python -m invokeai.frontend.install.invokeai_update python -m invokeai.frontend.install.invokeai_update
) ELSE IF /I "%choice%" == "9" ( ) ELSE IF /I "%choice%" == "9" (
echo Running the db maintenance script... echo Running the db maintenance script...
@ -77,4 +79,3 @@ pause
:ending :ending
exit /b exit /b

View File

@ -90,7 +90,9 @@ do_choice() {
;; ;;
8) 8)
clear clear
printf "Update InvokeAI\n" printf "UPDATING FROM WITHIN THE APP IS BEING DEPRECATED\n"
printf "Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.\n"
sleep 4
python -m invokeai.frontend.install.invokeai_update python -m invokeai.frontend.install.invokeai_update
;; ;;
9) 9)
@ -122,7 +124,7 @@ do_dialog() {
5 "Change InvokeAI startup options" 5 "Change InvokeAI startup options"
6 "Re-run the configure script to fix a broken install or to complete a major upgrade" 6 "Re-run the configure script to fix a broken install or to complete a major upgrade"
7 "Open the developer console" 7 "Open the developer console"
8 "Update InvokeAI" 8 "Update InvokeAI (DEPRECATED - please use the installer)"
9 "Run the InvokeAI image database maintenance script" 9 "Run the InvokeAI image database maintenance script"
10 "Command-line help" 10 "Command-line help"
) )

View File

@ -1,72 +0,0 @@
@echo off
@rem Update InvokeAI to a given release tag or branch (default: latest).
setlocal EnableExtensions EnableDelayedExpansion
PUSHD "%~dp0"

set INVOKE_AI_VERSION=latest
set arg=%1
if "%arg%" neq "" (
    if "%arg:~0,2%" equ "/?" (
        echo Usage: update.bat ^<release name or branch^>
        echo Updates InvokeAI to use the indicated version of the code base.
        echo Find the version or branch for the release you want, and pass it as the argument.
        echo For example '.\update.bat v2.2.5' for release 2.2.5.
        echo '.\update.bat main' for the latest development version
        echo.
        echo If no argument provided then will install the most recent release, equivalent to
        echo '.\update.bat latest'
        exit /b
    ) else (
        set INVOKE_AI_VERSION=%arg%
    )
)

set INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive/!INVOKE_AI_VERSION!.zip"
set INVOKE_AI_DEP=https://raw.githubusercontent.com/invoke-ai/InvokeAI/!INVOKE_AI_VERSION!/environments-and-requirements/requirements-base.txt
@rem Bugfix: this URL previously used $INVOKE_AI_VERSION (shell syntax), which
@rem cmd.exe never expands, so the models URL was always literal/invalid.
@rem Use delayed expansion to match INVOKE_AI_SRC and INVOKE_AI_DEP above.
set INVOKE_AI_MODELS=https://raw.githubusercontent.com/invoke-ai/InvokeAI/!INVOKE_AI_VERSION!/configs/INITIAL_MODELS.yaml

@rem Probe the requirements URL first to validate that the version exists.
call curl -I "%INVOKE_AI_DEP%" -fs >.tmp.out
if %errorlevel% neq 0 (
    echo '!INVOKE_AI_VERSION!' is not a known branch name or tag. Please check the version and try again.
    echo "Press any key to continue"
    pause
    exit /b
)
del .tmp.out

echo This script will update InvokeAI and all its dependencies to !INVOKE_AI_SRC!.
echo If you do not want to do this, press control-C now!
pause

call curl -L "%INVOKE_AI_DEP%" > environments-and-requirements/requirements-base.txt
call curl -L "%INVOKE_AI_MODELS%" > configs/INITIAL_MODELS.yaml

call .venv\Scripts\activate.bat
call .venv\Scripts\python -mpip install -r requirements.txt
if %errorlevel% neq 0 (
    echo Installation of requirements failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    pause
    exit /b
)

call .venv\Scripts\python -mpip install !INVOKE_AI_SRC!
if %errorlevel% neq 0 (
    echo Installation of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    pause
    exit /b
)

@rem call .venv\Scripts\invokeai-configure --root=.
@rem if %errorlevel% neq 0 (
@rem    echo Configuration InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
@rem    pause
@rem    exit /b
@rem )

echo InvokeAI has been updated to '%INVOKE_AI_VERSION%'
echo "Press any key to continue"
pause
endlocal

View File

@ -1,58 +0,0 @@
#!/usr/bin/env bash
# Update InvokeAI to a given release tag or branch (default: latest).
set -eu

if [ $# -ge 1 ] && [ "${1:0:2}" == "-h" ]; then
    echo "Usage: update.sh <release>"
    echo "Updates InvokeAI to use the indicated version of the code base."
    echo "Find the version or branch for the release you want, and pass it as the argument."
    echo "For example: update.sh v2.2.5 for release 2.2.5."
    echo "             update.sh main for the current development version."
    echo ""
    echo "If no argument provided then will install the version tagged with 'latest', equivalent to"
    echo "update.sh latest"
    # Bugfix: was `exit -1`, which is out of the valid 0-255 exit-code range
    # and non-portable; exit 1 to signal "called with help/invalid usage".
    exit 1
fi

INVOKE_AI_VERSION=${1:-latest}
INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive/$INVOKE_AI_VERSION.zip"
INVOKE_AI_DEP=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/environments-and-requirements/requirements-base.txt
INVOKE_AI_MODELS=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/configs/INITIAL_MODELS.yaml

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

# Report a failed install step and exit with the failing status.
function _err_exit {
    if test "$1" -ne 0
    then
        echo "Something went wrong while installing InvokeAI and/or its requirements."
        echo "Update cannot continue. Please report this error to https://github.com/invoke-ai/InvokeAI/issues"
        echo -e "Error code $1; Error caught was '$2'"
        read -p "Press any key to exit..."
        # Bugfix: was a bare `exit`, which reported success (status 0) on failure.
        exit "$1"
    fi
}

# Probe the requirements URL first to validate that the version exists.
if ! curl -I "$INVOKE_AI_DEP" -fs >/dev/null; then
    echo \'$INVOKE_AI_VERSION\' is not a known branch name or tag. Please check the version and try again.
    # Bugfix: was a bare `exit` (status 0); an unknown version is an error.
    exit 1
fi

echo This script will update InvokeAI and all its dependencies to version \'$INVOKE_AI_VERSION\'.
echo If you do not want to do this, press control-C now!
read -p "Press any key to continue, or CTRL-C to exit..."

curl -L "$INVOKE_AI_DEP" > environments-and-requirements/requirements-base.txt
curl -L "$INVOKE_AI_MODELS" > configs/INITIAL_MODELS.yaml

. .venv/bin/activate

# Bugfix: under `set -e` a failing pip command aborted the script before the
# `_err_exit $?` on the following line could ever run; attach the handler
# with `||` so it actually fires on failure.
./.venv/bin/python -mpip install -r requirements.txt || _err_exit $? "The pip program failed to install InvokeAI's requirements."

./.venv/bin/python -mpip install $INVOKE_AI_SRC || _err_exit $? "The pip program failed to install InvokeAI."

echo InvokeAI updated to \'$INVOKE_AI_VERSION\'

View File

@ -14,7 +14,7 @@ class SocketIO:
def __init__(self, app: FastAPI): def __init__(self, app: FastAPI):
self.__sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*") self.__sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*")
self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="socket.io") self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="/ws/socket.io")
app.mount("/ws", self.__app) app.mount("/ws", self.__app)
self.__sio.on("subscribe_queue", handler=self._handle_sub_queue) self.__sio.on("subscribe_queue", handler=self._handle_sub_queue)

View File

@ -5,12 +5,12 @@ from typing import Literal
import cv2 import cv2
import numpy as np import numpy as np
import torch import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from PIL import Image from PIL import Image
from pydantic import ConfigDict from pydantic import ConfigDict
from invokeai.app.invocations.primitives import ImageField, ImageOutput from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
from invokeai.backend.util.devices import choose_torch_device from invokeai.backend.util.devices import choose_torch_device

View File

@ -54,6 +54,17 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
else None else None
) )
def stats_cleanup(graph_execution_state_id: str) -> None:
if profiler:
profile_path = profiler.stop()
stats_path = profile_path.with_suffix(".json")
self.__invoker.services.performance_statistics.dump_stats(
graph_execution_state_id=graph_execution_state_id, output_path=stats_path
)
with suppress(GESStatsNotFoundError):
self.__invoker.services.performance_statistics.log_stats(graph_execution_state_id)
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state_id)
while not stop_event.is_set(): while not stop_event.is_set():
try: try:
queue_item = self.__invoker.services.queue.get() queue_item = self.__invoker.services.queue.get()
@ -156,8 +167,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
pass pass
except CanceledException: except CanceledException:
with suppress(GESStatsNotFoundError): stats_cleanup(graph_execution_state.id)
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
pass pass
except Exception as e: except Exception as e:
@ -182,8 +192,6 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
error_type=e.__class__.__name__, error_type=e.__class__.__name__,
error=error, error=error,
) )
with suppress(GESStatsNotFoundError):
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
pass pass
# Check queue to see if this is canceled, and skip if so # Check queue to see if this is canceled, and skip if so
@ -215,21 +223,13 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
error=traceback.format_exc(), error=traceback.format_exc(),
) )
elif is_complete: elif is_complete:
with suppress(GESStatsNotFoundError):
self.__invoker.services.performance_statistics.log_stats(graph_execution_state.id)
self.__invoker.services.events.emit_graph_execution_complete( self.__invoker.services.events.emit_graph_execution_complete(
queue_batch_id=queue_item.session_queue_batch_id, queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id, queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id, queue_id=queue_item.session_queue_id,
graph_execution_state_id=graph_execution_state.id, graph_execution_state_id=graph_execution_state.id,
) )
if profiler: stats_cleanup(graph_execution_state.id)
profile_path = profiler.stop()
stats_path = profile_path.with_suffix(".json")
self.__invoker.services.performance_statistics.dump_stats(
graph_execution_state_id=graph_execution_state.id, output_path=stats_path
)
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
except KeyboardInterrupt: except KeyboardInterrupt:
pass # Log something? KeyboardInterrupt is probably not going to be seen by the processor pass # Log something? KeyboardInterrupt is probably not going to be seen by the processor

View File

@ -113,9 +113,9 @@ class InvocationStatsService(InvocationStatsServiceBase):
del self._stats[graph_execution_state_id] del self._stats[graph_execution_state_id]
del self._cache_stats[graph_execution_state_id] del self._cache_stats[graph_execution_state_id]
except KeyError as e: except KeyError as e:
msg = f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}." raise GESStatsNotFoundError(
logger.error(msg) f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}."
raise GESStatsNotFoundError(msg) from e ) from e
def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary: def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
graph_stats_summary = self._get_graph_summary(graph_execution_state_id) graph_stats_summary = self._get_graph_summary(graph_execution_state_id)
@ -143,9 +143,9 @@ class InvocationStatsService(InvocationStatsServiceBase):
try: try:
cache_stats = self._cache_stats[graph_execution_state_id] cache_stats = self._cache_stats[graph_execution_state_id]
except KeyError as e: except KeyError as e:
msg = f"Attempted to get model cache statistics for unknown graph {graph_execution_state_id}: {e}." raise GESStatsNotFoundError(
logger.error(msg) f"Attempted to get model cache statistics for unknown graph {graph_execution_state_id}: {e}."
raise GESStatsNotFoundError(msg) from e ) from e
return ModelCacheStatsSummary( return ModelCacheStatsSummary(
cache_hits=cache_stats.hits, cache_hits=cache_stats.hits,
@ -161,9 +161,9 @@ class InvocationStatsService(InvocationStatsServiceBase):
try: try:
graph_stats = self._stats[graph_execution_state_id] graph_stats = self._stats[graph_execution_state_id]
except KeyError as e: except KeyError as e:
msg = f"Attempted to get graph statistics for unknown graph {graph_execution_state_id}: {e}." raise GESStatsNotFoundError(
logger.error(msg) f"Attempted to get graph statistics for unknown graph {graph_execution_state_id}: {e}."
raise GESStatsNotFoundError(msg) from e ) from e
return graph_stats.get_graph_stats_summary(graph_execution_state_id) return graph_stats.get_graph_stats_summary(graph_execution_state_id)
@ -171,8 +171,8 @@ class InvocationStatsService(InvocationStatsServiceBase):
try: try:
graph_stats = self._stats[graph_execution_state_id] graph_stats = self._stats[graph_execution_state_id]
except KeyError as e: except KeyError as e:
msg = f"Attempted to get node statistics for unknown graph {graph_execution_state_id}: {e}." raise GESStatsNotFoundError(
logger.error(msg) f"Attempted to get node statistics for unknown graph {graph_execution_state_id}: {e}."
raise GESStatsNotFoundError(msg) from e ) from e
return graph_stats.get_node_stats_summaries() return graph_stats.get_node_stats_summaries()

View File

@ -2,7 +2,7 @@
import copy import copy
import itertools import itertools
from typing import Annotated, Any, Optional, Union, get_args, get_origin, get_type_hints from typing import Annotated, Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints
import networkx as nx import networkx as nx
from pydantic import BaseModel, ConfigDict, field_validator, model_validator from pydantic import BaseModel, ConfigDict, field_validator, model_validator
@ -141,6 +141,16 @@ def are_connections_compatible(
return are_connection_types_compatible(from_node_field, to_node_field) return are_connection_types_compatible(from_node_field, to_node_field)
T = TypeVar("T")


def copydeep(obj: T) -> T:
    """Deep-copies an object. If it is a pydantic model, use the model's copy method."""
    # Pydantic models know how to duplicate themselves efficiently; everything
    # else falls back to the generic stdlib deep copy.
    if not isinstance(obj, BaseModel):
        return copy.deepcopy(obj)
    return obj.model_copy(deep=True)
class NodeAlreadyInGraphError(ValueError): class NodeAlreadyInGraphError(ValueError):
pass pass
@ -1118,17 +1128,22 @@ class GraphExecutionState(BaseModel):
def _prepare_inputs(self, node: BaseInvocation): def _prepare_inputs(self, node: BaseInvocation):
input_edges = [e for e in self.execution_graph.edges if e.destination.node_id == node.id] input_edges = [e for e in self.execution_graph.edges if e.destination.node_id == node.id]
# Inputs must be deep-copied, else if a node mutates the object, other nodes that get the same input
# will see the mutation.
if isinstance(node, CollectInvocation): if isinstance(node, CollectInvocation):
output_collection = [ output_collection = [
getattr(self.results[edge.source.node_id], edge.source.field) copydeep(getattr(self.results[edge.source.node_id], edge.source.field))
for edge in input_edges for edge in input_edges
if edge.destination.field == "item" if edge.destination.field == "item"
] ]
node.collection = output_collection node.collection = output_collection
else: else:
for edge in input_edges: for edge in input_edges:
output_value = getattr(self.results[edge.source.node_id], edge.source.field) setattr(
setattr(node, edge.destination.field, output_value) node,
edge.destination.field,
copydeep(getattr(self.results[edge.source.node_id], edge.source.field)),
)
# TODO: Add API for modifying underlying graph that checks if the change will be valid given the current execution state # TODO: Add API for modifying underlying graph that checks if the change will be valid given the current execution state
def _is_edge_valid(self, edge: Edge) -> bool: def _is_edge_valid(self, edge: Edge) -> bool:

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018-2022 BasicSR Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,18 @@
"""
Adapted from https://github.com/XPixelGroup/BasicSR
License: Apache-2.0
As of Feb 2024, `basicsr` appears to be unmaintained. It imports a function from `torchvision` that is removed in
`torchvision` 0.17. Here is the deprecation warning:
UserWarning: The torchvision.transforms.functional_tensor module is deprecated in 0.15 and will be **removed in
0.17**. Please don't rely on it. You probably just need to use APIs in torchvision.transforms.functional or in
torchvision.transforms.v2.functional.
As a result, a dependency on `basicsr` means we cannot keep our `torchvision` dependency up to date.
Because we only rely on a single class `RRDBNet` from `basicsr`, we've copied the relevant code here and removed the
dependency on `basicsr`.
The code is almost unchanged, only a few type annotations have been added. The license is also copied.
"""

View File

@ -0,0 +1,75 @@
from typing import Type
import torch
from torch import nn as nn
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
@torch.no_grad()
def default_init_weights(
    module_list: list[nn.Module] | nn.Module, scale: float = 1, bias_fill: float = 0, **kwargs
) -> None:
    """Initialize network weights in place (Kaiming-normal for conv/linear).

    Vendored from BasicSR; runs under ``torch.no_grad()`` so the fills and
    scalings are not tracked by autograd.

    Args:
        module_list (list[nn.Module] | nn.Module): Modules to be initialized.
        scale (float): Scale initialized weights, especially for residual
            blocks. Default: 1.
        bias_fill (float): The value to fill bias. Default: 0
        kwargs (dict): Other arguments forwarded to ``init.kaiming_normal_``.
    """
    # Accept a single module for convenience; normalize to a list.
    if not isinstance(module_list, list):
        module_list = [module_list]
    for module in module_list:
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, **kwargs)
                # Down-scale weights (e.g. 0.1 for residual blocks) to damp the
                # initial residual contribution.
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, _BatchNorm):
                # BatchNorm scale starts at identity (1); only the bias is filled.
                init.constant_(m.weight, 1)
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
def make_layer(basic_block: Type[nn.Module], num_basic_block: int, **kwarg) -> nn.Sequential:
    """Stack ``num_basic_block`` freshly-constructed copies of a block class.

    Args:
        basic_block (Type[nn.Module]): nn.Module class for basic block.
        num_basic_block (int): number of blocks.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    # Each iteration constructs an independent instance (no shared parameters).
    blocks = (basic_block(**kwarg) for _ in range(num_basic_block))
    return nn.Sequential(*blocks)
# TODO: may write a cpp file
def pixel_unshuffle(x: torch.Tensor, scale: int) -> torch.Tensor:
    """Pixel unshuffle: fold each ``scale x scale`` spatial patch into channels.

    Inverse of pixel shuffle — spatial resolution shrinks by ``scale`` while the
    channel count grows by ``scale**2``.

    Args:
        x (Tensor): Input feature with shape (b, c, hh, hw).
        scale (int): Downsample ratio; must evenly divide both hh and hw.

    Returns:
        Tensor: the pixel unshuffled feature of shape
        (b, c * scale**2, hh // scale, hw // scale).
    """
    batch, channels, in_h, in_w = x.size()
    assert in_h % scale == 0 and in_w % scale == 0
    out_h = in_h // scale
    out_w = in_w // scale
    # Split each spatial axis into (output position, intra-patch offset), then
    # move the two offset axes next to the channel axis before flattening.
    expanded = x.view(batch, channels, out_h, scale, out_w, scale)
    reordered = expanded.permute(0, 1, 3, 5, 2, 4)
    return reordered.reshape(batch, channels * (scale**2), out_h, out_w)

View File

@ -0,0 +1,125 @@
import torch
from torch import nn as nn
from torch.nn import functional as F
from .arch_util import default_init_weights, make_layer, pixel_unshuffle
class ResidualDenseBlock(nn.Module):
    """Residual Dense Block.

    Used in RRDB block in ESRGAN. Vendored from BasicSR.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat: int = 64, num_grow_ch: int = 32) -> None:
        super(ResidualDenseBlock, self).__init__()
        # Dense connectivity: conv N sees the input concatenated with all N-1
        # previous intermediate outputs; conv5 projects back to num_feat channels.
        self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
        self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # initialization (small 0.1 scale damps the residual branch at startup)
        default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x1 = self.lrelu(self.conv1(x))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        # Final conv has no activation; output stays at num_feat channels.
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # Empirically, we use 0.2 to scale the residual for better performance
        return x5 * 0.2 + x
class RRDB(nn.Module):
    """Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN. Vendored from BasicSR. Chains three
    ResidualDenseBlocks and wraps them in an outer scaled residual connection.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat: int, num_grow_ch: int = 32) -> None:
        super(RRDB, self).__init__()
        self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.rdb1(x)
        out = self.rdb2(out)
        out = self.rdb3(out)
        # Empirically, we use 0.2 to scale the residual for better performance
        return out * 0.2 + x
class RRDBNet(nn.Module):
    """Network of Residual in Residual Dense Blocks, used in ESRGAN.

    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.

    The architecture natively upsamples by 4x (two nearest-neighbour 2x
    steps). Scales 1 and 2 are supported by first applying pixel-unshuffle
    (the inverse of pixel-shuffle) to the input, which shrinks the spatial
    size and enlarges the channel count before the trunk network runs.

    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        scale (int): Output upscaling factor (1, 2, or 4). Default: 4.
        num_feat (int): Channel number of intermediate features.
            Default: 64
        num_block (int): Block number in the trunk network. Defaults: 23
        num_grow_ch (int): Channels for each growth. Default: 32.
    """

    def __init__(
        self,
        num_in_ch: int,
        num_out_ch: int,
        scale: int = 4,
        num_feat: int = 64,
        num_block: int = 23,
        num_grow_ch: int = 32,
    ) -> None:
        super().__init__()
        self.scale = scale
        # Pixel-unshuffle in forward() multiplies the channel count by
        # scale_factor**2, so widen the first conv's input accordingly.
        if scale == 2:
            num_in_ch *= 4
        elif scale == 1:
            num_in_ch *= 16
        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        # Trunk: num_block RRDBs followed by one fusing convolution.
        self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
        self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        # Upsampling path: two nearest-neighbour 2x steps, each with a conv.
        self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Shrink spatially (and grow channels) so the fixed 4x upsampling
        # path yields a net scale of 1 or 2 when requested.
        if self.scale in (1, 2):
            feat = pixel_unshuffle(x, scale=2 if self.scale == 2 else 4)
        else:
            feat = x
        feat = self.conv_first(feat)
        # Global residual around the RRDB trunk.
        feat = feat + self.conv_body(self.body(feat))
        # Two 2x nearest-neighbour upsampling stages.
        feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode="nearest")))
        feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode="nearest")))
        return self.conv_last(self.lrelu(self.conv_hr(feat)))

View File

@ -7,10 +7,10 @@ import cv2
import numpy as np import numpy as np
import numpy.typing as npt import numpy.typing as npt
import torch import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from cv2.typing import MatLike from cv2.typing import MatLike
from tqdm import tqdm from tqdm import tqdm
from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
from invokeai.backend.util.devices import choose_torch_device from invokeai.backend.util.devices import choose_torch_device
""" """

View File

@ -56,7 +56,7 @@
"nodeEditor": "Knoten Editor", "nodeEditor": "Knoten Editor",
"statusMergingModels": "Modelle zusammenführen", "statusMergingModels": "Modelle zusammenführen",
"ipAdapter": "IP Adapter", "ipAdapter": "IP Adapter",
"controlAdapter": "Control Adapter", "controlAdapter": "Control-Adapter",
"auto": "Automatisch", "auto": "Automatisch",
"controlNet": "ControlNet", "controlNet": "ControlNet",
"imageFailedToLoad": "Kann Bild nicht laden", "imageFailedToLoad": "Kann Bild nicht laden",
@ -75,12 +75,12 @@
"linear": "Linear", "linear": "Linear",
"imagePrompt": "Bild Prompt", "imagePrompt": "Bild Prompt",
"checkpoint": "Checkpoint", "checkpoint": "Checkpoint",
"inpaint": "inpaint", "inpaint": "Inpaint",
"simple": "Einfach", "simple": "Einfach",
"template": "Vorlage", "template": "Vorlage",
"outputs": "Ausgabe", "outputs": "Ausgabe",
"data": "Daten", "data": "Daten",
"safetensors": "Safetensors", "safetensors": "Safe-Tensors",
"outpaint": "Ausmalen", "outpaint": "Ausmalen",
"details": "Details", "details": "Details",
"format": "Format", "format": "Format",
@ -115,7 +115,8 @@
"orderBy": "Ordnen nach", "orderBy": "Ordnen nach",
"saveAs": "Speicher als", "saveAs": "Speicher als",
"updated": "Aktualisiert", "updated": "Aktualisiert",
"copy": "Kopieren" "copy": "Kopieren",
"aboutHeading": "Nutzen Sie Ihre kreative Energie"
}, },
"gallery": { "gallery": {
"generations": "Erzeugungen", "generations": "Erzeugungen",
@ -146,26 +147,30 @@
"deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.", "deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen", "autoAssignBoardOnClick": "Board per Klick automatisch zuweisen",
"noImageSelected": "Kein Bild ausgewählt", "noImageSelected": "Kein Bild ausgewählt",
"problemDeletingImagesDesc": "Eins oder mehr Bilder könnten nicht gelöscht werden", "problemDeletingImagesDesc": "Ein oder mehrere Bilder konnten nicht gelöscht werden",
"starImage": "Bild markieren", "starImage": "Bild markieren",
"assets": "Ressourcen", "assets": "Ressourcen",
"unstarImage": "Markierung Entfernen", "unstarImage": "Markierung Entfernen",
"image": "Bild", "image": "Bild",
"deleteSelection": "Lösche markierte" "deleteSelection": "Lösche markierte",
"dropToUpload": "$t(gallery.drop) zum hochladen",
"dropOrUpload": "$t(gallery.drop) oder hochladen",
"drop": "Ablegen",
"problemDeletingImages": "Problem beim Löschen der Bilder"
}, },
"hotkeys": { "hotkeys": {
"keyboardShortcuts": "Tastenkürzel", "keyboardShortcuts": "Tastenkürzel",
"appHotkeys": "App-Tastenkombinationen", "appHotkeys": "App-Tastenkombinationen",
"generalHotkeys": "Allgemeine Tastenkürzel", "generalHotkeys": "Allgemein",
"galleryHotkeys": "Galerie Tastenkürzel", "galleryHotkeys": "Galerie",
"unifiedCanvasHotkeys": "Unified Canvas Tastenkürzel", "unifiedCanvasHotkeys": "Leinwand",
"invoke": { "invoke": {
"desc": "Ein Bild erzeugen", "desc": "Ein Bild erzeugen",
"title": "Invoke" "title": "Invoke"
}, },
"cancel": { "cancel": {
"title": "Abbrechen", "title": "Abbrechen",
"desc": "Bilderzeugung abbrechen" "desc": "Aktuelle Bilderzeugung abbrechen"
}, },
"focusPrompt": { "focusPrompt": {
"title": "Fokussiere Prompt", "title": "Fokussiere Prompt",
@ -351,42 +356,55 @@
"title": "Staging-Bild akzeptieren", "title": "Staging-Bild akzeptieren",
"desc": "Akzeptieren Sie das aktuelle Bild des Staging-Bereichs" "desc": "Akzeptieren Sie das aktuelle Bild des Staging-Bereichs"
}, },
"nodesHotkeys": "Knoten Tastenkürzel", "nodesHotkeys": "Knoten",
"addNodes": { "addNodes": {
"title": "Knotenpunkt hinzufügen", "title": "Knotenpunkt hinzufügen",
"desc": "Öffnet das Menü zum Hinzufügen von Knoten" "desc": "Öffnet das Menü zum Hinzufügen von Knoten"
}, },
"cancelAndClear": { "cancelAndClear": {
"title": "Abbruch und leeren" "title": "Abbruch und leeren",
"desc": "Aktuelle Berechnung abbrechen und alle wartenden löschen"
}, },
"noHotkeysFound": "Kein Hotkey gefunden", "noHotkeysFound": "Kein Hotkey gefunden",
"searchHotkeys": "Hotkeys durchsuchen", "searchHotkeys": "Hotkeys durchsuchen",
"clearSearch": "Suche leeren" "clearSearch": "Suche leeren",
"resetOptionsAndGallery": {
"desc": "Optionen und Galerie-Panels zurücksetzen",
"title": "Optionen und Galerie zurücksetzen"
},
"remixImage": {
"desc": "Alle Parameter außer Seed vom aktuellen Bild verwenden",
"title": "Remix des Bilds erstellen"
},
"toggleOptionsAndGallery": {
"title": "Optionen und Galerie umschalten",
"desc": "Optionen und Galerie-Panels öffnen und schließen"
}
}, },
"modelManager": { "modelManager": {
"modelAdded": "Model hinzugefügt", "modelAdded": "Model hinzugefügt",
"modelUpdated": "Model aktualisiert", "modelUpdated": "Model aktualisiert",
"modelEntryDeleted": "Modelleintrag gelöscht", "modelEntryDeleted": "Modelleintrag gelöscht",
"cannotUseSpaces": "Leerzeichen können nicht verwendet werden", "cannotUseSpaces": "Leerzeichen können nicht verwendet werden",
"addNew": "Neue hinzufügen", "addNew": "Neu hinzufügen",
"addNewModel": "Neues Model hinzufügen", "addNewModel": "Neues Modell hinzufügen",
"addManually": "Manuell hinzufügen", "addManually": "Manuell hinzufügen",
"nameValidationMsg": "Geben Sie einen Namen für Ihr Model ein", "nameValidationMsg": "Geben Sie einen Namen für Ihr Model ein",
"description": "Beschreibung", "description": "Beschreibung",
"descriptionValidationMsg": "Fügen Sie eine Beschreibung für Ihr Model hinzu", "descriptionValidationMsg": "Fügen Sie eine Beschreibung für Ihr Model hinzu",
"config": "Konfiguration", "config": "Konfiguration",
"configValidationMsg": "Pfad zur Konfigurationsdatei Ihres Models.", "configValidationMsg": "Pfad zur Konfigurationsdatei Ihres Modells.",
"modelLocation": "Ort des Models", "modelLocation": "Ort des Models",
"modelLocationValidationMsg": "Pfad zum Speicherort Ihres Models", "modelLocationValidationMsg": "Pfad zum Speicherort Ihres Models",
"vaeLocation": "VAE Ort", "vaeLocation": "VAE Ort",
"vaeLocationValidationMsg": "Pfad zum Speicherort Ihres VAE.", "vaeLocationValidationMsg": "Pfad zum Speicherort Ihres VAE.",
"width": "Breite", "width": "Breite",
"widthValidationMsg": "Standardbreite Ihres Models.", "widthValidationMsg": "Standardbreite Ihres Modells.",
"height": "Höhe", "height": "Höhe",
"heightValidationMsg": "Standardbhöhe Ihres Models.", "heightValidationMsg": "Standardbhöhe Ihres Models.",
"addModel": "Model hinzufügen", "addModel": "Modell hinzufügen",
"updateModel": "Model aktualisieren", "updateModel": "Model aktualisieren",
"availableModels": "Verfügbare Models", "availableModels": "Verfügbare Modelle",
"search": "Suche", "search": "Suche",
"load": "Laden", "load": "Laden",
"active": "Aktiv", "active": "Aktiv",
@ -483,7 +501,7 @@
"quickAdd": "Schnell hinzufügen", "quickAdd": "Schnell hinzufügen",
"simpleModelDesc": "Geben Sie einen Pfad zu einem lokalen Diffusers-Modell, einem lokalen Checkpoint-/Safetensors-Modell, einer HuggingFace-Repo-ID oder einer Checkpoint-/Diffusers-Modell-URL an.", "simpleModelDesc": "Geben Sie einen Pfad zu einem lokalen Diffusers-Modell, einem lokalen Checkpoint-/Safetensors-Modell, einer HuggingFace-Repo-ID oder einer Checkpoint-/Diffusers-Modell-URL an.",
"modelDeleted": "Modell gelöscht", "modelDeleted": "Modell gelöscht",
"inpainting": "v1 Inpainting", "inpainting": "V1-Inpainting",
"modelUpdateFailed": "Modellaktualisierung fehlgeschlagen", "modelUpdateFailed": "Modellaktualisierung fehlgeschlagen",
"useCustomConfig": "Benutzerdefinierte Konfiguration verwenden", "useCustomConfig": "Benutzerdefinierte Konfiguration verwenden",
"settings": "Einstellungen", "settings": "Einstellungen",
@ -500,12 +518,14 @@
"interpolationType": "Interpolationstyp", "interpolationType": "Interpolationstyp",
"oliveModels": "Olives", "oliveModels": "Olives",
"variant": "Variante", "variant": "Variante",
"loraModels": "LoRAs", "loraModels": "\"LoRAs\"",
"modelDeleteFailed": "Modell konnte nicht gelöscht werden", "modelDeleteFailed": "Modell konnte nicht gelöscht werden",
"mergedModelName": "Zusammengeführter Modellname", "mergedModelName": "Zusammengeführter Modellname",
"checkpointOrSafetensors": "$t(common.checkpoint) / $t(common.safetensors)", "checkpointOrSafetensors": "$t(common.checkpoint) / $t(common.safetensors)",
"formMessageDiffusersModelLocation": "Diffusers Modell Speicherort", "formMessageDiffusersModelLocation": "Diffusers Modell Speicherort",
"noModelSelected": "Kein Modell ausgewählt" "noModelSelected": "Kein Modell ausgewählt",
"conversionNotSupported": "Umwandlung nicht unterstützt",
"configFile": "Konfigurationsdatei"
}, },
"parameters": { "parameters": {
"images": "Bilder", "images": "Bilder",
@ -583,7 +603,8 @@
"resetWebUIDesc2": "Wenn die Bilder nicht in der Galerie angezeigt werden oder etwas anderes nicht funktioniert, versuchen Sie bitte, die Einstellungen zurückzusetzen, bevor Sie einen Fehler auf GitHub melden.", "resetWebUIDesc2": "Wenn die Bilder nicht in der Galerie angezeigt werden oder etwas anderes nicht funktioniert, versuchen Sie bitte, die Einstellungen zurückzusetzen, bevor Sie einen Fehler auf GitHub melden.",
"resetComplete": "Die Web-Oberfläche wurde zurückgesetzt.", "resetComplete": "Die Web-Oberfläche wurde zurückgesetzt.",
"models": "Modelle", "models": "Modelle",
"useSlidersForAll": "Schieberegler für alle Optionen verwenden" "useSlidersForAll": "Schieberegler für alle Optionen verwenden",
"showAdvancedOptions": "Erweiterte Optionen anzeigen"
}, },
"toast": { "toast": {
"tempFoldersEmptied": "Temp-Ordner geleert", "tempFoldersEmptied": "Temp-Ordner geleert",
@ -626,7 +647,7 @@
"upscale": "Verwenden Sie ESRGAN, um das Bild unmittelbar nach der Erzeugung zu vergrößern.", "upscale": "Verwenden Sie ESRGAN, um das Bild unmittelbar nach der Erzeugung zu vergrößern.",
"faceCorrection": "Gesichtskorrektur mit GFPGAN oder Codeformer: Der Algorithmus erkennt Gesichter im Bild und korrigiert alle Fehler. Ein hoher Wert verändert das Bild stärker, was zu attraktiveren Gesichtern führt. Codeformer mit einer höheren Genauigkeit bewahrt das Originalbild auf Kosten einer stärkeren Gesichtskorrektur.", "faceCorrection": "Gesichtskorrektur mit GFPGAN oder Codeformer: Der Algorithmus erkennt Gesichter im Bild und korrigiert alle Fehler. Ein hoher Wert verändert das Bild stärker, was zu attraktiveren Gesichtern führt. Codeformer mit einer höheren Genauigkeit bewahrt das Originalbild auf Kosten einer stärkeren Gesichtskorrektur.",
"imageToImage": "Bild zu Bild lädt ein beliebiges Bild als Ausgangsbild, aus dem dann zusammen mit dem Prompt ein neues Bild erzeugt wird. Je höher der Wert ist, desto stärker wird das Ergebnisbild verändert. Werte von 0,0 bis 1,0 sind möglich, der empfohlene Bereich ist .25-.75", "imageToImage": "Bild zu Bild lädt ein beliebiges Bild als Ausgangsbild, aus dem dann zusammen mit dem Prompt ein neues Bild erzeugt wird. Je höher der Wert ist, desto stärker wird das Ergebnisbild verändert. Werte von 0,0 bis 1,0 sind möglich, der empfohlene Bereich ist .25-.75",
"boundingBox": "Der Begrenzungsrahmen ist derselbe wie die Einstellungen für Breite und Höhe bei Text zu Bild oder Bild zu Bild. Es wird nur der Bereich innerhalb des Rahmens verarbeitet.", "boundingBox": "Der Begrenzungsrahmen ist derselbe wie die Einstellungen für Breite und Höhe bei Text-zu-Bild oder Bild-zu-Bild. Es wird nur der Bereich innerhalb des Rahmens verarbeitet.",
"seamCorrection": "Steuert die Behandlung von sichtbaren Übergängen, die zwischen den erzeugten Bildern auf der Leinwand auftreten.", "seamCorrection": "Steuert die Behandlung von sichtbaren Übergängen, die zwischen den erzeugten Bildern auf der Leinwand auftreten.",
"infillAndScaling": "Verwalten Sie Infill-Methoden (für maskierte oder gelöschte Bereiche der Leinwand) und Skalierung (nützlich für kleine Begrenzungsrahmengrößen)." "infillAndScaling": "Verwalten Sie Infill-Methoden (für maskierte oder gelöschte Bereiche der Leinwand) und Skalierung (nützlich für kleine Begrenzungsrahmengrößen)."
} }
@ -656,7 +677,7 @@
"redo": "Wiederherstellen", "redo": "Wiederherstellen",
"clearCanvas": "Leinwand löschen", "clearCanvas": "Leinwand löschen",
"canvasSettings": "Leinwand-Einstellungen", "canvasSettings": "Leinwand-Einstellungen",
"showIntermediates": "Zwischenprodukte anzeigen", "showIntermediates": "Zwischenbilder anzeigen",
"showGrid": "Gitternetz anzeigen", "showGrid": "Gitternetz anzeigen",
"snapToGrid": "Am Gitternetz einrasten", "snapToGrid": "Am Gitternetz einrasten",
"darkenOutsideSelection": "Außerhalb der Auswahl verdunkeln", "darkenOutsideSelection": "Außerhalb der Auswahl verdunkeln",
@ -694,7 +715,7 @@
"showResultsOff": "Zeige Ergebnisse (Aus)" "showResultsOff": "Zeige Ergebnisse (Aus)"
}, },
"accessibility": { "accessibility": {
"modelSelect": "Model Auswahl", "modelSelect": "Modell-Auswahl",
"uploadImage": "Bild hochladen", "uploadImage": "Bild hochladen",
"previousImage": "Voriges Bild", "previousImage": "Voriges Bild",
"useThisParameter": "Benutze diesen Parameter", "useThisParameter": "Benutze diesen Parameter",
@ -706,11 +727,11 @@
"modifyConfig": "Optionen einstellen", "modifyConfig": "Optionen einstellen",
"toggleAutoscroll": "Auroscroll ein/ausschalten", "toggleAutoscroll": "Auroscroll ein/ausschalten",
"toggleLogViewer": "Log Betrachter ein/ausschalten", "toggleLogViewer": "Log Betrachter ein/ausschalten",
"showOptionsPanel": "Zeige Optionen", "showOptionsPanel": "Seitenpanel anzeigen",
"reset": "Zurücksetzten", "reset": "Zurücksetzten",
"nextImage": "Nächstes Bild", "nextImage": "Nächstes Bild",
"zoomOut": "Verkleinern", "zoomOut": "Verkleinern",
"rotateCounterClockwise": "Gegen den Uhrzeigersinn verdrehen", "rotateCounterClockwise": "Gegen den Uhrzeigersinn drehen",
"showGalleryPanel": "Galeriefenster anzeigen", "showGalleryPanel": "Galeriefenster anzeigen",
"exitViewer": "Betrachten beenden", "exitViewer": "Betrachten beenden",
"menu": "Menü", "menu": "Menü",
@ -732,7 +753,7 @@
"selectBoard": "Ordner aussuchen", "selectBoard": "Ordner aussuchen",
"cancel": "Abbrechen", "cancel": "Abbrechen",
"addBoard": "Ordner hinzufügen", "addBoard": "Ordner hinzufügen",
"uncategorized": "Nicht kategorisiert", "uncategorized": "Ohne Kategorie",
"downloadBoard": "Ordner runterladen", "downloadBoard": "Ordner runterladen",
"changeBoard": "Ordner wechseln", "changeBoard": "Ordner wechseln",
"loading": "Laden...", "loading": "Laden...",
@ -742,15 +763,15 @@
"deleteBoard": "Löschen Ordner", "deleteBoard": "Löschen Ordner",
"deleteBoardAndImages": "Löschen Ordner und Bilder", "deleteBoardAndImages": "Löschen Ordner und Bilder",
"deletedBoardsCannotbeRestored": "Gelöschte Ordner könnte nicht wiederhergestellt werden", "deletedBoardsCannotbeRestored": "Gelöschte Ordner könnte nicht wiederhergestellt werden",
"movingImagesToBoard_one": "Verschiebe {{count}} Bild zu Ordner", "movingImagesToBoard_one": "Verschiebe {{count}} Bild zu Ordner:",
"movingImagesToBoard_other": "Verschiebe {{count}} Bilder in Ordner" "movingImagesToBoard_other": "Verschiebe {{count}} Bilder in Ordner:"
}, },
"controlnet": { "controlnet": {
"showAdvanced": "Zeige Erweitert", "showAdvanced": "Zeige Erweitert",
"contentShuffleDescription": "Mischt den Inhalt von einem Bild", "contentShuffleDescription": "Mischt den Inhalt von einem Bild",
"addT2IAdapter": "$t(common.t2iAdapter) hinzufügen", "addT2IAdapter": "$t(common.t2iAdapter) hinzufügen",
"importImageFromCanvas": "Importieren Bild von Zeichenfläche", "importImageFromCanvas": "Bild von Zeichenfläche importieren",
"lineartDescription": "Konvertiere Bild zu Lineart", "lineartDescription": "Konvertiere Bild in Strichzeichnung",
"importMaskFromCanvas": "Importiere Maske von Zeichenfläche", "importMaskFromCanvas": "Importiere Maske von Zeichenfläche",
"hed": "HED", "hed": "HED",
"hideAdvanced": "Verstecke Erweitert", "hideAdvanced": "Verstecke Erweitert",
@ -764,7 +785,7 @@
"depthMidasDescription": "Tiefenmap erstellen mit Midas", "depthMidasDescription": "Tiefenmap erstellen mit Midas",
"controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))", "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))",
"t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) ist aktiv, $t(common.controlNet) ist deaktiviert", "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) ist aktiv, $t(common.controlNet) ist deaktiviert",
"weight": "Breite", "weight": "Einfluss",
"selectModel": "Wähle ein Modell", "selectModel": "Wähle ein Modell",
"depthMidas": "Tiefe (Midas)", "depthMidas": "Tiefe (Midas)",
"w": "W", "w": "W",
@ -786,17 +807,17 @@
"toggleControlNet": "Schalten ControlNet um", "toggleControlNet": "Schalten ControlNet um",
"delete": "Löschen", "delete": "Löschen",
"controlAdapter_one": "Control Adapter", "controlAdapter_one": "Control Adapter",
"controlAdapter_other": "Control Adapters", "controlAdapter_other": "Control Adapter",
"colorMapTileSize": "Tile Größe", "colorMapTileSize": "Kachelgröße",
"depthZoeDescription": "Tiefenmap erstellen mit Zoe", "depthZoeDescription": "Tiefenmap erstellen mit Zoe",
"setControlImageDimensions": "Setze Control Bild Auflösung auf Breite/Höhe", "setControlImageDimensions": "Setze Control-Bild Auflösung auf Breite/Höhe",
"handAndFace": "Hand und Gesicht", "handAndFace": "Hand und Gesicht",
"enableIPAdapter": "Aktiviere IP Adapter", "enableIPAdapter": "Aktiviere IP Adapter",
"resize": "Größe ändern", "resize": "Größe ändern",
"resetControlImage": "Zurücksetzen vom Referenz Bild", "resetControlImage": "Zurücksetzen vom Referenz Bild",
"balanced": "Ausgewogen", "balanced": "Ausgewogen",
"prompt": "Prompt", "prompt": "Prompt",
"resizeMode": "Größenänderungsmodus", "resizeMode": "Größe",
"processor": "Prozessor", "processor": "Prozessor",
"saveControlImage": "Speichere Referenz Bild", "saveControlImage": "Speichere Referenz Bild",
"safe": "Speichern", "safe": "Speichern",
@ -807,33 +828,33 @@
"mlsdDescription": "Minimalistischer Liniensegmentdetektor", "mlsdDescription": "Minimalistischer Liniensegmentdetektor",
"openPoseDescription": "Schätzung der menschlichen Pose mit Openpose", "openPoseDescription": "Schätzung der menschlichen Pose mit Openpose",
"control": "Kontrolle", "control": "Kontrolle",
"coarse": "Coarse", "coarse": "Grob",
"crop": "Zuschneiden", "crop": "Zuschneiden",
"pidiDescription": "PIDI-Bildverarbeitung", "pidiDescription": "PIDI-Bildverarbeitung",
"mediapipeFace": "Mediapipe Gesichter", "mediapipeFace": "Mediapipe Gesichter",
"mlsd": "M-LSD", "mlsd": "M-LSD",
"controlMode": "Steuermodus", "controlMode": "Steuermodus",
"cannyDescription": "Canny Ecken Erkennung", "cannyDescription": "Canny Umrisserkennung",
"lineart": "Lineart", "lineart": "Linienzeichnung",
"lineartAnimeDescription": "Lineart-Verarbeitung im Anime-Stil", "lineartAnimeDescription": "Lineart-Verarbeitung im Anime-Stil",
"minConfidence": "Minimales Vertrauen", "minConfidence": "Minimales Vertrauen",
"megaControl": "Mega-Kontrolle", "megaControl": "Mega-Kontrolle",
"autoConfigure": "Prozessor automatisch konfigurieren", "autoConfigure": "Prozessor automatisch konfigurieren",
"normalBaeDescription": "Normale BAE-Verarbeitung", "normalBaeDescription": "Normale BAE-Verarbeitung",
"noneDescription": "Es wurde keine Verarbeitung angewendet", "noneDescription": "Es wurde keine Verarbeitung angewendet",
"openPose": "Openpose", "openPose": "Openpose / \"Pose nutzen\"",
"lineartAnime": "Lineart Anime", "lineartAnime": "Lineart Anime / \"Strichzeichnung Anime\"",
"mediapipeFaceDescription": "Gesichtserkennung mit Mediapipe", "mediapipeFaceDescription": "Gesichtserkennung mit Mediapipe",
"canny": "Canny", "canny": "\"Canny\"",
"hedDescription": "Ganzheitlich verschachtelte Kantenerkennung", "hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
"scribble": "Scribble", "scribble": "Scribble",
"maxFaces": "Maximal Anzahl Gesichter", "maxFaces": "Maximale Anzahl Gesichter",
"resizeSimple": "Größe ändern (einfach)", "resizeSimple": "Größe ändern (einfach)",
"large": "Groß", "large": "Groß",
"modelSize": "Modell Größe", "modelSize": "Modell Größe",
"small": "Klein", "small": "Klein",
"base": "Basis", "base": "Basis",
"depthAnything": "Depth Anything", "depthAnything": "Depth Anything / \"Tiefe irgendwas\"",
"depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth Anything-Technik" "depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth Anything-Technik"
}, },
"queue": { "queue": {
@ -876,7 +897,7 @@
"enqueueing": "Stapel in der Warteschlange", "enqueueing": "Stapel in der Warteschlange",
"queueMaxExceeded": "Maximum von {{max_queue_size}} Elementen erreicht, würde {{skip}} Elemente überspringen", "queueMaxExceeded": "Maximum von {{max_queue_size}} Elementen erreicht, würde {{skip}} Elemente überspringen",
"cancelBatchFailed": "Problem beim Abbruch vom Stapel", "cancelBatchFailed": "Problem beim Abbruch vom Stapel",
"clearQueueAlertDialog2": "bist du sicher die Warteschlange zu leeren?", "clearQueueAlertDialog2": "Warteschlange wirklich leeren?",
"pruneSucceeded": "{{item_count}} abgeschlossene Elemente aus der Warteschlange entfernt", "pruneSucceeded": "{{item_count}} abgeschlossene Elemente aus der Warteschlange entfernt",
"pauseSucceeded": "Prozessor angehalten", "pauseSucceeded": "Prozessor angehalten",
"cancelFailed": "Problem beim Stornieren des Auftrags", "cancelFailed": "Problem beim Stornieren des Auftrags",
@ -890,78 +911,175 @@
"resumeSucceeded": "Prozessor wieder aufgenommen", "resumeSucceeded": "Prozessor wieder aufgenommen",
"resumeTooltip": "Prozessor wieder aufnehmen", "resumeTooltip": "Prozessor wieder aufnehmen",
"time": "Zeit", "time": "Zeit",
"batchQueuedDesc_one": "{{count}} Eintrage ans {{direction}} der Wartschlange hinzugefügt", "batchQueuedDesc_one": "{{count}} Eintrag ans {{direction}} der Wartschlange hinzugefügt",
"batchQueuedDesc_other": "{{count}} Einträge ans {{direction}} der Wartschlange hinzugefügt" "batchQueuedDesc_other": "{{count}} Einträge ans {{direction}} der Wartschlange hinzugefügt",
"openQueue": "Warteschlange öffnen",
"batchFailedToQueue": "Fehler beim Einreihen in die Stapelverarbeitung",
"batchFieldValues": "Stapelverarbeitungswerte",
"batchQueued": "Stapelverarbeitung eingereiht",
"graphQueued": "Graph eingereiht",
"graphFailedToQueue": "Fehler beim Einreihen des Graphen"
}, },
"metadata": { "metadata": {
"negativePrompt": "Negativ Beschreibung", "negativePrompt": "Negativ Beschreibung",
"metadata": "Meta-Data", "metadata": "Meta-Daten",
"strength": "Bild zu Bild stärke", "strength": "Bild zu Bild Stärke",
"imageDetails": "Bild Details", "imageDetails": "Bild Details",
"model": "Modell", "model": "Modell",
"noImageDetails": "Keine Bild Details gefunden", "noImageDetails": "Keine Bild Details gefunden",
"cfgScale": "CFG-Skala", "cfgScale": "CFG-Skala",
"fit": "Bild zu Bild passen", "fit": "Bild zu Bild anpassen",
"height": "Höhe", "height": "Höhe",
"noMetaData": "Keine Meta-Data gefunden", "noMetaData": "Keine Meta-Daten gefunden",
"width": "Breite", "width": "Breite",
"createdBy": "Erstellt von", "createdBy": "Erstellt von",
"steps": "Schritte", "steps": "Schritte",
"seamless": "Nahtlos", "seamless": "Nahtlos",
"positivePrompt": "Positiver Prompt", "positivePrompt": "Positiver Prompt",
"generationMode": "Generierungsmodus", "generationMode": "Generierungsmodus",
"Threshold": "Noise Schwelle", "Threshold": "Rauschen-Schwelle",
"seed": "Samen", "seed": "Seed",
"perlin": "Perlin Noise", "perlin": "Perlin-Rauschen",
"hiresFix": "Optimierung für hohe Auflösungen", "hiresFix": "Optimierung für hohe Auflösungen",
"initImage": "Erstes Bild", "initImage": "Erstes Bild",
"variations": "Samengewichtspaare", "variations": "Seed-Gewichtungs-Paare",
"vae": "VAE", "vae": "VAE",
"workflow": "Arbeitsablauf", "workflow": "Arbeitsablauf",
"scheduler": "Scheduler", "scheduler": "Planer",
"noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden", "noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden",
"recallParameters": "Recall Parameters" "recallParameters": "Parameter wiederherstellen"
}, },
"popovers": { "popovers": {
"noiseUseCPU": { "noiseUseCPU": {
"heading": "Nutze Prozessor rauschen" "heading": "Nutze Prozessor rauschen",
"paragraphs": [
"Entscheidet, ob auf der CPU oder GPU Rauschen erzeugt wird.",
"Mit aktiviertem CPU-Rauschen wird ein bestimmter Seedwert das gleiche Bild auf jeder Maschine erzeugen.",
"CPU-Rauschen einzuschalten beeinflusst nicht die Systemleistung."
]
}, },
"paramModel": { "paramModel": {
"heading": "Modell" "heading": "Modell",
"paragraphs": [
"Modell für die Entrauschungsschritte.",
"Verschiedene Modelle werden in der Regel so trainiert, dass sie sich auf die Erzeugung bestimmter Ästhetik und/oder Inhalte spezialisiert."
]
}, },
"paramIterations": { "paramIterations": {
"heading": "Iterationen" "heading": "Iterationen",
"paragraphs": [
"Die Anzahl der Bilder, die erzeugt werden sollen.",
"Wenn \"Dynamische Prompts\" aktiviert ist, wird jeder einzelne Prompt so oft generiert."
]
}, },
"paramCFGScale": { "paramCFGScale": {
"heading": "CFG-Skala" "heading": "CFG-Skala",
"paragraphs": [
"Bestimmt, wie viel Ihr Prompt den Erzeugungsprozess beeinflusst."
]
}, },
"paramSteps": { "paramSteps": {
"heading": "Schritte" "heading": "Schritte",
"paragraphs": [
"Anzahl der Schritte, die bei jeder Generierung durchgeführt werden.",
"Höhere Schrittzahlen werden in der Regel bessere Bilder ergeben, aber mehr Zeit benötigen."
]
}, },
"lora": { "lora": {
"heading": "LoRA Gewichte" "heading": "LoRA Gewichte",
"paragraphs": [
"Höhere LoRA-Wichtungen führen zu größeren Auswirkungen auf das endgültige Bild."
]
}, },
"infillMethod": { "infillMethod": {
"heading": "Füllmethode" "heading": "Füllmethode",
"paragraphs": [
"Infill-Methode für den ausgewählten Bereich."
]
}, },
"paramVAE": { "paramVAE": {
"heading": "VAE" "heading": "VAE",
"paragraphs": [
"Verwendetes Modell, um den KI-Ausgang in das endgültige Bild zu übersetzen."
]
},
"paramRatio": {
"heading": "Seitenverhältnis",
"paragraphs": [
"Das Seitenverhältnis des erzeugten Bildes.",
"Für SD1.5-Modelle wird eine Bildgröße von 512x512 Pixel empfohlen, für SDXL-Modelle sind es 1024x1024 Pixel."
]
},
"paramDenoisingStrength": {
"paragraphs": [
"Wie viel Rauschen dem Eingabebild hinzugefügt wird.",
"0 wird zu einem identischen Bild führen, während 1 zu einem völlig neuen Bild führt."
],
"heading": "Stärke der Entrauschung"
},
"paramVAEPrecision": {
"heading": "VAE-Präzision",
"paragraphs": [
"Die bei der VAE-Kodierung und Dekodierung verwendete Präzision. FP16/Halbpräzision ist effizienter, aber auf Kosten kleiner Bildvariationen."
]
},
"paramCFGRescaleMultiplier": {
"heading": "CFG Rescale Multiplikator",
"paragraphs": [
"Rescale-Multiplikator für die CFG-Lenkung, der für Modelle verwendet wird, die mit dem zero-terminal SNR (ztsnr) trainiert wurden. Empfohlener Wert: 0,7."
]
},
"scaleBeforeProcessing": {
"paragraphs": [
"Skaliert den ausgewählten Bereich auf die Größe, die für das Modell am besten geeignet ist."
],
"heading": "Skalieren vor der Verarbeitung"
},
"paramSeed": {
"paragraphs": [
"Kontrolliert das für die Erzeugung verwendete Startrauschen.",
"Deaktivieren Sie “Random Seed”, um identische Ergebnisse mit den gleichen Generierungseinstellungen zu erzeugen."
],
"heading": "Seed"
},
"dynamicPromptsMaxPrompts": {
"paragraphs": [
"Beschränkt die Anzahl der Prompts, die von \"Dynamic Prompts\" generiert werden können."
],
"heading": "Maximale Prompts"
},
"dynamicPromptsSeedBehaviour": {
"paragraphs": [
"Bestimmt, wie der Seed-Wert beim Erzeugen von Prompts verwendet wird.",
"Verwenden Sie dies, um schnelle Variationen eines einzigen Seeds zu erkunden.",
"Wenn Sie z. B. 5 Prompts haben, wird jedes Bild den selben Seed-Wert verwenden.",
"\"Per Bild\" wird einen einzigartigen Seed-Wert für jedes Bild verwenden. Dies bietet mehr Variationen."
],
"heading": "Seed-Verhalten"
} }
}, },
"ui": { "ui": {
"lockRatio": "Verhältnis sperren", "lockRatio": "Verhältnis sperren",
"hideProgressImages": "Verstecke Prozess Bild", "hideProgressImages": "Verstecke Prozess Bild",
"showProgressImages": "Zeige Prozess Bild" "showProgressImages": "Zeige Prozess Bild",
"swapSizes": "Tausche Größen"
}, },
"invocationCache": { "invocationCache": {
"disable": "Deaktivieren", "disable": "Deaktivieren",
"misses": "Cache Nötig", "misses": "Cache nicht genutzt",
"hits": "Cache Treffer", "hits": "Cache Treffer",
"enable": "Aktivieren", "enable": "Aktivieren",
"clear": "Leeren", "clear": "Leeren",
"maxCacheSize": "Maximale Cache Größe", "maxCacheSize": "Maximale Cache Größe",
"cacheSize": "Cache Größe", "cacheSize": "Cache Größe",
"useCache": "Benutze Cache" "useCache": "Benutze Cache",
"enableFailed": "Problem beim Aktivieren des Zwischenspeichers",
"disableFailed": "Problem bei Deaktivierung des Cache",
"enableSucceeded": "Zwischenspeicher aktiviert",
"disableSucceeded": "Invocation-Cache deaktiviert",
"clearSucceeded": "Zwischenspeicher gelöscht",
"invocationCache": "Zwischenspeicher",
"clearFailed": "Problem beim Löschen des Zwischenspeichers"
}, },
"embedding": { "embedding": {
"noMatchingEmbedding": "Keine passenden Embeddings", "noMatchingEmbedding": "Keine passenden Embeddings",
@ -1000,10 +1118,151 @@
"colorCodeEdges": "Farbkodierte Kanten", "colorCodeEdges": "Farbkodierte Kanten",
"addNodeToolTip": "Knoten hinzufügen (Umschalt+A, Leertaste)", "addNodeToolTip": "Knoten hinzufügen (Umschalt+A, Leertaste)",
"boardField": "Ordner", "boardField": "Ordner",
"boardFieldDescription": "Ein Galerie Ordner" "boardFieldDescription": "Ein Galerie Ordner",
"collectionFieldType": "{{name}} Sammlung",
"controlCollectionDescription": "Kontrollinformationen zwischen Knotenpunkten weitergegeben.",
"connectionWouldCreateCycle": "Verbindung würde einen Kreislauf/cycle schaffen",
"ipAdapterDescription": "Ein Adapter für die Bildabfrage (IP-Adapter) / Bildprompt-Adapter.",
"controlField": "Kontrolle",
"inputFields": "Eingabefelder",
"imageField": "Bild",
"inputMayOnlyHaveOneConnection": "Eingang darf nur eine Verbindung haben",
"integerCollectionDescription": "Eine Sammlung ganzer Zahlen.",
"integerDescription": "\"Integer\" sind ganze Zahlen ohne Dezimalpunkt.",
"conditioningPolymorphic": "Konditionierung polymorphisch",
"conditioningPolymorphicDescription": "Die Konditionierung kann zwischen den Knoten weitergegeben werden.",
"invalidOutputSchema": "Ungültiges Ausgabeschema",
"ipAdapterModel": "IP-Adapter Modell",
"conditioningFieldDescription": "Die Konditionierung kann zwischen den Knotenpunkten weitergegeben werden.",
"ipAdapterCollectionDescription": "Eine Sammlung von IP-Adaptern.",
"collectionDescription": "Zu erledigen",
"imageFieldDescription": "Bilder können zwischen Knoten weitergegeben werden.",
"imagePolymorphic": "Bild Polymorphie",
"imagePolymorphicDescription": "Eine Bildersammlung.",
"inputField": "Eingabefeld",
"hideLegendNodes": "Feldtyp-Legende ausblenden",
"collectionItemDescription": "Zu erledigen",
"inputNode": "Eingangsknoten",
"integer": "Ganze Zahl",
"integerCollection": "Ganzzahlige Sammlung",
"addLinearView": "Zur linearen Ansicht hinzufügen",
"currentImageDescription": "Zeigt das aktuelle Bild im Node-Editor an",
"ipAdapter": "IP-Adapter",
"hideMinimapnodes": "Miniatur-Kartenansicht ausblenden",
"imageCollection": "Bildersammlung",
"imageCollectionDescription": "Eine Sammlung von Bildern.",
"denoiseMaskField": "Entrauschen-Maske",
"ipAdapterCollection": "IP-Adapter Sammlung",
"newWorkflowDesc2": "Ihr aktueller Arbeitsablauf hat ungespeicherte Änderungen.",
"problemSettingTitle": "Problem beim Einstellen des Titels",
"noConnectionData": "Keine Verbindungsdaten",
"outputField": "Ausgabefeld",
"outputFieldInInput": "Ausgabefeld im Eingang",
"problemReadingWorkflow": "Problem beim Lesen des Arbeitsablaufs vom Bild",
"reloadNodeTemplates": "Knoten-Vorlagen neu laden",
"newWorkflow": "Neuer Arbeitsablauf",
"newWorkflowDesc": "Einen neuen Arbeitsablauf erstellen?",
"noFieldsLinearview": "Keine Felder zur linearen Ansicht hinzugefügt",
"clearWorkflow": "Arbeitsablauf löschen",
"clearWorkflowDesc": "Diesen Arbeitsablauf löschen und neu starten?",
"noConnectionInProgress": "Es besteht keine Verbindung",
"notes": "Anmerkungen",
"nodeVersion": "Knoten Version",
"noOutputSchemaName": "Kein Name des Ausgabeschemas im ref-Objekt gefunden",
"node": "Knoten",
"nodeSearch": "Knoten suchen",
"removeLinearView": "Entfernen aus Linear View",
"nodeOutputs": "Knoten-Ausgänge",
"nodeTemplate": "Knoten-Vorlage",
"nodeType": "Knotentyp",
"noFieldType": "Kein Feldtyp",
"oNNXModelField": "ONNX-Modell",
"noMatchingNodes": "Keine passenden Knoten",
"noNodeSelected": "Kein Knoten gewählt",
"noImageFoundState": "Kein Anfangsbild im Status gefunden",
"nodeOpacity": "Knoten-Deckkraft",
"noOutputRecorded": "Keine Ausgänge aufgezeichnet",
"outputSchemaNotFound": "Ausgabeschema nicht gefunden",
"oNNXModelFieldDescription": "ONNX-Modellfeld.",
"outputNode": "Ausgabeknoten",
"pickOne": "Eins auswählen",
"problemReadingMetadata": "Problem beim Lesen von Metadaten aus dem Bild",
"notesDescription": "Anmerkungen zum Arbeitsablauf hinzufügen",
"outputFields": "Ausgabefelder",
"sDXLRefinerModelField": "Refiner-Modell",
"sDXLMainModelFieldDescription": "SDXL Modellfeld.",
"clearWorkflowDesc2": "Ihr aktueller Arbeitsablauf hat ungespeicherte Änderungen.",
"skipped": "Übersprungen",
"schedulerDescription": "Zu erledigen",
"scheduler": "Planer",
"showGraphNodes": "Graph Overlay anzeigen",
"showMinimapnodes": "MiniMap anzeigen",
"sDXLMainModelField": "SDXL Modell",
"skippedReservedInput": "Reserviertes Eingabefeld übersprungen",
"sDXLRefinerModelFieldDescription": "Zu erledigen",
"showLegendNodes": "Feldtyp-Legende anzeigen",
"skippedReservedOutput": "Reserviertes Ausgangsfeld übersprungen",
"skippingInputNoTemplate": "Überspringe Eingabefeld ohne Vorlage",
"executionStateCompleted": "Erledigt",
"denoiseMaskFieldDescription": "Denoise Maske kann zwischen Knoten weitergegeben werden",
"downloadWorkflow": "Workflow JSON herunterladen",
"executionStateInProgress": "In Bearbeitung",
"snapToGridHelp": "Knoten am Gitternetz einrasten bei Bewegung",
"controlCollection": "Control-Sammlung",
"controlFieldDescription": "Control-Informationen zwischen Knotenpunkten weitergegeben.",
"latentsField": "Latents",
"mainModelFieldDescription": "Zu erledigen",
"missingTemplate": "Ungültiger Knoten: Knoten {{node}} vom Typ {{type}} fehlt Vorlage (nicht installiert?)",
"skippingUnknownInputType": "Überspringe unbekannten Eingabe-Feldtyp",
"stringCollectionDescription": "Eine Sammlung von Zeichenfolgen.",
"string": "Zeichenfolge",
"stringCollection": "Sammlung von Zeichenfolgen",
"stringDescription": "Zeichenfolgen (Strings) sind Text.",
"fieldTypesMustMatch": "Feldtypen müssen übereinstimmen",
"fitViewportNodes": "An Ansichtsgröße anpassen",
"missingCanvaInitMaskImages": "Fehlende Startbilder und Masken auf der Arbeitsfläche",
"missingCanvaInitImage": "Fehlendes Startbild auf der Arbeitsfläche",
"ipAdapterModelDescription": "IP-Adapter-Modellfeld",
"latentsPolymorphicDescription": "Zwischen Nodes können Latents weitergegeben werden.",
"loadingNodes": "Lade Nodes...",
"latentsCollectionDescription": "Zwischen Knoten können Latents weitergegeben werden.",
"mismatchedVersion": "Ungültiger Knoten: Knoten {{node}} vom Typ {{type}} hat keine passende Version (Update versuchen?)",
"colorCollectionDescription": "Zu erledigen",
"ipAdapterPolymorphicDescription": "Eine Sammlung von IP-Adaptern.",
"fullyContainNodesHelp": "Nodes müssen vollständig innerhalb der Auswahlbox sein, um ausgewählt werden zu können",
"latentsFieldDescription": "Zwischen Nodes können Latents weitergegeben werden.",
"noWorkflow": "Kein Workflow",
"hideGraphNodes": "Graph Overlay verbergen",
"sourceNode": "Quellknoten",
"executionStateError": "Fehler",
"latentsCollection": "Latents Sammlung",
"maybeIncompatible": "Möglicherweise inkompatibel mit installierten",
"nodePack": "Knoten-Pack",
"skippingUnknownOutputType": "Überspringe unbekannten Ausgabe-Feldtyp",
"loadWorkflow": "Lade Workflow",
"snapToGrid": "Am Gitternetz einrasten",
"skippingReservedFieldType": "Überspringe reservierten Feldtyp",
"loRAModelField": "LoRA",
"loRAModelFieldDescription": "Zu erledigen",
"mainModelField": "Modell",
"doesNotExist": "existiert nicht",
"vaeField": "VAE",
"unknownOutput": "Unbekannte Ausgabe: {{name}}",
"updateNode": "Knoten updaten",
"edge": "Rand / Kante",
"sourceNodeDoesNotExist": "Ungültiger Rand: Quell- / Ausgabe-Knoten {{node}} existiert nicht",
"updateAllNodes": "Update Knoten",
"allNodesUpdated": "Alle Knoten aktualisiert",
"unknownTemplate": "Unbekannte Vorlage",
"floatDescription": "Floats sind Zahlen mit einem Dezimalpunkt.",
"updateApp": "Update App",
"vaeFieldDescription": "VAE Submodell.",
"unknownInput": "Unbekannte Eingabe: {{name}}",
"unknownNodeType": "Unbekannter Knotentyp",
"float": "Kommazahlen"
}, },
"hrf": { "hrf": {
"enableHrf": "Aktivieren Sie die Korrektur für hohe Auflösungen", "enableHrf": "Korrektur für hohe Auflösungen",
"upscaleMethod": "Vergrößerungsmethoden", "upscaleMethod": "Vergrößerungsmethoden",
"enableHrfTooltip": "Generieren Sie mit einer niedrigeren Anfangsauflösung, skalieren Sie auf die Basisauflösung hoch und führen Sie dann Image-to-Image aus.", "enableHrfTooltip": "Generieren Sie mit einer niedrigeren Anfangsauflösung, skalieren Sie auf die Basisauflösung hoch und führen Sie dann Image-to-Image aus.",
"metadata": { "metadata": {
@ -1026,7 +1285,14 @@
"noLoRAsInstalled": "Keine LoRAs installiert", "noLoRAsInstalled": "Keine LoRAs installiert",
"selectLoRA": "Wählen ein LoRA aus", "selectLoRA": "Wählen ein LoRA aus",
"esrganModel": "ESRGAN Modell", "esrganModel": "ESRGAN Modell",
"addLora": "LoRA hinzufügen" "addLora": "LoRA hinzufügen",
"defaultVAE": "Standard VAE",
"noLoRAsLoaded": "Keine LoRAs geladen",
"lora": "LoRA",
"allLoRAsAdded": "Alle LoRAs hinzugefügt",
"incompatibleBaseModel": "Inkompatibles Basismodell",
"noMainModelSelected": "Kein Hauptmodell ausgewählt",
"loraAlreadyAdded": "LoRA bereits hinzugefügt"
}, },
"accordions": { "accordions": {
"generation": { "generation": {
@ -1050,5 +1316,40 @@
"infillTab": "Füllung", "infillTab": "Füllung",
"title": "Compositing" "title": "Compositing"
} }
},
"workflows": {
"workflows": "Arbeitsabläufe",
"noSystemWorkflows": "Keine System-Arbeitsabläufe",
"workflowName": "Arbeitsablauf-Name",
"workflowIsOpen": "Arbeitsablauf ist offen",
"saveWorkflowAs": "Arbeitsablauf speichern als",
"searchWorkflows": "Suche Arbeitsabläufe",
"newWorkflowCreated": "Neuer Arbeitsablauf erstellt",
"problemSavingWorkflow": "Problem beim Speichern des Arbeitsablaufs",
"noRecentWorkflows": "Keine kürzlichen Arbeitsabläufe",
"problemLoading": "Problem beim Laden von Arbeitsabläufen",
"downloadWorkflow": "Speichern als",
"savingWorkflow": "Speichere Arbeitsablauf...",
"saveWorkflow": "Arbeitsablauf speichern",
"noWorkflows": "Keine Arbeitsabläufe",
"workflowLibrary": "Bibliothek",
"defaultWorkflows": "Standard-Arbeitsabläufe",
"unnamedWorkflow": "Unbenannter Arbeitsablauf",
"noDescription": "Keine Beschreibung",
"clearWorkflowSearchFilter": "Suchfilter zurücksetzen",
"workflowEditorMenu": "Arbeitsablauf-Editor Menü",
"deleteWorkflow": "Arbeitsablauf löschen",
"userWorkflows": "Meine Arbeitsabläufe",
"workflowSaved": "Arbeitsablauf gespeichert",
"uploadWorkflow": "Aus Datei laden",
"projectWorkflows": "Projekt-Arbeitsabläufe",
"openWorkflow": "Arbeitsablauf öffnen",
"noUserWorkflows": "Keine Benutzer-Arbeitsabläufe",
"saveWorkflowToProject": "Arbeitsablauf in Projekt speichern",
"workflowCleared": "Arbeitsablauf gelöscht",
"loading": "Lade Arbeitsabläufe"
},
"app": {
"storeNotInitialized": "App-Store ist nicht initialisiert"
} }
} }

View File

@ -1,4 +1,6 @@
import { useAppToaster } from 'app/components/Toaster'; import { useAppToaster } from 'app/components/Toaster';
import { useAppDispatch } from 'app/store/storeHooks';
import { imageDownloaded } from 'features/gallery/store/actions';
import { useCallback } from 'react'; import { useCallback } from 'react';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
@ -8,6 +10,7 @@ export const useDownloadImage = () => {
const toaster = useAppToaster(); const toaster = useAppToaster();
const { t } = useTranslation(); const { t } = useTranslation();
const imageUrlToBlob = useImageUrlToBlob(); const imageUrlToBlob = useImageUrlToBlob();
const dispatch = useAppDispatch();
const downloadImage = useCallback( const downloadImage = useCallback(
async (image_url: string, image_name: string) => { async (image_url: string, image_name: string) => {
@ -26,6 +29,7 @@ export const useDownloadImage = () => {
document.body.appendChild(a); document.body.appendChild(a);
a.click(); a.click();
window.URL.revokeObjectURL(url); window.URL.revokeObjectURL(url);
dispatch(imageDownloaded());
} catch (err) { } catch (err) {
toaster({ toaster({
title: t('toast.problemDownloadingImage'), title: t('toast.problemDownloadingImage'),
@ -36,7 +40,7 @@ export const useDownloadImage = () => {
}); });
} }
}, },
[t, toaster, imageUrlToBlob] [t, toaster, imageUrlToBlob, dispatch]
); );
return { downloadImage }; return { downloadImage };

View File

@ -5,6 +5,7 @@ import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage'; import IAIDndImage from 'common/components/IAIDndImage';
import IAIDndImageIcon from 'common/components/IAIDndImageIcon'; import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice'; import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice';
import { useControlAdapterControlImage } from 'features/controlAdapters/hooks/useControlAdapterControlImage'; import { useControlAdapterControlImage } from 'features/controlAdapters/hooks/useControlAdapterControlImage';
import { useControlAdapterProcessedControlImage } from 'features/controlAdapters/hooks/useControlAdapterProcessedControlImage'; import { useControlAdapterProcessedControlImage } from 'features/controlAdapters/hooks/useControlAdapterProcessedControlImage';
@ -91,19 +92,14 @@ const ControlAdapterImagePreview = ({ isSmall, id }: Props) => {
return; return;
} }
const width = roundToMultiple(controlImage.width, 8);
const height = roundToMultiple(controlImage.height, 8);
if (activeTabName === 'unifiedCanvas') { if (activeTabName === 'unifiedCanvas') {
dispatch( dispatch(setBoundingBoxDimensions({ width, height }, optimalDimension));
setBoundingBoxDimensions(
{
width: controlImage.width,
height: controlImage.height,
},
optimalDimension
)
);
} else { } else {
dispatch(widthChanged(controlImage.width)); dispatch(widthChanged(width));
dispatch(heightChanged(controlImage.height)); dispatch(heightChanged(height));
} }
}, [controlImage, activeTabName, dispatch, optimalDimension]); }, [controlImage, activeTabName, dispatch, optimalDimension]);

View File

@ -14,3 +14,5 @@ export const requestedBoardImagesDeletion = createAction<RequestedBoardImagesDel
export const sentImageToCanvas = createAction('gallery/sentImageToCanvas'); export const sentImageToCanvas = createAction('gallery/sentImageToCanvas');
export const sentImageToImg2Img = createAction('gallery/sentImageToImg2Img'); export const sentImageToImg2Img = createAction('gallery/sentImageToImg2Img');
export const imageDownloaded = createAction('gallery/imageDownloaded');

View File

@ -1,84 +0,0 @@
import type { ContextMenuProps } from '@invoke-ai/ui-library';
import { ContextMenu, MenuGroup, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useFieldInputKind } from 'features/nodes/hooks/useFieldInputKind';
import { useFieldLabel } from 'features/nodes/hooks/useFieldLabel';
import { useFieldTemplateTitle } from 'features/nodes/hooks/useFieldTemplateTitle';
import {
selectWorkflowSlice,
workflowExposedFieldAdded,
workflowExposedFieldRemoved,
} from 'features/nodes/store/workflowSlice';
import type { ReactNode } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiMinusBold, PiPlusBold } from 'react-icons/pi';
type Props = {
nodeId: string;
fieldName: string;
kind: 'input' | 'output';
children: ContextMenuProps<HTMLDivElement>['children'];
};
const FieldContextMenu = ({ nodeId, fieldName, kind, children }: Props) => {
const dispatch = useAppDispatch();
const label = useFieldLabel(nodeId, fieldName);
const fieldTemplateTitle = useFieldTemplateTitle(nodeId, fieldName, kind);
const input = useFieldInputKind(nodeId, fieldName);
const { t } = useTranslation();
const selectIsExposed = useMemo(
() =>
createSelector(selectWorkflowSlice, (workflow) => {
return Boolean(workflow.exposedFields.find((f) => f.nodeId === nodeId && f.fieldName === fieldName));
}),
[fieldName, nodeId]
);
const mayExpose = useMemo(() => input && ['any', 'direct'].includes(input), [input]);
const isExposed = useAppSelector(selectIsExposed);
const handleExposeField = useCallback(() => {
dispatch(workflowExposedFieldAdded({ nodeId, fieldName }));
}, [dispatch, fieldName, nodeId]);
const handleUnexposeField = useCallback(() => {
dispatch(workflowExposedFieldRemoved({ nodeId, fieldName }));
}, [dispatch, fieldName, nodeId]);
const menuItems = useMemo(() => {
const menuItems: ReactNode[] = [];
if (mayExpose && !isExposed) {
menuItems.push(
<MenuItem key={`${nodeId}.${fieldName}.expose-field`} icon={<PiPlusBold />} onClick={handleExposeField}>
{t('nodes.addLinearView')}
</MenuItem>
);
}
if (mayExpose && isExposed) {
menuItems.push(
<MenuItem key={`${nodeId}.${fieldName}.unexpose-field`} icon={<PiMinusBold />} onClick={handleUnexposeField}>
{t('nodes.removeLinearView')}
</MenuItem>
);
}
return menuItems;
}, [fieldName, handleExposeField, handleUnexposeField, isExposed, mayExpose, nodeId, t]);
const renderMenuFunc = useCallback(
() =>
!menuItems.length ? null : (
<MenuList visibility="visible">
<MenuGroup title={label || fieldTemplateTitle || t('nodes.unknownField')}>{menuItems}</MenuGroup>
</MenuList>
),
[fieldTemplateTitle, label, menuItems, t]
);
return <ContextMenu renderMenu={renderMenuFunc}>{children}</ContextMenu>;
};
export default memo(FieldContextMenu);

View File

@ -0,0 +1,67 @@
import { IconButton } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import {
selectWorkflowSlice,
workflowExposedFieldAdded,
workflowExposedFieldRemoved,
} from 'features/nodes/store/workflowSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiMinusBold, PiPlusBold } from 'react-icons/pi';
type Props = {
nodeId: string;
fieldName: string;
};
const FieldLinearViewToggle = ({ nodeId, fieldName }: Props) => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
const selectIsExposed = useMemo(
() =>
createSelector(selectWorkflowSlice, (workflow) => {
return Boolean(workflow.exposedFields.find((f) => f.nodeId === nodeId && f.fieldName === fieldName));
}),
[fieldName, nodeId]
);
const isExposed = useAppSelector(selectIsExposed);
const handleExposeField = useCallback(() => {
dispatch(workflowExposedFieldAdded({ nodeId, fieldName }));
}, [dispatch, fieldName, nodeId]);
const handleUnexposeField = useCallback(() => {
dispatch(workflowExposedFieldRemoved({ nodeId, fieldName }));
}, [dispatch, fieldName, nodeId]);
if (!isExposed) {
return (
<IconButton
variant="ghost"
tooltip={t('nodes.addLinearView')}
aria-label={t('nodes.addLinearView')}
icon={<PiPlusBold />}
onClick={handleExposeField}
pointerEvents="auto"
size="xs"
/>
);
} else {
return (
<IconButton
variant="ghost"
tooltip={t('nodes.removeLinearView')}
aria-label={t('nodes.removeLinearView')}
icon={<PiMinusBold />}
onClick={handleUnexposeField}
pointerEvents="auto"
size="xs"
/>
);
}
};
export default memo(FieldLinearViewToggle);

View File

@ -4,12 +4,12 @@ import { useDoesInputHaveValue } from 'features/nodes/hooks/useDoesInputHaveValu
import { useFieldInputInstance } from 'features/nodes/hooks/useFieldInputInstance'; import { useFieldInputInstance } from 'features/nodes/hooks/useFieldInputInstance';
import { useFieldInputTemplate } from 'features/nodes/hooks/useFieldInputTemplate'; import { useFieldInputTemplate } from 'features/nodes/hooks/useFieldInputTemplate';
import type { PropsWithChildren } from 'react'; import type { PropsWithChildren } from 'react';
import { memo, useMemo } from 'react'; import { memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
import EditableFieldTitle from './EditableFieldTitle'; import EditableFieldTitle from './EditableFieldTitle';
import FieldContextMenu from './FieldContextMenu';
import FieldHandle from './FieldHandle'; import FieldHandle from './FieldHandle';
import FieldLinearViewToggle from './FieldLinearViewToggle';
import InputFieldRenderer from './InputFieldRenderer'; import InputFieldRenderer from './InputFieldRenderer';
interface Props { interface Props {
@ -22,6 +22,7 @@ const InputField = ({ nodeId, fieldName }: Props) => {
const fieldTemplate = useFieldInputTemplate(nodeId, fieldName); const fieldTemplate = useFieldInputTemplate(nodeId, fieldName);
const fieldInstance = useFieldInputInstance(nodeId, fieldName); const fieldInstance = useFieldInputInstance(nodeId, fieldName);
const doesFieldHaveValue = useDoesInputHaveValue(nodeId, fieldName); const doesFieldHaveValue = useDoesInputHaveValue(nodeId, fieldName);
const [isHovered, setIsHovered] = useState(false);
const { isConnected, isConnectionInProgress, isConnectionStartField, connectionError, shouldDim } = const { isConnected, isConnectionInProgress, isConnectionStartField, connectionError, shouldDim } =
useConnectionState({ nodeId, fieldName, kind: 'input' }); useConnectionState({ nodeId, fieldName, kind: 'input' });
@ -46,6 +47,14 @@ const InputField = ({ nodeId, fieldName }: Props) => {
return false; return false;
}, [fieldTemplate, isConnected, doesFieldHaveValue]); }, [fieldTemplate, isConnected, doesFieldHaveValue]);
const onMouseEnter = useCallback(() => {
setIsHovered(true);
}, []);
const onMouseLeave = useCallback(() => {
setIsHovered(false);
}, []);
if (!fieldTemplate || !fieldInstance) { if (!fieldTemplate || !fieldInstance) {
return ( return (
<InputFieldWrapper shouldDim={shouldDim}> <InputFieldWrapper shouldDim={shouldDim}>
@ -87,19 +96,17 @@ const InputField = ({ nodeId, fieldName }: Props) => {
return ( return (
<InputFieldWrapper shouldDim={shouldDim}> <InputFieldWrapper shouldDim={shouldDim}>
<FormControl isInvalid={isMissingInput} isDisabled={isConnected} orientation="vertical" px={2}> <FormControl isInvalid={isMissingInput} isDisabled={isConnected} orientation="vertical" px={2}>
<Flex flexDir="column" w="full" gap={1}> <Flex flexDir="column" w="full" gap={1} onMouseEnter={onMouseEnter} onMouseLeave={onMouseLeave}>
<FieldContextMenu nodeId={nodeId} fieldName={fieldName} kind="input"> <Flex>
{(ref) => (
<EditableFieldTitle <EditableFieldTitle
ref={ref}
nodeId={nodeId} nodeId={nodeId}
fieldName={fieldName} fieldName={fieldName}
kind="input" kind="input"
isMissingInput={isMissingInput} isMissingInput={isMissingInput}
withTooltip withTooltip
/> />
)} {isHovered && <FieldLinearViewToggle nodeId={nodeId} fieldName={fieldName} />}
</FieldContextMenu> </Flex>
<InputFieldRenderer nodeId={nodeId} fieldName={fieldName} /> <InputFieldRenderer nodeId={nodeId} fieldName={fieldName} />
</Flex> </Flex>
</FormControl> </FormControl>

View File

@ -1,7 +1,7 @@
import type { RootState } from 'app/store/store'; import type { RootState } from 'app/store/store';
import type { LoRAMetadataItem } from 'features/nodes/types/metadata'; import type { LoRAMetadataItem } from 'features/nodes/types/metadata';
import { zLoRAMetadataItem } from 'features/nodes/types/metadata'; import { zLoRAMetadataItem } from 'features/nodes/types/metadata';
import { forEach, size } from 'lodash-es'; import { filter, size } from 'lodash-es';
import type { NonNullableGraph, SDXLLoraLoaderInvocation } from 'services/api/types'; import type { NonNullableGraph, SDXLLoraLoaderInvocation } from 'services/api/types';
import { import {
@ -31,8 +31,8 @@ export const addSDXLLoRAsToGraph = (
* So we need to inject a LoRA chain into the graph. * So we need to inject a LoRA chain into the graph.
*/ */
const { loras } = state.lora; const enabledLoRAs = filter(state.lora.loras, (l) => l.isEnabled ?? false);
const loraCount = size(loras); const loraCount = size(enabledLoRAs);
if (loraCount === 0) { if (loraCount === 0) {
return; return;
@ -59,7 +59,7 @@ export const addSDXLLoRAsToGraph = (
let lastLoraNodeId = ''; let lastLoraNodeId = '';
let currentLoraIndex = 0; let currentLoraIndex = 0;
forEach(loras, (lora) => { enabledLoRAs.forEach((lora) => {
const { model_name, base_model, weight } = lora; const { model_name, base_model, weight } = lora;
const currentLoraNodeId = `${LORA_LOADER}_${model_name.replace('.', '_')}`; const currentLoraNodeId = `${LORA_LOADER}_${model_name.replace('.', '_')}`;

View File

@ -123,6 +123,7 @@ export const buildCanvasImageToImageGraph = (state: RootState, initialImage: Ima
id: DENOISE_LATENTS, id: DENOISE_LATENTS,
is_intermediate, is_intermediate,
cfg_scale, cfg_scale,
cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
denoising_start: 1 - strength, denoising_start: 1 - strength,

View File

@ -58,6 +58,7 @@ export const buildCanvasInpaintGraph = (
negativePrompt, negativePrompt,
model, model,
cfgScale: cfg_scale, cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
img2imgStrength: strength, img2imgStrength: strength,
@ -152,6 +153,7 @@ export const buildCanvasInpaintGraph = (
is_intermediate, is_intermediate,
steps: steps, steps: steps,
cfg_scale: cfg_scale, cfg_scale: cfg_scale,
cfg_rescale_multiplier,
scheduler: scheduler, scheduler: scheduler,
denoising_start: 1 - strength, denoising_start: 1 - strength,
denoising_end: 1, denoising_end: 1,
@ -175,6 +177,7 @@ export const buildCanvasInpaintGraph = (
is_intermediate, is_intermediate,
steps: canvasCoherenceSteps, steps: canvasCoherenceSteps,
cfg_scale: cfg_scale, cfg_scale: cfg_scale,
cfg_rescale_multiplier,
scheduler: scheduler, scheduler: scheduler,
denoising_start: 1 - canvasCoherenceStrength, denoising_start: 1 - canvasCoherenceStrength,
denoising_end: 1, denoising_end: 1,

View File

@ -60,6 +60,7 @@ export const buildCanvasOutpaintGraph = (
negativePrompt, negativePrompt,
model, model,
cfgScale: cfg_scale, cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
img2imgStrength: strength, img2imgStrength: strength,
@ -161,6 +162,7 @@ export const buildCanvasOutpaintGraph = (
is_intermediate, is_intermediate,
steps: steps, steps: steps,
cfg_scale: cfg_scale, cfg_scale: cfg_scale,
cfg_rescale_multiplier,
scheduler: scheduler, scheduler: scheduler,
denoising_start: 1 - strength, denoising_start: 1 - strength,
denoising_end: 1, denoising_end: 1,
@ -184,6 +186,7 @@ export const buildCanvasOutpaintGraph = (
is_intermediate, is_intermediate,
steps: canvasCoherenceSteps, steps: canvasCoherenceSteps,
cfg_scale: cfg_scale, cfg_scale: cfg_scale,
cfg_rescale_multiplier,
scheduler: scheduler, scheduler: scheduler,
denoising_start: 1 - canvasCoherenceStrength, denoising_start: 1 - canvasCoherenceStrength,
denoising_end: 1, denoising_end: 1,

View File

@ -124,6 +124,7 @@ export const buildCanvasSDXLImageToImageGraph = (state: RootState, initialImage:
id: SDXL_DENOISE_LATENTS, id: SDXL_DENOISE_LATENTS,
is_intermediate, is_intermediate,
cfg_scale, cfg_scale,
cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength, denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,

View File

@ -60,6 +60,7 @@ export const buildCanvasSDXLInpaintGraph = (
negativePrompt, negativePrompt,
model, model,
cfgScale: cfg_scale, cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
seed, seed,
@ -151,6 +152,7 @@ export const buildCanvasSDXLInpaintGraph = (
is_intermediate, is_intermediate,
steps: steps, steps: steps,
cfg_scale: cfg_scale, cfg_scale: cfg_scale,
cfg_rescale_multiplier,
scheduler: scheduler, scheduler: scheduler,
denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength, denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,
denoising_end: refinerModel ? refinerStart : 1, denoising_end: refinerModel ? refinerStart : 1,
@ -174,6 +176,7 @@ export const buildCanvasSDXLInpaintGraph = (
is_intermediate, is_intermediate,
steps: canvasCoherenceSteps, steps: canvasCoherenceSteps,
cfg_scale: cfg_scale, cfg_scale: cfg_scale,
cfg_rescale_multiplier,
scheduler: scheduler, scheduler: scheduler,
denoising_start: 1 - canvasCoherenceStrength, denoising_start: 1 - canvasCoherenceStrength,
denoising_end: 1, denoising_end: 1,

View File

@ -62,6 +62,7 @@ export const buildCanvasSDXLOutpaintGraph = (
negativePrompt, negativePrompt,
model, model,
cfgScale: cfg_scale, cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
seed, seed,
@ -160,6 +161,7 @@ export const buildCanvasSDXLOutpaintGraph = (
is_intermediate, is_intermediate,
steps: steps, steps: steps,
cfg_scale: cfg_scale, cfg_scale: cfg_scale,
cfg_rescale_multiplier,
scheduler: scheduler, scheduler: scheduler,
denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength, denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,
denoising_end: refinerModel ? refinerStart : 1, denoising_end: refinerModel ? refinerStart : 1,
@ -183,6 +185,7 @@ export const buildCanvasSDXLOutpaintGraph = (
is_intermediate, is_intermediate,
steps: canvasCoherenceSteps, steps: canvasCoherenceSteps,
cfg_scale: cfg_scale, cfg_scale: cfg_scale,
cfg_rescale_multiplier,
scheduler: scheduler, scheduler: scheduler,
denoising_start: 1 - canvasCoherenceStrength, denoising_start: 1 - canvasCoherenceStrength,
denoising_end: 1, denoising_end: 1,

View File

@ -117,6 +117,7 @@ export const buildCanvasSDXLTextToImageGraph = (state: RootState): NonNullableGr
id: SDXL_DENOISE_LATENTS, id: SDXL_DENOISE_LATENTS,
is_intermediate, is_intermediate,
cfg_scale, cfg_scale,
cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
denoising_start: 0, denoising_start: 0,

View File

@ -115,6 +115,7 @@ export const buildCanvasTextToImageGraph = (state: RootState): NonNullableGraph
id: DENOISE_LATENTS, id: DENOISE_LATENTS,
is_intermediate, is_intermediate,
cfg_scale, cfg_scale,
cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
denoising_start: 0, denoising_start: 0,

View File

@ -123,6 +123,7 @@ export const buildLinearImageToImageGraph = (state: RootState): NonNullableGraph
type: 'denoise_latents', type: 'denoise_latents',
id: DENOISE_LATENTS, id: DENOISE_LATENTS,
cfg_scale, cfg_scale,
cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
denoising_start: 1 - strength, denoising_start: 1 - strength,

View File

@ -126,6 +126,7 @@ export const buildLinearSDXLImageToImageGraph = (state: RootState): NonNullableG
type: 'denoise_latents', type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS, id: SDXL_DENOISE_LATENTS,
cfg_scale, cfg_scale,
cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength, denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,

View File

@ -109,6 +109,7 @@ export const buildLinearSDXLTextToImageGraph = (state: RootState): NonNullableGr
type: 'denoise_latents', type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS, id: SDXL_DENOISE_LATENTS,
cfg_scale, cfg_scale,
cfg_rescale_multiplier,
scheduler, scheduler,
steps, steps,
denoising_start: 0, denoising_start: 0,

View File

@ -23,7 +23,7 @@ import ParamMainModelSelect from 'features/parameters/components/MainModel/Param
import { selectGenerationSlice } from 'features/parameters/store/generationSlice'; import { selectGenerationSlice } from 'features/parameters/store/generationSlice';
import { useExpanderToggle } from 'features/settingsAccordions/hooks/useExpanderToggle'; import { useExpanderToggle } from 'features/settingsAccordions/hooks/useExpanderToggle';
import { useStandaloneAccordionToggle } from 'features/settingsAccordions/hooks/useStandaloneAccordionToggle'; import { useStandaloneAccordionToggle } from 'features/settingsAccordions/hooks/useStandaloneAccordionToggle';
import { filter, size } from 'lodash-es'; import { filter } from 'lodash-es';
import { memo } from 'react'; import { memo } from 'react';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
@ -33,7 +33,7 @@ const formLabelProps: FormLabelProps = {
const badgesSelector = createMemoizedSelector(selectLoraSlice, selectGenerationSlice, (lora, generation) => { const badgesSelector = createMemoizedSelector(selectLoraSlice, selectGenerationSlice, (lora, generation) => {
const enabledLoRAsCount = filter(lora.loras, (l) => !!l.isEnabled).length; const enabledLoRAsCount = filter(lora.loras, (l) => !!l.isEnabled).length;
const loraTabBadges = size(lora.loras) ? [enabledLoRAsCount] : []; const loraTabBadges = enabledLoRAsCount ? [enabledLoRAsCount] : [];
const accordionBadges: (string | number)[] = []; const accordionBadges: (string | number)[] = [];
if (generation.model) { if (generation.model) {
accordionBadges.push(generation.model.model_name); accordionBadges.push(generation.model.model_name);

View File

@ -1,6 +1,12 @@
import { useAppDispatch } from 'app/store/storeHooks';
import { $builtWorkflow } from 'features/nodes/hooks/useWorkflowWatcher'; import { $builtWorkflow } from 'features/nodes/hooks/useWorkflowWatcher';
import { workflowDownloaded } from 'features/workflowLibrary/store/actions';
import { useCallback } from 'react';
const downloadWorkflow = () => { export const useDownloadWorkflow = () => {
const dispatch = useAppDispatch();
const downloadWorkflow = useCallback(() => {
const workflow = $builtWorkflow.get(); const workflow = $builtWorkflow.get();
if (!workflow) { if (!workflow) {
return; return;
@ -12,8 +18,8 @@ const downloadWorkflow = () => {
document.body.appendChild(a); document.body.appendChild(a);
a.click(); a.click();
a.remove(); a.remove();
}; dispatch(workflowDownloaded());
}, [dispatch]);
export const useDownloadWorkflow = () => {
return downloadWorkflow; return downloadWorkflow;
}; };

View File

@ -3,6 +3,7 @@ import { useAppDispatch } from 'app/store/storeHooks';
import { workflowLoadRequested } from 'features/nodes/store/actions'; import { workflowLoadRequested } from 'features/nodes/store/actions';
import { addToast } from 'features/system/store/systemSlice'; import { addToast } from 'features/system/store/systemSlice';
import { makeToast } from 'features/system/util/makeToast'; import { makeToast } from 'features/system/util/makeToast';
import { workflowLoadedFromFile } from 'features/workflowLibrary/store/actions';
import type { RefObject } from 'react'; import type { RefObject } from 'react';
import { useCallback } from 'react'; import { useCallback } from 'react';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
@ -29,6 +30,7 @@ export const useLoadWorkflowFromFile: UseLoadWorkflowFromFile = ({ resetRef }) =
try { try {
const parsedJSON = JSON.parse(String(rawJSON)); const parsedJSON = JSON.parse(String(rawJSON));
dispatch(workflowLoadRequested({ workflow: parsedJSON, asCopy: true })); dispatch(workflowLoadRequested({ workflow: parsedJSON, asCopy: true }));
dispatch(workflowLoadedFromFile());
} catch (e) { } catch (e) {
// There was a problem reading the file // There was a problem reading the file
logger.error(t('nodes.unableToLoadWorkflow')); logger.error(t('nodes.unableToLoadWorkflow'));

View File

@ -4,6 +4,7 @@ import { useAppDispatch } from 'app/store/storeHooks';
import { $builtWorkflow } from 'features/nodes/hooks/useWorkflowWatcher'; import { $builtWorkflow } from 'features/nodes/hooks/useWorkflowWatcher';
import { workflowIDChanged, workflowSaved } from 'features/nodes/store/workflowSlice'; import { workflowIDChanged, workflowSaved } from 'features/nodes/store/workflowSlice';
import type { WorkflowV2 } from 'features/nodes/types/workflow'; import type { WorkflowV2 } from 'features/nodes/types/workflow';
import { workflowUpdated } from 'features/workflowLibrary/store/actions';
import { useCallback, useRef } from 'react'; import { useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
import { useCreateWorkflowMutation, useUpdateWorkflowMutation, workflowsApi } from 'services/api/endpoints/workflows'; import { useCreateWorkflowMutation, useUpdateWorkflowMutation, workflowsApi } from 'services/api/endpoints/workflows';
@ -41,6 +42,7 @@ export const useSaveLibraryWorkflow: UseSaveLibraryWorkflow = () => {
try { try {
if (isWorkflowWithID(workflow)) { if (isWorkflowWithID(workflow)) {
await updateWorkflow(workflow).unwrap(); await updateWorkflow(workflow).unwrap();
dispatch(workflowUpdated());
} else { } else {
const data = await createWorkflow(workflow).unwrap(); const data = await createWorkflow(workflow).unwrap();
dispatch(workflowIDChanged(data.workflow.id)); dispatch(workflowIDChanged(data.workflow.id));

View File

@ -9,6 +9,7 @@ import {
workflowSaved, workflowSaved,
} from 'features/nodes/store/workflowSlice'; } from 'features/nodes/store/workflowSlice';
import type { WorkflowCategory } from 'features/nodes/types/workflow'; import type { WorkflowCategory } from 'features/nodes/types/workflow';
import { newWorkflowSaved } from 'features/workflowLibrary/store/actions';
import { useCallback, useRef } from 'react'; import { useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
import { useCreateWorkflowMutation, workflowsApi } from 'services/api/endpoints/workflows'; import { useCreateWorkflowMutation, workflowsApi } from 'services/api/endpoints/workflows';
@ -56,6 +57,7 @@ export const useSaveWorkflowAs: UseSaveWorkflowAs = () => {
dispatch(workflowNameChanged(data.workflow.name)); dispatch(workflowNameChanged(data.workflow.name));
dispatch(workflowCategoryChanged(data.workflow.meta.category)); dispatch(workflowCategoryChanged(data.workflow.meta.category));
dispatch(workflowSaved()); dispatch(workflowSaved());
dispatch(newWorkflowSaved({ category }));
onSuccess && onSuccess(); onSuccess && onSuccess();
toast.update(toastRef.current, { toast.update(toastRef.current, {

View File

@ -0,0 +1,10 @@
import { createAction } from '@reduxjs/toolkit';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
export const workflowDownloaded = createAction('workflowLibrary/workflowDownloaded');
export const workflowLoadedFromFile = createAction('workflowLibrary/workflowLoadedFromFile');
export const newWorkflowSaved = createAction<{ category: WorkflowCategory }>('workflowLibrary/newWorkflowSaved');
export const workflowUpdated = createAction('workflowLibrary/workflowUpdated');

View File

@ -34,14 +34,13 @@ classifiers = [
dependencies = [ dependencies = [
# Core generation dependencies, pinned for reproducible builds. # Core generation dependencies, pinned for reproducible builds.
"accelerate==0.26.1", "accelerate==0.26.1",
"basicsr==1.4.2",
"clip_anytorch==2.5.2", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "clip_anytorch==2.5.2", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
"compel==2.0.2", "compel==2.0.2",
"controlnet-aux==0.0.7", "controlnet-aux==0.0.7",
"diffusers[torch]==0.26.1", "diffusers[torch]==0.26.2",
"invisible-watermark==0.2.0", # needed to install SDXL base and refiner using their repo_ids "invisible-watermark==0.2.0", # needed to install SDXL base and refiner using their repo_ids
"mediapipe==0.10.7", # needed for "mediapipeface" controlnet model "mediapipe==0.10.7", # needed for "mediapipeface" controlnet model
"numpy==1.26.3", # >1.24.0 is needed to use the 'strict' argument to np.testing.assert_array_equal() "numpy==1.26.4", # >1.24.0 is needed to use the 'strict' argument to np.testing.assert_array_equal()
"onnx==1.15.0", "onnx==1.15.0",
"onnxruntime==1.16.3", "onnxruntime==1.16.3",
"opencv-python==4.9.0.80", "opencv-python==4.9.0.80",
@ -55,13 +54,13 @@ dependencies = [
"transformers==4.37.2", "transformers==4.37.2",
# Core application dependencies, pinned for reproducible builds. # Core application dependencies, pinned for reproducible builds.
"fastapi-events==0.10.0", "fastapi-events==0.10.1",
"fastapi==0.108.0", "fastapi==0.109.2",
"huggingface-hub==0.20.3", "huggingface-hub==0.20.3",
"pydantic-settings==2.1.0", "pydantic-settings==2.1.0",
"pydantic==2.5.3", "pydantic==2.6.1",
"python-socketio==5.11.0", "python-socketio==5.11.1",
"uvicorn[standard]==0.25.0", "uvicorn[standard]==0.27.1",
# Auxiliary dependencies, pinned only if necessary. # Auxiliary dependencies, pinned only if necessary.
"albumentations", "albumentations",
@ -111,7 +110,7 @@ dependencies = [
] ]
"dev" = ["jurigged", "pudb", "snakeviz", "gprof2dot"] "dev" = ["jurigged", "pudb", "snakeviz", "gprof2dot"]
"test" = [ "test" = [
"ruff==0.1.11", "ruff==0.2.1",
"ruff-lsp", "ruff-lsp",
"mypy", "mypy",
"pre-commit", "pre-commit",
@ -205,13 +204,6 @@ output = "coverage/index.xml"
#=== Begin: Ruff #=== Begin: Ruff
[tool.ruff] [tool.ruff]
line-length = 120 line-length = 120
ignore = [
"E501", # https://docs.astral.sh/ruff/rules/line-too-long/
"C901", # https://docs.astral.sh/ruff/rules/complex-structure/
"B008", # https://docs.astral.sh/ruff/rules/function-call-in-default-argument/
"B904", # https://docs.astral.sh/ruff/rules/raise-without-from-inside-except/
]
select = ["B", "C", "E", "F", "W", "I"]
exclude = [ exclude = [
".git", ".git",
"__pycache__", "__pycache__",
@ -220,6 +212,15 @@ exclude = [
"invokeai/frontend/web/node_modules/", "invokeai/frontend/web/node_modules/",
".venv*", ".venv*",
] ]
[tool.ruff.lint]
ignore = [
"E501", # https://docs.astral.sh/ruff/rules/line-too-long/
"C901", # https://docs.astral.sh/ruff/rules/complex-structure/
"B008", # https://docs.astral.sh/ruff/rules/function-call-in-default-argument/
"B904", # https://docs.astral.sh/ruff/rules/raise-without-from-inside-except/
]
select = ["B", "C", "E", "F", "W", "I"]
#=== End: Ruff #=== End: Ruff
#=== Begin: MyPy #=== Begin: MyPy