mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

Compare commits: bugfix/han... ... feat/ui/re...

33 Commits (SHA1):
54e46faa54
183945077d
0fea08df7d
5fe40d228e
1c507d088a
87893d29e9
5667465029
9021841a49
15703c8045
eb2bbf1609
aca953044e
f5a0050a00
9e38558633
93e589a738
4f3dd6dbca
643ef964ac
d8349ed42f
53f2008893
60acd4e02f
82c471ec2a
47d76f8033
b02e11d2b5
24d67c77e1
b704941119
dd3b955b8a
7613ef3d30
fba7f36038
2bfd4407ad
265ccab15f
ebf1f1bf6b
07ce7685b1
521d91ea58
7521dff206
@@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai
 ENV VIRTUAL_ENV=/opt/venv/invokeai
 
 ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-ARG TORCH_VERSION=2.1.2
-ARG TORCHVISION_VERSION=0.16.2
+ARG TORCH_VERSION=2.1.0
+ARG TORCHVISION_VERSION=0.16
 ARG GPU_DRIVER=cuda
 ARG TARGETPLATFORM="linux/amd64"
 # unused but available
@@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
     elif [ "$GPU_DRIVER" = "rocm" ]; then \
-        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.6"; \
+        extra_index_url_arg="--index-url https://download.pytorch.org/whl/rocm5.6"; \
     else \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
     fi &&\
@@ -54,7 +54,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
         pip install -e ".[xformers]"; \
     else \
-        pip install $extra_index_url_arg -e "."; \
+        pip install -e "."; \
     fi
 
 # #### Build the Web UI ------------------------------------
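For context (an illustration, not part of the diff): the shell logic above picks a PyTorch wheel index based on the GPU driver and target platform. A minimal Python sketch of the same mapping, with the URLs taken from the hunk above; the function name and structure are ours:

```python
# Sketch: map a GPU driver choice to pip index arguments, mirroring the
# Dockerfile logic above. URLs come from the diff; everything else is illustrative.
def torch_index_args(gpu_driver: str, platform: str = "linux/amd64") -> list[str]:
    if platform == "linux/arm64" or gpu_driver == "cpu":
        return ["--extra-index-url", "https://download.pytorch.org/whl/cpu"]
    if gpu_driver == "rocm":
        # "--index-url" replaces PyPI entirely; "--extra-index-url" only adds a fallback index.
        return ["--index-url", "https://download.pytorch.org/whl/rocm5.6"]
    return ["--extra-index-url", "https://download.pytorch.org/whl/cu121"]


print(torch_index_args("rocm"))
# ['--index-url', 'https://download.pytorch.org/whl/rocm5.6']
```

The distinction matters: `--extra-index-url` lets pip fall back to PyPI, while `--index-url` forces every package through the PyTorch index, which is one way to avoid accidentally pulling a CUDA build on a ROCm system.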
@@ -28,7 +28,7 @@ This is done via Docker Desktop preferences
 
 ### Configure Invoke environment
 
-1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
+1. Make a copy of `env.sample` and name it `.env` (`cp env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
     a. the desired location of the InvokeAI runtime directory, or
     b. an existing, v3.0.0 compatible runtime directory.
 1. Execute `run.sh`
@@ -21,7 +21,7 @@ run() {
     printf "%s\n" "$build_args"
   fi
 
-  docker compose build $build_args $service_name
+  docker compose build $build_args
   unset build_args
 
   printf "%s\n" "starting service $service_name"
@@ -94,8 +94,6 @@ A model that helps generate creative QR codes that still scan. Can also be used
 **Openpose**:
 The OpenPose control model allows for the identification of the general pose of a character by pre-processing an existing image with a clear human structure. With advanced options, Openpose can also detect the face or hands in the image.
 
-*Note:* The DWPose Processor has replaced the OpenPose processor in Invoke. Workflows and generations that relied on the OpenPose Processor will need to be updated to use the DWPose Processor instead.
-
 **Mediapipe Face**:
 
 The MediaPipe Face identification processor is able to clearly identify facial features in order to capture vivid expressions of human faces.
@@ -230,13 +230,13 @@ manager, please follow these steps:
 === "local Webserver"
 
     ```bash
-    invokeai-web
+    invokeai --web
     ```
 
 === "Public Webserver"
 
     ```bash
-    invokeai-web --host 0.0.0.0
+    invokeai --web --host 0.0.0.0
     ```
 
 === "CLI"
@@ -402,4 +402,4 @@ environment variable INVOKEAI_ROOT to point to the installation directory.
 Note that if you run into problems with the Conda installation, the InvokeAI
 staff will **not** be able to help you out. Caveat Emptor!
 
-[dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
+[dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
@@ -69,7 +69,7 @@ a token and copy it, since you will need it for the next step.
 
 ### Setup
 
-Set up your environment variables. In the `docker` directory, make a copy of `.env.sample` and name it `.env`. Make changes as necessary.
+Set up your environment variables. In the `docker` directory, make a copy of `env.sample` and name it `.env`. Make changes as necessary.
 
 Any environment variables supported by InvokeAI can be set here - please see the [CONFIGURATION](../features/CONFIGURATION.md) for further detail.
 
@@ -32,7 +32,6 @@ To use a community workflow, download the `.json` node graph file and load i
 + [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
 + [Image Picker](#image-picker)
 + [Image Resize Plus](#image-resize-plus)
-+ [Latent Upscale](#latent-upscale)
 + [Load Video Frame](#load-video-frame)
 + [Make 3D](#make-3d)
 + [Mask Operations](#mask-operations)
@@ -43,7 +42,6 @@ To use a community workflow, download the `.json` node graph file and load i
 + [Oobabooga](#oobabooga)
 + [Prompt Tools](#prompt-tools)
 + [Remote Image](#remote-image)
-+ [BriaAI Background Remove](#briaai-remove-background)
 + [Remove Background](#remove-background)
 + [Retroize](#retroize)
 + [Size Stepper Nodes](#size-stepper-nodes)
@@ -291,13 +289,6 @@ View:
 </br><img src="https://raw.githubusercontent.com/VeyDlin/image-resize-plus-node/master/.readme/node.png" width="500" />
 
 
 --------------------------------
-### Latent Upscale
-
-**Description:** This node uses a small (~2.4mb) model to upscale the latents used in a Stable Diffusion 1.5 or Stable Diffusion XL image generation, rather than the typical interpolation method, avoiding the traditional downsides of the latent upscale technique.
-
-**Node Link:** [https://github.com/gogurtenjoyer/latent-upscale](https://github.com/gogurtenjoyer/latent-upscale)
-
---------------------------------
 ### Load Video Frame
 
@@ -443,17 +434,6 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
 
 **Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image
 
 --------------------------------
-
-### BriaAI Remove Background
-
-**Description**: Implements one click background removal with BriaAI's new version 1.4 model which seems to be producing better results than any other previous background removal tool.
-
-**Node Link:** https://github.com/blessedcoolant/invoke_bria_rmbg
-
-**View**
-<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_bria_rmbg/main/assets/preview.jpg" />
-
---------------------------------
 ### Remove Background
 
@@ -81,7 +81,7 @@ their descriptions.
 | ONNX Text to Latents | Generates latents from conditionings. |
 | ONNX Model Loader | Loads a main model, outputting its submodels. |
 | OpenCV Inpaint | Simple inpaint using opencv. |
-| DW Openpose Processor | Applies Openpose processing to image |
+| Openpose Processor | Applies Openpose processing to image |
 | PIDI Processor | Applies PIDI processing to image |
 | Prompts from File | Loads prompts from a text file |
 | Random Integer | Outputs a single random integer. |
@@ -14,19 +14,11 @@ function is_bin_in_path {
 }
 
 function git_show {
-    git show -s --format=oneline --abbrev-commit "$1" | cat
+    git show -s --format='%h %s' $1
 }
 
-if [[ -v "VIRTUAL_ENV" ]]; then
-    # we can't just call 'deactivate' because this function is not exported
-    # to the environment of this script from the bash process that runs the script
-    echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
-    exit -1
-fi
-
 cd "$(dirname "$0")"
 
 echo
 echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}"
 echo "The current working directory is $(pwd)"
 read -p "If that looks right, press any key to proceed, or CTRL-C to exit..."
@@ -40,6 +32,13 @@ if ! is_bin_in_path python && is_bin_in_path python3; then
     }
 fi
 
+if [[ -v "VIRTUAL_ENV" ]]; then
+    # we can't just call 'deactivate' because this function is not exported
+    # to the environment of this script from the bash process that runs the script
+    echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
+    exit -1
+fi
+
 VERSION=$(
     cd ..
     python -c "from invokeai.version import __version__ as version; print(version)"
@@ -48,9 +47,38 @@ PATCH=""
 VERSION="v${VERSION}${PATCH}"
 
 echo -e "${BGREEN}HEAD${RESET}:"
-git_show HEAD
+git_show
 echo
 
+# ---------------------- FRONTEND ----------------------
+
+pushd ../invokeai/frontend/web >/dev/null
+echo
+echo "Installing frontend dependencies..."
+echo
+pnpm i --frozen-lockfile
+echo
+echo "Building frontend..."
+echo
+pnpm build
+popd
+
+# ---------------------- BACKEND ----------------------
+
+echo
+echo "Building wheel..."
+echo
+
+# install the 'build' package in the user site packages, if needed
+# could be improved by using a temporary venv, but it's tiny and harmless
+if [[ $(python -c 'from importlib.util import find_spec; print(find_spec("build") is None)') == "True" ]]; then
+    pip install --user build
+fi
+
+rm -rf ../build
+
+python -m build --wheel --outdir dist/ ../.
+
 # ----------------------
 
 echo
@@ -69,13 +97,16 @@ done
 mkdir InvokeAI-Installer/lib
 cp lib/*.py InvokeAI-Installer/lib
 
 # Move the wheel
 mv dist/*.whl InvokeAI-Installer/lib/
 
 # Install scripts
 # Mac/Linux
 cp install.sh.in InvokeAI-Installer/install.sh
 chmod a+x InvokeAI-Installer/install.sh
 
 # Windows
-cp install.bat.in InvokeAI-Installer/install.bat
+perl -p -e "s/^set INVOKEAI_VERSION=.*/set INVOKEAI_VERSION=$VERSION/" install.bat.in >InvokeAI-Installer/install.bat
 cp WinLongPathsEnabled.reg InvokeAI-Installer/
 
 # Zip everything up
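An aside on the `find_spec` one-liner in the hunk above (an illustrative sketch; the helper name is ours, not the script's): checking whether a module is importable without actually importing it is how the script decides whether the `build` package needs installing.

```python
# Sketch: detect whether the "build" package is importable, as the
# one-liner in the script above does, and install it on demand.
import subprocess
import sys
from importlib.util import find_spec


def ensure_build_package() -> None:
    if find_spec("build") is None:
        # --user keeps the helper out of the system site-packages
        subprocess.check_call([sys.executable, "-m", "pip", "install", "--user", "build"])


ensure_build_package()
```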
@@ -15,6 +15,7 @@ if "%1" == "use-cache" (
 @rem Config
 @rem The version in the next line is replaced by an up to date release number
+@rem when create_installer.sh is run. Change the release number there.
 set INVOKEAI_VERSION=latest
 set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
 set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
 set PYTHON_URL=https://www.python.org/downloads/windows/
@@ -11,7 +11,7 @@ import sys
 import venv
 from pathlib import Path
 from tempfile import TemporaryDirectory
-from typing import Optional, Tuple
+from typing import Union
 
 SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100"
 INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
@@ -21,20 +21,40 @@ OS = platform.uname().system
 ARCH = platform.uname().machine
 VERSION = "latest"
 
+### Feature flags
+# Install the virtualenv into the runtime dir
+FF_VENV_IN_RUNTIME = True
+
+# Install the wheel packaged with the installer
+FF_USE_LOCAL_WHEEL = True
+
 
 class Installer:
     """
     Deploys an InvokeAI installation into a given path
     """
 
-    reqs: list[str] = INSTALLER_REQS
-
     def __init__(self) -> None:
+        self.reqs = INSTALLER_REQS
+        self.preflight()
         if os.getenv("VIRTUAL_ENV") is not None:
             print("A virtual environment is already activated. Please 'deactivate' before installation.")
             sys.exit(-1)
         self.bootstrap()
-        self.available_releases = get_github_releases()
+
+    def preflight(self) -> None:
+        """
+        Preflight checks
+        """
+
+        # TODO
+        # verify python version
+        # on macOS verify XCode tools are present
+        # verify libmesa, libglx on linux
+        # check that the system arch is not i386 (?)
+        # check that the system has a GPU, and the type of GPU
+
+        pass
 
     def mktemp_venv(self) -> TemporaryDirectory:
         """
@@ -58,9 +78,12 @@ class Installer:
 
         return venv_dir
 
-    def bootstrap(self, verbose: bool = False) -> TemporaryDirectory | None:
+    def bootstrap(self, verbose: bool = False) -> TemporaryDirectory:
         """
         Bootstrap the installer venv with packages required at install time
+
+        :return: path to the virtual environment directory that was bootstrapped
+        :rtype: TemporaryDirectory
         """
 
         print("Initializing the installer. This may take a minute - please wait...")
@@ -72,27 +95,39 @@ class Installer:
         cmd.extend(self.reqs)
 
         try:
-            # upgrade pip to the latest version to avoid a confusing message
-            res = upgrade_pip(Path(venv_dir.name))
-            if verbose:
-                print(res)
-
-            # run the install prerequisites installation
             res = subprocess.check_output(cmd).decode()
-
             if verbose:
                 print(res)
-
             return venv_dir
         except subprocess.CalledProcessError as e:
             print(e)
 
-    def app_venv(self, venv_parent) -> Path:
+    def app_venv(self, path: str = None):
         """
         Create a virtualenv for the InvokeAI installation
         """
 
-        venv_dir = venv_parent / ".venv"
+        # explicit venv location
+        # currently unused in normal operation
+        # useful for testing or special cases
+        if path is not None:
+            venv_dir = Path(path)
+
+        # experimental / testing
+        elif not FF_VENV_IN_RUNTIME:
+            if OS == "Windows":
+                venv_dir_parent = os.getenv("APPDATA", "~/AppData/Roaming")
+            elif OS == "Darwin":
+                # there is no environment variable on macOS to find this
+                # TODO: confirm this is working as expected
+                venv_dir_parent = "~/Library/Application Support"
+            elif OS == "Linux":
+                venv_dir_parent = os.getenv("XDG_DATA_DIR", "~/.local/share")
+            venv_dir = Path(venv_dir_parent).expanduser().resolve() / f"InvokeAI/{VERSION}/venv"
+
+        # stable / current
+        else:
+            venv_dir = self.dest / ".venv"
 
         # Prefer to copy python executables
         # so that updates to system python don't break InvokeAI
@@ -106,7 +141,7 @@ class Installer:
         return venv_dir
 
     def install(
-        self, version=None, root: str = "~/invokeai", yes_to_all=False, find_links: Optional[Path] = None
+        self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None
     ) -> None:
         """
         Install the InvokeAI application into the given runtime path
@@ -123,20 +158,15 @@ class Installer:
 
         import messages
 
-        messages.welcome(self.available_releases)
+        messages.welcome()
 
-        version = messages.choose_version(self.available_releases)
-
-        auto_dest = Path(os.environ.get("INVOKEAI_ROOT", root)).expanduser().resolve()
-        destination = auto_dest if yes_to_all else messages.dest_path(root)
-        if destination is None:
-            print("Could not find or create the destination directory. Installation cancelled.")
-            sys.exit(0)
+        default_path = os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve()
+        self.dest = default_path if yes_to_all else messages.dest_path(root)
 
         # create the venv for the app
-        self.venv = self.app_venv(venv_parent=destination)
+        self.venv = self.app_venv()
 
-        self.instance = InvokeAiInstance(runtime=destination, venv=self.venv, version=version)
+        self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version)
 
         # install dependencies and the InvokeAI application
         (extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
@@ -160,7 +190,7 @@ class InvokeAiInstance:
     A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
     """
 
-    def __init__(self, runtime: Path, venv: Path, version: str = "stable") -> None:
+    def __init__(self, runtime: Path, venv: Path, version: str) -> None:
         self.runtime = runtime
         self.venv = venv
         self.pip = get_pip_from_venv(venv)
@@ -169,7 +199,6 @@ class InvokeAiInstance:
         set_sys_path(venv)
         os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
         os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())
-        upgrade_pip(venv)
 
     def get(self) -> tuple[Path, Path]:
         """
@@ -183,7 +212,54 @@ class InvokeAiInstance:
 
     def install(self, extra_index_url=None, optional_modules=None, find_links=None):
         """
-        Install the package from PyPi.
+        Install this instance, including dependencies and the app itself
 
         :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
         :type extra_index_url: str
         """
 
+        import messages
+
+        # install torch first to ensure the correct version gets installed.
+        # works with either source or wheel install with negligible impact on installation times.
+        messages.simple_banner("Installing PyTorch :fire:")
+        self.install_torch(extra_index_url, find_links)
+
+        messages.simple_banner("Installing the InvokeAI Application :art:")
+        self.install_app(extra_index_url, optional_modules, find_links)
+
+    def install_torch(self, extra_index_url=None, find_links=None):
+        """
+        Install PyTorch
+        """
+
+        from plumbum import FG, local
+
+        pip = local[self.pip]
+
+        (
+            pip[
+                "install",
+                "--require-virtualenv",
+                "numpy==1.26.3",  # choose versions that won't be uninstalled during phase 2
+                "urllib3~=1.26.0",
+                "requests~=2.28.0",
+                "torch==2.1.2",
+                "torchmetrics==0.11.4",
+                "torchvision==0.16.2",
+                "--force-reinstall",
+                "--find-links" if find_links is not None else None,
+                find_links,
+                "--extra-index-url" if extra_index_url is not None else None,
+                extra_index_url,
+            ]
+            & FG
+        )
+
+    def install_app(self, extra_index_url=None, optional_modules=None, find_links=None):
+        """
+        Install the application with pip.
+        Supports installation from PyPi or from a local source directory.
+
+        :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
+        :type extra_index_url: str
@@ -195,52 +271,53 @@ class InvokeAiInstance:
         :type find_links: Path
         """
 
         import messages
 
-        # not currently used, but may be useful for "install most recent version" option
-        if self.version == "prerelease":
+        ## this only applies to pypi installs; TODO actually use this
+        if self.version == "pre":
             version = None
-            pre_flag = "--pre"
-        elif self.version == "stable":
-            version = None
-            pre_flag = None
+            pre = "--pre"
         else:
             version = self.version
-            pre_flag = None
+            pre = None
 
-        src = "invokeai"
-        if optional_modules:
-            src += optional_modules
-        if version:
-            src += f"=={version}"
+        ## TODO: only local wheel will be installed as of now; support for --version arg is TODO
+        if FF_USE_LOCAL_WHEEL:
+            # if no wheel, try to do a source install before giving up
+            try:
+                src = str(next(Path(__file__).parent.glob("InvokeAI-*.whl")))
+            except StopIteration:
+                try:
+                    src = Path(__file__).parents[1].expanduser().resolve()
+                    # if the above directory contains one of these files, we'll do a source install
+                    next(src.glob("pyproject.toml"))
+                    next(src.glob("invokeai"))
+                except StopIteration:
+                    print("Unable to find a wheel or perform a source install. Giving up.")
 
-        messages.simple_banner("Installing the InvokeAI Application :art:")
+        elif version == "source":
+            # this makes an assumption about the location of the installer package in the source tree
+            src = Path(__file__).parents[1].expanduser().resolve()
+        else:
+            # will install from PyPi
+            src = f"invokeai=={version}" if version is not None else "invokeai"
 
-        from plumbum import FG, ProcessExecutionError, local  # type: ignore
+        from plumbum import FG, local
 
         pip = local[self.pip]
 
-        pipeline = pip[
-            "install",
-            "--require-virtualenv",
-            "--force-reinstall",
-            "--use-pep517",
-            str(src),
-            "--find-links" if find_links is not None else None,
-            find_links,
-            "--extra-index-url" if extra_index_url is not None else None,
-            extra_index_url,
-            pre_flag,
-        ]
-
-        try:
-            _ = pipeline & FG
-        except ProcessExecutionError as e:
-            print(f"Error: {e}")
-            print(
-                "Could not install InvokeAI. Please try downloading the latest version of the installer and install again."
-            )
-            sys.exit(1)
+        (
+            pip[
+                "install",
+                "--require-virtualenv",
+                "--use-pep517",
+                str(src) + (optional_modules if optional_modules else ""),
+                "--find-links" if find_links is not None else None,
+                find_links,
+                "--extra-index-url" if extra_index_url is not None else None,
+                extra_index_url,
+                pre,
+            ]
+            & FG
+        )
 
     def configure(self):
         """
@@ -296,6 +373,7 @@ class InvokeAiInstance:
 
         ext = "bat" if OS == "Windows" else "sh"
 
+        # scripts = ['invoke', 'update']
         scripts = ["invoke"]
 
         for script in scripts:
@@ -330,23 +408,6 @@ def get_pip_from_venv(venv_path: Path) -> str:
     return str(venv_path.expanduser().resolve() / pip)
 
 
-def upgrade_pip(venv_path: Path) -> str | None:
-    """
-    Upgrade the pip executable in the given virtual environment
-    """
-
-    python = "Scripts\\python.exe" if OS == "Windows" else "bin/python"
-    python = str(venv_path.expanduser().resolve() / python)
-
-    try:
-        result = subprocess.check_output([python, "-m", "pip", "install", "--upgrade", "pip"]).decode()
-    except subprocess.CalledProcessError as e:
-        print(e)
-        result = None
-
-    return result
-
-
 def set_sys_path(venv_path: Path) -> None:
     """
     Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
@@ -370,43 +431,7 @@ def set_sys_path(venv_path: Path) -> None:
     sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve()))
 
 
-def get_github_releases() -> tuple[list, list] | None:
-    """
-    Query Github for published (pre-)release versions.
-    Return a tuple where the first element is a list of stable releases and the second element is a list of pre-releases.
-    Return None if the query fails for any reason.
-    """
-
-    import requests
-
-    ## get latest releases using github api
-    url = "https://api.github.com/repos/invoke-ai/InvokeAI/releases"
-    releases, pre_releases = [], []
-    try:
-        res = requests.get(url)
-        res.raise_for_status()
-        tag_info = res.json()
-        for tag in tag_info:
-            if not tag["prerelease"]:
-                releases.append(tag["tag_name"].lstrip("v"))
-            else:
-                pre_releases.append(tag["tag_name"].lstrip("v"))
-    except requests.HTTPError as e:
-        print(f"Error: {e}")
-        print("Could not fetch version information from GitHub. Please check your network connection and try again.")
-        return
-    except Exception as e:
-        print(f"Error: {e}")
-        print("An unexpected error occurred while trying to fetch version information from GitHub. Please try again.")
-        return
-
-    releases.sort(reverse=True)
-    pre_releases.sort(reverse=True)
-
-    return releases, pre_releases
-
-
-def get_torch_source() -> Tuple[str | None, str | None]:
+def get_torch_source() -> (Union[str, None], str):
     """
     Determine the extra index URL for pip to use for torch installation.
     This depends on the OS and the graphics accelerator in use.
@@ -421,26 +446,25 @@ def get_torch_source() -> Tuple[str | None, str | None]:
     :rtype: list
     """
 
-    from messages import select_gpu
+    from messages import graphical_accelerator
 
-    # device can be one of: "cuda", "rocm", "cpu", "cuda_and_dml, autodetect"
-    device = select_gpu()
+    # device can be one of: "cuda", "rocm", "cpu", "idk"
+    device = graphical_accelerator()
 
     url = None
     optional_modules = "[onnx]"
     if OS == "Linux":
-        if device.value == "rocm":
+        if device == "rocm":
            url = "https://download.pytorch.org/whl/rocm5.6"
-        elif device.value == "cpu":
+        elif device == "cpu":
            url = "https://download.pytorch.org/whl/cpu"
 
    elif OS == "Windows":
-        if device.value == "cuda":
-            url = "https://download.pytorch.org/whl/cu121"
-            optional_modules = "[xformers,onnx-cuda]"
-        if device.value == "cuda_and_dml":
-            url = "https://download.pytorch.org/whl/cu121"
-            optional_modules = "[xformers,onnx-directml]"
+        if device == "cuda":
+            url = "https://download.pytorch.org/whl/cu121"
+            optional_modules = "[xformers,onnx-cuda]"
+        if device == "cuda_and_dml":
+            url = "https://download.pytorch.org/whl/cu121"
+            optional_modules = "[xformers,onnx-directml]"
 
     # in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13
 
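To make the `(extra_index_url, optional_modules)` contract concrete (an illustration, not repo code): the first element becomes a pip index flag, and the second an extras suffix on the requirement string. A minimal sketch:

```python
# Sketch: how get_torch_source()'s return value is typically consumed.
# "invokeai" plus an extras suffix such as "[xformers,onnx-cuda]" forms a
# single pip requirement; the URL becomes an --extra-index-url flag.
def build_pip_args(extra_index_url, optional_modules):
    requirement = "invokeai" + (optional_modules or "")
    args = ["install", requirement]
    if extra_index_url is not None:
        args += ["--extra-index-url", extra_index_url]
    return args


print(build_pip_args("https://download.pytorch.org/whl/cu121", "[xformers,onnx-cuda]"))
# ['install', 'invokeai[xformers,onnx-cuda]', '--extra-index-url',
#  'https://download.pytorch.org/whl/cu121']
```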
@@ -5,11 +5,10 @@ Installer user interaction
 
 import os
 import platform
-from enum import Enum
 from pathlib import Path
 
 from prompt_toolkit import HTML, prompt
-from prompt_toolkit.completion import FuzzyWordCompleter, PathCompleter
+from prompt_toolkit.completion import PathCompleter
 from prompt_toolkit.validation import Validator
 from rich import box, print
 from rich.console import Console, Group, group
@@ -36,26 +35,16 @@ else:
     console = Console(style=Style(color="grey74", bgcolor="grey19"))
 
 
-def welcome(available_releases: tuple | None = None) -> None:
+def welcome():
     @group()
     def text():
-        if (platform_specific := _platform_specific_help()) is not None:
+        if (platform_specific := _platform_specific_help()) != "":
             yield platform_specific
             yield ""
         yield Text.from_markup(
             "Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.",
             justify="center",
         )
-        if available_releases is not None:
-            latest_stable = available_releases[0][0]
-            last_pre = available_releases[1][0]
-            yield ""
-            yield Text.from_markup(
-                f"[red3]🠶[/] Latest stable release (recommended): [b bright_white]{latest_stable}", justify="center"
-            )
-            yield Text.from_markup(
-                f"[red3]🠶[/] Last published pre-release version: [b bright_white]{last_pre}", justify="center"
-            )
 
     console.rule()
     print(
@@ -72,30 +61,19 @@ def welcome(available_releases: tuple | None = None) -> None:
     console.line()
 
 
-def choose_version(available_releases: tuple | None = None) -> str:
-    """
-    Prompt the user to choose an Invoke version to install
-    """
-
-    # short circuit if we couldn't get a version list
-    # still try to install the latest stable version
-    if available_releases is None:
-        return "stable"
-
-    console.print(":grey_question: [orange3]Please choose an Invoke version to install.")
-
-    choices = available_releases[0] + available_releases[1]
-
-    response = prompt(
-        message=f" <Enter> to install the recommended release ({choices[0]}). <Tab> or type to pick a version: ",
-        complete_while_typing=True,
-        completer=FuzzyWordCompleter(choices),
-    )
-    console.print(f" Version {choices[0] if response == '' else response} will be installed.")
+def confirm_install(dest: Path) -> bool:
+    if dest.exists():
+        print(f":exclamation: Directory {dest} already exists :exclamation:")
+        dest_confirmed = Confirm.ask(
+            ":stop_sign: (re)install in this location?",
+            default=False,
+        )
+    else:
+        print(f"InvokeAI will be installed in {dest}")
+        dest_confirmed = Confirm.ask("Use this location?", default=True)
     console.line()
 
-    return "stable" if response == "" else response
+    return dest_confirmed
 
 
 def user_wants_auto_configuration() -> bool:
@@ -131,23 +109,7 @@ def user_wants_auto_configuration() -> bool:
     return choice.lower().startswith("a")
 
 
-def confirm_install(dest: Path) -> bool:
-    if dest.exists():
-        print(f":stop_sign: Directory {dest} already exists!")
-        print("   Is this location correct?")
-        default = False
-    else:
-        print(f":file_folder: InvokeAI will be installed in {dest}")
-        default = True
-
-    dest_confirmed = Confirm.ask("   Please confirm:", default=default)
-
-    console.line()
-
-    return dest_confirmed
-
-
-def dest_path(dest=None) -> Path | None:
+def dest_path(dest=None) -> Path:
     """
     Prompt the user for the destination path and create the path
 
@@ -162,21 +124,25 @@ def dest_path(dest=None) -> Path | None:
     else:
         dest = Path.cwd().expanduser().resolve()
     prev_dest = init_path = dest
-    dest_confirmed = False
+
+    dest_confirmed = confirm_install(dest)
 
     while not dest_confirmed:
-        browse_start = (dest or Path.cwd()).expanduser().resolve()
+        # if the given destination already exists, the starting point for browsing is its parent directory.
+        # the user may have made a typo, or otherwise wants to place the root dir next to an existing one.
+        # if the destination dir does NOT exist, then the user must have changed their mind about the selection.
+        # since we can't read their mind, start browsing at Path.cwd().
+        browse_start = (prev_dest.parent if prev_dest.exists() else Path.cwd()).expanduser().resolve()
 
         path_completer = PathCompleter(
             only_directories=True,
             expanduser=True,
-            get_paths=lambda: [str(browse_start)],  # noqa: B023
+            get_paths=lambda: [browse_start],  # noqa: B023
             # get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
         )
 
         console.line()
 
-        console.print(f":grey_question: [orange3]Please select the install destination:[/] \\[{browse_start}]: ")
+        console.print(f"[orange3]Please select the destination directory for the installation:[/] \\[{browse_start}]: ")
         selected = prompt(
             ">>> ",
             complete_in_thread=True,
@@ -189,7 +155,6 @@ def dest_path(dest=None) -> Path | None:
         )
         prev_dest = dest
         dest = Path(selected)
 
         console.line()
 
         dest_confirmed = confirm_install(dest.expanduser().resolve())
@@ -217,45 +182,41 @@ def dest_path(dest=None) -> Path | None:
     console.rule("Goodbye!")
 
 
-class GpuType(Enum):
-    CUDA = "cuda"
-    CUDA_AND_DML = "cuda_and_dml"
-    ROCM = "rocm"
-    CPU = "cpu"
-    AUTODETECT = "autodetect"
-
-
-def select_gpu() -> GpuType:
+def graphical_accelerator():
     """
-    Prompt the user to select the GPU driver
+    Prompt the user to select the graphical accelerator in their system
     This does not validate user's choices (yet), but only offers choices
     valid for the platform.
     CUDA is the fallback.
     We may be able to detect the GPU driver by shelling out to `modprobe` or `lspci`,
     but this is not yet supported or reliable. Also, some users may have exotic preferences.
     """
 
     if ARCH == "arm64" and OS != "Darwin":
         print(f"Only CPU acceleration is available on {ARCH} architecture. Proceeding with that.")
-        return GpuType.CPU
+        return "cpu"
 
     nvidia = (
         "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
-        GpuType.CUDA,
+        "cuda",
     )
     nvidia_with_dml = (
         "an [gold1 b]NVIDIA[/] GPU (using CUDA™, and DirectML™ for ONNX) -- ALPHA",
-        GpuType.CUDA_AND_DML,
+        "cuda_and_dml",
    )
    amd = (
        "an [gold1 b]AMD[/] GPU (using ROCm™)",
-        GpuType.ROCM,
+        "rocm",
    )
    cpu = (
-        "Do not install any GPU support, use CPU for generation (slow)",
-        GpuType.CPU,
+        "no compatible GPU, or specifically prefer to use the CPU",
+        "cpu",
    )
-    autodetect = (
+    idk = (
        "I'm not sure what to choose",
-        GpuType.AUTODETECT,
+        "idk",
    )
 
    options = []
    if OS == "Windows":
        options = [nvidia, nvidia_with_dml, cpu]
    if OS == "Linux":
@@ -269,7 +230,7 @@ def select_gpu() -> GpuType:
         return options[0][1]
 
     # "I don't know" is always added the last option
-    options.append(autodetect)  # type: ignore
+    options.append(idk)
 
     options = {str(i): opt for i, opt in enumerate(options, 1)}
 
@@ -304,9 +265,9 @@ def select_gpu() -> GpuType:
         ),
     )
 
-    if options[choice][1] is GpuType.AUTODETECT:
+    if options[choice][1] == "idk":
         console.print(
-            "No problem. We will install CUDA support first :crossed_fingers: If Invoke does not detect a GPU, please re-run the installer and select one of the other GPU types."
+            "No problem. We will try to install a version that [i]should[/i] be compatible. :crossed_fingers:"
         )
 
     return options[choice][1]
@@ -330,7 +291,7 @@ def windows_long_paths_registry() -> None:
     """
 
     with open(str(Path(__file__).parent / "WinLongPathsEnabled.reg"), "r", encoding="utf-16le") as code:
-        syntax = Syntax(code.read(), line_numbers=True, lexer="regedit")
+        syntax = Syntax(code.read(), line_numbers=True)
 
     console.print(
         Panel(
@@ -340,7 +301,7 @@ def windows_long_paths_registry() -> None:
                     "We will now apply a registry fix to enable long paths on Windows. InvokeAI needs this to function correctly. We are asking your permission to modify the Windows Registry on your behalf.",
                     "",
                     "This is the change that will be applied:",
-                    str(syntax),
+                    syntax,
                 ]
             )
         ),
@@ -379,7 +340,7 @@ def introduction() -> None:
     console.line(2)
 
 
-def _platform_specific_help() -> Text | None:
+def _platform_specific_help() -> str:
     if OS == "Darwin":
         text = Text.from_markup(
             """[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/]."""
@@ -393,5 +354,5 @@ def _platform_specific_help() -> Text | None:
             [deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]"""
         )
     else:
-        return
+        text = ""
     return text
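The removed `choose_version` above leans on prompt_toolkit's fuzzy completion. A minimal standalone sketch of the same prompt pattern (the release list is hypothetical; the prompt_toolkit APIs are real):

```python
# Sketch: a fuzzy-completing version picker, as in the removed choose_version.
from prompt_toolkit import prompt
from prompt_toolkit.completion import FuzzyWordCompleter

choices = ["3.7.0", "3.6.3", "3.7.0rc2"]  # hypothetical release list
response = prompt(
    message=f"<Enter> for the recommended release ({choices[0]}), or type to pick: ",
    complete_while_typing=True,
    completer=FuzzyWordCompleter(choices),
)
version = choices[0] if response == "" else response
print(f"Version {version} will be installed.")
```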
@@ -15,7 +15,7 @@ echo 4. Download and install models
 echo 5. Change InvokeAI startup options
 echo 6. Re-run the configure script to fix a broken install or to complete a major upgrade
 echo 7. Open the developer console
-echo 8. Update InvokeAI (DEPRECATED - please use the installer)
+echo 8. Update InvokeAI
 echo 9. Run the InvokeAI image database maintenance script
 echo 10. Command-line help
 echo Q - Quit
@@ -52,10 +52,8 @@ IF /I "%choice%" == "1" (
     echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
     call cmd /k
 ) ELSE IF /I "%choice%" == "8" (
-    echo UPDATING FROM WITHIN THE APP IS BEING DEPRECATED.
-    echo Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.
-    timeout 4
-    python -m invokeai.frontend.install.invokeai_update
+    echo Running invokeai-update...
+    python -m invokeai.frontend.install.invokeai_update
 ) ELSE IF /I "%choice%" == "9" (
     echo Running the db maintenance script...
     python .venv\Scripts\invokeai-db-maintenance.exe
@@ -79,3 +77,4 @@ pause
 
 :ending
 exit /b
+
@@ -90,9 +90,7 @@ do_choice() {
         ;;
     8)
         clear
-        printf "UPDATING FROM WITHIN THE APP IS BEING DEPRECATED\n"
-        printf "Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.\n"
-        sleep 4
+        printf "Update InvokeAI\n"
         python -m invokeai.frontend.install.invokeai_update
         ;;
     9)
@@ -124,7 +122,7 @@ do_dialog() {
         5 "Change InvokeAI startup options"
         6 "Re-run the configure script to fix a broken install or to complete a major upgrade"
         7 "Open the developer console"
-        8 "Update InvokeAI (DEPRECATED - please use the installer)"
+        8 "Update InvokeAI"
         9 "Run the InvokeAI image database maintenance script"
         10 "Command-line help"
     )
installer/templates/update.bat.in (new file, 72 lines)
@@ -0,0 +1,72 @@
+@echo off
+setlocal EnableExtensions EnableDelayedExpansion
+
+PUSHD "%~dp0"
+
+set INVOKE_AI_VERSION=latest
+set arg=%1
+if "%arg%" neq "" (
+    if "%arg:~0,2%" equ "/?" (
+        echo Usage: update.bat ^<release name or branch^>
+        echo Updates InvokeAI to use the indicated version of the code base.
+        echo Find the version or branch for the release you want, and pass it as the argument.
+        echo For example '.\update.bat v2.2.5' for release 2.2.5.
+        echo '.\update.bat main' for the latest development version
+        echo.
+        echo If no argument provided then will install the most recent release, equivalent to
+        echo '.\update.bat latest'
+        exit /b
+    ) else (
+        set INVOKE_AI_VERSION=%arg%
+    )
+)
+
+set INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive/!INVOKE_AI_VERSION!.zip"
+set INVOKE_AI_DEP=https://raw.githubusercontent.com/invoke-ai/InvokeAI/!INVOKE_AI_VERSION!/environments-and-requirements/requirements-base.txt
+set INVOKE_AI_MODELS=https://raw.githubusercontent.com/invoke-ai/InvokeAI/!INVOKE_AI_VERSION!/configs/INITIAL_MODELS.yaml
+
+call curl -I "%INVOKE_AI_DEP%" -fs >.tmp.out
+if %errorlevel% neq 0 (
+    echo '!INVOKE_AI_VERSION!' is not a known branch name or tag. Please check the version and try again.
+    echo "Press any key to continue"
+    pause
+    exit /b
+)
+del .tmp.out
+
+echo This script will update InvokeAI and all its dependencies to !INVOKE_AI_SRC!.
+echo If you do not want to do this, press control-C now!
+pause
+
+call curl -L "%INVOKE_AI_DEP%" > environments-and-requirements/requirements-base.txt
+call curl -L "%INVOKE_AI_MODELS%" > configs/INITIAL_MODELS.yaml
+
+call .venv\Scripts\activate.bat
+call .venv\Scripts\python -mpip install -r requirements.txt
+if %errorlevel% neq 0 (
+    echo Installation of requirements failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
+    pause
+    exit /b
+)
+
+call .venv\Scripts\python -mpip install !INVOKE_AI_SRC!
+if %errorlevel% neq 0 (
+    echo Installation of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
+    pause
+    exit /b
+)
+
+@rem call .venv\Scripts\invokeai-configure --root=.
+
+@rem if %errorlevel% neq 0 (
+@rem     echo Configuration InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
+@rem     pause
+@rem     exit /b
+@rem )
+
+echo InvokeAI has been updated to '%INVOKE_AI_VERSION%'
+
+echo "Press any key to continue"
+pause
+endlocal
installer/templates/update.sh.in (new file, 58 lines)
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+set -eu
+
+if [ $# -ge 1 ] && [ "${1:0:2}" == "-h" ]; then
+    echo "Usage: update.sh <release>"
+    echo "Updates InvokeAI to use the indicated version of the code base."
+    echo "Find the version or branch for the release you want, and pass it as the argument."
+    echo "For example: update.sh v2.2.5 for release 2.2.5."
+    echo "             update.sh main for the current development version."
+    echo ""
+    echo "If no argument provided then will install the version tagged with 'latest', equivalent to"
+    echo "update.sh latest"
+    exit -1
+fi
+
+INVOKE_AI_VERSION=${1:-latest}
+
+INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive/$INVOKE_AI_VERSION.zip"
+INVOKE_AI_DEP=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/environments-and-requirements/requirements-base.txt
+INVOKE_AI_MODELS=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/configs/INITIAL_MODELS.yaml
+
+# ensure we're in the correct folder in case user's CWD is somewhere else
+scriptdir=$(dirname "$0")
+cd "$scriptdir"
+
+function _err_exit {
+    if test "$1" -ne 0
+    then
+        echo "Something went wrong while installing InvokeAI and/or its requirements."
+        echo "Update cannot continue. Please report this error to https://github.com/invoke-ai/InvokeAI/issues"
+        echo -e "Error code $1; Error caught was '$2'"
+        read -p "Press any key to exit..."
+        exit
+    fi
+}
+
+if ! curl -I "$INVOKE_AI_DEP" -fs >/dev/null; then
+    echo \'$INVOKE_AI_VERSION\' is not a known branch name or tag. Please check the version and try again.
+    exit
+fi
+
+echo This script will update InvokeAI and all its dependencies to version \'$INVOKE_AI_VERSION\'.
+echo If you do not want to do this, press control-C now!
+read -p "Press any key to continue, or CTRL-C to exit..."
+
+curl -L "$INVOKE_AI_DEP" > environments-and-requirements/requirements-base.txt
+curl -L "$INVOKE_AI_MODELS" > configs/INITIAL_MODELS.yaml
+
+. .venv/bin/activate
+
+./.venv/bin/python -mpip install -r requirements.txt
+_err_exit $? "The pip program failed to install InvokeAI's requirements."
+
+./.venv/bin/python -mpip install $INVOKE_AI_SRC
+_err_exit $? "The pip program failed to install InvokeAI."
+
+echo InvokeAI updated to \'$INVOKE_AI_VERSION\'
@@ -14,7 +14,7 @@ class SocketIO:
 
     def __init__(self, app: FastAPI):
         self.__sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*")
-        self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="/ws/socket.io")
+        self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="socket.io")
         app.mount("/ws", self.__app)
 
         self.__sio.on("subscribe_queue", handler=self._handle_sub_queue)
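The one-line change above is about where the Socket.IO endpoint lives once the ASGI app is mounted. A hedged sketch using the FastAPI and python-socketio public APIs; whether the mounted app is handed the stripped path or the full request path depends on the server and library versions, which is what motivates pinning the full path:

```python
# Sketch: mounting a python-socketio ASGIApp under FastAPI at /ws.
# Clients connect to /ws/socket.io; socketio_path must agree with the
# path the mounted app actually receives, hence the "/ws/socket.io" variant.
from fastapi import FastAPI
from socketio import ASGIApp, AsyncServer

app = FastAPI()
sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*")
sio_app = ASGIApp(socketio_server=sio, socketio_path="/ws/socket.io")
app.mount("/ws", sio_app)
```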
@@ -17,6 +17,7 @@ from controlnet_aux import (
     MidasDetector,
     MLSDdetector,
     NormalBaeDetector,
+    OpenposeDetector,
     PidiNetDetector,
     SamDetector,
     ZoeDetector,
@@ -30,7 +31,6 @@ from invokeai.app.invocations.util import validate_begin_end_step, validate_weig
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
 from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
-from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
 
 from ...backend.model_management import BaseModelType
 from .baseinvocation import (
@@ -276,6 +276,31 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
         return processed_image
 
 
+@invocation(
+    "openpose_image_processor",
+    title="Openpose Processor",
+    tags=["controlnet", "openpose", "pose"],
+    category="controlnet",
+    version="1.2.0",
+)
+class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
+    """Applies Openpose processing to image"""
+
+    hand_and_face: bool = InputField(default=False, description="Whether to use hands and face mode")
+    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+
+    def run_processor(self, image):
+        openpose_processor = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
+        processed_image = openpose_processor(
+            image,
+            detect_resolution=self.detect_resolution,
+            image_resolution=self.image_resolution,
+            hand_and_face=self.hand_and_face,
+        )
+        return processed_image
+
+
 @invocation(
     "midas_depth_image_processor",
     title="Midas Depth Processor",
@@ -599,7 +624,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
     resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
     offload: bool = InputField(default=False)
 
-    def run_processor(self, image: Image.Image):
+    def run_processor(self, image):
         depth_anything_detector = DepthAnythingDetector()
         depth_anything_detector.load_model(model_size=self.model_size)
 
@@ -608,30 +633,3 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
 
         processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload)
         return processed_image
-
-
-@invocation(
-    "dw_openpose_image_processor",
-    title="DW Openpose Image Processor",
-    tags=["controlnet", "dwpose", "openpose"],
-    category="controlnet",
-    version="1.0.0",
-)
-class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
-    """Generates an openpose pose from an image using DWPose"""
-
-    draw_body: bool = InputField(default=True)
-    draw_face: bool = InputField(default=False)
-    draw_hands: bool = InputField(default=False)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
-
-    def run_processor(self, image):
-        dw_openpose = DWOpenposeDetector()
-        processed_image = dw_openpose(
-            image,
-            draw_face=self.draw_face,
-            draw_hands=self.draw_hands,
-            draw_body=self.draw_body,
-            resolution=self.image_resolution,
-        )
-        return processed_image
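For reference, the controlnet_aux detector used by the re-added invocation can be exercised on its own. A small usage sketch, with the model id and arguments as they appear in the hunk above; the file paths are hypothetical:

```python
# Sketch: standalone use of controlnet_aux's OpenposeDetector, matching the
# call in OpenposeImageProcessorInvocation above. "input.png" is hypothetical.
from controlnet_aux import OpenposeDetector
from PIL import Image

image = Image.open("input.png")
processor = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
pose_image = processor(
    image,
    detect_resolution=512,
    image_resolution=512,
    hand_and_face=False,
)
pose_image.save("pose.png")
```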
@@ -5,12 +5,12 @@ from typing import Literal
 import cv2
 import numpy as np
 import torch
+from basicsr.archs.rrdbnet_arch import RRDBNet
 from PIL import Image
 from pydantic import ConfigDict
 
 from invokeai.app.invocations.primitives import ImageField, ImageOutput
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
-from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
 from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
 from invokeai.backend.util.devices import choose_torch_device
@@ -154,7 +154,7 @@ class ImageService(ImageServiceABC):
             self.__invoker.services.logger.error("Image record not found")
             raise
         except Exception as e:
-            self.__invoker.services.logger.error("Problem getting image metadata")
+            self.__invoker.services.logger.error("Problem getting image DTO")
             raise e
 
     def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
@@ -54,17 +54,6 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
             else None
         )
 
-        def stats_cleanup(graph_execution_state_id: str) -> None:
-            if profiler:
-                profile_path = profiler.stop()
-                stats_path = profile_path.with_suffix(".json")
-                self.__invoker.services.performance_statistics.dump_stats(
-                    graph_execution_state_id=graph_execution_state_id, output_path=stats_path
-                )
-            with suppress(GESStatsNotFoundError):
-                self.__invoker.services.performance_statistics.log_stats(graph_execution_state_id)
-                self.__invoker.services.performance_statistics.reset_stats(graph_execution_state_id)
-
         while not stop_event.is_set():
             try:
                 queue_item = self.__invoker.services.queue.get()
@@ -167,7 +156,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                     pass
 
                 except CanceledException:
-                    stats_cleanup(graph_execution_state.id)
+                    with suppress(GESStatsNotFoundError):
+                        self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
                     pass
 
                 except Exception as e:
@@ -192,6 +182,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                         error_type=e.__class__.__name__,
                         error=error,
                     )
+                    with suppress(GESStatsNotFoundError):
+                        self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
                     pass
 
                 # Check queue to see if this is canceled, and skip if so
@@ -223,13 +215,21 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                         error=traceback.format_exc(),
                     )
                 elif is_complete:
-                    self.__invoker.services.events.emit_graph_execution_complete(
-                        queue_batch_id=queue_item.session_queue_batch_id,
-                        queue_item_id=queue_item.session_queue_item_id,
-                        queue_id=queue_item.session_queue_id,
-                        graph_execution_state_id=graph_execution_state.id,
-                    )
-                    stats_cleanup(graph_execution_state.id)
+                    with suppress(GESStatsNotFoundError):
+                        self.__invoker.services.performance_statistics.log_stats(graph_execution_state.id)
+                    self.__invoker.services.events.emit_graph_execution_complete(
+                        queue_batch_id=queue_item.session_queue_batch_id,
+                        queue_item_id=queue_item.session_queue_item_id,
+                        queue_id=queue_item.session_queue_id,
+                        graph_execution_state_id=graph_execution_state.id,
+                    )
+                    if profiler:
+                        profile_path = profiler.stop()
+                        stats_path = profile_path.with_suffix(".json")
+                        self.__invoker.services.performance_statistics.dump_stats(
+                            graph_execution_state_id=graph_execution_state.id, output_path=stats_path
+                        )
+                    self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
 
         except KeyboardInterrupt:
             pass  # Log something? KeyboardInterrupt is probably not going to be seen by the processor
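Both sides of this hunk rely on `contextlib.suppress` to make the stats teardown best-effort. A tiny generic sketch of the pattern (the names here are stand-ins, not the service's API):

```python
# Sketch: best-effort cleanup with contextlib.suppress. If the stats entry
# was never created (or was already cleared), the reset is silently skipped.
from contextlib import suppress


class StatsNotFoundError(KeyError):  # stand-in for GESStatsNotFoundError
    pass


stats: dict[str, int] = {}


def reset_stats(session_id: str) -> None:
    try:
        del stats[session_id]
    except KeyError as e:
        raise StatsNotFoundError(session_id) from e


with suppress(StatsNotFoundError):
    reset_stats("unknown-session")  # no exception escapes
```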
@@ -106,9 +106,9 @@ class InvocationStatsService(InvocationStatsServiceBase):
             del self._stats[graph_execution_state_id]
             del self._cache_stats[graph_execution_state_id]
         except KeyError as e:
-            raise GESStatsNotFoundError(
-                f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}."
-            ) from e
+            msg = f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}."
+            logger.error(msg)
+            raise GESStatsNotFoundError(msg) from e
 
     def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
         graph_stats_summary = self._get_graph_summary(graph_execution_state_id)
@@ -136,9 +136,9 @@ class InvocationStatsService(InvocationStatsServiceBase):
         try:
             cache_stats = self._cache_stats[graph_execution_state_id]
         except KeyError as e:
-            raise GESStatsNotFoundError(
-                f"Attempted to get model cache statistics for unknown graph {graph_execution_state_id}: {e}."
-            ) from e
+            msg = f"Attempted to get model cache statistics for unknown graph {graph_execution_state_id}: {e}."
+            logger.error(msg)
+            raise GESStatsNotFoundError(msg) from e
 
         return ModelCacheStatsSummary(
             cache_hits=cache_stats.hits,
@@ -154,9 +154,9 @@ class InvocationStatsService(InvocationStatsServiceBase):
         try:
             graph_stats = self._stats[graph_execution_state_id]
         except KeyError as e:
-            raise GESStatsNotFoundError(
-                f"Attempted to get graph statistics for unknown graph {graph_execution_state_id}: {e}."
-            ) from e
+            msg = f"Attempted to get graph statistics for unknown graph {graph_execution_state_id}: {e}."
+            logger.error(msg)
+            raise GESStatsNotFoundError(msg) from e
 
         return graph_stats.get_graph_stats_summary(graph_execution_state_id)
 
@@ -164,8 +164,8 @@ class InvocationStatsService(InvocationStatsServiceBase):
         try:
             graph_stats = self._stats[graph_execution_state_id]
         except KeyError as e:
-            raise GESStatsNotFoundError(
-                f"Attempted to get node statistics for unknown graph {graph_execution_state_id}: {e}."
-            ) from e
+            msg = f"Attempted to get node statistics for unknown graph {graph_execution_state_id}: {e}."
+            logger.error(msg)
+            raise GESStatsNotFoundError(msg) from e
 
         return graph_stats.get_node_stats_summaries()
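The pattern introduced on the `+` side above (build the message once, log it, then raise) avoids duplicated strings and guarantees the error also lands in the log. A generic sketch:

```python
# Sketch: log-then-raise with a shared message, as in the hunks above.
import logging

logger = logging.getLogger(__name__)


class GESStatsNotFoundError(Exception):
    pass


def get_stats(stats: dict, graph_id: str):
    try:
        return stats[graph_id]
    except KeyError as e:
        msg = f"Attempted to get statistics for unknown graph {graph_id}: {e}."
        logger.error(msg)
        raise GESStatsNotFoundError(msg) from e
```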
@ -2,7 +2,7 @@

import copy
import itertools
from typing import Annotated, Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints
from typing import Annotated, Any, Optional, Union, get_args, get_origin, get_type_hints

import networkx as nx
from pydantic import BaseModel, ConfigDict, field_validator, model_validator
@ -141,16 +141,6 @@ def are_connections_compatible(
    return are_connection_types_compatible(from_node_field, to_node_field)


T = TypeVar("T")


def copydeep(obj: T) -> T:
    """Deep-copies an object. If it is a pydantic model, use the model's copy method."""
    if isinstance(obj, BaseModel):
        return obj.model_copy(deep=True)
    return copy.deepcopy(obj)


class NodeAlreadyInGraphError(ValueError):
    pass

@ -540,7 +530,7 @@ class Graph(BaseModel):
        except NodeNotFoundError:
            return False

    def get_node(self, node_path: str) -> BaseInvocation:
    def get_node(self, node_path: str) -> InvocationsUnion:
        """Gets a node from the graph using a node path."""
        # Materialized graphs may have nodes at the top level
        graph, node_id = self._get_graph_and_node(node_path)
@ -891,7 +881,7 @@ class GraphExecutionState(BaseModel):
        # If next is still none, there's no next node, return None
        return next_node

    def complete(self, node_id: str, output: BaseInvocationOutput) -> None:
    def complete(self, node_id: str, output: InvocationOutputsUnion):
        """Marks a node as complete"""

        if node_id not in self.execution_graph.nodes:
@ -1128,22 +1118,17 @@ class GraphExecutionState(BaseModel):

    def _prepare_inputs(self, node: BaseInvocation):
        input_edges = [e for e in self.execution_graph.edges if e.destination.node_id == node.id]
        # Inputs must be deep-copied, else if a node mutates the object, other nodes that get the same input
        # will see the mutation.
        if isinstance(node, CollectInvocation):
            output_collection = [
                copydeep(getattr(self.results[edge.source.node_id], edge.source.field))
                getattr(self.results[edge.source.node_id], edge.source.field)
                for edge in input_edges
                if edge.destination.field == "item"
            ]
            node.collection = output_collection
        else:
            for edge in input_edges:
                setattr(
                    node,
                    edge.destination.field,
                    copydeep(getattr(self.results[edge.source.node_id], edge.source.field)),
                )
                output_value = getattr(self.results[edge.source.node_id], edge.source.field)
                setattr(node, edge.destination.field, output_value)

    # TODO: Add API for modifying underlying graph that checks if the change will be valid given the current execution state
    def _is_edge_valid(self, edge: Edge) -> bool:
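The deep-copy comment in `_prepare_inputs` carries the key invariant: when several nodes consume the same upstream output, handing them the same object means one node's in-place mutation leaks into the others. A minimal, hedged sketch of that aliasing hazard, using a hypothetical `ImageField` model (the `model_copy(deep=True)` call mirrors what `copydeep` does for pydantic models):

from pydantic import BaseModel

class ImageField(BaseModel):  # hypothetical stand-in for a node output
    names: list[str]

shared_output = ImageField(names=["a"])

# Aliased: both consumers receive the same object, so one node's mutation is visible to the other.
node_a_input = shared_output
node_b_input = shared_output
node_a_input.names.append("b")
assert node_b_input.names == ["a", "b"]  # surprise for the second consumer

# Deep-copied: each consumer gets an independent object.
node_c_input = shared_output.model_copy(deep=True)
node_c_input.names.append("c")
assert shared_output.names == ["a", "b"]  # upstream result unchanged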
@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2018-2022 BasicSR Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -1,18 +0,0 @@
"""
Adapted from https://github.com/XPixelGroup/BasicSR
License: Apache-2.0

As of Feb 2024, `basicsr` appears to be unmaintained. It imports a function from `torchvision` that is removed in
`torchvision` 0.17. Here is the deprecation warning:

    UserWarning: The torchvision.transforms.functional_tensor module is deprecated in 0.15 and will be **removed in
    0.17**. Please don't rely on it. You probably just need to use APIs in torchvision.transforms.functional or in
    torchvision.transforms.v2.functional.

As a result, a dependency on `basicsr` means we cannot keep our `torchvision` dependency up to date.

Because we only rely on a single class `RRDBNet` from `basicsr`, we've copied the relevant code here and removed the
dependency on `basicsr`.

The code is almost unchanged, only a few type annotations have been added. The license is also copied.
"""
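To make the consequence of this vendoring concrete, here is a hedged sketch of the import change a consumer makes. The vendored module path matches this diff; the constructor arguments are only an illustrative standard 4x ESRGAN configuration:

# Before (depends on the unmaintained basicsr package):
# from basicsr.archs.rrdbnet_arch import RRDBNet
# After (vendored copy, no basicsr dependency):
from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet

model = RRDBNet(num_in_ch=3, num_out_ch=3, scale=4, num_feat=64, num_block=23, num_grow_ch=32)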
@ -1,75 +0,0 @@
from typing import Type

import torch
from torch import nn as nn
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm


@torch.no_grad()
def default_init_weights(
    module_list: list[nn.Module] | nn.Module, scale: float = 1, bias_fill: float = 0, **kwargs
) -> None:
    """Initialize network weights.

    Args:
        module_list (list[nn.Module] | nn.Module): Modules to be initialized.
        scale (float): Scale initialized weights, especially for residual
            blocks. Default: 1.
        bias_fill (float): The value to fill bias. Default: 0
        kwargs (dict): Other arguments for initialization function.
    """
    if not isinstance(module_list, list):
        module_list = [module_list]
    for module in module_list:
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, _BatchNorm):
                init.constant_(m.weight, 1)
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)


def make_layer(basic_block: Type[nn.Module], num_basic_block: int, **kwarg) -> nn.Sequential:
    """Make layers by stacking the same blocks.

    Args:
        basic_block (Type[nn.Module]): nn.Module class for basic block.
        num_basic_block (int): number of blocks.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    layers = []
    for _ in range(num_basic_block):
        layers.append(basic_block(**kwarg))
    return nn.Sequential(*layers)


# TODO: may write a cpp file
def pixel_unshuffle(x: torch.Tensor, scale: int) -> torch.Tensor:
    """Pixel unshuffle.

    Args:
        x (Tensor): Input feature with shape (b, c, hh, hw).
        scale (int): Downsample ratio.

    Returns:
        Tensor: the pixel unshuffled feature.
    """
    b, c, hh, hw = x.size()
    out_channel = c * (scale**2)
    assert hh % scale == 0 and hw % scale == 0
    h = hh // scale
    w = hw // scale
    x_view = x.view(b, c, h, scale, w, scale)
    return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
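As a quick sanity check of the docstring above, a hedged, illustrative use of `pixel_unshuffle`: spatial extent shrinks by `scale` per axis while channels grow by `scale**2`:

import torch

x = torch.randn(1, 3, 8, 8)
y = pixel_unshuffle(x, scale=2)
assert y.shape == (1, 12, 4, 4)  # 3 * 2**2 channels, 8 // 2 spatial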
@ -1,125 +0,0 @@
import torch
from torch import nn as nn
from torch.nn import functional as F

from .arch_util import default_init_weights, make_layer, pixel_unshuffle


class ResidualDenseBlock(nn.Module):
    """Residual Dense Block.

    Used in RRDB block in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat: int = 64, num_grow_ch: int = 32) -> None:
        super(ResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
        self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # initialization
        default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x1 = self.lrelu(self.conv1(x))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # Empirically, we use 0.2 to scale the residual for better performance
        return x5 * 0.2 + x


class RRDB(nn.Module):
    """Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat: int, num_grow_ch: int = 32) -> None:
        super(RRDB, self).__init__()
        self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.rdb1(x)
        out = self.rdb2(out)
        out = self.rdb3(out)
        # Empirically, we use 0.2 to scale the residual for better performance
        return out * 0.2 + x


class RRDBNet(nn.Module):
    """Networks consisting of Residual in Residual Dense Block, which is used
    in ESRGAN.

    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.

    We extend ESRGAN for scale x2 and scale x1.
    Note: This is one option for scale 1, scale 2 in RRDBNet.
    We first employ the pixel-unshuffle (an inverse operation of pixelshuffle) to reduce the spatial size
    and enlarge the channel size before feeding inputs into the main ESRGAN architecture.

    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        num_feat (int): Channel number of intermediate features.
            Default: 64
        num_block (int): Block number in the trunk network. Defaults: 23
        num_grow_ch (int): Channels for each growth. Default: 32.
    """

    def __init__(
        self,
        num_in_ch: int,
        num_out_ch: int,
        scale: int = 4,
        num_feat: int = 64,
        num_block: int = 23,
        num_grow_ch: int = 32,
    ) -> None:
        super(RRDBNet, self).__init__()
        self.scale = scale
        if scale == 2:
            num_in_ch = num_in_ch * 4
        elif scale == 1:
            num_in_ch = num_in_ch * 16
        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
        self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        # upsample
        self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.scale == 2:
            feat = pixel_unshuffle(x, scale=2)
        elif self.scale == 1:
            feat = pixel_unshuffle(x, scale=4)
        else:
            feat = x
        feat = self.conv_first(feat)
        body_feat = self.conv_body(self.body(feat))
        feat = feat + body_feat
        # upsample
        feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode="nearest")))
        feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode="nearest")))
        out = self.conv_last(self.lrelu(self.conv_hr(feat)))
        return out
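A hedged usage sketch of the RRDBNet above, checking that the default scale=4 configuration maps a 64x64 image to 256x256 (weights here are randomly initialized, so the output is noise rather than a real upscale):

import torch

net = RRDBNet(num_in_ch=3, num_out_ch=3, scale=4, num_feat=64, num_block=23, num_grow_ch=32)
with torch.no_grad():
    out = net(torch.randn(1, 3, 64, 64))
assert out.shape == (1, 3, 256, 256)  # two 2x nearest-neighbor upsample stages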
@ -1,81 +0,0 @@
import numpy as np
import torch
from controlnet_aux.util import resize_image
from PIL import Image

from invokeai.backend.image_util.dw_openpose.utils import draw_bodypose, draw_facepose, draw_handpose
from invokeai.backend.image_util.dw_openpose.wholebody import Wholebody


def draw_pose(pose, H, W, draw_face=True, draw_body=True, draw_hands=True, resolution=512):
    bodies = pose["bodies"]
    faces = pose["faces"]
    hands = pose["hands"]
    candidate = bodies["candidate"]
    subset = bodies["subset"]
    canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8)

    if draw_body:
        canvas = draw_bodypose(canvas, candidate, subset)

    if draw_hands:
        canvas = draw_handpose(canvas, hands)

    if draw_face:
        canvas = draw_facepose(canvas, faces)

    dwpose_image = resize_image(
        canvas,
        resolution,
    )
    dwpose_image = Image.fromarray(dwpose_image)

    return dwpose_image


class DWOpenposeDetector:
    """
    Code from the original implementation of the DW Openpose Detector.
    Credits: https://github.com/IDEA-Research/DWPose
    """

    def __init__(self) -> None:
        self.pose_estimation = Wholebody()

    def __call__(
        self, image: Image.Image, draw_face=False, draw_body=True, draw_hands=False, resolution=512
    ) -> Image.Image:
        np_image = np.array(image)
        H, W, C = np_image.shape

        with torch.no_grad():
            candidate, subset = self.pose_estimation(np_image)
            nums, keys, locs = candidate.shape
            candidate[..., 0] /= float(W)
            candidate[..., 1] /= float(H)
            body = candidate[:, :18].copy()
            body = body.reshape(nums * 18, locs)
            score = subset[:, :18]
            for i in range(len(score)):
                for j in range(len(score[i])):
                    if score[i][j] > 0.3:
                        score[i][j] = int(18 * i + j)
                    else:
                        score[i][j] = -1

            un_visible = subset < 0.3
            candidate[un_visible] = -1

            # foot = candidate[:, 18:24]

            faces = candidate[:, 24:92]

            hands = candidate[:, 92:113]
            hands = np.vstack([hands, candidate[:, 113:]])

            bodies = {"candidate": body, "subset": score}
            pose = {"bodies": bodies, "hands": hands, "faces": faces}

            return draw_pose(
                pose, H, W, draw_face=draw_face, draw_hands=draw_hands, draw_body=draw_body, resolution=resolution
            )
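An illustrative sketch of how the detector above might be invoked. The file name "person.jpg" is a hypothetical input, and the two ONNX models are fetched on first construction of the detector:

from PIL import Image

detector = DWOpenposeDetector()
image = Image.open("person.jpg").convert("RGB")  # hypothetical input image
pose = detector(image, draw_body=True, draw_hands=True, resolution=512)
pose.save("pose.png")  # rendered OpenPose-style skeleton image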
@ -1,128 +0,0 @@
# Code from the original DWPose Implementation: https://github.com/IDEA-Research/DWPose

import cv2
import numpy as np


def nms(boxes, scores, nms_thr):
    """Single class NMS implemented in Numpy."""
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= nms_thr)[0]
        order = order[inds + 1]

    return keep
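A small hedged check of the nms above with made-up numbers: boxes 0 and 1 overlap heavily, so with nms_thr=0.5 only the higher-scoring of the pair survives alongside the disjoint box 2:

import numpy as np

boxes = np.array([[0, 0, 10, 10], [1, 1, 10, 10], [20, 20, 30, 30]], dtype=float)
scores = np.array([0.9, 0.8, 0.7])
print(nms(boxes, scores, nms_thr=0.5))  # -> [0, 2]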
def multiclass_nms(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-aware version."""
    final_dets = []
    num_classes = scores.shape[1]
    for cls_ind in range(num_classes):
        cls_scores = scores[:, cls_ind]
        valid_score_mask = cls_scores > score_thr
        if valid_score_mask.sum() == 0:
            continue
        else:
            valid_scores = cls_scores[valid_score_mask]
            valid_boxes = boxes[valid_score_mask]
            keep = nms(valid_boxes, valid_scores, nms_thr)
            if len(keep) > 0:
                cls_inds = np.ones((len(keep), 1)) * cls_ind
                dets = np.concatenate([valid_boxes[keep], valid_scores[keep, None], cls_inds], 1)
                final_dets.append(dets)
    if len(final_dets) == 0:
        return None
    return np.concatenate(final_dets, 0)


def demo_postprocess(outputs, img_size, p6=False):
    grids = []
    expanded_strides = []
    strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]

    hsizes = [img_size[0] // stride for stride in strides]
    wsizes = [img_size[1] // stride for stride in strides]

    for hsize, wsize, stride in zip(hsizes, wsizes, strides, strict=False):
        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
        grids.append(grid)
        shape = grid.shape[:2]
        expanded_strides.append(np.full((*shape, 1), stride))

    grids = np.concatenate(grids, 1)
    expanded_strides = np.concatenate(expanded_strides, 1)
    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides

    return outputs


def preprocess(img, input_size, swap=(2, 0, 1)):
    if len(img.shape) == 3:
        padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
    else:
        padded_img = np.ones(input_size, dtype=np.uint8) * 114

    r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
    resized_img = cv2.resize(
        img,
        (int(img.shape[1] * r), int(img.shape[0] * r)),
        interpolation=cv2.INTER_LINEAR,
    ).astype(np.uint8)
    padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img

    padded_img = padded_img.transpose(swap)
    padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
    return padded_img, r


def inference_detector(session, oriImg):
    input_shape = (640, 640)
    img, ratio = preprocess(oriImg, input_shape)

    ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
    output = session.run(None, ort_inputs)
    predictions = demo_postprocess(output[0], input_shape)[0]

    boxes = predictions[:, :4]
    scores = predictions[:, 4:5] * predictions[:, 5:]

    boxes_xyxy = np.ones_like(boxes)
    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
    boxes_xyxy /= ratio
    dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
    if dets is not None:
        final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
        isscore = final_scores > 0.3
        iscat = final_cls_inds == 0
        isbbox = [i and j for (i, j) in zip(isscore, iscat, strict=False)]
        final_boxes = final_boxes[isbbox]
    else:
        final_boxes = np.array([])

    return final_boxes
@ -1,361 +0,0 @@
# Code from the original DWPose Implementation: https://github.com/IDEA-Research/DWPose

from typing import List, Tuple

import cv2
import numpy as np
import onnxruntime as ort


def preprocess(
    img: np.ndarray, out_bbox, input_size: Tuple[int, int] = (192, 256)
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Do preprocessing for RTMPose model inference.

    Args:
        img (np.ndarray): Input image in shape.
        input_size (tuple): Input image size in shape (w, h).

    Returns:
        tuple:
        - resized_img (np.ndarray): Preprocessed image.
        - center (np.ndarray): Center of image.
        - scale (np.ndarray): Scale of image.
    """
    # get shape of image
    img_shape = img.shape[:2]
    out_img, out_center, out_scale = [], [], []
    if len(out_bbox) == 0:
        out_bbox = [[0, 0, img_shape[1], img_shape[0]]]
    for i in range(len(out_bbox)):
        x0 = out_bbox[i][0]
        y0 = out_bbox[i][1]
        x1 = out_bbox[i][2]
        y1 = out_bbox[i][3]
        bbox = np.array([x0, y0, x1, y1])

        # get center and scale
        center, scale = bbox_xyxy2cs(bbox, padding=1.25)

        # do affine transformation
        resized_img, scale = top_down_affine(input_size, scale, center, img)

        # normalize image
        mean = np.array([123.675, 116.28, 103.53])
        std = np.array([58.395, 57.12, 57.375])
        resized_img = (resized_img - mean) / std

        out_img.append(resized_img)
        out_center.append(center)
        out_scale.append(scale)

    return out_img, out_center, out_scale


def inference(sess: ort.InferenceSession, img: np.ndarray) -> np.ndarray:
    """Inference RTMPose model.

    Args:
        sess (ort.InferenceSession): ONNXRuntime session.
        img (np.ndarray): Input image in shape.

    Returns:
        outputs (np.ndarray): Output of RTMPose model.
    """
    all_out = []
    # build input
    for i in range(len(img)):
        input = [img[i].transpose(2, 0, 1)]

        # build output
        sess_input = {sess.get_inputs()[0].name: input}
        sess_output = []
        for out in sess.get_outputs():
            sess_output.append(out.name)

        # run model
        outputs = sess.run(sess_output, sess_input)
        all_out.append(outputs)

    return all_out


def postprocess(
    outputs: List[np.ndarray],
    model_input_size: Tuple[int, int],
    center: Tuple[int, int],
    scale: Tuple[int, int],
    simcc_split_ratio: float = 2.0,
) -> Tuple[np.ndarray, np.ndarray]:
    """Postprocess for RTMPose model output.

    Args:
        outputs (np.ndarray): Output of RTMPose model.
        model_input_size (tuple): RTMPose model Input image size.
        center (tuple): Center of bbox in shape (x, y).
        scale (tuple): Scale of bbox in shape (w, h).
        simcc_split_ratio (float): Split ratio of simcc.

    Returns:
        tuple:
        - keypoints (np.ndarray): Rescaled keypoints.
        - scores (np.ndarray): Model predict scores.
    """
    all_key = []
    all_score = []
    for i in range(len(outputs)):
        # use simcc to decode
        simcc_x, simcc_y = outputs[i]
        keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio)

        # rescale keypoints
        keypoints = keypoints / model_input_size * scale[i] + center[i] - scale[i] / 2
        all_key.append(keypoints[0])
        all_score.append(scores[0])

    return np.array(all_key), np.array(all_score)


def bbox_xyxy2cs(bbox: np.ndarray, padding: float = 1.0) -> Tuple[np.ndarray, np.ndarray]:
    """Transform the bbox format from (x1,y1,x2,y2) into (center, scale)

    Args:
        bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted
            as (left, top, right, bottom)
        padding (float): BBox padding factor that will be multiplied to scale.
            Default: 1.0

    Returns:
        tuple: A tuple containing center and scale.
        - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or
            (n, 2)
        - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or
            (n, 2)
    """
    # convert single bbox from (4, ) to (1, 4)
    dim = bbox.ndim
    if dim == 1:
        bbox = bbox[None, :]

    # get bbox center and scale
    x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3])
    center = np.hstack([x1 + x2, y1 + y2]) * 0.5
    scale = np.hstack([x2 - x1, y2 - y1]) * padding

    if dim == 1:
        center = center[0]
        scale = scale[0]

    return center, scale
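A quick hedged check of bbox_xyxy2cs above with concrete numbers: a 100x50 box anchored at the origin, padded by the same 1.25 factor used in preprocess:

import numpy as np

center, scale = bbox_xyxy2cs(np.array([0, 0, 100, 50]), padding=1.25)
print(center)  # [50. 25.]
print(scale)   # [125.   62.5]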
def _fix_aspect_ratio(bbox_scale: np.ndarray, aspect_ratio: float) -> np.ndarray:
    """Extend the scale to match the given aspect ratio.

    Args:
        scale (np.ndarray): The image scale (w, h) in shape (2, )
        aspect_ratio (float): The ratio of ``w/h``

    Returns:
        np.ndarray: The reshaped image scale in (2, )
    """
    w, h = np.hsplit(bbox_scale, [1])
    bbox_scale = np.where(w > h * aspect_ratio, np.hstack([w, w / aspect_ratio]), np.hstack([h * aspect_ratio, h]))
    return bbox_scale


def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray:
    """Rotate a point by an angle.

    Args:
        pt (np.ndarray): 2D point coordinates (x, y) in shape (2, )
        angle_rad (float): rotation angle in radian

    Returns:
        np.ndarray: Rotated point in shape (2, )
    """
    sn, cs = np.sin(angle_rad), np.cos(angle_rad)
    rot_mat = np.array([[cs, -sn], [sn, cs]])
    return rot_mat @ pt


def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """To calculate the affine matrix, three pairs of points are required. This
    function is used to get the 3rd point, given 2D points a & b.

    The 3rd point is defined by rotating vector `a - b` by 90 degrees
    anticlockwise, using b as the rotation center.

    Args:
        a (np.ndarray): The 1st point (x,y) in shape (2, )
        b (np.ndarray): The 2nd point (x,y) in shape (2, )

    Returns:
        np.ndarray: The 3rd point.
    """
    direction = a - b
    c = b + np.r_[-direction[1], direction[0]]
    return c


def get_warp_matrix(
    center: np.ndarray,
    scale: np.ndarray,
    rot: float,
    output_size: Tuple[int, int],
    shift: Tuple[float, float] = (0.0, 0.0),
    inv: bool = False,
) -> np.ndarray:
    """Calculate the affine transformation matrix that can warp the bbox area
    in the input image to the output size.

    Args:
        center (np.ndarray[2, ]): Center of the bounding box (x, y).
        scale (np.ndarray[2, ]): Scale of the bounding box
            wrt [width, height].
        rot (float): Rotation angle (degree).
        output_size (np.ndarray[2, ] | list(2,)): Size of the
            destination heatmaps.
        shift (0-100%): Shift translation ratio wrt the width/height.
            Default (0., 0.).
        inv (bool): Option to inverse the affine transform direction.
            (inv=False: src->dst or inv=True: dst->src)

    Returns:
        np.ndarray: A 2x3 transformation matrix
    """
    shift = np.array(shift)
    src_w = scale[0]
    dst_w = output_size[0]
    dst_h = output_size[1]

    # compute transformation matrix
    rot_rad = np.deg2rad(rot)
    src_dir = _rotate_point(np.array([0.0, src_w * -0.5]), rot_rad)
    dst_dir = np.array([0.0, dst_w * -0.5])

    # get four corners of the src rectangle in the original image
    src = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale * shift
    src[1, :] = center + src_dir + scale * shift
    src[2, :] = _get_3rd_point(src[0, :], src[1, :])

    # get four corners of the dst rectangle in the input image
    dst = np.zeros((3, 2), dtype=np.float32)
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])

    if inv:
        warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    return warp_mat


def top_down_affine(
    input_size: dict, bbox_scale: dict, bbox_center: dict, img: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    """Get the bbox image as the model input by affine transform.

    Args:
        input_size (dict): The input size of the model.
        bbox_scale (dict): The bbox scale of the img.
        bbox_center (dict): The bbox center of the img.
        img (np.ndarray): The original image.

    Returns:
        tuple: A tuple containing center and scale.
        - np.ndarray[float32]: img after affine transform.
        - np.ndarray[float32]: bbox scale after affine transform.
    """
    w, h = input_size
    warp_size = (int(w), int(h))

    # reshape bbox to fixed aspect ratio
    bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h)

    # get the affine matrix
    center = bbox_center
    scale = bbox_scale
    rot = 0
    warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h))

    # do affine transform
    img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR)

    return img, bbox_scale


def get_simcc_maximum(simcc_x: np.ndarray, simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Get maximum response location and value from simcc representations.

    Note:
        instance number: N
        num_keypoints: K
        heatmap height: H
        heatmap width: W

    Args:
        simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx)
        simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy)

    Returns:
        tuple:
        - locs (np.ndarray): locations of maximum heatmap responses in shape
            (K, 2) or (N, K, 2)
        - vals (np.ndarray): values of maximum heatmap responses in shape
            (K,) or (N, K)
    """
    N, K, Wx = simcc_x.shape
    simcc_x = simcc_x.reshape(N * K, -1)
    simcc_y = simcc_y.reshape(N * K, -1)

    # get maximum value locations
    x_locs = np.argmax(simcc_x, axis=1)
    y_locs = np.argmax(simcc_y, axis=1)
    locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32)
    max_val_x = np.amax(simcc_x, axis=1)
    max_val_y = np.amax(simcc_y, axis=1)

    # get maximum value across x and y axis
    mask = max_val_x > max_val_y
    max_val_x[mask] = max_val_y[mask]
    vals = max_val_x
    locs[vals <= 0.0] = -1

    # reshape
    locs = locs.reshape(N, K, 2)
    vals = vals.reshape(N, K)

    return locs, vals


def decode(simcc_x: np.ndarray, simcc_y: np.ndarray, simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]:
    """Modulate simcc distribution with Gaussian.

    Args:
        simcc_x (np.ndarray[K, Wx]): model predicted simcc in x.
        simcc_y (np.ndarray[K, Wy]): model predicted simcc in y.
        simcc_split_ratio (int): The split ratio of simcc.

    Returns:
        tuple: A tuple containing center and scale.
        - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2)
        - np.ndarray[float32]: scores in shape (K,) or (n, K)
    """
    keypoints, scores = get_simcc_maximum(simcc_x, simcc_y)
    keypoints /= simcc_split_ratio

    return keypoints, scores


def inference_pose(session, out_bbox, oriImg):
    h, w = session.get_inputs()[0].shape[2:]
    model_input_size = (w, h)
    resized_img, center, scale = preprocess(oriImg, out_bbox, model_input_size)
    outputs = inference(session, resized_img)
    keypoints, scores = postprocess(outputs, model_input_size, center, scale)

    return keypoints, scores
@ -1,155 +0,0 @@
# Code from the original DWPose Implementation: https://github.com/IDEA-Research/DWPose

import math

import cv2
import matplotlib
import numpy as np

eps = 0.01


def draw_bodypose(canvas, candidate, subset):
    H, W, C = canvas.shape
    candidate = np.array(candidate)
    subset = np.array(subset)

    stickwidth = 4

    limbSeq = [
        [2, 3],
        [2, 6],
        [3, 4],
        [4, 5],
        [6, 7],
        [7, 8],
        [2, 9],
        [9, 10],
        [10, 11],
        [2, 12],
        [12, 13],
        [13, 14],
        [2, 1],
        [1, 15],
        [15, 17],
        [1, 16],
        [16, 18],
        [3, 17],
        [6, 18],
    ]

    colors = [
        [255, 0, 0],
        [255, 85, 0],
        [255, 170, 0],
        [255, 255, 0],
        [170, 255, 0],
        [85, 255, 0],
        [0, 255, 0],
        [0, 255, 85],
        [0, 255, 170],
        [0, 255, 255],
        [0, 170, 255],
        [0, 85, 255],
        [0, 0, 255],
        [85, 0, 255],
        [170, 0, 255],
        [255, 0, 255],
        [255, 0, 170],
        [255, 0, 85],
    ]

    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            if -1 in index:
                continue
            Y = candidate[index.astype(int), 0] * float(W)
            X = candidate[index.astype(int), 1] * float(H)
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
            cv2.fillConvexPoly(canvas, polygon, colors[i])

    canvas = (canvas * 0.6).astype(np.uint8)

    for i in range(18):
        for n in range(len(subset)):
            index = int(subset[n][i])
            if index == -1:
                continue
            x, y = candidate[index][0:2]
            x = int(x * W)
            y = int(y * H)
            cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)

    return canvas


def draw_handpose(canvas, all_hand_peaks):
    H, W, C = canvas.shape

    edges = [
        [0, 1],
        [1, 2],
        [2, 3],
        [3, 4],
        [0, 5],
        [5, 6],
        [6, 7],
        [7, 8],
        [0, 9],
        [9, 10],
        [10, 11],
        [11, 12],
        [0, 13],
        [13, 14],
        [14, 15],
        [15, 16],
        [0, 17],
        [17, 18],
        [18, 19],
        [19, 20],
    ]

    for peaks in all_hand_peaks:
        peaks = np.array(peaks)

        for ie, e in enumerate(edges):
            x1, y1 = peaks[e[0]]
            x2, y2 = peaks[e[1]]
            x1 = int(x1 * W)
            y1 = int(y1 * H)
            x2 = int(x2 * W)
            y2 = int(y2 * H)
            if x1 > eps and y1 > eps and x2 > eps and y2 > eps:
                cv2.line(
                    canvas,
                    (x1, y1),
                    (x2, y2),
                    matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255,
                    thickness=2,
                )

        for _, keypoint in enumerate(peaks):
            x, y = keypoint
            x = int(x * W)
            y = int(y * H)
            if x > eps and y > eps:
                cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
    return canvas


def draw_facepose(canvas, all_lmks):
    H, W, C = canvas.shape
    for lmks in all_lmks:
        lmks = np.array(lmks)
        for lmk in lmks:
            x, y = lmk
            x = int(x * W)
            y = int(y * H)
            if x > eps and y > eps:
                cv2.circle(canvas, (x, y), 3, (255, 255, 255), thickness=-1)
    return canvas
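A hedged toy invocation of draw_bodypose above: a blank canvas, two made-up normalized keypoints, and a subset row marking only those two keypoints as present (every value here is fabricated purely to exercise the function):

import numpy as np

canvas = np.zeros((256, 256, 3), dtype=np.uint8)
candidate = np.array([[0.5, 0.2, 0.9], [0.5, 0.4, 0.9]])  # (x, y, score), normalized
subset = -1 * np.ones((1, 20))
subset[0, 0] = 0  # keypoint 1 -> candidate row 0
subset[0, 1] = 1  # keypoint 2 -> candidate row 1
canvas = draw_bodypose(canvas, candidate, subset)  # draws the [2, 1] limb and two joints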
@ -1,67 +0,0 @@
# Code from the original DWPose Implementation: https://github.com/IDEA-Research/DWPose
# Modified pathing to suit Invoke

import pathlib

import numpy as np
import onnxruntime as ort

from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.util import download_with_progress_bar

from .onnxdet import inference_detector
from .onnxpose import inference_pose

DWPOSE_MODELS = {
    "yolox_l.onnx": {
        "local": "any/annotators/dwpose/yolox_l.onnx",
        "url": "https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx?download=true",
    },
    "dw-ll_ucoco_384.onnx": {
        "local": "any/annotators/dwpose/dw-ll_ucoco_384.onnx",
        "url": "https://huggingface.co/yzd-v/DWPose/resolve/main/dw-ll_ucoco_384.onnx?download=true",
    },
}

config = InvokeAIAppConfig.get_config()


class Wholebody:
    def __init__(self):
        device = choose_torch_device()

        providers = ["CUDAExecutionProvider"] if device == "cuda" else ["CPUExecutionProvider"]

        DET_MODEL_PATH = pathlib.Path(config.models_path / DWPOSE_MODELS["yolox_l.onnx"]["local"])
        if not DET_MODEL_PATH.exists():
            download_with_progress_bar(DWPOSE_MODELS["yolox_l.onnx"]["url"], DET_MODEL_PATH)

        POSE_MODEL_PATH = pathlib.Path(config.models_path / DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["local"])
        if not POSE_MODEL_PATH.exists():
            download_with_progress_bar(DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["url"], POSE_MODEL_PATH)

        onnx_det = DET_MODEL_PATH
        onnx_pose = POSE_MODEL_PATH

        self.session_det = ort.InferenceSession(path_or_bytes=onnx_det, providers=providers)
        self.session_pose = ort.InferenceSession(path_or_bytes=onnx_pose, providers=providers)

    def __call__(self, oriImg):
        det_result = inference_detector(self.session_det, oriImg)
        keypoints, scores = inference_pose(self.session_pose, det_result, oriImg)

        keypoints_info = np.concatenate((keypoints, scores[..., None]), axis=-1)
        # compute neck joint
        neck = np.mean(keypoints_info[:, [5, 6]], axis=1)
        # neck score when visualizing pred
        neck[:, 2:4] = np.logical_and(keypoints_info[:, 5, 2:4] > 0.3, keypoints_info[:, 6, 2:4] > 0.3).astype(int)
        new_keypoints_info = np.insert(keypoints_info, 17, neck, axis=1)
        mmpose_idx = [17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3]
        openpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17]
        new_keypoints_info[:, openpose_idx] = new_keypoints_info[:, mmpose_idx]
        keypoints_info = new_keypoints_info

        keypoints, scores = keypoints_info[..., :2], keypoints_info[..., 2]

        return keypoints, scores
@ -7,10 +7,10 @@ import cv2
import numpy as np
import numpy.typing as npt
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from cv2.typing import MatLike
from tqdm import tqdm

from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
from invokeai.backend.util.devices import choose_torch_device

"""
@ -287,14 +287,6 @@ class ModelCache(object):
        if torch.device(source_device).type == torch.device(target_device).type:
            return

        if target_device.type == "cuda":
            vram_device = (
                target_device if target_device.index is not None else torch.device(str(target_device), index=0)
            )
            free_mem, _ = torch.cuda.mem_get_info(torch.device(vram_device))
            if cache_entry.size > free_mem:
                raise torch.cuda.OutOfMemoryError

        start_model_to_time = time.time()
        snapshot_before = self._capture_memory_snapshot()
        cache_entry.model.to(target_device)
@ -364,10 +356,6 @@ class ModelCache(object):
            self.cache.logger.debug(f"Locking {self.key} in {self.cache.execution_device}")
            self.cache._print_cuda_stats()

        except torch.cuda.OutOfMemoryError:
            self.cache.logger.warning("Out of GPU memory encountered.")
            self.cache_entry.unlock()
            raise
        except Exception:
            self.cache_entry.unlock()
            raise
@ -536,6 +524,7 @@ class ModelCache(object):
                break
            if not cache_entry.locked and cache_entry.loaded:
                self._move_model_to_device(model_key, self.storage_device)

        vram_in_use = torch.cuda.memory_allocated()
        self.logger.debug(f"{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB")
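The first hunk above centers on a pre-flight VRAM check before moving a model to CUDA. A minimal, hedged sketch of that pattern in isolation; the 2 GiB figure is a made-up stand-in for a cache entry's size:

import torch

if torch.cuda.is_available():
    free_mem, _total = torch.cuda.mem_get_info(torch.device("cuda", 0))
    model_size = 2 * 1024**3  # hypothetical model footprint in bytes
    if model_size > free_mem:
        # Fail fast instead of letting the .to() call OOM mid-copy.
        raise torch.cuda.OutOfMemoryError("model will not fit in free VRAM")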
@ -1,11 +1,10 @@
from __future__ import annotations

from contextlib import contextmanager
from typing import Callable, List, Union
from typing import List, Union

import torch.nn as nn
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.models import AutoencoderKL, UNet2DConditionModel


def _conv_forward_asymmetric(self, input, weight, bias):
@ -27,51 +26,70 @@ def _conv_forward_asymmetric(self, input, weight, bias):

@contextmanager
def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]):
    # Callable: (input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor
    to_restore: list[tuple[nn.Conv2d | nn.ConvTranspose2d, Callable]] = []
    try:
        # Hard coded to skip down block layers, allowing for seamless tiling at the expense of prompt adherence
        skipped_layers = 1
        to_restore = []

        for m_name, m in model.named_modules():
            if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                continue

            if isinstance(model, UNet2DConditionModel) and m_name.startswith("down_blocks.") and ".resnets." in m_name:
                # down_blocks.1.resnets.1.conv1
                _, block_num, _, resnet_num, submodule_name = m_name.split(".")
                block_num = int(block_num)
                resnet_num = int(resnet_num)

                if block_num >= len(model.down_blocks) - skipped_layers:
            if isinstance(model, UNet2DConditionModel):
                if ".attentions." in m_name:
                    continue

                # Skip the second resnet (could be configurable)
                if resnet_num > 0:
                if ".resnets." in m_name:
                    if ".conv2" in m_name:
                        continue
                    if ".conv_shortcut" in m_name:
                        continue

            """
            if isinstance(model, UNet2DConditionModel):
                if False and ".upsamplers." in m_name:
                    continue

                # Skip Conv2d layers (could be configurable)
                if submodule_name == "conv2":
                if False and ".downsamplers." in m_name:
                    continue

            m.asymmetric_padding_mode = {}
            m.asymmetric_padding = {}
            m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
            m.asymmetric_padding["x"] = (
                m._reversed_padding_repeated_twice[0],
                m._reversed_padding_repeated_twice[1],
                0,
                0,
            )
            m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
            m.asymmetric_padding["y"] = (
                0,
                0,
                m._reversed_padding_repeated_twice[2],
                m._reversed_padding_repeated_twice[3],
            )
                if True and ".resnets." in m_name:
                    if True and ".conv1" in m_name:
                        if False and "down_blocks" in m_name:
                            continue
                        if False and "mid_block" in m_name:
                            continue
                        if False and "up_blocks" in m_name:
                            continue

            to_restore.append((m, m._conv_forward))
            m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
                    if True and ".conv2" in m_name:
                        continue

                    if True and ".conv_shortcut" in m_name:
                        continue

                if True and ".attentions." in m_name:
                    continue

                if False and m_name in ["conv_in", "conv_out"]:
                    continue
            """

            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                m.asymmetric_padding_mode = {}
                m.asymmetric_padding = {}
                m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
                m.asymmetric_padding["x"] = (
                    m._reversed_padding_repeated_twice[0],
                    m._reversed_padding_repeated_twice[1],
                    0,
                    0,
                )
                m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
                m.asymmetric_padding["y"] = (
                    0,
                    0,
                    m._reversed_padding_repeated_twice[2],
                    m._reversed_padding_repeated_twice[3],
                )

                to_restore.append((m, m._conv_forward))
                m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)

        yield
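A hedged usage sketch of the set_seamless context manager above. The `unet` here is a freshly constructed, untrained UNet2DConditionModel used purely to demonstrate the patch-then-restore cycle; in the app the model would come from a loaded checkpoint:

from diffusers.models import UNet2DConditionModel

unet = UNet2DConditionModel()  # illustrative default config, not a real checkpoint
with set_seamless(unet, ["x"]):
    # Inside the context, the selected convs pad circularly along x,
    # so any forward pass produces horizontally tileable features.
    pass
# On exit, the original _conv_forward methods are restored.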
@ -4,7 +4,23 @@ The app makes heavy use of Redux Toolkit, its Query library, and `nanostores`.

## Redux

TODO
We use [Redux Toolkit] + RTK Query extensively.

### Persistence

The usual persistence layer for redux is [redux-persist]. Unfortunately, it is abandoned and not practical to fork: past releases depend on a malicious package that was removed from npm, so it's very difficult (impossible?) to build, and the current state of the repo is non-functional, as it was abandoned mid-rewrite.

We had a need to debounce our persistence, and patched redux-persist's build directly to do so. This didn't feel great. We've since moved to [redux-remember], a well-designed, minimal, and actively maintained library.

#### Slice migration

When rehydrating state, we sometimes need to migrate data. This is handled by the `unserialize` function in [store.ts], which is used by redux-remember to rehydrate persisted state. This function uses some lodash utils to strip out unknown keys and merge any new keys into the rehydrated state.

Sometimes the shape of state changes, but it keeps the same property name in the slice. In that case, we need to _transform_ incoming data.

To handle this, each persisted slice must have a `SliceConfig`, which includes its latest initial value and a migrate function. The migrate function, defined in the slice, does any transformations and updates the version.

The version of the slice is currently only incremented when we need to run _transform_ migrations. If keys are added or removed from state, the version is not bumped.

## `nanostores`

@ -36,3 +52,7 @@ const myStringOption = useStore($myStringOption);

- For performance-critical code and in callbacks, redux selectors can be problematic due to the declarative reactivity system. Consider refactoring to use `nanostores` if there's a **measurable** performance issue.

[nanostores]: https://github.com/nanostores/nanostores/
[Redux Toolkit]: https://redux-toolkit.js.org/
[redux-persist]: https://github.com/rt2zz/redux-persist
[redux-remember]: https://github.com/zewish/redux-remember
[store.ts]: invokeai/frontend/web/src/app/store/store.ts
@ -52,10 +52,9 @@
    "@chakra-ui/react-use-size": "^2.1.0",
    "@dagrejs/graphlib": "^2.1.13",
    "@dnd-kit/core": "^6.1.0",
    "@dnd-kit/sortable": "^8.0.0",
    "@dnd-kit/utilities": "^3.2.2",
    "@fontsource-variable/inter": "^5.0.16",
    "@invoke-ai/ui-library": "^0.0.18",
    "@invoke-ai/ui-library": "^0.0.20",
    "@mantine/form": "6.0.21",
    "@nanostores/react": "^0.7.1",
    "@reduxjs/toolkit": "2.0.1",
56
invokeai/frontend/web/pnpm-lock.yaml
generated
@ -22,9 +22,6 @@ dependencies:
  '@dnd-kit/core':
    specifier: ^6.1.0
    version: 6.1.0(react-dom@18.2.0)(react@18.2.0)
  '@dnd-kit/sortable':
    specifier: ^8.0.0
    version: 8.0.0(@dnd-kit/core@6.1.0)(react@18.2.0)
  '@dnd-kit/utilities':
    specifier: ^3.2.2
    version: 3.2.2(react@18.2.0)
@ -32,8 +29,8 @@ dependencies:
    specifier: ^5.0.16
    version: 5.0.16
  '@invoke-ai/ui-library':
    specifier: ^0.0.18
    version: 0.0.18(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.16)(@internationalized/date@3.5.1)(@types/react@18.2.48)(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0)
    specifier: ^0.0.20
    version: 0.0.20(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.16)(@internationalized/date@3.5.1)(@types/react@18.2.48)(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0)
  '@mantine/form':
    specifier: 6.0.21
    version: 6.0.21(react@18.2.0)
@ -1703,6 +1700,13 @@ packages:
    dependencies:
      regenerator-runtime: 0.14.1

  /@babel/runtime@7.23.9:
    resolution: {integrity: sha512-0CX6F+BI2s9dkUqr08KFrAIZgNFj75rdBU/DjCyYLIaV/quFjkk6T+EJ2LkZHyZTbEV4L5p97mNkUsHl2wLFAw==}
    engines: {node: '>=6.9.0'}
    dependencies:
      regenerator-runtime: 0.14.1
    dev: false

  /@babel/template@7.22.15:
    resolution: {integrity: sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==}
    engines: {node: '>=6.9.0'}
@ -1963,7 +1967,7 @@ packages:
    dependencies:
      '@chakra-ui/dom-utils': 2.1.0
      react: 18.2.0
      react-focus-lock: 2.9.6(@types/react@18.2.48)(react@18.2.0)
      react-focus-lock: 2.9.7(@types/react@18.2.48)(react@18.2.0)
    transitivePeerDependencies:
      - '@types/react'
    dev: false
@ -2887,18 +2891,6 @@ packages:
      tslib: 2.6.2
    dev: false

  /@dnd-kit/sortable@8.0.0(@dnd-kit/core@6.1.0)(react@18.2.0):
    resolution: {integrity: sha512-U3jk5ebVXe1Lr7c2wU7SBZjcWdQP+j7peHJfCspnA81enlu88Mgd7CC8Q+pub9ubP7eKVETzJW+IBAhsqbSu/g==}
    peerDependencies:
      '@dnd-kit/core': ^6.1.0
      react: '>=16.8.0'
    dependencies:
      '@dnd-kit/core': 6.1.0(react-dom@18.2.0)(react@18.2.0)
      '@dnd-kit/utilities': 3.2.2(react@18.2.0)
      react: 18.2.0
      tslib: 2.6.2
    dev: false

  /@dnd-kit/utilities@3.2.2(react@18.2.0):
    resolution: {integrity: sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==}
    peerDependencies:
@ -3007,7 +2999,7 @@ packages:
      '@types/react':
        optional: true
    dependencies:
      '@babel/runtime': 7.23.8
      '@babel/runtime': 7.23.9
      '@emotion/babel-plugin': 11.11.0
      '@emotion/is-prop-valid': 1.2.1
      '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0)
@ -3602,8 +3594,8 @@ packages:
      prettier: 3.2.4
    dev: true

  /@invoke-ai/ui-library@0.0.18(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.16)(@internationalized/date@3.5.1)(@types/react@18.2.48)(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0):
    resolution: {integrity: sha512-Yme+2+pzYy3TPb7ZT0hYmBwahH29ZRSVIxLKSexh3BsbJXbTzGssRQU78QvK6Ymxemgbso3P8Rs+IW0zNhQKjQ==}
  /@invoke-ai/ui-library@0.0.20(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.16)(@internationalized/date@3.5.1)(@types/react@18.2.48)(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0):
    resolution: {integrity: sha512-8BDL9LWbmpAZHTJB0B+zMJ0kBq7vQF0tem6q9WB03CPqdE307FiDmZ++NZF7BP8Rp4Sivdi6OagaXL7WV6e0Pw==}
    peerDependencies:
      '@fontsource-variable/inter': ^5.0.16
      react: ^18.2.0
@ -3625,8 +3617,8 @@ packages:
      framer-motion: 10.18.0(react-dom@18.2.0)(react@18.2.0)
      lodash-es: 4.17.21
      nanostores: 0.9.5
      overlayscrollbars: 2.4.7
      overlayscrollbars-react: 0.5.4(overlayscrollbars@2.4.7)(react@18.2.0)
      overlayscrollbars: 2.5.0
      overlayscrollbars-react: 0.5.4(overlayscrollbars@2.5.0)(react@18.2.0)
      react: 18.2.0
      react-dom: 18.2.0(react@18.2.0)
      react-i18next: 14.0.1(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0)
@ -11348,13 +11340,13 @@ packages:
      react: 18.2.0
    dev: false

  /overlayscrollbars-react@0.5.4(overlayscrollbars@2.4.7)(react@18.2.0):
  /overlayscrollbars-react@0.5.4(overlayscrollbars@2.5.0)(react@18.2.0):
    resolution: {integrity: sha512-FPKx9XnXovTnI4+2JXig5uEaTLSEJ6svOwPzIfBBXTHBRNsz2+WhYUmfM0K/BNYxjgDEwuPm+NQhEoOA0RoG1g==}
    peerDependencies:
      overlayscrollbars: ^2.0.0
      react: '>=16.8.0'
    dependencies:
      overlayscrollbars: 2.4.7
      overlayscrollbars: 2.5.0
      react: 18.2.0
    dev: false

@ -11362,8 +11354,8 @@ packages:
    resolution: {integrity: sha512-C7tmhetwMv9frEvIT/RfkAVEgbjRNz/Gh2zE8BVmN+jl35GRaAnz73rlGQCMRoC2arpACAXyMNnJkzHb7GBrcA==}
    dev: false

  /overlayscrollbars@2.4.7:
    resolution: {integrity: sha512-02X2/nHno35dzebCx+EO2tRDaKAOltZqUKdUqvq3Pt8htCuhJbYi+mjr0CYerVeGRRoZ2Uo6/8XrNg//DJJ+GA==}
  /overlayscrollbars@2.5.0:
    resolution: {integrity: sha512-CWVC2dwS07XZfLHDm5GmZN1iYggiJ8Vufnvzwt0gwR9Yz1hVckKeTxg7VILZeYVGhDYJHZ1Xc8Xfys5dWZ1qiA==}
    dev: false

  /p-limit@2.3.0:
@ -11851,7 +11843,7 @@ packages:
    peerDependencies:
      react: ^15.3.0 || ^16.0.0 || ^17.0.0 || ^18.0.0
    dependencies:
      '@babel/runtime': 7.23.8
      '@babel/runtime': 7.23.9
      react: 18.2.0
    dev: false

@ -11937,8 +11929,8 @@ packages:
    resolution: {integrity: sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==}
    dev: false

  /react-focus-lock@2.9.6(@types/react@18.2.48)(react@18.2.0):
    resolution: {integrity: sha512-B7gYnCjHNrNYwY2juS71dHbf0+UpXXojt02svxybj8N5bxceAkzPChKEncHuratjUHkIFNCn06k2qj1DRlzTug==}
  /react-focus-lock@2.9.7(@types/react@18.2.48)(react@18.2.0):
    resolution: {integrity: sha512-EfhX040SELLqnQ9JftqsmQCG49iByg8F5X5m19Er+n371OaETZ35dlNPZrLOOTlnnwD4c2Zv0KDgabDTc7dPHw==}
    peerDependencies:
      '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0
      react: ^16.8.0 || ^17.0.0 || ^18.0.0
@ -11946,7 +11938,7 @@ packages:
      '@types/react':
        optional: true
    dependencies:
      '@babel/runtime': 7.23.8
      '@babel/runtime': 7.23.9
      '@types/react': 18.2.48
      focus-lock: 1.0.0
      prop-types: 15.8.1
@ -12008,7 +12000,7 @@ packages:
      react-native:
        optional: true
    dependencies:
      '@babel/runtime': 7.23.8
      '@babel/runtime': 7.23.9
      html-parse-stringify: 3.0.1
      i18next: 23.7.16
      react: 18.2.0
File diff suppressed because it is too large
@ -175,7 +175,6 @@
  "statusUpscaling": "Upscaling",
  "statusUpscalingESRGAN": "Upscaling (ESRGAN)",
  "template": "Template",
  "toResolve": "To resolve",
  "training": "Training",
  "trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
  "trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
@ -236,9 +235,6 @@
  "fill": "Fill",
  "h": "H",
  "handAndFace": "Hand and Face",
  "face": "Face",
  "body": "Body",
  "hands": "Hands",
  "hed": "HED",
  "hedDescription": "Holistically-Nested Edge Detection",
  "hideAdvanced": "Hide Advanced",
@ -265,8 +261,8 @@
  "noneDescription": "No processing applied",
  "normalBae": "Normal BAE",
  "normalBaeDescription": "Normal BAE processing",
  "dwOpenpose": "DW Openpose",
  "dwOpenposeDescription": "Human pose estimation using DW Openpose",
  "openPose": "Openpose",
  "openPoseDescription": "Human pose estimation using Openpose",
  "pidi": "PIDI",
  "pidiDescription": "PIDI image processing",
  "processor": "Processor",
@ -901,7 +897,6 @@
  "doesNotExist": "does not exist",
  "downloadWorkflow": "Download Workflow JSON",
  "edge": "Edge",
  "editMode": "Edit in Workflow Editor",
  "enum": "Enum",
  "enumDescription": "Enums are values that may be one of a number of options.",
  "executionStateCompleted": "Completed",
@ -997,10 +992,8 @@
  "problemReadingMetadata": "Problem reading metadata from image",
  "problemReadingWorkflow": "Problem reading workflow from image",
  "problemSettingTitle": "Problem Setting Title",
  "resetToDefaultValue": "Reset to default value",
  "reloadNodeTemplates": "Reload Node Templates",
  "removeLinearView": "Remove from Linear View",
  "reorderLinearView": "Reorder Linear View",
  "newWorkflow": "New Workflow",
  "newWorkflowDesc": "Create a new workflow?",
  "newWorkflowDesc2": "Your current workflow has unsaved changes.",
@ -1071,7 +1064,6 @@
  "vaeModelFieldDescription": "TODO",
  "validateConnections": "Validate Connections and Graph",
  "validateConnectionsHelp": "Prevent invalid connections from being made, and invalid graphs from being invoked",
  "viewMode": "Use in Linear View",
  "unableToGetWorkflowVersion": "Unable to get workflow schema version",
  "unrecognizedWorkflowVersion": "Unrecognized workflow schema version {{version}}",
  "version": "Version",
@ -1424,8 +1416,9 @@
  "clipSkip": {
    "heading": "CLIP Skip",
    "paragraphs": [
      "How many layers of the CLIP model to skip.",
      "Certain models are better suited to be used with CLIP Skip."
      "Choose how many layers of the CLIP model to skip.",
      "Some models work better with certain CLIP Skip settings.",
      "A higher value typically results in a less detailed image."
    ]
  },
  "paramNegativeConditioning": {
@ -1445,8 +1438,7 @@
  "paramScheduler": {
    "heading": "Scheduler",
    "paragraphs": [
      "Scheduler used during the generation process.",
      "Each scheduler defines how to iteratively add noise to an image or how to update a sample based on a model's output."
      "Scheduler defines how to iteratively add noise to an image or how to update a sample based on a model's output."
    ]
  },
  "compositingBlur": {
@ -1463,52 +1455,47 @@
  },
  "compositingCoherenceMode": {
    "heading": "Mode",
    "paragraphs": ["Method used to create a coherent image with the newly generated masked area."]
    "paragraphs": ["The mode of the Coherence Pass."]
  },
  "compositingCoherenceSteps": {
    "heading": "Steps",
    "paragraphs": ["Number of steps in the Coherence Pass.", "Similar to Generation Steps."]
    "paragraphs": ["Number of denoising steps used in the Coherence Pass.", "Same as the main Steps parameter."]
  },
  "compositingStrength": {
    "heading": "Strength",
    "paragraphs": ["Amount of noise added for the Coherence Pass.", "Similar to Denoising Strength."]
    "paragraphs": [
      "Denoising strength for the Coherence Pass.",
      "Same as the Image to Image Denoising Strength parameter."
    ]
  },
  "compositingMaskAdjustments": {
    "heading": "Mask Adjustments",
    "paragraphs": ["Adjust the mask."]
  },
  "controlNetBeginEnd": {
    "heading": "Begin / End Step Percentage",
    "paragraphs": [
      "Which steps of the denoising process will have the ControlNet applied.",
      "ControlNets applied at the beginning of the process guide composition, and ControlNets applied at the end guide details."
    ]
  },
  "controlNetControlMode": {
    "heading": "Control Mode",
    "paragraphs": ["Lends more weight to either the prompt or ControlNet."]
  },
  "controlNetResizeMode": {
    "heading": "Resize Mode",
    "paragraphs": ["How the ControlNet image will be fit to the image output size."]
  },
  "controlNet": {
    "heading": "ControlNet",
    "paragraphs": [
      "ControlNets provide guidance to the generation process, helping create images with controlled composition, structure, or style, depending on the model selected."
    ]
  },
  "controlNetBeginEnd": {
    "heading": "Begin / End Step Percentage",
    "paragraphs": [
      "The part of the denoising process that will have the Control Adapter applied.",
      "Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details."
    ]
  },
  "controlNetControlMode": {
    "heading": "Control Mode",
    "paragraphs": ["Lend more weight to either the prompt or ControlNet."]
  },
  "controlNetProcessor": {
    "heading": "Processor",
    "paragraphs": [
      "Method of processing the input image to guide the generation process. Different processors will provide different effects or styles in your generated images."
    ]
  },
  "controlNetResizeMode": {
    "heading": "Resize Mode",
    "paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
  },
  "controlNetWeight": {
    "heading": "Weight",
    "paragraphs": [
      "Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image."
    ]
    "paragraphs": ["How strongly the ControlNet will impact the generated image."]
  },
  "dynamicPrompts": {
    "heading": "Dynamic Prompts",
@ -1531,23 +1518,13 @@
      "Per Image will use a unique seed for each image. This provides more variation."
    ]
  },
  "imageFit": {
    "heading": "Fit Initial Image to Output Size",
    "paragraphs": [
      "Resizes the initial image to the width and height of the output image. Recommended to enable."
    ]
  },
  "infillMethod": {
    "heading": "Infill Method",
    "paragraphs": ["Method of infilling during the Outpainting or Inpainting process."]
    "paragraphs": ["Method to infill the selected area."]
  },
  "lora": {
    "heading": "LoRA",
    "paragraphs": ["Lightweight models that are used in conjunction with base models."]
  },
  "loraWeight": {
    "heading": "Weight",
    "paragraphs": ["Weight of the LoRA. Higher weight will lead to larger impacts on the final image."]
    "heading": "LoRA Weight",
    "paragraphs": ["Higher LoRA weight will lead to larger impacts on the final image."]
  },
  "noiseUseCPU": {
    "heading": "Use CPU Noise",
@ -1557,25 +1534,14 @@
      "There is no performance impact to enabling CPU Noise."
    ]
  },
  "paramAspect": {
    "heading": "Aspect",
    "paragraphs": [
      "Aspect ratio of the generated image. Changing the ratio will update the Width and Height accordingly.",
      "“Optimize” will set the Width and Height to optimal dimensions for the chosen model."
    ]
  },
  "paramCFGScale": {
    "heading": "CFG Scale",
    "paragraphs": [
      "Controls how much the prompt influences the generation process.",
      "High CFG Scale values can result in over-saturation and distorted generation results. "
    ]
    "paragraphs": ["Controls how much your prompt influences the generation process."]
  },
  "paramCFGRescaleMultiplier": {
    "heading": "CFG Rescale Multiplier",
    "paragraphs": [
      "Rescale multiplier for CFG guidance, used for models trained using zero-terminal SNR (ztsnr).",
      "Suggested value of 0.7 for these models."
      "Rescale multiplier for CFG guidance, used for models trained using zero-terminal SNR (ztsnr). Suggested value 0.7."
    ]
  },
  "paramDenoisingStrength": {
@ -1585,16 +1551,6 @@
      "0 will result in an identical image, while 1 will result in a completely new image."
    ]
  },
  "paramHeight": {
    "heading": "Height",
    "paragraphs": ["Height of the generated image. Must be a multiple of 8."]
  },
  "paramHrf": {
    "heading": "Enable High Resolution Fix",
    "paragraphs": [
      "Generate high quality images at a larger resolution than optimal for the model. Generally used to prevent duplication in the generated image."
    ]
  },
  "paramIterations": {
    "heading": "Iterations",
    "paragraphs": [
@ -1605,7 +1561,8 @@
  "paramModel": {
    "heading": "Model",
    "paragraphs": [
      "Model used for generation. Different models are trained to specialize in producing different aesthetic results and content."
      "Model used for the denoising steps.",
      "Different models are typically trained to specialize in producing particular aesthetic results and content."
    ]
  },
  "paramRatio": {
@ -1619,7 +1576,7 @@
    "heading": "Seed",
    "paragraphs": [
      "Controls the starting noise used for generation.",
      "Disable the “Random” option to produce identical results with the same generation settings."
      "Disable “Random Seed” to produce identical results with the same generation settings."
    ]
  },
  "paramSteps": {
@ -1629,10 +1586,6 @@
      "Higher step counts will typically create better images but will require more generation time."
    ]
  },
  "paramUpscaleMethod": {
    "heading": "Upscale Method",
    "paragraphs": ["Method used to upscale the image for High Resolution Fix."]
  },
  "paramVAE": {
    "heading": "VAE",
    "paragraphs": ["Model used for translating AI output into the final image."]
@ -1640,82 +1593,14 @@
  "paramVAEPrecision": {
    "heading": "VAE Precision",
    "paragraphs": [
      "The precision used during VAE encoding and decoding.",
      "Fp16/Half precision is more efficient, at the expense of minor image variations."
    ]
  },
  "paramWidth": {
    "heading": "Width",
    "paragraphs": ["Width of the generated image. Must be a multiple of 8."]
  },
  "patchmatchDownScaleSize": {
    "heading": "Downscale",
    "paragraphs": [
      "How much downscaling occurs before infilling.",
      "Higher downscaling will improve performance and reduce quality."
    ]
  },
  "refinerModel": {
    "heading": "Refiner Model",
    "paragraphs": [
      "Model used during the refiner portion of the generation process.",
      "Similar to the Generation Model."
    ]
  },
  "refinerPositiveAestheticScore": {
    "heading": "Positive Aesthetic Score",
    "paragraphs": [
      "Weight generations to be more similar to images with a high aesthetic score, based on the training data."
    ]
  },
  "refinerNegativeAestheticScore": {
    "heading": "Negative Aesthetic Score",
    "paragraphs": [
      "Weight generations to be more similar to images with a low aesthetic score, based on the training data."
    ]
  },
  "refinerScheduler": {
    "heading": "Scheduler",
    "paragraphs": [
      "Scheduler used during the refiner portion of the generation process.",
      "Similar to the Generation Scheduler."
    ]
  },
  "refinerStart": {
    "heading": "Refiner Start",
    "paragraphs": [
      "Where in the generation process the refiner will start to be used.",
      "0 means the refiner will be used for the entire generation process, 0.8 means the refiner will be used for the last 20% of the generation process."
    ]
  },
  "refinerSteps": {
    "heading": "Steps",
    "paragraphs": [
      "Number of steps that will be performed during the refiner portion of the generation process.",
      "Similar to the Generation Steps."
    ]
  },
  "refinerCfgScale": {
    "heading": "CFG Scale",
    "paragraphs": [
      "Controls how much the prompt influences the generation process.",
      "Similar to the Generation CFG Scale."
      "The precision used during VAE encoding and decoding. FP16/half precision is more efficient, at the expense of minor image variations."
    ]
  },
  "scaleBeforeProcessing": {
    "heading": "Scale Before Processing",
    "paragraphs": [
      "“Auto” scales the selected area to the size best suited for the model before the image generation process.",
      "“Manual” allows you to choose the width and height the selected area will be scaled to before the image generation process."
      "Scales the selected area to the size best suited for the model before the image generation process."
    ]
  },
  "seamlessTilingXAxis": {
    "heading": "Seamless Tiling X Axis",
    "paragraphs": ["Seamlessly tile an image along the horizontal axis."]
  },
  "seamlessTilingYAxis": {
    "heading": "Seamless Tiling Y Axis",
    "paragraphs": ["Seamlessly tile an image along the vertical axis."]
  }
},
"ui": {
@ -1724,6 +1609,13 @@
  "showProgressImages": "Show Progress Images",
  "swapSizes": "Swap Sizes"
},
"viewer": {
  "viewerModeImage": "Image",
  "viewerModeInfo": "Info",
  "viewerModeProgress": "Progress",
  "dropLabel": "View Image",
  "noProgress": "Nothing in Progress"
},
"unifiedCanvas": {
  "accept": "Accept",
  "activeLayer": "Active Layer",
@ -795,8 +795,7 @@
  "workflowDeleted": "Flusso di lavoro eliminato",
  "problemRetrievingWorkflow": "Problema nel recupero del flusso di lavoro",
  "resetInitialImage": "Reimposta l'immagine iniziale",
  "uploadInitialImage": "Carica l'immagine iniziale",
  "problemDownloadingImage": "Impossibile scaricare l'immagine"
  "uploadInitialImage": "Carica l'immagine iniziale"
},
"tooltip": {
  "feature": {
@ -1135,14 +1134,7 @@
  "newWorkflow": "Nuovo flusso di lavoro",
  "newWorkflowDesc": "Creare un nuovo flusso di lavoro?",
  "newWorkflowDesc2": "Il flusso di lavoro attuale presenta modifiche non salvate.",
  "unsupportedAnyOfLength": "unione di troppi elementi ({{count}})",
  "clearWorkflowDesc": "Cancellare questo flusso di lavoro e avviarne uno nuovo?",
  "clearWorkflow": "Cancella il flusso di lavoro",
  "clearWorkflowDesc2": "Il tuo flusso di lavoro attuale presenta modifiche non salvate.",
  "viewMode": "Utilizzare nella vista lineare",
  "reorderLinearView": "Riordina la vista lineare",
  "editMode": "Modifica nell'editor del flusso di lavoro",
  "resetToDefaultValue": "Ripristina il valore predefinito"
  "unsupportedAnyOfLength": "unione di troppi elementi ({{count}})"
},
"boards": {
  "autoAddBoard": "Aggiungi automaticamente bacheca",
@ -1199,6 +1191,7 @@
  "f": "F",
  "h": "A",
  "prompt": "Prompt",
  "openPoseDescription": "Stima della posa umana utilizzando Openpose",
  "resizeMode": "Ridimensionamento",
  "weight": "Peso",
  "selectModel": "Seleziona un modello",
@ -1245,11 +1238,7 @@
  "large": "Grande",
  "small": "Piccolo",
  "depthAnythingDescription": "Generazione di mappe di profondità utilizzando la tecnica Depth Anything",
  "modelSize": "Dimensioni del modello",
  "dwOpenposeDescription": "Stima della posa umana utilizzando DW Openpose",
  "face": "Viso",
  "body": "Corpo",
  "hands": "Mani"
  "modelSize": "Dimensioni del modello"
},
"queue": {
  "queueFront": "Aggiungi all'inizio della coda",
@ -1379,8 +1368,7 @@
"popovers": {
  "paramScheduler": {
    "paragraphs": [
      "Il campionatore utilizzato durante il processo di generazione.",
      "Ciascun campionatore definisce come aggiungere in modo iterativo il rumore a un'immagine o come aggiornare un campione in base all'output di un modello."
      "Il campionatore definisce come aggiungere in modo iterativo il rumore a un'immagine o come aggiornare un campione in base all'output di un modello."
    ],
    "heading": "Campionatore"
  },
@ -1393,8 +1381,8 @@
  "compositingCoherenceSteps": {
    "heading": "Passi",
    "paragraphs": [
      "Numero di passi utilizzati nel Passaggio di Coerenza.",
      "Simile ai passi di generazione."
      "Numero di passi di riduzione del rumore utilizzati nel Passaggio di Coerenza.",
      "Uguale al parametro principale Passi."
    ]
  },
  "compositingBlur": {
@ -1406,13 +1394,14 @@
  "compositingCoherenceMode": {
    "heading": "Modalità",
    "paragraphs": [
      "Metodo utilizzato per creare un'immagine coerente con l'area mascherata appena generata."
      "La modalità del Passaggio di Coerenza."
    ]
  },
  "clipSkip": {
    "paragraphs": [
      "Scegli quanti livelli del modello CLIP saltare.",
      "Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip."
      "Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip.",
      "Un valore più alto in genere produce un'immagine meno dettagliata."
    ]
  },
  "compositingCoherencePass": {
@ -1424,8 +1413,8 @@
  "compositingStrength": {
    "heading": "Forza",
    "paragraphs": [
      "Quantità di rumore aggiunta per il Passaggio di Coerenza.",
      "Simile alla forza di riduzione del rumore."
      "Intensità di riduzione del rumore per il passaggio di coerenza.",
      "Uguale al parametro intensità di riduzione del rumore da immagine a immagine."
    ]
  },
  "paramNegativeConditioning": {
@ -1451,8 +1440,8 @@
  "controlNetBeginEnd": {
    "heading": "Percentuale passi Inizio / Fine",
    "paragraphs": [
      "La parte del processo di rimozione del rumore in cui verrà applicato l'adattatore di controllo.",
      "In genere, gli adattatori di controllo applicati all'inizio del processo guidano la composizione, mentre quelli applicati alla fine guidano i dettagli."
      "A quali passi del processo di rimozione del rumore verrà applicato ControlNet.",
      "I ControlNet applicati all'inizio del processo guidano la composizione, mentre i ControlNet applicati alla fine guidano i dettagli."
    ]
  },
  "noiseUseCPU": {
@ -1465,7 +1454,7 @@
  },
  "scaleBeforeProcessing": {
    "paragraphs": [
      "\"Auto\" ridimensiona l'area selezionata alla dimensione più adatta al modello prima del processo di generazione dell'immagine."
      "Ridimensiona l'area selezionata alla dimensione più adatta al modello prima del processo di generazione dell'immagine."
    ],
    "heading": "Scala prima dell'elaborazione"
  },
@ -1500,21 +1489,20 @@
  "paramVAEPrecision": {
    "heading": "Precisione VAE",
    "paragraphs": [
      "La precisione utilizzata durante la codifica e decodifica VAE.",
      "Fp16/Mezza precisione è più efficiente, a scapito di minori variazioni dell'immagine."
      "La precisione utilizzata durante la codifica e decodifica VAE. FP16/mezza precisione è più efficiente, a scapito di minori variazioni dell'immagine."
    ]
  },
  "paramSeed": {
    "paragraphs": [
      "Controlla il rumore iniziale utilizzato per la generazione.",
      "Disabilita l'opzione \"Casuale\" per produrre risultati identici con le stesse impostazioni di generazione."
      "Disabilita seme \"Casuale\" per produrre risultati identici con le stesse impostazioni di generazione."
    ],
    "heading": "Seme"
  },
  "controlNetResizeMode": {
    "heading": "Modalità ridimensionamento",
    "paragraphs": [
      "Metodo per adattare le dimensioni dell'immagine in ingresso dell'adattatore di controllo alle dimensioni della generazione di output."
      "Come l'immagine ControlNet verrà adattata alle dimensioni di output dell'immagine."
    ]
  },
  "dynamicPromptsSeedBehaviour": {
@ -1529,7 +1517,8 @@
  "paramModel": {
    "heading": "Modello",
    "paragraphs": [
      "Modello utilizzato per la generazione. Diversi modelli vengono addestrati per specializzarsi nella produzione di risultati e contenuti estetici diversi."
      "Modello utilizzato per i passaggi di riduzione del rumore.",
      "Diversi modelli sono generalmente addestrati per specializzarsi nella produzione di particolari risultati e contenuti estetici."
    ]
  },
  "paramDenoisingStrength": {
@ -1547,26 +1536,25 @@
  },
  "infillMethod": {
    "paragraphs": [
      "Metodo di riempimento durante il processo di Outpainting o Inpainting."
      "Metodo per riempire l'area selezionata."
    ],
    "heading": "Metodo di riempimento"
  },
  "controlNetWeight": {
    "heading": "Peso",
    "paragraphs": [
      "Peso dell'adattatore di controllo. Un peso maggiore porterà a impatti maggiori sull'immagine finale."
      "Quanto forte sarà l'impatto di ControlNet sull'immagine generata."
    ]
  },
  "paramCFGScale": {
    "heading": "Scala CFG",
    "paragraphs": [
      "Controlla quanto il prompt influenza il processo di generazione.",
      "Valori elevati della scala CFG possono provocare una saturazione eccessiva e distorsioni nei risultati della generazione. "
      "Controlla quanto il tuo prompt influenza il processo di generazione."
    ]
  },
  "controlNetControlMode": {
    "paragraphs": [
      "Attribuisce più peso al prompt oppure a ControlNet."
      "Attribuisce più peso al prompt o a ControlNet."
    ],
    "heading": "Modalità di controllo"
  },
@ -1578,9 +1566,9 @@
    ]
  },
  "lora": {
    "heading": "LoRA",
    "heading": "Peso LoRA",
    "paragraphs": [
      "Modelli leggeri utilizzati insieme ai modelli base."
      "Un peso LoRA più elevato porterà a impatti maggiori sull'immagine finale."
    ]
  },
  "controlNet": {
@ -1592,65 +1580,8 @@
  "paramCFGRescaleMultiplier": {
    "heading": "Moltiplicatore di riscala CFG",
    "paragraphs": [
      "Moltiplicatore di riscala per la guida CFG, utilizzato per modelli addestrati utilizzando SNR a terminale zero (ztsnr).",
      "Valore suggerito di 0.7 per questi modelli."
      "Moltiplicatore di riscala per la guida CFG, utilizzato per modelli addestrati utilizzando SNR a terminale zero (ztsnr). Valore suggerito 0.7."
    ]
  },
  "controlNetProcessor": {
    "heading": "Processore",
    "paragraphs": [
      "Metodo di elaborazione dell'immagine di input per guidare il processo di generazione. Processori diversi forniranno effetti o stili diversi nelle immagini generate."
    ]
  },
  "imageFit": {
    "heading": "Adatta l'immagine iniziale alle dimensioni di output",
    "paragraphs": [
      "Ridimensiona l'immagine iniziale in base alla larghezza e all'altezza dell'immagine di output. Si consiglia di abilitarlo."
    ]
  },
  "loraWeight": {
    "heading": "Peso",
    "paragraphs": [
      "Peso del LoRA. Un peso maggiore comporterà un impatto maggiore sull'immagine finale."
    ]
  },
  "paramAspect": {
    "heading": "Aspetto",
    "paragraphs": [
      "Proporzioni dell'immagine generata. La modifica del rapporto aggiornerà di conseguenza la larghezza e l'altezza.",
      "\"Ottimizza\" imposterà la larghezza e l'altezza alle dimensioni ottimali per il modello scelto."
    ]
  },
  "paramHeight": {
    "heading": "Altezza",
    "paragraphs": [
      "Altezza dell'immagine generata. Deve essere un multiplo di 8."
    ]
  },
  "paramHrf": {
    "heading": "Abilita correzione alta risoluzione",
    "paragraphs": [
      "Genera immagini di alta qualità con una risoluzione maggiore di quella ottimale per il modello. Generalmente utilizzato per impedire la duplicazione nell'immagine generata."
    ]
  },
  "paramUpscaleMethod": {
    "heading": "Metodo di ampliamento",
    "paragraphs": [
      "Metodo utilizzato per eseguire l'ampliamento dell'immagine per la correzione ad alta risoluzione."
    ]
  },
  "patchmatchDownScaleSize": {
    "heading": "Ridimensiona",
    "paragraphs": [
      "Quanto ridimensionamento avviene prima del riempimento.",
      "Un ridimensionamento più elevato migliorerà le prestazioni e ridurrà la qualità."
    ]
  },
  "paramWidth": {
    "paragraphs": [
      "Larghezza dell'immagine generata. Deve essere un multiplo di 8."
    ],
    "heading": "Larghezza"
  }
},
"sdxl": {
@ -1741,9 +1672,7 @@
  "downloadWorkflow": "Salva su file",
  "uploadWorkflow": "Carica da file",
  "projectWorkflows": "Flussi di lavoro del progetto",
  "noWorkflows": "Nessun flusso di lavoro",
  "workflowCleared": "Flusso di lavoro cancellato",
  "saveWorkflowToProject": "Salva flusso di lavoro nel progetto"
  "noWorkflows": "Nessun flusso di lavoro"
},
"app": {
  "storeNotInitialized": "Il negozio non è inizializzato"
@ -555,6 +555,7 @@
  "balanced": "バランス",
  "prompt": "プロンプト",
  "depthMidasDescription": "Midasを使用して深度マップを生成",
  "openPoseDescription": "Openposeを使用してポーズを推定",
  "control": "コントロール",
  "resizeMode": "リサイズモード",
  "weight": "重み",
@ -333,6 +333,7 @@
  "h": "H",
  "prompt": "프롬프트",
  "depthMidasDescription": "Midas를 사용하여 Depth map 생성하기",
  "openPoseDescription": "Openpose를 이용한 사람 포즈 추정",
  "control": "Control",
  "resizeMode": "크기 조정 모드",
  "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) 사용 가능,$t(common.controlNet) 사용 불가능",
@ -369,6 +370,7 @@
  "normalBaeDescription": "Normal BAE 처리",
  "noneDescription": "처리되지 않음",
  "saveControlImage": "Control Image 저장",
  "openPose": "Openpose",
  "toggleControlNet": "해당 ControlNet으로 전환",
  "delete": "삭제",
  "controlAdapter_other": "Control Adapter(s)",
@ -1033,6 +1033,7 @@
  "prompt": "Prompt",
  "depthMidasDescription": "Genereer diepteblad via Midas",
  "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))",
  "openPoseDescription": "Menselijke pose-benadering via Openpose",
  "control": "Controle",
  "resizeMode": "Modus schaling",
  "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) ingeschakeld, $t(common.controlNet)s uitgeschakeld",
@ -1071,6 +1072,7 @@
  "normalBaeDescription": "Normale BAE-verwerking",
  "noneDescription": "Geen verwerking toegepast",
  "saveControlImage": "Bewaar controle-afbeelding",
  "openPose": "Openpose",
  "toggleControlNet": "Zet deze ControlNet aan/uit",
  "delete": "Verwijder",
  "controlAdapter_one": "Control-adapter",
@ -1217,14 +1219,16 @@
  "clipSkip": {
    "paragraphs": [
      "Kies hoeveel CLIP-modellagen je wilt overslaan.",
      "Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen."
      "Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen.",
      "Een hogere waarde geeft meestal een minder gedetailleerde afbeelding."
    ],
    "heading": "Overslaan CLIP"
  },
  "paramModel": {
    "heading": "Model",
    "paragraphs": [
      "Model gebruikt voor de ontruisingsstappen."
      "Model gebruikt voor de ontruisingsstappen.",
      "Verschillende modellen zijn meestal getraind om zich te specialiseren in het maken van bepaalde esthetische resultaten en materiaal."
    ]
  },
  "compositingCoherencePass": {
@ -1155,6 +1155,7 @@
  "resetControlImage": "Сбросить контрольное изображение",
  "prompt": "Запрос",
  "controlnet": "$t(controlnet.controlAdapter_one) №{{number}} $t(common.controlNet)",
  "openPoseDescription": "Оценка позы человека с помощью Openpose",
  "resizeMode": "Режим изменения размера",
  "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) включен, $t(common.controlNet)s отключен",
  "weight": "Вес",
@ -1353,14 +1354,16 @@
  "clipSkip": {
    "paragraphs": [
      "Выберите, сколько слоев модели CLIP нужно пропустить.",
      "Некоторые модели работают лучше с определенными настройками пропуска CLIP."
      "Некоторые модели работают лучше с определенными настройками пропуска CLIP.",
      "Более высокое значение обычно приводит к менее детализированному изображению."
    ],
    "heading": "CLIP пропуск"
  },
  "paramModel": {
    "heading": "Модель",
    "paragraphs": [
      "Модель, используемая для шагов шумоподавления."
      "Модель, используемая для шагов шумоподавления.",
      "Различные модели обычно обучаются, чтобы специализироваться на достижении определенных эстетических результатов и содержания."
    ]
  },
  "compositingCoherencePass": {
@ -259,6 +259,7 @@
  "mediapipeFace": "Mediapipe Yüz",
  "megaControl": "Aşırı Yönetim",
  "mlsd": "M-LSD",
  "openPoseDescription": "Openpose kullanarak poz belirleme",
  "setControlImageDimensions": "Yönetim Görseli Boyutlarını En/Boydan Al",
  "pidi": "PIDI",
  "scribble": "çiziktirme",
@ -272,6 +273,7 @@
  "mlsdDescription": "Minimalist Line Segment Detector (Kolay Çizgi Parçası Algılama)",
  "normalBae": "Normal BAE",
  "normalBaeDescription": "Normal BAE işleme",
  "openPose": "Openpose",
  "resetControlImage": "Yönetim Görselini Kaldır",
  "enableIPAdapter": "IP Aracını Etkinleştir",
  "lineart": "Çizim",
@ -1143,6 +1143,7 @@
  "balanced": "平衡",
  "prompt": "Prompt (提示词控制)",
  "depthMidasDescription": "使用 Midas 生成深度图",
  "openPoseDescription": "使用 Openpose 进行人体姿态估计",
  "resizeMode": "缩放模式",
  "weight": "权重",
  "selectModel": "选择一个模型",
@ -1206,6 +1207,7 @@
  "megaControl": "Mega Control (超级控制)",
  "depthZoe": "Depth (Zoe)",
  "colorMap": "Color",
  "openPose": "Openpose",
  "controlAdapter_other": "Control Adapters",
  "lineartAnime": "Lineart Anime",
  "canny": "Canny",
@ -1444,14 +1446,16 @@
  "clipSkip": {
    "paragraphs": [
      "选择要跳过 CLIP 模型多少层。",
      "部分模型跳过特定数值的层时效果会更好。"
      "部分模型跳过特定数值的层时效果会更好。",
      "较高的数值通常会导致图像细节更少。"
    ],
    "heading": "CLIP 跳过层"
  },
  "paramModel": {
    "heading": "模型",
    "paragraphs": [
      "用于去噪过程的模型。"
      "用于去噪过程的模型。",
      "不同的模型一般会通过接受训练来专门产生特定的美学内容和结果。"
    ]
  },
  "paramIterations": {
@ -2,7 +2,7 @@ import type { UnknownAction } from '@reduxjs/toolkit';
import { isAnyGraphBuilt } from 'features/nodes/store/actions';
import { nodeTemplatesBuilt } from 'features/nodes/store/nodeTemplatesSlice';
import { cloneDeep } from 'lodash-es';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import type { Graph } from 'services/api/types';
import { socketGeneratorProgress } from 'services/events/actions';

@ -18,7 +18,7 @@ export const actionSanitizer = <A extends UnknownAction>(action: A): A => {
    }
  }

  if (appInfoApi.endpoints.getOpenAPISchema.matchFulfilled(action)) {
  if (receivedOpenAPISchema.fulfilled.match(action)) {
    return {
      ...action,
      payload: '<OpenAPI schema omitted>',
@ -23,7 +23,6 @@ import { addControlNetImageProcessedListener } from './listeners/controlNetImage
import { addEnqueueRequestedCanvasListener } from './listeners/enqueueRequestedCanvas';
import { addEnqueueRequestedLinear } from './listeners/enqueueRequestedLinear';
import { addEnqueueRequestedNodes } from './listeners/enqueueRequestedNodes';
import { addGetOpenAPISchemaListener } from './listeners/getOpenAPISchema';
import {
  addImageAddedToBoardFulfilledListener,
  addImageAddedToBoardRejectedListener,
@ -48,6 +47,7 @@ import { addInitialImageSelectedListener } from './listeners/initialImageSelecte
import { addModelSelectedListener } from './listeners/modelSelected';
import { addModelsLoadedListener } from './listeners/modelsLoaded';
import { addDynamicPromptsListener } from './listeners/promptChanged';
import { addReceivedOpenAPISchemaListener } from './listeners/receivedOpenAPISchema';
import { addSocketConnectedEventListener as addSocketConnectedListener } from './listeners/socketio/socketConnected';
import { addSocketDisconnectedEventListener as addSocketDisconnectedListener } from './listeners/socketio/socketDisconnected';
import { addGeneratorProgressEventListener as addGeneratorProgressListener } from './listeners/socketio/socketGeneratorProgress';
@ -150,7 +150,7 @@ addImageRemovedFromBoardRejectedListener();
addBoardIdSelectedListener();

// Node schemas
addGetOpenAPISchemaListener();
addReceivedOpenAPISchemaListener();

// Workflows
addWorkflowLoadRequestedListener();
@ -1,25 +1,25 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import { canvasBatchIdsReset, commitStagingAreaImage, discardStagedImages } from 'features/canvas/store/canvasSlice';
import { matchAnyStagingAreaDismissed } from 'features/canvas/store/canvasSlice';
import { addToast } from 'features/system/store/systemSlice';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';

import { startAppListening } from '..';

const matcher = isAnyOf(commitStagingAreaImage, discardStagedImages);

export const addCommitStagingAreaImageListener = () => {
  startAppListening({
    matcher,
    matcher: matchAnyStagingAreaDismissed,
    effect: async (_, { dispatch, getState }) => {
      const log = logger('canvas');
      const state = getState();
      const { batchIds } = state.canvas;
      const { canvasBatchIds } = state.progress;

      try {
        const req = dispatch(
          queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: batchIds }, { fixedCacheKey: 'cancelByBatchIds' })
          queueApi.endpoints.cancelByBatchIds.initiate(
            { batch_ids: canvasBatchIds },
            { fixedCacheKey: 'cancelByBatchIds' }
          )
        );
        const { canceled } = await req.unwrap();
        req.reset();
@ -32,7 +32,6 @@ export const addCommitStagingAreaImageListener = () => {
          })
        );
      }
      dispatch(canvasBatchIdsReset());
    } catch {
      log.error('Failed to cancel canvas batches');
      dispatch(
@ -1,5 +1,5 @@
import { createAction } from '@reduxjs/toolkit';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { imageSelectionChanged } from 'features/gallery/store/gallerySlice';
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageCache } from 'services/api/types';
@ -28,7 +28,7 @@ export const addFirstListImagesListener = () => {
      if (data.ids.length > 0) {
        // Select the first image
        const firstImage = imagesSelectors.selectAll(data)[0];
        dispatch(imageSelected(firstImage ?? null));
        dispatch(imageSelectionChanged(firstImage ?? null));
      }
    },
  });
@ -1,5 +1,5 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
import { boardIdSelected, galleryViewChanged, imageSelectionChanged } from 'features/gallery/store/gallerySlice';
import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types';
import { imagesApi } from 'services/api/endpoints/images';
import { imagesSelectors } from 'services/api/util';
@ -37,17 +37,17 @@ export const addBoardIdSelectedListener = () => {

      if (boardImagesData && boardIdSelected.match(action) && action.payload.selectedImageName) {
        const selectedImage = imagesSelectors.selectById(boardImagesData, action.payload.selectedImageName);
        dispatch(imageSelected(selectedImage || null));
        dispatch(imageSelectionChanged(selectedImage ?? null));
      } else if (boardImagesData) {
        const firstImage = imagesSelectors.selectAll(boardImagesData)[0];
        dispatch(imageSelected(firstImage || null));
        dispatch(imageSelectionChanged(firstImage ?? null));
      } else {
        // board has no images - deselect
        dispatch(imageSelected(null));
        dispatch(imageSelectionChanged(null));
      }
    } else {
      // fallback - deselect
      dispatch(imageSelected(null));
      dispatch(imageSelectionChanged(null));
    }
  },
});
@ -2,13 +2,14 @@ import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { parseify } from 'common/util/serialize';
import { canvasBatchIdAdded, stagingAreaInitialized } from 'features/canvas/store/canvasSlice';
import { stagingAreaInitialized } from 'features/canvas/store/canvasSlice';
import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGenerationMode';
import { canvasGraphBuilt } from 'features/nodes/store/actions';
import { buildCanvasGraph } from 'features/nodes/util/graph/buildCanvasGraph';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { canvasBatchEnqueued } from 'features/progress/store/progressSlice';
import { imagesApi } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import type { ImageDTO } from 'services/api/types';
@ -121,8 +122,6 @@ export const addEnqueueRequestedCanvasListener = () => {
      const enqueueResult = await req.unwrap();
      req.reset();

      const batchId = enqueueResult.batch.batch_id as string; // we know this is a string, backend provides it

      // Prep the canvas staging area if it is not yet initialized
      if (!state.canvas.layerState.stagingArea.boundingBox) {
        dispatch(
@ -135,8 +134,9 @@ export const addEnqueueRequestedCanvasListener = () => {
        );
      }

      // Associate the session with the canvas session ID
      dispatch(canvasBatchIdAdded(batchId));
      if (enqueueResult.batch.batch_id) {
        dispatch(canvasBatchEnqueued(enqueueResult.batch.batch_id));
      }
    } catch {
      // no-op
    }
@ -1,19 +1,10 @@
import { createAction } from '@reduxjs/toolkit';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
import { selectionChanged } from 'features/gallery/store/gallerySlice';
import { galleryImageClicked, imageSelectionChanged } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import { imagesSelectors } from 'services/api/util';

import { startAppListening } from '..';

export const galleryImageClicked = createAction<{
  imageDTO: ImageDTO;
  shiftKey: boolean;
  ctrlKey: boolean;
  metaKey: boolean;
}>('gallery/imageClicked');

/**
 * This listener handles the logic for selecting images in the gallery.
 *
@ -52,16 +43,16 @@ export const addGalleryImageClickedListener = () => {
        const start = Math.min(lastClickedIndex, currentClickedIndex);
        const end = Math.max(lastClickedIndex, currentClickedIndex);
        const imagesToSelect = imageDTOs.slice(start, end + 1);
        dispatch(selectionChanged(selection.concat(imagesToSelect)));
        dispatch(imageSelectionChanged(selection.concat(imagesToSelect)));
      }
    } else if (ctrlKey || metaKey) {
      if (selection.some((i) => i.image_name === imageDTO.image_name) && selection.length > 1) {
        dispatch(selectionChanged(selection.filter((n) => n.image_name !== imageDTO.image_name)));
        dispatch(imageSelectionChanged(selection.filter((n) => n.image_name !== imageDTO.image_name)));
      } else {
        dispatch(selectionChanged(selection.concat(imageDTO)));
        dispatch(imageSelectionChanged(selection.concat(imageDTO)));
      }
    } else {
      dispatch(selectionChanged([imageDTO]));
      dispatch(imageSelectionChanged(imageDTO));
    }
  },
});
@ -9,7 +9,7 @@ import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions';
import { isModalOpenChanged } from 'features/deleteImageModal/store/slice';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { imageSelectionChanged } from 'features/gallery/store/gallerySlice';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { isImageFieldInputInstance } from 'features/nodes/types/field';
import { isInvocationNode } from 'features/nodes/types/invocation';
@ -62,9 +62,9 @@ export const addRequestedSingleImageDeletionListener = () => {
        const newSelectedImageDTO = filteredImageDTOs[newSelectedImageIndex];

        if (newSelectedImageDTO) {
          dispatch(imageSelected(newSelectedImageDTO));
          dispatch(imageSelectionChanged(newSelectedImageDTO));
        } else {
          dispatch(imageSelected(null));
          dispatch(imageSelectionChanged(null));
        }
      }

@ -160,9 +160,9 @@ export const addRequestedMultipleImageDeletionListener = () => {
      const newSelectedImageDTO = data ? imagesSelectors.selectAll(data)[0] : undefined;

      if (newSelectedImageDTO) {
        dispatch(imageSelected(newSelectedImageDTO));
        dispatch(imageSelectionChanged(newSelectedImageDTO));
      } else {
        dispatch(imageSelected(null));
        dispatch(imageSelectionChanged(null));
      }

      dispatch(isModalOpenChanged(false));
@ -7,7 +7,7 @@ import {
  controlAdapterIsEnabledChanged,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { imageSelectionChanged } from 'features/gallery/store/gallerySlice';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { initialImageChanged, selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { imagesApi } from 'services/api/endpoints/images';
@ -37,14 +37,14 @@ export const addImageDroppedListener = () => {
    }

    /**
     * Image dropped on current image
     * Image dropped on viewer
     */
    if (
      overData.actionType === 'SET_CURRENT_IMAGE' &&
      overData.actionType === 'SET_VIEWER_IMAGE' &&
      activeData.payloadType === 'IMAGE_DTO' &&
      activeData.payload.imageDTO
    ) {
      dispatch(imageSelected(activeData.payload.imageDTO));
      dispatch(imageSelectionChanged(activeData.payload.imageDTO));
      return;
    }
@ -1,4 +1,4 @@
import { selectionChanged } from 'features/gallery/store/gallerySlice';
import { imageSelectionChanged } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';

@ -25,7 +25,7 @@ export const addImagesStarredListener = () => {
        updatedSelection.push(selectedImageDTO);
      }
    });
    dispatch(selectionChanged(updatedSelection));
    dispatch(imageSelectionChanged(updatedSelection));
  },
});
};
@ -1,4 +1,4 @@
import { selectionChanged } from 'features/gallery/store/gallerySlice';
import { imageSelectionChanged } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';

@ -25,7 +25,7 @@ export const addImagesUnstarredListener = () => {
        updatedSelection.push(selectedImageDTO);
      }
    });
    dispatch(selectionChanged(updatedSelection));
    dispatch(imageSelectionChanged(updatedSelection));
  },
});
};
@ -3,18 +3,18 @@ import { parseify } from 'common/util/serialize';
import { nodeTemplatesBuilt } from 'features/nodes/store/nodeTemplatesSlice';
import { parseSchema } from 'features/nodes/util/schema/parseSchema';
import { size } from 'lodash-es';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';

import { startAppListening } from '..';

export const addGetOpenAPISchemaListener = () => {
export const addReceivedOpenAPISchemaListener = () => {
  startAppListening({
    matcher: appInfoApi.endpoints.getOpenAPISchema.matchFulfilled,
    actionCreator: receivedOpenAPISchema.fulfilled,
    effect: (action, { dispatch, getState }) => {
      const log = logger('system');
      const schemaJSON = action.payload;

      log.debug({ schemaJSON: parseify(schemaJSON) }, 'Received OpenAPI schema');
      log.debug({ schemaJSON }, 'Received OpenAPI schema');
      const { nodesAllowlist, nodesDenylist } = getState().config;

      const nodeTemplates = parseSchema(schemaJSON, nodesAllowlist, nodesDenylist);
@ -26,14 +26,10 @@ export const addGetOpenAPISchemaListener = () => {
  });

  startAppListening({
    matcher: appInfoApi.endpoints.getOpenAPISchema.matchRejected,
    actionCreator: receivedOpenAPISchema.rejected,
    effect: (action) => {
      // If action.meta.condition === true, the request was canceled/skipped because another request was in flight or
      // the value was already in the cache. We don't want to log these errors.
      if (!action.meta.condition) {
        const log = logger('system');
        log.error({ error: parseify(action.error) }, 'Problem retrieving OpenAPI Schema');
      }
      const log = logger('system');
      log.error({ error: parseify(action.error) }, 'Problem retrieving OpenAPI Schema');
    },
  });
};
@ -1,9 +1,10 @@
import { logger } from 'app/logging/logger';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { isEqual } from 'lodash-es';
import { isEqual, size } from 'lodash-es';
import { atom } from 'nanostores';
import { api } from 'services/api';
import { queueApi, selectQueueStatus } from 'services/api/endpoints/queue';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import { socketConnected } from 'services/events/actions';

import { startAppListening } from '../..';
@ -76,4 +77,17 @@ export const addSocketConnectedEventListener = () => {
}
},
});

startAppListening({
actionCreator: socketConnected,
effect: async (action, { dispatch, getState }) => {
const { nodeTemplates, config } = getState();
// We only want to re-fetch the schema if we don't have any node templates
if (!size(nodeTemplates.templates) && !config.disabledTabs.includes('nodes')) {
// This request is a createAsyncThunk - resetting API state as in the above listener
// will not trigger this request, so we need to manually do it.
dispatch(receivedOpenAPISchema());
}
},
});
};
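Note: the two hunks above swap RTK Query's `matchFulfilled`/`matchRejected` matchers for the lifecycle action creators of the `receivedOpenAPISchema` thunk, and the new `socketConnected` listener re-dispatches that thunk when no node templates are loaded. For reference, a minimal sketch of the thunk pattern being matched (the fetch URL and error handling here are assumptions, not the project's actual `services/api/thunks/schema`):

```ts
import { createAsyncThunk } from '@reduxjs/toolkit';

// Hypothetical stand-in for services/api/thunks/schema.
export const receivedOpenAPISchema = createAsyncThunk('schema/received', async () => {
  const response = await fetch('/openapi.json'); // assumed endpoint
  if (!response.ok) {
    throw new Error(`Failed to fetch OpenAPI schema: ${response.status}`);
  }
  return await response.json();
});

// A createAsyncThunk exposes .pending / .fulfilled / .rejected action creators,
// so listener middleware can subscribe to them directly:
//   startAppListening({ actionCreator: receivedOpenAPISchema.fulfilled, effect: ... });
```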
@ -1,10 +1,11 @@
import { logger } from 'app/logging/logger';
import { parseify } from 'common/util/serialize';
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
import { boardIdSelected, galleryViewChanged, imageSelectionChanged } from 'features/gallery/store/gallerySlice';
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
import { isImageOutput } from 'features/nodes/types/common';
import { LINEAR_UI_OUTPUT, nodeIDDenyList } from 'features/nodes/util/graph/constants';
import { imageInvocationComplete } from 'features/progress/store/progressSlice';
import { boardsApi } from 'services/api/endpoints/boards';
import { imagesApi } from 'services/api/endpoints/images';
import { imagesAdapter } from 'services/api/util';
@ -29,7 +30,7 @@ export const addInvocationCompleteEventListener = () => {
// This complete event has an associated image output
if (isImageOutput(result) && !nodeTypeDenylist.includes(node.type) && !nodeIDDenyList.includes(source_node_id)) {
const { image_name } = result.image;
const { canvas, gallery } = getState();
const { gallery, progress } = getState();

// This populates the `getImageDTO` cache
const imageDTORequest = dispatch(
@ -41,8 +42,10 @@
const imageDTO = await imageDTORequest.unwrap();
imageDTORequest.unsubscribe();

dispatch(imageInvocationComplete({ data, imageDTO }));

// Add canvas images to the staging area
if (canvas.batchIds.includes(queue_batch_id) && [LINEAR_UI_OUTPUT].includes(data.source_node_id)) {
if (progress.canvasBatchIds.includes(queue_batch_id) && [LINEAR_UI_OUTPUT].includes(data.source_node_id)) {
dispatch(addImageToStagingArea(imageDTO));
}

@ -102,7 +105,7 @@
);
}

dispatch(imageSelected(imageDTO));
dispatch(imageSelectionChanged(imageDTO));
}
}
}
@ -6,6 +6,7 @@ import { WorkflowMigrationError, WorkflowVersionError } from 'features/nodes/typ
import { validateWorkflow } from 'features/nodes/util/workflow/validateWorkflow';
import { addToast } from 'features/system/store/systemSlice';
import { makeToast } from 'features/system/util/makeToast';
import { setActiveTab } from 'features/ui/store/uiSlice';
import { t } from 'i18next';
import { z } from 'zod';
import { fromZodError } from 'zod-validation-error';
@ -52,6 +53,7 @@ export const addWorkflowLoadRequestedListener = () => {
});
}

dispatch(setActiveTab('nodes'));
requestAnimationFrame(() => {
$flow.get()?.fitView();
});
@ -20,6 +20,7 @@ import { nodesTemplatesSlice } from 'features/nodes/store/nodeTemplatesSlice';
import { workflowPersistConfig, workflowSlice } from 'features/nodes/store/workflowSlice';
import { generationPersistConfig, generationSlice } from 'features/parameters/store/generationSlice';
import { postprocessingPersistConfig, postprocessingSlice } from 'features/parameters/store/postprocessingSlice';
import { progressPersistConfig, progressSlice } from 'features/progress/store/progressSlice';
import { queueSlice } from 'features/queue/store/queueSlice';
import { sdxlPersistConfig, sdxlSlice } from 'features/sdxl/store/sdxlSlice';
import { configSlice } from 'features/system/store/configSlice';
@ -61,6 +62,7 @@ const allReducers = {
[queueSlice.name]: queueSlice.reducer,
[workflowSlice.name]: workflowSlice.reducer,
[hrfSlice.name]: hrfSlice.reducer,
[progressSlice.name]: progressSlice.reducer,
[api.reducerPath]: api.reducer,
};
@ -105,6 +107,7 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
[loraPersistConfig.name]: loraPersistConfig,
[modelManagerPersistConfig.name]: modelManagerPersistConfig,
[hrfPersistConfig.name]: hrfPersistConfig,
[progressPersistConfig.name]: progressPersistConfig,
};

const unserialize: UnserializeFunction = (data, key) => {
@ -37,17 +37,18 @@ type IAIDndImageProps = FlexProps & {
isSelected?: boolean;
thumbnail?: boolean;
noContentFallback?: ReactElement;
useThumbailFallback?: boolean;
withHoverOverlay?: boolean;
children?: JSX.Element;
uploadElement?: ReactNode;
dataTestId?: string;
fallbackSrc?: string;
};

const IAIDndImage = (props: IAIDndImageProps) => {
const {
imageDTO,
onError,
onLoad,
onClick,
withMetadataOverlay = false,
isDropDisabled = false,
@ -64,7 +65,7 @@ const IAIDndImage = (props: IAIDndImageProps) => {
thumbnail = false,
noContentFallback = defaultNoContentFallback,
uploadElement = defaultUploadElement,
useThumbailFallback,
fallbackSrc,
withHoverOverlay = false,
children,
onMouseOver,
@ -150,8 +151,8 @@ const IAIDndImage = (props: IAIDndImageProps) => {
<Image
src={thumbnail ? imageDTO.thumbnail_url : imageDTO.image_url}
fallbackStrategy="beforeLoadOrError"
fallbackSrc={useThumbailFallback ? imageDTO.thumbnail_url : undefined}
fallback={useThumbailFallback ? undefined : <IAILoadingImageFallback image={imageDTO} />}
fallbackSrc={fallbackSrc ?? imageDTO.thumbnail_url}
fallback={fallbackSrc ? undefined : <IAILoadingImageFallback image={imageDTO} />}
onError={onError}
draggable={false}
w={imageDTO.width}
@ -161,6 +162,7 @@ const IAIDndImage = (props: IAIDndImageProps) => {
borderRadius="base"
sx={imageSx}
data-testid={dataTestId}
onLoad={onLoad}
/>
{withMetadataOverlay && <ImageMetadataOverlay imageDTO={imageDTO} />}
<SelectionOverlay isSelected={isSelected} isHovered={withHoverOverlay ? isHovered : false} />
@ -175,11 +177,13 @@ const IAIDndImage = (props: IAIDndImageProps) => {
</>
)}
{!imageDTO && isUploadDisabled && noContentFallback}
{imageDTO && !isDragDisabled && (
{imageDTO && !isDragDisabled && draggableData && (
<IAIDraggable data={draggableData} disabled={isDragDisabled || !imageDTO} onClick={onClick} />
)}
{children}
{!isDropDisabled && <IAIDroppable data={droppableData} disabled={isDropDisabled} dropLabel={dropLabel} />}
{!isDropDisabled && droppableData && (
<IAIDroppable data={droppableData} disabled={isDropDisabled} dropLabel={dropLabel} />
)}
</Flex>
)}
</ImageContextMenu>
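Note: `IAIDndImage` replaces the boolean `useThumbailFallback` flag with an explicit `fallbackSrc` URL (defaulting to the thumbnail), and now renders the drag/drop helpers only when their data is actually present. A hedged usage sketch of the new prop (the wrapper component here is invented for illustration):

```tsx
import IAIDndImage from 'common/components/IAIDndImage';
import type { ImageDTO } from 'services/api/types';

// Hypothetical caller: show the thumbnail while the full-size image loads.
export const GalleryThumb = ({ imageDTO }: { imageDTO: ImageDTO }) => (
  <IAIDndImage imageDTO={imageDTO} fallbackSrc={imageDTO.thumbnail_url} />
);
```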
@ -13,46 +13,28 @@ export type Feature =
| 'compositingCoherenceSteps'
| 'compositingStrength'
| 'compositingMaskAdjustments'
| 'controlNet'
| 'controlNetBeginEnd'
| 'controlNetControlMode'
| 'controlNetProcessor'
| 'controlNetResizeMode'
| 'controlNet'
| 'controlNetWeight'
| 'dynamicPrompts'
| 'dynamicPromptsMaxPrompts'
| 'dynamicPromptsSeedBehaviour'
| 'imageFit'
| 'infillMethod'
| 'lora'
| 'loraWeight'
| 'noiseUseCPU'
| 'paramAspect'
| 'paramCFGScale'
| 'paramCFGRescaleMultiplier'
| 'paramDenoisingStrength'
| 'paramHeight'
| 'paramHrf'
| 'paramIterations'
| 'paramModel'
| 'paramRatio'
| 'paramSeed'
| 'paramSteps'
| 'paramUpscaleMethod'
| 'paramVAE'
| 'paramVAEPrecision'
| 'paramWidth'
| 'patchmatchDownScaleSize'
| 'refinerModel'
| 'refinerNegativeAestheticScore'
| 'refinerPositiveAestheticScore'
| 'refinerScheduler'
| 'refinerStart'
| 'refinerSteps'
| 'refinerCfgScale'
| 'scaleBeforeProcessing'
| 'seamlessTilingXAxis'
| 'seamlessTilingYAxis';
| 'scaleBeforeProcessing';

export type PopoverData = PopoverProps & {
image?: string;
@ -64,57 +46,21 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
paramNegativeConditioning: {
placement: 'right',
},
clipSkip: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
},
controlNet: {
href: 'https://support.invoke.ai/support/solutions/articles/151000105880',
},
controlNetBeginEnd: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178148',
},
controlNetWeight: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178148',
},
lora: {
href: 'https://support.invoke.ai/support/solutions/articles/151000159072',
},
loraWeight: {
href: 'https://support.invoke.ai/support/solutions/articles/151000159072-concepts-low-rank-adaptations-loras-',
},
compositingBlur: {
href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
},
compositingBlurMethod: {
href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
},
compositingCoherenceMode: {
href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
},
compositingCoherenceSteps: {
href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
},
compositingStrength: {
href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
href: 'https://support.invoke.ai/support/solutions/articles/151000158838',
},
infillMethod: {
href: 'https://support.invoke.ai/support/solutions/articles/151000158841-infill-and-scaling',
href: 'https://support.invoke.ai/support/solutions/articles/151000158841',
},
scaleBeforeProcessing: {
href: 'https://support.invoke.ai/support/solutions/articles/151000158841',
},
paramCFGScale: {
href: 'https://www.youtube.com/watch?v=1OeHEJrsTpI',
},
paramCFGRescaleMultiplier: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
},
paramDenoisingStrength: {
href: 'https://support.invoke.ai/support/solutions/articles/151000094998-image-to-image',
},
paramHrf: {
href: 'https://support.invoke.ai/support/solutions/articles/151000096700-how-can-i-get-larger-images-what-does-upscaling-do-',
},
paramIterations: {
href: 'https://support.invoke.ai/support/solutions/articles/151000159073',
},
@ -124,10 +70,7 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
},
paramScheduler: {
placement: 'right',
href: 'https://www.youtube.com/watch?v=1OeHEJrsTpI',
},
paramSeed: {
href: 'https://support.invoke.ai/support/solutions/articles/151000096684-what-is-a-seed-how-do-i-use-it-to-recreate-the-same-image-',
href: 'https://support.invoke.ai/support/solutions/articles/151000159073',
},
paramModel: {
placement: 'right',
@ -138,53 +81,15 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
},
controlNetControlMode: {
placement: 'right',
href: 'https://support.invoke.ai/support/solutions/articles/151000178148',
},
controlNetProcessor: {
placement: 'right',
href: 'https://support.invoke.ai/support/solutions/articles/151000105880-using-controlnet',
},
controlNetResizeMode: {
placement: 'right',
href: 'https://support.invoke.ai/support/solutions/articles/151000178148',
},
paramVAE: {
placement: 'right',
href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
},
paramVAEPrecision: {
placement: 'right',
href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
},
paramUpscaleMethod: {
href: 'https://support.invoke.ai/support/solutions/articles/151000096700-how-can-i-get-larger-images-what-does-upscaling-do-',
},
refinerModel: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
},
refinerNegativeAestheticScore: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
},
refinerPositiveAestheticScore: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
},
refinerScheduler: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
},
refinerStart: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
},
refinerSteps: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
},
refinerCfgScale: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
},
seamlessTilingXAxis: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
},
seamlessTilingYAxis: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
},
} as const;
@ -1,28 +1,19 @@
import { useStore } from '@nanostores/react';
import { useAppToaster } from 'app/components/Toaster';
import { $authToken } from 'app/store/nanostores/authToken';
import { useAppDispatch } from 'app/store/storeHooks';
import { imageDownloaded } from 'features/gallery/store/actions';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import { useImageUrlToBlob } from './useImageUrlToBlob';

export const useDownloadImage = () => {
const toaster = useAppToaster();
const { t } = useTranslation();
const dispatch = useAppDispatch();
const authToken = useStore($authToken);
const imageUrlToBlob = useImageUrlToBlob();

const downloadImage = useCallback(
async (image_url: string, image_name: string) => {
try {
const requestOpts = authToken
? {
headers: {
Authorization: `Bearer ${authToken}`,
},
}
: {};
const blob = await fetch(image_url, requestOpts).then((resp) => resp.blob());
const blob = await imageUrlToBlob(image_url);

if (!blob) {
throw new Error('Unable to create Blob');
}
@ -35,7 +26,6 @@ export const useDownloadImage = () => {
document.body.appendChild(a);
a.click();
window.URL.revokeObjectURL(url);
dispatch(imageDownloaded());
} catch (err) {
toaster({
title: t('toast.problemDownloadingImage'),
@ -46,7 +36,7 @@ export const useDownloadImage = () => {
});
}
},
[t, toaster, dispatch, authToken]
[t, toaster, imageUrlToBlob]
);

return { downloadImage };
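Note: the download hook now delegates blob creation to `useImageUrlToBlob`, centralizing the auth-token handling that was previously inlined. The hook's implementation is not part of this compare; the sketch below reconstructs it from the removed inline fetch, so treat every detail as an assumption:

```ts
import { useStore } from '@nanostores/react';
import { $authToken } from 'app/store/nanostores/authToken';
import { useCallback } from 'react';

// Hypothetical reconstruction of useImageUrlToBlob, inferred from the
// inline logic removed above; the real hook may differ.
export const useImageUrlToBlob = () => {
  const authToken = useStore($authToken);

  return useCallback(
    async (url: string): Promise<Blob | null> => {
      const response = await fetch(
        url,
        authToken ? { headers: { Authorization: `Bearer ${authToken}` } } : {}
      );
      return response.ok ? await response.blob() : null;
    },
    [authToken]
  );
};
```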
@ -28,7 +28,6 @@ import { Layer, Stage } from 'react-konva';

import IAICanvasBoundingBoxOverlay from './IAICanvasBoundingBoxOverlay';
import IAICanvasGrid from './IAICanvasGrid';
import IAICanvasIntermediateImage from './IAICanvasIntermediateImage';
import IAICanvasMaskCompositer from './IAICanvasMaskCompositer';
import IAICanvasMaskLines from './IAICanvasMaskLines';
import IAICanvasObjectRenderer from './IAICanvasObjectRenderer';
@ -55,7 +54,7 @@ const IAICanvas = () => {
const shouldShowBoundingBox = useAppSelector((s) => s.canvas.shouldShowBoundingBox);
const shouldShowGrid = useAppSelector((s) => s.canvas.shouldShowGrid);
const stageScale = useAppSelector((s) => s.canvas.stageScale);
const shouldShowIntermediates = useAppSelector((s) => s.canvas.shouldShowIntermediates);
// const shouldShowIntermediates = useAppSelector((s) => s.canvas.shouldShowIntermediates);
const shouldAntialias = useAppSelector((s) => s.canvas.shouldAntialias);
const shouldRestrictStrokesToBox = useAppSelector((s) => s.canvas.shouldRestrictStrokesToBox);
const { stageCoordinates, stageDimensions } = useAppSelector(selector);
@ -184,7 +183,7 @@ const IAICanvas = () => {
<Layer id="preview" imageSmoothingEnabled={shouldAntialias}>
{!isStaging && <IAICanvasToolPreview visible={tool !== 'move'} listening={false} />}
<IAICanvasStagingArea listening={false} visible={isStaging} />
{shouldShowIntermediates && <IAICanvasIntermediateImage />}
{/* {shouldShowIntermediates && <IAICanvasIntermediateImage />} */}
<IAICanvasBoundingBox visible={shouldShowBoundingBox && !isStaging} />
</Layer>
</ChakraStage>
@ -1,20 +1,33 @@
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { selectCanvasSlice } from 'features/canvas/store/canvasSlice';
import { selectSystemSlice } from 'features/system/store/systemSlice';
import { selectProgressSlice } from 'features/progress/store/progressSlice';
import { memo, useEffect, useState } from 'react';
import { Image as KonvaImage } from 'react-konva';

const progressImageSelector = createMemoizedSelector([selectSystemSlice, selectCanvasSlice], (system, canvas) => {
const { denoiseProgress } = system;
const { batchIds } = canvas;
export const progressImageSelector = createMemoizedSelector(
[selectProgressSlice, selectCanvasSlice],
(progress, canvas) => {
const isLatestProgressFromCanvas =
progress.latestDenoiseProgress && progress.canvasBatchIds.includes(progress.latestDenoiseProgress.queue_batch_id);

return {
progressImage:
denoiseProgress && batchIds.includes(denoiseProgress.batch_id) ? denoiseProgress.progress_image : undefined,
boundingBox: canvas.layerState.stagingArea.boundingBox,
};
});
const { selectedImageIndex, images } = canvas.layerState.stagingArea;
const _currentStagingAreaImage =
images.length > 0 && selectedImageIndex !== undefined ? images[selectedImageIndex] : undefined;

const isProgressImageIncomplete =
progress.latestDenoiseProgress?.graph_execution_state_id !==
progress.latestImageOutputEvent?.graph_execution_state_id;

return {
progressImage:
progress.latestDenoiseProgress && isLatestProgressFromCanvas && isProgressImageIncomplete
? progress.latestDenoiseProgress.progress_image
: undefined,
boundingBox: canvas.layerState.stagingArea.boundingBox,
};
}
);

const IAICanvasIntermediateImage = () => {
const { progressImage, boundingBox } = useAppSelector(progressImageSelector);
@ -5,7 +5,7 @@ import type { GroupConfig } from 'konva/lib/Group';
import { memo } from 'react';
import { Group, Rect } from 'react-konva';

import IAICanvasImage from './IAICanvasImage';
import { IAICanvasStagingAreaImage } from './IAICanvasStagingAreaImage';

const dash = [4, 4];

@ -37,12 +37,11 @@ const selector = createMemoizedSelector(selectCanvasSlice, (canvas) => {
type Props = GroupConfig;

const IAICanvasStagingArea = (props: Props) => {
const { currentStagingAreaImage, shouldShowStagingImage, shouldShowStagingOutline, x, y, width, height } =
useAppSelector(selector);
const { shouldShowStagingOutline, x, y, width, height } = useAppSelector(selector);

return (
<Group {...props}>
{shouldShowStagingImage && currentStagingAreaImage && <IAICanvasImage canvasImage={currentStagingAreaImage} />}
<IAICanvasStagingAreaImage />
{shouldShowStagingOutline && (
<Group listening={false}>
<Rect
@ -0,0 +1,71 @@
import { skipToken } from '@reduxjs/toolkit/query';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { $authToken } from 'app/store/nanostores/authToken';
import { useAppSelector } from 'app/store/storeHooks';
import IAICanvasIntermediateImage, {
progressImageSelector,
} from 'features/canvas/components/IAICanvasIntermediateImage';
import { selectCanvasSlice } from 'features/canvas/store/canvasSlice';
import { selectProgressSlice } from 'features/progress/store/progressSlice';
import { memo } from 'react';
import { Image } from 'react-konva';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import useImage from 'use-image';

import IAICanvasImageErrorFallback from './IAICanvasImageErrorFallback';

const selector = createMemoizedSelector([selectProgressSlice, selectCanvasSlice], (progress, canvas) => {
const { selectedImageIndex, images } = canvas.layerState.stagingArea;

const currentStagingAreaImage =
images.length > 0 && selectedImageIndex !== undefined ? images[selectedImageIndex] : undefined;

const progressImage =
progress.latestDenoiseProgress && progress.canvasBatchIds.includes(progress.latestDenoiseProgress.queue_batch_id)
? progress.latestDenoiseProgress.progress_image
: undefined;

const boundingBox = canvas.layerState.stagingArea.boundingBox ?? {
...canvas.boundingBoxCoordinates,
...canvas.boundingBoxDimensions,
};

return {
currentStagingAreaImage,
progressImage,
boundingBox,
};
});

export const IAICanvasStagingAreaImage = memo(() => {
const { currentStagingAreaImage, boundingBox } = useAppSelector(selector);
const { progressImage } = useAppSelector(progressImageSelector);
const { currentData: imageDTO, isError } = useGetImageDTOQuery(currentStagingAreaImage?.imageName ?? skipToken);
const [stagedImageEl, stagedImageElStatus] = useImage(
imageDTO?.image_url ?? '',
$authToken.get() ? 'use-credentials' : 'anonymous'
);

if (currentStagingAreaImage && (isError || stagedImageElStatus === 'failed')) {
return <IAICanvasImageErrorFallback canvasImage={currentStagingAreaImage} />;
}

if (progressImage) {
return <IAICanvasIntermediateImage />;
}

if (stagedImageEl) {
return (
<Image
x={boundingBox.x}
y={boundingBox.y}
width={boundingBox.width}
height={boundingBox.height}
image={stagedImageEl}
listening={false}
/>
);
}
});

IAICanvasStagingAreaImage.displayName = 'IAICanvasStagingAreaImage';
@ -1,12 +1,14 @@
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { selectProgressSlice } from 'features/progress/store/progressSlice';

import { selectCanvasSlice } from './canvasSlice';
import { isCanvasBaseImage } from './canvasTypes';

export const isStagingSelector = createSelector(
selectProgressSlice,
selectCanvasSlice,
(canvas) => canvas.batchIds.length > 0 || canvas.layerState.stagingArea.images.length > 0
(progress, canvas) => progress.canvasBatchIds.length > 0 || canvas.layerState.stagingArea.images.length > 0
);

export const initialCanvasImageSelector = createMemoizedSelector(selectCanvasSlice, (canvas) =>
@ -1,5 +1,5 @@
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import { createSlice, isAnyOf } from '@reduxjs/toolkit';
import type { PersistConfig, RootState } from 'app/store/store';
import { roundDownToMultiple, roundToMultiple } from 'common/util/roundDownToMultiple';
import calculateCoordinates from 'features/canvas/util/calculateCoordinates';
@ -15,9 +15,7 @@ import { getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/
import type { IRect, Vector2d } from 'konva/lib/types';
import { clamp, cloneDeep } from 'lodash-es';
import type { RgbaColor } from 'react-colorful';
import { queueApi } from 'services/api/endpoints/queue';
import type { ImageDTO } from 'services/api/types';
import { socketQueueItemStatusChanged } from 'services/events/actions';

import type {
BoundingBoxScaleMethod,
@ -79,7 +77,6 @@ export const initialCanvasState: CanvasState = {
stageCoordinates: { x: 0, y: 0 },
stageDimensions: { width: 0, height: 0 },
stageScale: 1,
batchIds: [],
aspectRatio: {
id: '1:1',
value: 1,
@ -180,7 +177,6 @@ export const canvasSlice = createSlice({
],
};
state.futureLayerStates = [];
state.batchIds = [];

const newScale = calculateScale(
stageDimensions.width,
@ -237,12 +233,6 @@ export const canvasSlice = createSlice({
setShouldShowBoundingBox: (state, action: PayloadAction<boolean>) => {
state.shouldShowBoundingBox = action.payload;
},
canvasBatchIdAdded: (state, action: PayloadAction<string>) => {
state.batchIds.push(action.payload);
},
canvasBatchIdsReset: (state) => {
state.batchIds = [];
},
stagingAreaInitialized: (
state,
action: PayloadAction<{
@ -293,7 +283,6 @@ export const canvasSlice = createSlice({
state.futureLayerStates = [];
state.shouldShowStagingOutline = true;
state.shouldShowStagingImage = true;
state.batchIds = [];
},
addFillRect: (state) => {
const { boundingBoxCoordinates, boundingBoxDimensions, brushColor } = state;
@ -426,7 +415,6 @@ export const canvasSlice = createSlice({
state.pastLayerStates.push(cloneDeep(state.layerState));
state.layerState = cloneDeep(initialLayerState);
state.futureLayerStates = [];
state.batchIds = [];
state.boundingBoxCoordinates = {
...initialCanvasState.boundingBoxCoordinates,
};
@ -536,7 +524,6 @@ export const canvasSlice = createSlice({
state.futureLayerStates = [];
state.shouldShowStagingOutline = true;
state.shouldShowStagingImage = true;
state.batchIds = [];
},
setBoundingBoxScaleMethod: {
reducer: (state, action: PayloadActionWithOptimalDimension<BoundingBoxScaleMethod>) => {
@ -644,23 +631,6 @@ export const canvasSlice = createSlice({
optimalDimension
);
});

builder.addCase(socketQueueItemStatusChanged, (state, action) => {
const batch_status = action.payload.data.batch_status;
if (!state.batchIds.includes(batch_status.batch_id)) {
return;
}

if (batch_status.in_progress === 0 && batch_status.pending === 0) {
state.batchIds = state.batchIds.filter((id) => id !== batch_status.batch_id);
}
});
builder.addMatcher(queueApi.endpoints.clearQueue.matchFulfilled, (state) => {
state.batchIds = [];
});
builder.addMatcher(queueApi.endpoints.cancelByBatchIds.matchFulfilled, (state, action) => {
state.batchIds = state.batchIds.filter((id) => !action.meta.arg.originalArgs.batch_ids.includes(id));
});
},
});

@ -713,8 +683,6 @@ export const {
stagingAreaInitialized,
setShouldAntialias,
canvasResized,
canvasBatchIdAdded,
canvasBatchIdsReset,
aspectRatioChanged,
scaledBoundingBoxDimensionsReset,
} = canvasSlice.actions;
@ -736,3 +704,5 @@ export const canvasPersistConfig: PersistConfig<CanvasState> = {
migrate: migrateCanvasState,
persistDenylist: [],
};

export const matchAnyStagingAreaDismissed = isAnyOf(commitStagingAreaImage, discardStagedImages);
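Note: the slice now exports `matchAnyStagingAreaDismissed`, an `isAnyOf` matcher covering both ways the staging area can be dismissed. Any other slice or listener can react to either action with a single `addMatcher`; a sketch under the assumption that the new progress slice clears its canvas batch IDs this way:

```ts
import { createSlice } from '@reduxjs/toolkit';
import { matchAnyStagingAreaDismissed } from 'features/canvas/store/canvasSlice';

// Illustrative only: the actual progressSlice shape is defined elsewhere.
const progressSlice = createSlice({
  name: 'progress',
  initialState: { canvasBatchIds: [] as string[] },
  reducers: {},
  extraReducers: (builder) => {
    // Fires for both commitStagingAreaImage and discardStagedImages.
    builder.addMatcher(matchAnyStagingAreaDismissed, (state) => {
      state.canvasBatchIds = [];
    });
  },
});
```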
@ -142,7 +142,6 @@ export interface CanvasState {
stageDimensions: Dimensions;
stageScale: number;
generationMode?: GenerationMode;
batchIds: string[];
aspectRatio: AspectRatioState;
}
@ -5,7 +5,6 @@ import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice';
import { useControlAdapterControlImage } from 'features/controlAdapters/hooks/useControlAdapterControlImage';
import { useControlAdapterProcessedControlImage } from 'features/controlAdapters/hooks/useControlAdapterProcessedControlImage';
@ -92,14 +91,19 @@ const ControlAdapterImagePreview = ({ isSmall, id }: Props) => {
return;
}

const width = roundToMultiple(controlImage.width, 8);
const height = roundToMultiple(controlImage.height, 8);

if (activeTabName === 'unifiedCanvas') {
dispatch(setBoundingBoxDimensions({ width, height }, optimalDimension));
dispatch(
setBoundingBoxDimensions(
{
width: controlImage.width,
height: controlImage.height,
},
optimalDimension
)
);
} else {
dispatch(widthChanged(width));
dispatch(heightChanged(height));
dispatch(widthChanged(controlImage.width));
dispatch(heightChanged(controlImage.height));
}
}, [controlImage, activeTabName, dispatch, optimalDimension]);
@ -6,7 +6,6 @@ import CannyProcessor from './processors/CannyProcessor';
import ColorMapProcessor from './processors/ColorMapProcessor';
import ContentShuffleProcessor from './processors/ContentShuffleProcessor';
import DepthAnyThingProcessor from './processors/DepthAnyThingProcessor';
import DWOpenposeProcessor from './processors/DWOpenposeProcessor';
import HedProcessor from './processors/HedProcessor';
import LineartAnimeProcessor from './processors/LineartAnimeProcessor';
import LineartProcessor from './processors/LineartProcessor';
@ -14,6 +13,7 @@ import MediapipeFaceProcessor from './processors/MediapipeFaceProcessor';
import MidasDepthProcessor from './processors/MidasDepthProcessor';
import MlsdImageProcessor from './processors/MlsdImageProcessor';
import NormalBaeProcessor from './processors/NormalBaeProcessor';
import OpenposeProcessor from './processors/OpenposeProcessor';
import PidiProcessor from './processors/PidiProcessor';
import ZoeDepthProcessor from './processors/ZoeDepthProcessor';

@ -73,8 +73,8 @@ const ControlAdapterProcessorComponent = ({ id }: Props) => {
return <NormalBaeProcessor controlNetId={id} processorNode={processorNode} isEnabled={isEnabled} />;
}

if (processorNode.type === 'dw_openpose_image_processor') {
return <DWOpenposeProcessor controlNetId={id} processorNode={processorNode} isEnabled={isEnabled} />;
if (processorNode.type === 'openpose_image_processor') {
return <OpenposeProcessor controlNetId={id} processorNode={processorNode} isEnabled={isEnabled} />;
}

if (processorNode.type === 'pidi_image_processor') {
@ -1,6 +1,5 @@
import { CompositeRangeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useControlAdapterBeginEndStepPct } from 'features/controlAdapters/hooks/useControlAdapterBeginEndStepPct';
import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled';
import {
@ -62,10 +61,12 @@ export const ParamControlAdapterBeginEnd = memo(({ id }: Props) => {
}

return (
<FormControl isDisabled={!isEnabled} orientation="vertical">
<InformationalPopover feature="controlNetBeginEnd">
<FormLabel>{t('controlnet.beginEndStepPercent')}</FormLabel>
</InformationalPopover>
<FormControl
isDisabled={!isEnabled}
// feature="controlNetBeginEnd"
orientation="vertical"
>
<FormLabel>{t('controlnet.beginEndStepPercent')}</FormLabel>
<CompositeRangeSlider
aria-label={ariaLabel}
value={value}
@ -2,7 +2,6 @@ import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled';
import { useControlAdapterProcessorNode } from 'features/controlAdapters/hooks/useControlAdapterProcessorNode';
import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
@ -59,9 +58,7 @@ const ParamControlAdapterProcessorSelect = ({ id }: Props) => {
}
return (
<FormControl isDisabled={!isEnabled}>
<InformationalPopover feature="controlNetProcessor">
<FormLabel>{t('controlnet.processor')}</FormLabel>
</InformationalPopover>
<FormLabel>{t('controlnet.processor')}</FormLabel>
<Combobox value={value} options={options} onChange={onChange} />
</FormControl>
);
@ -1,92 +0,0 @@
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import { useProcessorNodeChanged } from 'features/controlAdapters/components/hooks/useProcessorNodeChanged';
import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
import type { RequiredDWOpenposeImageProcessorInvocation } from 'features/controlAdapters/store/types';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './common/ProcessorWrapper';

const DEFAULTS = CONTROLNET_PROCESSORS.dw_openpose_image_processor
.default as RequiredDWOpenposeImageProcessorInvocation;

type Props = {
controlNetId: string;
processorNode: RequiredDWOpenposeImageProcessorInvocation;
isEnabled: boolean;
};

const DWOpenposeProcessor = (props: Props) => {
const { controlNetId, processorNode, isEnabled } = props;
const { image_resolution, draw_body, draw_face, draw_hands } = processorNode;
const processorChanged = useProcessorNodeChanged();
const { t } = useTranslation();

const handleDrawBodyChanged = useCallback(
(e: ChangeEvent<HTMLInputElement>) => {
processorChanged(controlNetId, { draw_body: e.target.checked });
},
[controlNetId, processorChanged]
);

const handleDrawFaceChanged = useCallback(
(e: ChangeEvent<HTMLInputElement>) => {
processorChanged(controlNetId, { draw_face: e.target.checked });
},
[controlNetId, processorChanged]
);

const handleDrawHandsChanged = useCallback(
(e: ChangeEvent<HTMLInputElement>) => {
processorChanged(controlNetId, { draw_hands: e.target.checked });
},
[controlNetId, processorChanged]
);

const handleImageResolutionChanged = useCallback(
(v: number) => {
processorChanged(controlNetId, { image_resolution: v });
},
[controlNetId, processorChanged]
);

return (
<ProcessorWrapper>
<Flex sx={{ flexDir: 'row', gap: 6 }}>
<FormControl isDisabled={!isEnabled} w="max-content">
<FormLabel>{t('controlnet.body')}</FormLabel>
<Switch defaultChecked={DEFAULTS.draw_body} isChecked={draw_body} onChange={handleDrawBodyChanged} />
</FormControl>
<FormControl isDisabled={!isEnabled} w="max-content">
<FormLabel>{t('controlnet.face')}</FormLabel>
<Switch defaultChecked={DEFAULTS.draw_face} isChecked={draw_face} onChange={handleDrawFaceChanged} />
</FormControl>
<FormControl isDisabled={!isEnabled} w="max-content">
<FormLabel>{t('controlnet.hands')}</FormLabel>
<Switch defaultChecked={DEFAULTS.draw_hands} isChecked={draw_hands} onChange={handleDrawHandsChanged} />
</FormControl>
</Flex>
<FormControl isDisabled={!isEnabled}>
<FormLabel>{t('controlnet.imageResolution')}</FormLabel>
<CompositeSlider
value={image_resolution}
onChange={handleImageResolutionChanged}
defaultValue={DEFAULTS.image_resolution}
min={0}
max={4096}
marks
/>
<CompositeNumberInput
value={image_resolution}
onChange={handleImageResolutionChanged}
defaultValue={DEFAULTS.image_resolution}
min={0}
max={4096}
/>
</FormControl>
</ProcessorWrapper>
);
};

export default memo(DWOpenposeProcessor);
@ -0,0 +1,92 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import { useProcessorNodeChanged } from 'features/controlAdapters/components/hooks/useProcessorNodeChanged';
import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
import type { RequiredOpenposeImageProcessorInvocation } from 'features/controlAdapters/store/types';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './common/ProcessorWrapper';

const DEFAULTS = CONTROLNET_PROCESSORS.openpose_image_processor.default as RequiredOpenposeImageProcessorInvocation;

type Props = {
controlNetId: string;
processorNode: RequiredOpenposeImageProcessorInvocation;
isEnabled: boolean;
};

const OpenposeProcessor = (props: Props) => {
const { controlNetId, processorNode, isEnabled } = props;
const { image_resolution, detect_resolution, hand_and_face } = processorNode;
const processorChanged = useProcessorNodeChanged();
const { t } = useTranslation();

const handleDetectResolutionChanged = useCallback(
(v: number) => {
processorChanged(controlNetId, { detect_resolution: v });
},
[controlNetId, processorChanged]
);

const handleImageResolutionChanged = useCallback(
(v: number) => {
processorChanged(controlNetId, { image_resolution: v });
},
[controlNetId, processorChanged]
);

const handleHandAndFaceChanged = useCallback(
(e: ChangeEvent<HTMLInputElement>) => {
processorChanged(controlNetId, { hand_and_face: e.target.checked });
},
[controlNetId, processorChanged]
);

return (
<ProcessorWrapper>
<FormControl isDisabled={!isEnabled}>
<FormLabel>{t('controlnet.detectResolution')}</FormLabel>
<CompositeSlider
value={detect_resolution}
onChange={handleDetectResolutionChanged}
defaultValue={DEFAULTS.detect_resolution}
min={0}
max={4096}
marks
/>
<CompositeNumberInput
value={detect_resolution}
onChange={handleDetectResolutionChanged}
defaultValue={DEFAULTS.detect_resolution}
min={0}
max={4096}
/>
</FormControl>
<FormControl isDisabled={!isEnabled}>
<FormLabel>{t('controlnet.imageResolution')}</FormLabel>
<CompositeSlider
value={image_resolution}
onChange={handleImageResolutionChanged}
defaultValue={DEFAULTS.image_resolution}
min={0}
max={4096}
marks
/>
<CompositeNumberInput
value={image_resolution}
onChange={handleImageResolutionChanged}
defaultValue={DEFAULTS.image_resolution}
min={0}
max={4096}
/>
</FormControl>
<FormControl isDisabled={!isEnabled}>
<FormLabel>{t('controlnet.handAndFace')}</FormLabel>
<Switch isChecked={hand_and_face} onChange={handleHandAndFaceChanged} />
</FormControl>
</ProcessorWrapper>
);
};

export default memo(OpenposeProcessor);
@ -205,21 +205,20 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
image_resolution: 512,
},
},
dw_openpose_image_processor: {
type: 'dw_openpose_image_processor',
openpose_image_processor: {
type: 'openpose_image_processor',
get label() {
return i18n.t('controlnet.dwOpenpose');
return i18n.t('controlnet.openPose');
},
get description() {
return i18n.t('controlnet.dwOpenposeDescription');
return i18n.t('controlnet.openPoseDescription');
},
default: {
id: 'dw_openpose_image_processor',
type: 'dw_openpose_image_processor',
id: 'openpose_image_processor',
type: 'openpose_image_processor',
detect_resolution: 512,
image_resolution: 512,
draw_body: true,
draw_face: false,
draw_hands: false,
hand_and_face: false,
},
},
pidi_image_processor: {
@ -267,7 +266,7 @@ export const CONTROLNET_MODEL_DEFAULT_PROCESSORS: {
lineart_anime: 'lineart_anime_image_processor',
softedge: 'hed_image_processor',
shuffle: 'content_shuffle_image_processor',
openpose: 'dw_openpose_image_processor',
openpose: 'openpose_image_processor',
mediapipe: 'mediapipe_face_processor',
pidi: 'pidi_image_processor',
zoe: 'zoe_depth_image_processor',
@ -11,7 +11,6 @@ import type {
ColorMapImageProcessorInvocation,
ContentShuffleImageProcessorInvocation,
DepthAnythingImageProcessorInvocation,
DWOpenposeImageProcessorInvocation,
HedImageProcessorInvocation,
LineartAnimeImageProcessorInvocation,
LineartImageProcessorInvocation,
@ -19,6 +18,7 @@ import type {
MidasDepthImageProcessorInvocation,
MlsdImageProcessorInvocation,
NormalbaeImageProcessorInvocation,
OpenposeImageProcessorInvocation,
PidiImageProcessorInvocation,
ZoeDepthImageProcessorInvocation,
} from 'services/api/types';
@ -40,7 +40,7 @@ export type ControlAdapterProcessorNode =
| MidasDepthImageProcessorInvocation
| MlsdImageProcessorInvocation
| NormalbaeImageProcessorInvocation
| DWOpenposeImageProcessorInvocation
| OpenposeImageProcessorInvocation
| PidiImageProcessorInvocation
| ZoeDepthImageProcessorInvocation;

@ -143,11 +143,11 @@ export type RequiredNormalbaeImageProcessorInvocation = O.Required<
>;

/**
* The DW Openpose processor node, with parameters flagged as required
* The Openpose processor node, with parameters flagged as required
*/
export type RequiredDWOpenposeImageProcessorInvocation = O.Required<
DWOpenposeImageProcessorInvocation,
'type' | 'image_resolution' | 'draw_body' | 'draw_face' | 'draw_hands'
export type RequiredOpenposeImageProcessorInvocation = O.Required<
OpenposeImageProcessorInvocation,
'type' | 'detect_resolution' | 'image_resolution' | 'hand_and_face'
>;

/**
@ -179,7 +179,7 @@ export type RequiredControlAdapterProcessorNode =
| RequiredMidasDepthImageProcessorInvocation
| RequiredMlsdImageProcessorInvocation
| RequiredNormalbaeImageProcessorInvocation
| RequiredDWOpenposeImageProcessorInvocation
| RequiredOpenposeImageProcessorInvocation
| RequiredPidiImageProcessorInvocation
| RequiredZoeDepthImageProcessorInvocation,
'id'
@ -299,10 +299,10 @@ export const isNormalbaeImageProcessorInvocation = (obj: unknown): obj is Normal
};

/**
* Type guard for DWOpenposeImageProcessorInvocation
* Type guard for OpenposeImageProcessorInvocation
*/
export const isDWOpenposeImageProcessorInvocation = (obj: unknown): obj is DWOpenposeImageProcessorInvocation => {
if (isObject(obj) && 'type' in obj && obj.type === 'dw_openpose_image_processor') {
export const isOpenposeImageProcessorInvocation = (obj: unknown): obj is OpenposeImageProcessorInvocation => {
if (isObject(obj) && 'type' in obj && obj.type === 'openpose_image_processor') {
return true;
}
return false;
@ -1,23 +0,0 @@
import type { DragEndEvent } from '@dnd-kit/core';
import { SortableContext, verticalListSortingStrategy } from '@dnd-kit/sortable';
import type { PropsWithChildren } from 'react';
import { memo } from 'react';

import { DndContextTypesafe } from './DndContextTypesafe';

type Props = PropsWithChildren & {
items: string[];
onDragEnd(event: DragEndEvent): void;
};

const DndSortable = (props: Props) => {
return (
<DndContextTypesafe onDragEnd={props.onDragEnd}>
<SortableContext items={props.items} strategy={verticalListSortingStrategy}>
{props.children}
</SortableContext>
</DndContextTypesafe>
);
};

export default memo(DndSortable);
@ -12,14 +12,19 @@ import type {
} from '@dnd-kit/core';
import type { BoardId } from 'features/gallery/store/types';
import type { FieldInputInstance, FieldInputTemplate } from 'features/nodes/types/field';
import type { ViewerMode } from 'features/ui/store/uiTypes';
import type { ImageDTO } from 'services/api/types';

type BaseDropData = {
id: string;
};

export type CurrentImageDropData = BaseDropData & {
actionType: 'SET_CURRENT_IMAGE';
export type ViewerImageDropData = BaseDropData & {
actionType: 'SET_VIEWER_IMAGE';
context: {
viewerMode: ViewerMode;
currentImageName?: string | null;
};
};

export type InitialImageDropData = BaseDropData & {
@ -59,13 +64,13 @@ export type RemoveFromBoardDropData = BaseDropData & {
};

export type TypesafeDroppableData =
| CurrentImageDropData
| InitialImageDropData
| ControlAdapterDropData
| CanvasInitialImageDropData
| NodesImageDropData
| AddToBoardDropData
| RemoveFromBoardDropData;
| RemoveFromBoardDropData
| ViewerImageDropData;

type BaseDragData = {
id: string;
@ -13,8 +13,11 @@ export const isValidDrop = (overData: TypesafeDroppableData | undefined, active:
}

switch (actionType) {
case 'SET_CURRENT_IMAGE':
return payloadType === 'IMAGE_DTO';
case 'SET_VIEWER_IMAGE':
return (
payloadType === 'IMAGE_DTO' &&
overData.context.currentImageName !== active.data.current.payload.imageDTO.image_name
);
case 'SET_INITIAL_IMAGE':
return payloadType === 'IMAGE_DTO';
case 'SET_CONTROL_ADAPTER_IMAGE':
@ -1,295 +0,0 @@
import { ButtonGroup, Flex, IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppToaster } from 'app/components/Toaster';
import { upscaleRequested } from 'app/store/middleware/listenerMiddleware/listeners/upscaleRequested';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { DeleteImageButton } from 'features/deleteImageModal/components/DeleteImageButton';
import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice';
import SingleSelectionMenuItems from 'features/gallery/components/ImageContextMenu/SingleSelectionMenuItems';
import { sentImageToImg2Img } from 'features/gallery/store/actions';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import { selectGallerySlice } from 'features/gallery/store/gallerySlice';
import ParamUpscalePopover from 'features/parameters/components/Upscale/ParamUpscaleSettings';
import { useRecallParameters } from 'features/parameters/hooks/useRecallParameters';
import { initialImageSelected } from 'features/parameters/store/actions';
import { useIsQueueMutationInProgress } from 'features/queue/hooks/useIsQueueMutationInProgress';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { selectSystemSlice } from 'features/system/store/systemSlice';
import { setShouldShowImageDetails, setShouldShowProgressInViewer } from 'features/ui/store/uiSlice';
import { useGetAndLoadEmbeddedWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadEmbeddedWorkflow';
import { memo, useCallback } from 'react';
import { useHotkeys } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
import {
PiArrowsCounterClockwiseBold,
PiAsteriskBold,
PiDotsThreeOutlineFill,
PiFlowArrowBold,
PiHourglassHighBold,
PiInfoBold,
PiPlantBold,
PiQuotesBold,
PiRulerBold,
} from 'react-icons/pi';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata';

const selectShouldDisableToolbarButtons = createSelector(
selectSystemSlice,
selectGallerySlice,
selectLastSelectedImage,
(system, gallery, lastSelectedImage) => {
const hasProgressImage = Boolean(system.denoiseProgress?.progress_image);
return hasProgressImage || !lastSelectedImage;
}
);

const CurrentImageButtons = () => {
const dispatch = useAppDispatch();
const isConnected = useAppSelector((s) => s.system.isConnected);
const shouldShowImageDetails = useAppSelector((s) => s.ui.shouldShowImageDetails);
const shouldShowProgressInViewer = useAppSelector((s) => s.ui.shouldShowProgressInViewer);
const lastSelectedImage = useAppSelector(selectLastSelectedImage);
const shouldDisableToolbarButtons = useAppSelector(selectShouldDisableToolbarButtons);

const isUpscalingEnabled = useFeatureStatus('upscaling').isFeatureEnabled;
const isQueueMutationInProgress = useIsQueueMutationInProgress();
const toaster = useAppToaster();
const { t } = useTranslation();

const { recallBothPrompts, recallSeed, recallWidthAndHeight, recallAllParameters } = useRecallParameters();

const { currentData: imageDTO } = useGetImageDTOQuery(lastSelectedImage?.image_name ?? skipToken);

const { metadata, isLoading: isLoadingMetadata } = useDebouncedMetadata(lastSelectedImage?.image_name);

const { getAndLoadEmbeddedWorkflow, getAndLoadEmbeddedWorkflowResult } = useGetAndLoadEmbeddedWorkflow({});

const handleLoadWorkflow = useCallback(() => {
if (!lastSelectedImage || !lastSelectedImage.has_workflow) {
return;
}
getAndLoadEmbeddedWorkflow(lastSelectedImage.image_name);
}, [getAndLoadEmbeddedWorkflow, lastSelectedImage]);

useHotkeys('w', handleLoadWorkflow, [lastSelectedImage]);

const handleClickUseAllParameters = useCallback(() => {
recallAllParameters(metadata);
}, [metadata, recallAllParameters]);

useHotkeys('a', handleClickUseAllParameters, [metadata]);

const handleUseSeed = useCallback(() => {
recallSeed(metadata?.seed);
}, [metadata?.seed, recallSeed]);

useHotkeys('s', handleUseSeed, [metadata]);

const handleUsePrompt = useCallback(() => {
recallBothPrompts(
metadata?.positive_prompt,
metadata?.negative_prompt,
metadata?.positive_style_prompt,
metadata?.negative_style_prompt
);
}, [
metadata?.negative_prompt,
metadata?.positive_prompt,
metadata?.positive_style_prompt,
metadata?.negative_style_prompt,
recallBothPrompts,
]);

useHotkeys('p', handleUsePrompt, [metadata]);

const handleRemixImage = useCallback(() => {
// Recalls all metadata parameters except seed
recallAllParameters({
...metadata,
seed: undefined,
});
}, [metadata, recallAllParameters]);

useHotkeys('r', handleRemixImage, [metadata]);

const handleUseSize = useCallback(() => {
recallWidthAndHeight(metadata?.width, metadata?.height);
}, [metadata?.width, metadata?.height, recallWidthAndHeight]);

useHotkeys('d', handleUseSize, [metadata]);

const handleSendToImageToImage = useCallback(() => {
dispatch(sentImageToImg2Img());
dispatch(initialImageSelected(imageDTO));
}, [dispatch, imageDTO]);

useHotkeys('shift+i', handleSendToImageToImage, [imageDTO]);

const handleClickUpscale = useCallback(() => {
if (!imageDTO) {
return;
}
dispatch(upscaleRequested({ imageDTO }));
}, [dispatch, imageDTO]);

const handleDelete = useCallback(() => {
if (!imageDTO) {
return;
}
dispatch(imagesToDeleteSelected([imageDTO]));
}, [dispatch, imageDTO]);

useHotkeys(
'Shift+U',
() => {
handleClickUpscale();
},
{
enabled: () => Boolean(isUpscalingEnabled && !shouldDisableToolbarButtons && isConnected),
},
[isUpscalingEnabled, imageDTO, shouldDisableToolbarButtons, isConnected]
);

const handleClickShowImageDetails = useCallback(
() => dispatch(setShouldShowImageDetails(!shouldShowImageDetails)),
[dispatch, shouldShowImageDetails]
);

useHotkeys(
'i',
() => {
if (imageDTO) {
handleClickShowImageDetails();
} else {
toaster({
title: t('toast.metadataLoadFailed'),
status: 'error',
duration: 2500,
isClosable: true,
});
}
},
[imageDTO, shouldShowImageDetails, toaster]
);

useHotkeys(
'delete',
() => {
handleDelete();
},
[dispatch, imageDTO]
);

const handleClickProgressImagesToggle = useCallback(() => {
dispatch(setShouldShowProgressInViewer(!shouldShowProgressInViewer));
}, [dispatch, shouldShowProgressInViewer]);

return (
<>
<Flex flexWrap="wrap" justifyContent="center" alignItems="center" gap={2}>
<ButtonGroup isDisabled={shouldDisableToolbarButtons}>
<Menu isLazy>
<MenuButton
as={IconButton}
aria-label={t('parameters.imageActions')}
tooltip={t('parameters.imageActions')}
isDisabled={!imageDTO}
icon={<PiDotsThreeOutlineFill />}
/>
<MenuList>{imageDTO && <SingleSelectionMenuItems imageDTO={imageDTO} />}</MenuList>
</Menu>
</ButtonGroup>

<ButtonGroup isDisabled={shouldDisableToolbarButtons}>
<IconButton
icon={<PiFlowArrowBold />}
tooltip={`${t('nodes.loadWorkflow')} (W)`}
aria-label={`${t('nodes.loadWorkflow')} (W)`}
isDisabled={!imageDTO?.has_workflow}
onClick={handleLoadWorkflow}
isLoading={getAndLoadEmbeddedWorkflowResult.isLoading}
/>
<IconButton
isLoading={isLoadingMetadata}
icon={<PiArrowsCounterClockwiseBold />}
tooltip={`${t('parameters.remixImage')} (R)`}
aria-label={`${t('parameters.remixImage')} (R)`}
isDisabled={!metadata?.positive_prompt}
onClick={handleRemixImage}
/>
<IconButton
isLoading={isLoadingMetadata}
icon={<PiQuotesBold />}
tooltip={`${t('parameters.usePrompt')} (P)`}
aria-label={`${t('parameters.usePrompt')} (P)`}
isDisabled={!metadata?.positive_prompt}
onClick={handleUsePrompt}
/>
<IconButton
isLoading={isLoadingMetadata}
icon={<PiPlantBold />}
tooltip={`${t('parameters.useSeed')} (S)`}
aria-label={`${t('parameters.useSeed')} (S)`}
isDisabled={metadata?.seed === null || metadata?.seed === undefined}
onClick={handleUseSeed}
/>
<IconButton
isLoading={isLoadingMetadata}
icon={<PiRulerBold />}
tooltip={`${t('parameters.useSize')} (D)`}
aria-label={`${t('parameters.useSize')} (D)`}
isDisabled={
metadata?.height === null ||
metadata?.height === undefined ||
metadata?.width === null ||
metadata?.width === undefined
}
onClick={handleUseSize}
/>
<IconButton
isLoading={isLoadingMetadata}
icon={<PiAsteriskBold />}
tooltip={`${t('parameters.useAll')} (A)`}
aria-label={`${t('parameters.useAll')} (A)`}
isDisabled={!metadata}
onClick={handleClickUseAllParameters}
/>
</ButtonGroup>

{isUpscalingEnabled && (
<ButtonGroup isDisabled={isQueueMutationInProgress}>
|
||||
{isUpscalingEnabled && <ParamUpscalePopover imageDTO={imageDTO} />}
|
||||
</ButtonGroup>
|
||||
)}
|
||||
|
||||
<ButtonGroup>
|
||||
<IconButton
|
||||
icon={<PiInfoBold />}
|
||||
tooltip={`${t('parameters.info')} (I)`}
|
||||
aria-label={`${t('parameters.info')} (I)`}
|
||||
isChecked={shouldShowImageDetails}
|
||||
onClick={handleClickShowImageDetails}
|
||||
/>
|
||||
</ButtonGroup>
|
||||
|
||||
<ButtonGroup>
|
||||
<IconButton
|
||||
aria-label={t('settings.displayInProgress')}
|
||||
tooltip={t('settings.displayInProgress')}
|
||||
icon={<PiHourglassHighBold />}
|
||||
isChecked={shouldShowProgressInViewer}
|
||||
onClick={handleClickProgressImagesToggle}
|
||||
/>
|
||||
</ButtonGroup>
|
||||
|
||||
<ButtonGroup>
|
||||
<DeleteImageButton onClick={handleDelete} />
|
||||
</ButtonGroup>
|
||||
</Flex>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(CurrentImageButtons);
|
@ -1,24 +0,0 @@
import { Flex } from '@invoke-ai/ui-library';
import { memo } from 'react';

import CurrentImageButtons from './CurrentImageButtons';
import CurrentImagePreview from './CurrentImagePreview';

const CurrentImageDisplay = () => {
  return (
    <Flex
      position="relative"
      flexDirection="column"
      height="100%"
      width="100%"
      rowGap={4}
      alignItems="center"
      justifyContent="center"
    >
      <CurrentImageButtons />
      <CurrentImagePreview />
    </Flex>
  );
};

export default memo(CurrentImageDisplay);
@ -1,129 +0,0 @@
import { Box, Flex } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import ProgressImage from 'features/gallery/components/CurrentImage/ProgressImage';
import ImageMetadataViewer from 'features/gallery/components/ImageMetadataViewer/ImageMetadataViewer';
import NextPrevImageButtons from 'features/gallery/components/NextPrevImageButtons';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import type { AnimationProps } from 'framer-motion';
import { AnimatePresence, motion } from 'framer-motion';
import type { CSSProperties } from 'react';
import { memo, useCallback, useMemo, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { PiImageBold } from 'react-icons/pi';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';

const selectLastSelectedImageName = createSelector(
  selectLastSelectedImage,
  (lastSelectedImage) => lastSelectedImage?.image_name
);

const CurrentImagePreview = () => {
  const shouldShowImageDetails = useAppSelector((s) => s.ui.shouldShowImageDetails);
  const imageName = useAppSelector(selectLastSelectedImageName);
  const hasDenoiseProgress = useAppSelector((s) => Boolean(s.system.denoiseProgress));
  const shouldShowProgressInViewer = useAppSelector((s) => s.ui.shouldShowProgressInViewer);

  const { currentData: imageDTO } = useGetImageDTOQuery(imageName ?? skipToken);

  const draggableData = useMemo<TypesafeDraggableData | undefined>(() => {
    if (imageDTO) {
      return {
        id: 'current-image',
        payloadType: 'IMAGE_DTO',
        payload: { imageDTO },
      };
    }
  }, [imageDTO]);

  const droppableData = useMemo<TypesafeDroppableData | undefined>(
    () => ({
      id: 'current-image',
      actionType: 'SET_CURRENT_IMAGE',
    }),
    []
  );

  // Show and hide the next/prev buttons on mouse move
  const [shouldShowNextPrevButtons, setShouldShowNextPrevButtons] = useState<boolean>(false);

  const timeoutId = useRef(0);

  const { t } = useTranslation();

  const handleMouseOver = useCallback(() => {
    setShouldShowNextPrevButtons(true);
    window.clearTimeout(timeoutId.current);
  }, []);

  const handleMouseOut = useCallback(() => {
    timeoutId.current = window.setTimeout(() => {
      setShouldShowNextPrevButtons(false);
    }, 500);
  }, []);

  return (
    <Flex
      onMouseOver={handleMouseOver}
      onMouseOut={handleMouseOut}
      width="full"
      height="full"
      alignItems="center"
      justifyContent="center"
      position="relative"
    >
      {hasDenoiseProgress && shouldShowProgressInViewer ? (
        <ProgressImage />
      ) : (
        <IAIDndImage
          imageDTO={imageDTO}
          droppableData={droppableData}
          draggableData={draggableData}
          isUploadDisabled={true}
          fitContainer
          useThumbailFallback
          dropLabel={t('gallery.setCurrentImage')}
          noContentFallback={<IAINoContentFallback icon={PiImageBold} label={t('gallery.noImageSelected')} />}
          dataTestId="image-preview"
        />
      )}
      {shouldShowImageDetails && imageDTO && (
        <Box position="absolute" top="0" width="full" height="full" borderRadius="base">
          <ImageMetadataViewer image={imageDTO} />
        </Box>
      )}
      <AnimatePresence>
        {!shouldShowImageDetails && imageDTO && shouldShowNextPrevButtons && (
          <motion.div key="nextPrevButtons" initial={initial} animate={animate} exit={exit} style={motionStyles}>
            <NextPrevImageButtons />
          </motion.div>
        )}
      </AnimatePresence>
    </Flex>
  );
};

export default memo(CurrentImagePreview);

const initial: AnimationProps['initial'] = {
  opacity: 0,
};
const animate: AnimationProps['animate'] = {
  opacity: 1,
  transition: { duration: 0.1 },
};
const exit: AnimationProps['exit'] = {
  opacity: 0,
  transition: { duration: 0.1 },
};
const motionStyles: CSSProperties = {
  position: 'absolute',
  top: '0',
  width: '100%',
  height: '100%',
  pointerEvents: 'none',
};
@ -1,38 +0,0 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Image } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { memo, useMemo } from 'react';

const CurrentImagePreview = () => {
  const progress_image = useAppSelector((s) => s.system.denoiseProgress?.progress_image);
  const shouldAntialiasProgressImage = useAppSelector((s) => s.system.shouldAntialiasProgressImage);

  const sx = useMemo<SystemStyleObject>(
    () => ({
      imageRendering: shouldAntialiasProgressImage ? 'auto' : 'pixelated',
    }),
    [shouldAntialiasProgressImage]
  );

  if (!progress_image) {
    return null;
  }

  return (
    <Image
      src={progress_image.dataURL}
      width={progress_image.width}
      height={progress_image.height}
      draggable={false}
      data-testid="progress-image"
      objectFit="contain"
      maxWidth="full"
      maxHeight="full"
      position="absolute"
      borderRadius="base"
      sx={sx}
    />
  );
};

export default memo(CurrentImagePreview);
@ -28,12 +28,11 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => {
    <Flex
      layerStyle="first"
      padding={4}
      gap={1}
      gap={4}
      flexDirection="column"
      width="full"
      height="full"
      borderRadius="base"
      position="absolute"
      overflow="hidden"
    >
      <ExternalLink href={image.image_url} label={image.image_name} />
@ -5,7 +5,7 @@ import { imageListContainerTestId } from 'features/gallery/components/ImageGrid/
import { virtuosoGridRefs } from 'features/gallery/components/ImageGrid/types';
import { useGalleryImages } from 'features/gallery/hooks/useGalleryImages';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { imageSelectionChanged } from 'features/gallery/store/gallerySlice';
import { getIsVisible } from 'features/gallery/util/getIsVisible';
import { getScrollToIndexAlign } from 'features/gallery/util/getScrollToIndexAlign';
import { clamp } from 'lodash-es';
@ -144,7 +144,7 @@ export const useGalleryNavigation = (): UseGalleryNavigationReturn => {
      if (!image || index === lastSelectedImageIndex) {
        return;
      }
      dispatch(imageSelected(image));
      dispatch(imageSelectionChanged(image));
      scrollToImage(image.image_name, index);
    },
    [dispatch, lastSelectedImageIndex, data]
@ -1,7 +1,6 @@
import { createSelector } from '@reduxjs/toolkit';
import { galleryImageClicked } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectGallerySlice, selectionChanged } from 'features/gallery/store/gallerySlice';
import { galleryImageClicked, imageSelectionChanged, selectGallerySlice } from 'features/gallery/store/gallerySlice';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import type { MouseEvent } from 'react';
import { useCallback, useMemo } from 'react';
@ -26,7 +25,7 @@ export const useMultiselect = (imageDTO?: ImageDTO) => {
      return;
    }
    if (!isMultiSelectEnabled) {
      dispatch(selectionChanged([imageDTO]));
      dispatch(imageSelectionChanged(imageDTO));
      return;
    }

@ -14,5 +14,3 @@ export const requestedBoardImagesDeletion = createAction<RequestedBoardImagesDel
export const sentImageToCanvas = createAction('gallery/sentImageToCanvas');

export const sentImageToImg2Img = createAction('gallery/sentImageToImg2Img');

export const imageDownloaded = createAction('gallery/imageDownloaded');
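The actions above are bare `createAction` events with no reducer of their own; judging by the `app/store/middleware/listenerMiddleware/...` import visible in the `useMultiselect` hunk above, events like these are observed by Redux Toolkit's listener middleware. A minimal sketch of wiring one up — the middleware instance and the effect body are assumptions for illustration, not code from this diff:

// Sketch (not part of the diff): observing a bare event action with
// Redux Toolkit's listener middleware.
import { createListenerMiddleware } from '@reduxjs/toolkit';

const listenerMiddleware = createListenerMiddleware();

listenerMiddleware.startListening({
  actionCreator: sentImageToCanvas,
  effect: (action) => {
    // Illustrative side effect; the real listener would update canvas state.
    console.log('handled', action.type); // 'gallery/sentImageToCanvas'
  },
});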
@ -1,5 +1,5 @@
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice, isAnyOf } from '@reduxjs/toolkit';
import { createAction, createSlice, isAnyOf } from '@reduxjs/toolkit';
import type { PersistConfig, RootState } from 'app/store/store';
import { uniqBy } from 'lodash-es';
import { boardsApi } from 'services/api/endpoints/boards';
@ -26,11 +26,16 @@ export const gallerySlice = createSlice({
  name: 'gallery',
  initialState: initialGalleryState,
  reducers: {
    imageSelected: (state, action: PayloadAction<ImageDTO | null>) => {
      state.selection = action.payload ? [action.payload] : [];
    },
    selectionChanged: (state, action: PayloadAction<ImageDTO[]>) => {
      state.selection = uniqBy(action.payload, (i) => i.image_name);
    imageSelectionChanged: (state, action: PayloadAction<ImageDTO[] | ImageDTO | null>) => {
      if (!action.payload) {
        state.selection = [];
        return;
      }
      if (Array.isArray(action.payload)) {
        state.selection = uniqBy(action.payload, (i) => i.image_name);
        return;
      }
      state.selection = [action.payload];
    },
    shouldAutoSwitchChanged: (state, action: PayloadAction<boolean>) => {
      state.shouldAutoSwitch = action.payload;
@ -97,14 +102,13 @@ export const gallerySlice = createSlice({
});

export const {
  imageSelected,
  shouldAutoSwitchChanged,
  autoAssignBoardOnClickChanged,
  setGalleryImageMinimumWidth,
  boardIdSelected,
  autoAddBoardIdChanged,
  galleryViewChanged,
  selectionChanged,
  imageSelectionChanged,
  boardSearchTextChanged,
  moreImagesLoaded,
} = gallerySlice.actions;
@ -130,3 +134,10 @@ export const galleryPersistConfig: PersistConfig<GalleryState> = {
  migrate: migrateGalleryState,
  persistDenylist: ['selection', 'selectedBoardId', 'galleryView', 'offset', 'limit'],
};

export const galleryImageClicked = createAction<{
  imageDTO: ImageDTO;
  shiftKey: boolean;
  ctrlKey: boolean;
  metaKey: boolean;
}>(`${gallerySlice.name}/galleryImageClicked`);
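The reducer change above folds the old `imageSelected` (single `ImageDTO` or null) and `selectionChanged` (`ImageDTO[]`) actions into a single `imageSelectionChanged` action. A minimal usage sketch of the three payload shapes it handles — the `store` handle and the DTO variables are illustrative, not part of the diff:

// Sketch: the three payload shapes imageSelectionChanged accepts.
store.dispatch(imageSelectionChanged(null));         // clears selection -> []
store.dispatch(imageSelectionChanged(imageDTO));     // single image -> [imageDTO]
store.dispatch(imageSelectionChanged([dtoA, dtoB])); // array -> deduplicated by image_name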
@ -1,7 +1,6 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { setHrfMethod } from 'features/hrf/store/hrfSlice';
import { isParameterHRFMethod } from 'features/parameters/types/parameterSchemas';
import { memo, useCallback, useMemo } from 'react';
@ -31,9 +30,7 @@ const ParamHrfMethodSelect = () => {

  return (
    <FormControl>
      <InformationalPopover feature="paramUpscaleMethod">
        <FormLabel>{t('hrf.upscaleMethod')}</FormLabel>
      </InformationalPopover>
      <FormLabel>{t('hrf.upscaleMethod')}</FormLabel>
      <Combobox value={value} options={options} onChange={onChange} />
    </FormControl>
  );
@ -1,6 +1,5 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { setHrfStrength } from 'features/hrf/store/hrfSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@ -26,9 +25,7 @@ const ParamHrfStrength = () => {

  return (
    <FormControl>
      <InformationalPopover feature="paramDenoisingStrength">
        <FormLabel>{`${t('parameters.denoisingStrength')}`}</FormLabel>
      </InformationalPopover>
      <FormLabel>{t('parameters.denoisingStrength')}</FormLabel>
      <CompositeSlider
        min={sliderMin}
        max={sliderMax}
@ -1,6 +1,5 @@
import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { setHrfEnabled } from 'features/hrf/store/hrfSlice';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
@ -19,9 +18,7 @@ const ParamHrfToggle = () => {

  return (
    <FormControl w="full">
      <InformationalPopover feature="paramHrf">
        <FormLabel flexGrow={1}>{t('hrf.enableHrf')}</FormLabel>
      </InformationalPopover>
      <FormLabel flexGrow={1}>{t('hrf.enableHrf')}</FormLabel>
      <Switch isChecked={hrfEnabled} onChange={handleHrfEnabled} />
    </FormControl>
  );
@ -10,7 +10,6 @@ import {
  Text,
} from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { LoRA } from 'features/lora/store/loraSlice';
import { loraIsEnabledChanged, loraRemoved, loraWeightChanged } from 'features/lora/store/loraSlice';
import { memo, useCallback } from 'react';
@ -58,31 +57,29 @@ export const LoRACard = memo((props: LoRACardProps) => {
          </Flex>
        </Flex>
      </CardHeader>
      <InformationalPopover feature="loraWeight">
        <CardBody>
          <CompositeSlider
            value={lora.weight}
            onChange={handleChange}
            min={-1}
            max={2}
            step={0.01}
            marks={marks}
            defaultValue={0.75}
            isDisabled={!lora.isEnabled}
          />
          <CompositeNumberInput
            value={lora.weight}
            onChange={handleChange}
            min={-5}
            max={5}
            step={0.01}
            w={20}
            flexShrink={0}
            defaultValue={0.75}
            isDisabled={!lora.isEnabled}
          />
        </CardBody>
      </InformationalPopover>
      <CardBody>
        <CompositeSlider
          value={lora.weight}
          onChange={handleChange}
          min={-1}
          max={2}
          step={0.01}
          marks={marks}
          defaultValue={0.75}
          isDisabled={!lora.isEnabled}
        />
        <CompositeNumberInput
          value={lora.weight}
          onChange={handleChange}
          min={-5}
          max={5}
          step={0.01}
          w={20}
          flexShrink={0}
          defaultValue={0.75}
          isDisabled={!lora.isEnabled}
        />
      </CardBody>
    </Card>
  );
});
@ -2,7 +2,6 @@ import type { ChakraProps } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { loraAdded, selectLoraSlice } from 'features/lora/store/loraSlice';
import { memo, useCallback, useMemo } from 'react';
@ -58,9 +57,7 @@ const LoRASelect = () => {

  return (
    <FormControl isDisabled={!options.length}>
      <InformationalPopover feature="lora">
        <FormLabel>{t('models.lora')} </FormLabel>
      </InformationalPopover>
      <FormLabel>{t('models.lora')} </FormLabel>
      <Combobox
        placeholder={placeholder}
        value={null}
@ -35,7 +35,7 @@ export const loraSlice = createSlice({
    },
    loraRecalled: (state, action: PayloadAction<LoRAModelConfigEntity & { weight: number }>) => {
      const { model_name, id, base_model, weight } = action.payload;
      state.loras[id] = { id, model_name, base_model, weight, isEnabled: true };
      state.loras[id] = { id, model_name, base_model, weight };
    },
    loraRemoved: (state, action: PayloadAction<string>) => {
      const id = action.payload;
@ -1,6 +1,7 @@
import 'reactflow/dist/style.css';

import { Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import TopPanel from 'features/nodes/components/flow/panels/TopPanel/TopPanel';
import { SaveWorkflowAsDialog } from 'features/workflowLibrary/components/SaveWorkflowAsDialog/SaveWorkflowAsDialog';
@ -10,7 +11,6 @@ import type { CSSProperties } from 'react';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { MdDeviceHub } from 'react-icons/md';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';

import AddNodePopover from './flow/AddNodePopover/AddNodePopover';
import { Flow } from './flow/Flow';
@ -40,7 +40,7 @@ const exit: AnimationProps['exit'] = {
};

const NodeEditor = () => {
  const { data, isLoading } = useGetOpenAPISchemaQuery();
  const isReady = useAppSelector((s) => s.nodes.isReady);
  const { t } = useTranslation();
  return (
    <Flex
@ -53,7 +53,7 @@ const NodeEditor = () => {
      justifyContent="center"
    >
      <AnimatePresence>
        {data && (
        {isReady && (
          <motion.div initial={initial} animate={animate} exit={exit} style={isReadyMotionStyles}>
            <Flow />
            <AddNodePopover />
@ -65,7 +65,7 @@ const NodeEditor = () => {
        )}
      </AnimatePresence>
      <AnimatePresence>
        {isLoading && (
        {!isReady && (
          <motion.div initial={initial} animate={animate} exit={exit} style={notIsReadyMotionStyles}>
            <Flex
              layerStyle="first"
Some files were not shown because too many files have changed in this diff.