Mirror of https://github.com/invoke-ai/InvokeAI
Synced 2024-08-30 20:32:17 +00:00

Compare commits: 88 commits, 3.6.3rc1...bugfix/han
Commits (SHA1):
b3abc7252d
9986fce1a6
228f1d7f62
01a6378dc1
e01769294f
16aa261e28
1dabf18d14
115d92b1ae
f0d4c71960
3e48edda6f
716b584f03
d43b843c23
f36b5990ed
5706237ec7
163b22a7b3
c5aeb36230
5e77f0d93b
d3acb81743
e0f2404c00
5ed7972e5f
792131be01
fc278c5cb1
d7f6af1f07
ff9bd040cc
17d5f7bebd
30dae0f5aa
161000cde6
de832f6862
21ba3c63de
a948bd1310
2071972a8c
5ed2f6e6c1
b77f6bd0ad
34cc26a4ed
9d6e4ff1fb
85bbf65967
3726293258
8bd65be8c8
783442c40d
8a147bd6e6
273994b742
3339ad4df8
c3b2a8cb27
daa780940b
2289680ae1
cda85a0637
1d9801e7be
3ecb1e580f
6301e58a2e
5dd552effa
25ce505628
1dd07fb1eb
e82c21b5ba
50b93992cf
f8e566d62a
f588b95c7f
67daf1751c
7d80261d47
67cbfeb33d
f7998b4be0
675c73c94f
0a27b0379f
0ef18b6477
6539ef7c9f
14c9a1e4f3
64b0feca31
0be9a2d906
d925f721b9
4e5be1891a
156d4ec3b2
c45a43519a
763816ca0c
83a7c9059f
c5f069a255
cd169ee082
66b106f107
b10d745dae
d20f98fb4f
c9c150f850
a60e2b7c77
da6e5b2ba1
c65d497cbc
a68d8fe203
5de2288cfa
2ce70b4457
6c5f743e2b
bb242c4e1e
c9e246ed1b
@@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai
 ENV VIRTUAL_ENV=/opt/venv/invokeai
 ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-ARG TORCH_VERSION=2.1.0
-ARG TORCHVISION_VERSION=0.16
+ARG TORCH_VERSION=2.1.2
+ARG TORCHVISION_VERSION=0.16.2
 ARG GPU_DRIVER=cuda
 ARG TARGETPLATFORM="linux/amd64"
 # unused but available

@@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
 if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
     extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
 elif [ "$GPU_DRIVER" = "rocm" ]; then \
-    extra_index_url_arg="--index-url https://download.pytorch.org/whl/rocm5.6"; \
+    extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.6"; \
 else \
     extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
 fi &&\

@@ -54,7 +54,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
 if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
     pip install -e ".[xformers]"; \
 else \
-    pip install -e "."; \
+    pip install $extra_index_url_arg -e "."; \
 fi

 # #### Build the Web UI ------------------------------------
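For illustration, the `GPU_DRIVER` build arg above is what selects the torch wheel index at build time. A minimal sketch, assuming the Dockerfile lives under `docker/` as in the InvokeAI repo (image tags are placeholders):

```bash
# CUDA build (the default): installs torch 2.1.2 from the cu121 extra index
docker build -t invokeai:local docker/

# ROCm build: with the fix above, pulls torch from the rocm5.6 extra index
docker build --build-arg GPU_DRIVER=rocm -t invokeai:local-rocm docker/

# CPU-only build
docker build --build-arg GPU_DRIVER=cpu -t invokeai:local-cpu docker/
```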
@@ -28,7 +28,7 @@ This is done via Docker Desktop preferences

 ### Configure Invoke environment

-1. Make a copy of `env.sample` and name it `.env` (`cp env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
+1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
    a. the desired location of the InvokeAI runtime directory, or
    b. an existing, v3.0.0 compatible runtime directory.
 1. Execute `run.sh`
@@ -21,7 +21,7 @@ run() {
     printf "%s\n" "$build_args"
   fi

-  docker compose build $build_args
+  docker compose build $build_args $service_name
   unset build_args

   printf "%s\n" "starting service $service_name"
@@ -94,6 +94,8 @@ A model that helps generate creative QR codes that still scan. Can also be used
 **Openpose**:
 The OpenPose control model allows for the identification of the general pose of a character by pre-processing an existing image with a clear human structure. With advanced options, Openpose can also detect the face or hands in the image.

+*Note:* The DWPose Processor has replaced the OpenPose processor in Invoke. Workflows and generations that relied on the OpenPose Processor will need to be updated to use the DWPose Processor instead.
+
 **Mediapipe Face**:

 The MediaPipe Face identification processor is able to clearly identify facial features in order to capture vivid expressions of human faces.
@@ -230,13 +230,13 @@ manager, please follow these steps:
 === "local Webserver"

     ```bash
-    invokeai --web
+    invokeai-web
     ```

 === "Public Webserver"

     ```bash
-    invokeai --web --host 0.0.0.0
+    invokeai-web --host 0.0.0.0
     ```

 === "CLI"

@@ -402,4 +402,4 @@ environment variable INVOKEAI_ROOT to point to the installation directory.

 Note that if you run into problems with the Conda installation, the InvokeAI
 staff will **not** be able to help you out. Caveat Emptor!

-[dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
+[dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
@@ -69,7 +69,7 @@ a token and copy it, since you will need it for the next step.

 ### Setup

-Set up your environment variables. In the `docker` directory, make a copy of `env.sample` and name it `.env`. Make changes as necessary.
+Set up your environment variables. In the `docker` directory, make a copy of `.env.sample` and name it `.env`. Make changes as necessary.

 Any environment variables supported by InvokeAI can be set here - please see the [CONFIGURATION](../features/CONFIGURATION.md) for further detail.
@@ -32,6 +32,7 @@ To use a community workflow, download the `.json` node graph file and load it
 + [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
 + [Image Picker](#image-picker)
 + [Image Resize Plus](#image-resize-plus)
++ [Latent Upscale](#latent-upscale)
 + [Load Video Frame](#load-video-frame)
 + [Make 3D](#make-3d)
 + [Mask Operations](#mask-operations)

@@ -290,6 +291,13 @@ View:
 </br><img src="https://raw.githubusercontent.com/VeyDlin/image-resize-plus-node/master/.readme/node.png" width="500" />

+--------------------------------
+### Latent Upscale
+
+**Description:** This node uses a small (~2.4mb) model to upscale the latents used in a Stable Diffusion 1.5 or Stable Diffusion XL image generation, rather than the typical interpolation method, avoiding the traditional downsides of the latent upscale technique.
+
+**Node Link:** [https://github.com/gogurtenjoyer/latent-upscale](https://github.com/gogurtenjoyer/latent-upscale)
+
 --------------------------------
 ### Load Video Frame
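For context, the "typical interpolation method" the description refers to is a plain tensor resize of the latents before decoding. A minimal sketch of that baseline (shapes are illustrative, not Invoke's API):

```python
import torch
import torch.nn.functional as F

# A Stable Diffusion 1.5 latent: 4 channels at 1/8 the pixel resolution.
latents = torch.randn(1, 4, 64, 64)  # corresponds to a 512x512 image

# Naive latent resize -- the baseline that model-based latent upscalers improve on.
upscaled = F.interpolate(latents, scale_factor=2, mode="bilinear")
print(upscaled.shape)  # torch.Size([1, 4, 128, 128]) -> decodes to ~1024x1024
```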
@@ -81,7 +81,7 @@ their descriptions.
 | ONNX Text to Latents | Generates latents from conditionings. |
 | ONNX Model Loader | Loads a main model, outputting its submodels. |
 | OpenCV Inpaint | Simple inpaint using opencv. |
-| Openpose Processor | Applies Openpose processing to image |
+| DW Openpose Processor | Applies Openpose processing to image |
 | PIDI Processor | Applies PIDI processing to image |
 | Prompts from File | Loads prompts from a text file |
 | Random Integer | Outputs a single random integer. |
@@ -91,8 +91,8 @@ def choose_version(available_releases: tuple | None = None) -> str:
         complete_while_typing=True,
         completer=FuzzyWordCompleter(choices),
     )
-    console.print(f" Version {choices[0] if response == '' else response} will be installed.")
+    console.print(f" Version {choices[0]} will be installed.")
     console.line()

     return "stable" if response == "" else response
@@ -14,7 +14,7 @@ class SocketIO:

     def __init__(self, app: FastAPI):
         self.__sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*")
-        self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="socket.io")
+        self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="/ws/socket.io")
         app.mount("/ws", self.__app)

         self.__sio.on("subscribe_queue", handler=self._handle_sub_queue)
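A minimal sketch of a client connecting to the relocated endpoint (host, port, and the event payload are placeholders; python-socketio's AsyncClient is assumed):

```python
import asyncio

import socketio


async def main():
    sio = socketio.AsyncClient()
    # The Engine.IO endpoint lives under the /ws mount, so the client
    # must use the full socketio_path rather than the default "socket.io".
    await sio.connect("http://localhost:9090", socketio_path="/ws/socket.io")
    await sio.emit("subscribe_queue", {"queue_id": "default"})  # illustrative payload
    await sio.disconnect()


asyncio.run(main())
```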
@@ -17,7 +17,6 @@ from controlnet_aux import (
     MidasDetector,
     MLSDdetector,
     NormalBaeDetector,
-    OpenposeDetector,
     PidiNetDetector,
     SamDetector,
     ZoeDetector,

@@ -31,6 +30,7 @@ from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
 from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
+from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector

 from ...backend.model_management import BaseModelType
 from .baseinvocation import (

@@ -276,31 +276,6 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
         return processed_image


-@invocation(
-    "openpose_image_processor",
-    title="Openpose Processor",
-    tags=["controlnet", "openpose", "pose"],
-    category="controlnet",
-    version="1.2.0",
-)
-class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
-    """Applies Openpose processing to image"""
-
-    hand_and_face: bool = InputField(default=False, description="Whether to use hands and face mode")
-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
-
-    def run_processor(self, image):
-        openpose_processor = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
-        processed_image = openpose_processor(
-            image,
-            detect_resolution=self.detect_resolution,
-            image_resolution=self.image_resolution,
-            hand_and_face=self.hand_and_face,
-        )
-        return processed_image
-
-
 @invocation(
     "midas_depth_image_processor",
     title="Midas Depth Processor",

@@ -624,7 +599,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
     resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
     offload: bool = InputField(default=False)

-    def run_processor(self, image):
+    def run_processor(self, image: Image.Image):
         depth_anything_detector = DepthAnythingDetector()
         depth_anything_detector.load_model(model_size=self.model_size)

@@ -633,3 +608,30 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):

         processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload)
         return processed_image
+
+
+@invocation(
+    "dw_openpose_image_processor",
+    title="DW Openpose Image Processor",
+    tags=["controlnet", "dwpose", "openpose"],
+    category="controlnet",
+    version="1.0.0",
+)
+class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
+    """Generates an openpose pose from an image using DWPose"""
+
+    draw_body: bool = InputField(default=True)
+    draw_face: bool = InputField(default=False)
+    draw_hands: bool = InputField(default=False)
+    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+
+    def run_processor(self, image):
+        dw_openpose = DWOpenposeDetector()
+        processed_image = dw_openpose(
+            image,
+            draw_face=self.draw_face,
+            draw_hands=self.draw_hands,
+            draw_body=self.draw_body,
+            resolution=self.image_resolution,
+        )
+        return processed_image
@@ -5,12 +5,12 @@ from typing import Literal
 import cv2
 import numpy as np
 import torch
-from basicsr.archs.rrdbnet_arch import RRDBNet
 from PIL import Image
 from pydantic import ConfigDict

 from invokeai.app.invocations.primitives import ImageField, ImageOutput
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
+from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
 from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
 from invokeai.backend.util.devices import choose_torch_device
@@ -154,7 +154,7 @@ class ImageService(ImageServiceABC):
             self.__invoker.services.logger.error("Image record not found")
             raise
         except Exception as e:
-            self.__invoker.services.logger.error("Problem getting image DTO")
+            self.__invoker.services.logger.error("Problem getting image metadata")
             raise e

     def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
@@ -2,7 +2,7 @@

 import copy
 import itertools
-from typing import Annotated, Any, Optional, Union, get_args, get_origin, get_type_hints
+from typing import Annotated, Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints

 import networkx as nx
 from pydantic import BaseModel, ConfigDict, field_validator, model_validator

@@ -141,6 +141,16 @@ def are_connections_compatible(
     return are_connection_types_compatible(from_node_field, to_node_field)


+T = TypeVar("T")
+
+
+def copydeep(obj: T) -> T:
+    """Deep-copies an object. If it is a pydantic model, use the model's copy method."""
+    if isinstance(obj, BaseModel):
+        return obj.model_copy(deep=True)
+    return copy.deepcopy(obj)
+
+
 class NodeAlreadyInGraphError(ValueError):
     pass

@@ -530,7 +540,7 @@ class Graph(BaseModel):
         except NodeNotFoundError:
             return False

-    def get_node(self, node_path: str) -> InvocationsUnion:
+    def get_node(self, node_path: str) -> BaseInvocation:
         """Gets a node from the graph using a node path."""
         # Materialized graphs may have nodes at the top level
         graph, node_id = self._get_graph_and_node(node_path)

@@ -881,7 +891,7 @@ class GraphExecutionState(BaseModel):
         # If next is still none, there's no next node, return None
         return next_node

-    def complete(self, node_id: str, output: InvocationOutputsUnion):
+    def complete(self, node_id: str, output: BaseInvocationOutput) -> None:
         """Marks a node as complete"""

         if node_id not in self.execution_graph.nodes:

@@ -1118,17 +1128,22 @@ class GraphExecutionState(BaseModel):

     def _prepare_inputs(self, node: BaseInvocation):
         input_edges = [e for e in self.execution_graph.edges if e.destination.node_id == node.id]
+        # Inputs must be deep-copied, else if a node mutates the object, other nodes that get the same input
+        # will see the mutation.
         if isinstance(node, CollectInvocation):
             output_collection = [
-                getattr(self.results[edge.source.node_id], edge.source.field)
+                copydeep(getattr(self.results[edge.source.node_id], edge.source.field))
                 for edge in input_edges
                 if edge.destination.field == "item"
             ]
             node.collection = output_collection
         else:
             for edge in input_edges:
-                output_value = getattr(self.results[edge.source.node_id], edge.source.field)
-                setattr(node, edge.destination.field, output_value)
+                setattr(
+                    node,
+                    edge.destination.field,
+                    copydeep(getattr(self.results[edge.source.node_id], edge.source.field)),
+                )

     # TODO: Add API for modifying underlying graph that checks if the change will be valid given the current execution state
     def _is_edge_valid(self, edge: Edge) -> bool:
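A minimal sketch of the aliasing bug that `copydeep` guards against (the model below is illustrative, not an Invoke output type):

```python
from pydantic import BaseModel


class Output(BaseModel):
    items: list[int] = []


shared = Output(items=[1, 2, 3])

# Without a deep copy, two downstream consumers alias the same list:
a = shared.items
b = shared.items
a.append(4)
print(b)  # [1, 2, 3, 4] -- consumer B sees consumer A's mutation

# With a deep copy per consumer, each gets its own object:
c = shared.model_copy(deep=True).items
c.append(5)
print(shared.items)  # [1, 2, 3, 4] -- the shared result is unchanged
```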
invokeai/backend/image_util/basicsr/LICENSE (new file, 201 lines)

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright 2018-2022 BasicSR Authors

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
invokeai/backend/image_util/basicsr/__init__.py (new file, 18 lines)

"""
Adapted from https://github.com/XPixelGroup/BasicSR
License: Apache-2.0

As of Feb 2024, `basicsr` appears to be unmaintained. It imports a function from `torchvision` that is removed in
`torchvision` 0.17. Here is the deprecation warning:

    UserWarning: The torchvision.transforms.functional_tensor module is deprecated in 0.15 and will be **removed in
    0.17**. Please don't rely on it. You probably just need to use APIs in torchvision.transforms.functional or in
    torchvision.transforms.v2.functional.

As a result, a dependency on `basicsr` means we cannot keep our `torchvision` dependency up to date.

Because we only rely on a single class `RRDBNet` from `basicsr`, we've copied the relevant code here and removed the
dependency on `basicsr`.

The code is almost unchanged, only a few type annotations have been added. The license is also copied.
"""
invokeai/backend/image_util/basicsr/arch_util.py (new file, 75 lines)

from typing import Type

import torch
from torch import nn as nn
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm


@torch.no_grad()
def default_init_weights(
    module_list: list[nn.Module] | nn.Module, scale: float = 1, bias_fill: float = 0, **kwargs
) -> None:
    """Initialize network weights.

    Args:
        module_list (list[nn.Module] | nn.Module): Modules to be initialized.
        scale (float): Scale initialized weights, especially for residual
            blocks. Default: 1.
        bias_fill (float): The value to fill bias. Default: 0
        kwargs (dict): Other arguments for initialization function.
    """
    if not isinstance(module_list, list):
        module_list = [module_list]
    for module in module_list:
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, _BatchNorm):
                init.constant_(m.weight, 1)
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)


def make_layer(basic_block: Type[nn.Module], num_basic_block: int, **kwarg) -> nn.Sequential:
    """Make layers by stacking the same blocks.

    Args:
        basic_block (Type[nn.Module]): nn.Module class for basic block.
        num_basic_block (int): number of blocks.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    layers = []
    for _ in range(num_basic_block):
        layers.append(basic_block(**kwarg))
    return nn.Sequential(*layers)


# TODO: may write a cpp file
def pixel_unshuffle(x: torch.Tensor, scale: int) -> torch.Tensor:
    """Pixel unshuffle.

    Args:
        x (Tensor): Input feature with shape (b, c, hh, hw).
        scale (int): Downsample ratio.

    Returns:
        Tensor: the pixel unshuffled feature.
    """
    b, c, hh, hw = x.size()
    out_channel = c * (scale**2)
    assert hh % scale == 0 and hw % scale == 0
    h = hh // scale
    w = hw // scale
    x_view = x.view(b, c, h, scale, w, scale)
    return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
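A quick shape check of `pixel_unshuffle`, assuming the function above is importable:

```python
import torch

x = torch.arange(16.0).view(1, 1, 4, 4)  # (b=1, c=1, hh=4, hw=4)
y = pixel_unshuffle(x, scale=2)
# Spatial size is halved in each dimension; channels grow by scale**2.
print(y.shape)  # torch.Size([1, 4, 2, 2])
```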
invokeai/backend/image_util/basicsr/rrdbnet_arch.py (new file, 125 lines)

import torch
from torch import nn as nn
from torch.nn import functional as F

from .arch_util import default_init_weights, make_layer, pixel_unshuffle


class ResidualDenseBlock(nn.Module):
    """Residual Dense Block.

    Used in RRDB block in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat: int = 64, num_grow_ch: int = 32) -> None:
        super(ResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
        self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # initialization
        default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x1 = self.lrelu(self.conv1(x))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # Empirically, we use 0.2 to scale the residual for better performance
        return x5 * 0.2 + x


class RRDB(nn.Module):
    """Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat: int, num_grow_ch: int = 32) -> None:
        super(RRDB, self).__init__()
        self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.rdb1(x)
        out = self.rdb2(out)
        out = self.rdb3(out)
        # Empirically, we use 0.2 to scale the residual for better performance
        return out * 0.2 + x


class RRDBNet(nn.Module):
    """Networks consisting of Residual in Residual Dense Block, which is used
    in ESRGAN.

    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.

    We extend ESRGAN for scale x2 and scale x1.
    Note: This is one option for scale 1, scale 2 in RRDBNet.
    We first employ the pixel-unshuffle (an inverse operation of pixelshuffle) to reduce the spatial size
    and enlarge the channel size before feeding inputs into the main ESRGAN architecture.

    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        num_feat (int): Channel number of intermediate features.
            Default: 64
        num_block (int): Block number in the trunk network. Defaults: 23
        num_grow_ch (int): Channels for each growth. Default: 32.
    """

    def __init__(
        self,
        num_in_ch: int,
        num_out_ch: int,
        scale: int = 4,
        num_feat: int = 64,
        num_block: int = 23,
        num_grow_ch: int = 32,
    ) -> None:
        super(RRDBNet, self).__init__()
        self.scale = scale
        if scale == 2:
            num_in_ch = num_in_ch * 4
        elif scale == 1:
            num_in_ch = num_in_ch * 16
        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
        self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        # upsample
        self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.scale == 2:
            feat = pixel_unshuffle(x, scale=2)
        elif self.scale == 1:
            feat = pixel_unshuffle(x, scale=4)
        else:
            feat = x
        feat = self.conv_first(feat)
        body_feat = self.conv_body(self.body(feat))
        feat = feat + body_feat
        # upsample
        feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode="nearest")))
        feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode="nearest")))
        out = self.conv_last(self.lrelu(self.conv_hr(feat)))
        return out
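A minimal sketch of the vendored network's input/output contract (random, untrained weights; in Invoke the weights come from ESRGAN-family checkpoints):

```python
import torch

net = RRDBNet(num_in_ch=3, num_out_ch=3, scale=4, num_feat=64, num_block=23)
with torch.no_grad():
    out = net(torch.randn(1, 3, 64, 64))
# Two nearest-neighbor x2 upsample stages give a 4x spatial upscale.
print(out.shape)  # torch.Size([1, 3, 256, 256])
```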
invokeai/backend/image_util/dw_openpose/__init__.py (new file, 81 lines)

import numpy as np
import torch
from controlnet_aux.util import resize_image
from PIL import Image

from invokeai.backend.image_util.dw_openpose.utils import draw_bodypose, draw_facepose, draw_handpose
from invokeai.backend.image_util.dw_openpose.wholebody import Wholebody


def draw_pose(pose, H, W, draw_face=True, draw_body=True, draw_hands=True, resolution=512):
    bodies = pose["bodies"]
    faces = pose["faces"]
    hands = pose["hands"]
    candidate = bodies["candidate"]
    subset = bodies["subset"]
    canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8)

    if draw_body:
        canvas = draw_bodypose(canvas, candidate, subset)

    if draw_hands:
        canvas = draw_handpose(canvas, hands)

    if draw_face:
        canvas = draw_facepose(canvas, faces)

    dwpose_image = resize_image(
        canvas,
        resolution,
    )
    dwpose_image = Image.fromarray(dwpose_image)

    return dwpose_image


class DWOpenposeDetector:
    """
    Code from the original implementation of the DW Openpose Detector.
    Credits: https://github.com/IDEA-Research/DWPose
    """

    def __init__(self) -> None:
        self.pose_estimation = Wholebody()

    def __call__(
        self, image: Image.Image, draw_face=False, draw_body=True, draw_hands=False, resolution=512
    ) -> Image.Image:
        np_image = np.array(image)
        H, W, C = np_image.shape

        with torch.no_grad():
            candidate, subset = self.pose_estimation(np_image)
            nums, keys, locs = candidate.shape
            candidate[..., 0] /= float(W)
            candidate[..., 1] /= float(H)
            body = candidate[:, :18].copy()
            body = body.reshape(nums * 18, locs)
            score = subset[:, :18]
            for i in range(len(score)):
                for j in range(len(score[i])):
                    if score[i][j] > 0.3:
                        score[i][j] = int(18 * i + j)
                    else:
                        score[i][j] = -1

            un_visible = subset < 0.3
            candidate[un_visible] = -1

            # foot = candidate[:, 18:24]

            faces = candidate[:, 24:92]

            hands = candidate[:, 92:113]
            hands = np.vstack([hands, candidate[:, 113:]])

            bodies = {"candidate": body, "subset": score}
            pose = {"bodies": bodies, "hands": hands, "faces": faces}

            return draw_pose(
                pose, H, W, draw_face=draw_face, draw_hands=draw_hands, draw_body=draw_body, resolution=resolution
            )
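A sketch of end-to-end use (file names are placeholders; instantiating `Wholebody` is assumed to load the DWPose ONNX detection and pose models):

```python
from PIL import Image

detector = DWOpenposeDetector()
pose_map = detector(Image.open("person.png").convert("RGB"), draw_body=True, draw_hands=True)
pose_map.save("pose.png")  # black canvas with the drawn skeleton, usable as ControlNet input
```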
invokeai/backend/image_util/dw_openpose/onnxdet.py (new file, 128 lines)

# Code from the original DWPose Implementation: https://github.com/IDEA-Research/DWPose

import cv2
import numpy as np


def nms(boxes, scores, nms_thr):
    """Single class NMS implemented in Numpy."""
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= nms_thr)[0]
        order = order[inds + 1]

    return keep


def multiclass_nms(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-aware version."""
    final_dets = []
    num_classes = scores.shape[1]
    for cls_ind in range(num_classes):
        cls_scores = scores[:, cls_ind]
        valid_score_mask = cls_scores > score_thr
        if valid_score_mask.sum() == 0:
            continue
        else:
            valid_scores = cls_scores[valid_score_mask]
            valid_boxes = boxes[valid_score_mask]
            keep = nms(valid_boxes, valid_scores, nms_thr)
            if len(keep) > 0:
                cls_inds = np.ones((len(keep), 1)) * cls_ind
                dets = np.concatenate([valid_boxes[keep], valid_scores[keep, None], cls_inds], 1)
                final_dets.append(dets)
    if len(final_dets) == 0:
        return None
    return np.concatenate(final_dets, 0)


def demo_postprocess(outputs, img_size, p6=False):
    grids = []
    expanded_strides = []
    strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]

    hsizes = [img_size[0] // stride for stride in strides]
    wsizes = [img_size[1] // stride for stride in strides]

    for hsize, wsize, stride in zip(hsizes, wsizes, strides, strict=False):
        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
        grids.append(grid)
        shape = grid.shape[:2]
        expanded_strides.append(np.full((*shape, 1), stride))

    grids = np.concatenate(grids, 1)
    expanded_strides = np.concatenate(expanded_strides, 1)
    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides

    return outputs


def preprocess(img, input_size, swap=(2, 0, 1)):
    if len(img.shape) == 3:
        padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
    else:
        padded_img = np.ones(input_size, dtype=np.uint8) * 114

    r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
    resized_img = cv2.resize(
        img,
        (int(img.shape[1] * r), int(img.shape[0] * r)),
        interpolation=cv2.INTER_LINEAR,
    ).astype(np.uint8)
    padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img

    padded_img = padded_img.transpose(swap)
    padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
    return padded_img, r


def inference_detector(session, oriImg):
    input_shape = (640, 640)
    img, ratio = preprocess(oriImg, input_shape)

    ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
    output = session.run(None, ort_inputs)
    predictions = demo_postprocess(output[0], input_shape)[0]

    boxes = predictions[:, :4]
    scores = predictions[:, 4:5] * predictions[:, 5:]

    boxes_xyxy = np.ones_like(boxes)
    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
    boxes_xyxy /= ratio
    dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
    if dets is not None:
        final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
        isscore = final_scores > 0.3
        iscat = final_cls_inds == 0
        isbbox = [i and j for (i, j) in zip(isscore, iscat, strict=False)]
        final_boxes = final_boxes[isbbox]
    else:
        final_boxes = np.array([])

    return final_boxes
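A tiny worked example of the `nms` helper above: with `nms_thr=0.5`, the lower-scoring of two heavily overlapping boxes is suppressed (values are illustrative):

```python
import numpy as np

boxes = np.array([
    [0, 0, 10, 10],    # box A
    [1, 1, 11, 11],    # box B: IoU with A is ~0.70, above the threshold
    [50, 50, 60, 60],  # box C: no overlap with A
], dtype=np.float32)
scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)

print(nms(boxes, scores, nms_thr=0.5))  # indices [0, 2] -- B is suppressed by A
```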
361
invokeai/backend/image_util/dw_openpose/onnxpose.py
Normal file
361
invokeai/backend/image_util/dw_openpose/onnxpose.py
Normal file
@ -0,0 +1,361 @@
|
||||
# Code from the original DWPose Implementation: https://github.com/IDEA-Research/DWPose
|
||||
|
||||
from typing import List, Tuple
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import onnxruntime as ort
|
||||
|
||||
|
||||
def preprocess(
|
||||
img: np.ndarray, out_bbox, input_size: Tuple[int, int] = (192, 256)
|
||||
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
|
||||
"""Do preprocessing for RTMPose model inference.
|
||||
|
||||
Args:
|
||||
img (np.ndarray): Input image in shape.
|
||||
input_size (tuple): Input image size in shape (w, h).
|
||||
|
||||
Returns:
|
||||
tuple:
|
||||
- resized_img (np.ndarray): Preprocessed image.
|
||||
- center (np.ndarray): Center of image.
|
||||
- scale (np.ndarray): Scale of image.
|
||||
"""
|
||||
# get shape of image
|
||||
img_shape = img.shape[:2]
|
||||
out_img, out_center, out_scale = [], [], []
|
||||
if len(out_bbox) == 0:
|
||||
out_bbox = [[0, 0, img_shape[1], img_shape[0]]]
|
||||
for i in range(len(out_bbox)):
|
||||
x0 = out_bbox[i][0]
|
||||
y0 = out_bbox[i][1]
|
||||
x1 = out_bbox[i][2]
|
||||
y1 = out_bbox[i][3]
|
||||
bbox = np.array([x0, y0, x1, y1])
|
||||
|
||||
# get center and scale
|
||||
center, scale = bbox_xyxy2cs(bbox, padding=1.25)
|
||||
|
||||
# do affine transformation
|
||||
resized_img, scale = top_down_affine(input_size, scale, center, img)
|
||||
|
||||
# normalize image
|
||||
mean = np.array([123.675, 116.28, 103.53])
|
||||
std = np.array([58.395, 57.12, 57.375])
|
||||
resized_img = (resized_img - mean) / std
|
||||
|
||||
out_img.append(resized_img)
|
||||
out_center.append(center)
|
||||
out_scale.append(scale)
|
||||
|
||||
return out_img, out_center, out_scale
|
||||
|
||||
|
||||
def inference(sess: ort.InferenceSession, img: np.ndarray) -> np.ndarray:
|
||||
"""Inference RTMPose model.
|
||||
|
||||
Args:
|
||||
sess (ort.InferenceSession): ONNXRuntime session.
|
||||
img (np.ndarray): Input image in shape.
|
||||
|
||||
Returns:
|
||||
outputs (np.ndarray): Output of RTMPose model.
|
||||
"""
|
||||
all_out = []
|
||||
# build input
|
||||
for i in range(len(img)):
|
||||
input = [img[i].transpose(2, 0, 1)]
|
||||
|
||||
# build output
|
||||
sess_input = {sess.get_inputs()[0].name: input}
|
||||
sess_output = []
|
||||
for out in sess.get_outputs():
|
||||
sess_output.append(out.name)
|
||||
|
||||
# run model
|
||||
outputs = sess.run(sess_output, sess_input)
|
||||
all_out.append(outputs)
|
||||
|
||||
return all_out
|
||||
|
||||
|
||||
def postprocess(
|
||||
outputs: List[np.ndarray],
|
||||
model_input_size: Tuple[int, int],
|
||||
center: Tuple[int, int],
|
||||
scale: Tuple[int, int],
|
||||
simcc_split_ratio: float = 2.0,
|
||||
) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Postprocess for RTMPose model output.
|
||||
|
||||
Args:
|
||||
outputs (np.ndarray): Output of RTMPose model.
|
||||
model_input_size (tuple): RTMPose model Input image size.
|
||||
center (tuple): Center of bbox in shape (x, y).
|
||||
scale (tuple): Scale of bbox in shape (w, h).
|
||||
simcc_split_ratio (float): Split ratio of simcc.
|
||||
|
||||
Returns:
|
||||
tuple:
|
||||
- keypoints (np.ndarray): Rescaled keypoints.
|
||||
- scores (np.ndarray): Model predict scores.
|
||||
"""
|
||||
all_key = []
|
||||
all_score = []
|
||||
for i in range(len(outputs)):
|
||||
# use simcc to decode
|
||||
simcc_x, simcc_y = outputs[i]
|
||||
keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio)
|
||||
|
||||
# rescale keypoints
|
||||
keypoints = keypoints / model_input_size * scale[i] + center[i] - scale[i] / 2
|
||||
all_key.append(keypoints[0])
|
||||
all_score.append(scores[0])
|
||||
|
||||
return np.array(all_key), np.array(all_score)
|
||||
|
||||
|
||||
def bbox_xyxy2cs(bbox: np.ndarray, padding: float = 1.0) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Transform the bbox format from (x,y,w,h) into (center, scale)
|
||||
|
||||
Args:
|
||||
bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted
|
||||
as (left, top, right, bottom)
|
||||
padding (float): BBox padding factor that will be multilied to scale.
|
||||
Default: 1.0
|
||||
|
||||
Returns:
|
||||
tuple: A tuple containing center and scale.
|
||||
- np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or
|
||||
(n, 2)
|
||||
- np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or
|
||||
(n, 2)
|
||||
"""
|
||||
# convert single bbox from (4, ) to (1, 4)
|
||||
dim = bbox.ndim
|
||||
if dim == 1:
|
||||
bbox = bbox[None, :]
|
||||
|
||||
# get bbox center and scale
|
||||
x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3])
|
||||
center = np.hstack([x1 + x2, y1 + y2]) * 0.5
|
||||
scale = np.hstack([x2 - x1, y2 - y1]) * padding
|
||||
|
||||
if dim == 1:
|
||||
center = center[0]
|
||||
scale = scale[0]
|
||||
|
||||
return center, scale
|
||||
|
||||
|
||||
def _fix_aspect_ratio(bbox_scale: np.ndarray, aspect_ratio: float) -> np.ndarray:
|
||||
"""Extend the scale to match the given aspect ratio.
|
||||
|
||||
Args:
|
||||
scale (np.ndarray): The image scale (w, h) in shape (2, )
|
||||
aspect_ratio (float): The ratio of ``w/h``
|
||||
|
||||
Returns:
|
||||
np.ndarray: The reshaped image scale in (2, )
|
||||
"""
|
||||
w, h = np.hsplit(bbox_scale, [1])
|
||||
bbox_scale = np.where(w > h * aspect_ratio, np.hstack([w, w / aspect_ratio]), np.hstack([h * aspect_ratio, h]))
|
||||
return bbox_scale
|
||||
|
||||
|
||||
def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray:
|
||||
"""Rotate a point by an angle.
|
||||
|
||||
Args:
|
||||
pt (np.ndarray): 2D point coordinates (x, y) in shape (2, )
|
||||
angle_rad (float): rotation angle in radian
|
||||
|
||||
Returns:
|
||||
np.ndarray: Rotated point in shape (2, )
|
||||
"""
|
||||
sn, cs = np.sin(angle_rad), np.cos(angle_rad)
|
||||
rot_mat = np.array([[cs, -sn], [sn, cs]])
|
||||
return rot_mat @ pt
|
||||
|
||||
|
||||
def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray:
|
||||
"""To calculate the affine matrix, three pairs of points are required. This
|
||||
function is used to get the 3rd point, given 2D points a & b.
|
||||
|
||||
The 3rd point is defined by rotating vector `a - b` by 90 degrees
|
||||
anticlockwise, using b as the rotation center.
|
||||
|
||||
Args:
|
||||
a (np.ndarray): The 1st point (x,y) in shape (2, )
|
||||
b (np.ndarray): The 2nd point (x,y) in shape (2, )
|
||||
|
||||
Returns:
|
||||
np.ndarray: The 3rd point.
|
||||
"""
|
||||
direction = a - b
|
||||
c = b + np.r_[-direction[1], direction[0]]
|
||||
return c
|
||||
|
||||
|
||||
def get_warp_matrix(
|
||||
center: np.ndarray,
|
||||
scale: np.ndarray,
|
||||
rot: float,
|
||||
output_size: Tuple[int, int],
|
||||
shift: Tuple[float, float] = (0.0, 0.0),
|
||||
inv: bool = False,
|
||||
) -> np.ndarray:
|
||||
"""Calculate the affine transformation matrix that can warp the bbox area
|
||||
in the input image to the output size.
|
||||
|
||||
Args:
|
||||
center (np.ndarray[2, ]): Center of the bounding box (x, y).
|
||||
scale (np.ndarray[2, ]): Scale of the bounding box
|
||||
wrt [width, height].
|
||||
rot (float): Rotation angle (degree).
|
||||
output_size (np.ndarray[2, ] | list(2,)): Size of the
|
||||
destination heatmaps.
|
||||
shift (0-100%): Shift translation ratio wrt the width/height.
|
||||
Default (0., 0.).
|
||||
inv (bool): Option to inverse the affine transform direction.
|
||||
(inv=False: src->dst or inv=True: dst->src)
|
||||
|
||||
Returns:
|
||||
np.ndarray: A 2x3 transformation matrix
|
||||
"""
|
||||
shift = np.array(shift)
|
||||
src_w = scale[0]
|
||||
dst_w = output_size[0]
|
||||
dst_h = output_size[1]
|
||||
|
||||
# compute transformation matrix
|
||||
rot_rad = np.deg2rad(rot)
|
||||
src_dir = _rotate_point(np.array([0.0, src_w * -0.5]), rot_rad)
|
||||
dst_dir = np.array([0.0, dst_w * -0.5])
|
||||
|
||||
# get four corners of the src rectangle in the original image
|
||||
src = np.zeros((3, 2), dtype=np.float32)
|
||||
src[0, :] = center + scale * shift
|
||||
src[1, :] = center + src_dir + scale * shift
|
||||
src[2, :] = _get_3rd_point(src[0, :], src[1, :])
|
||||
|
||||
# get four corners of the dst rectangle in the input image
|
||||
dst = np.zeros((3, 2), dtype=np.float32)
|
||||
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
|
||||
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
|
||||
dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
|
||||
|
||||
if inv:
|
        warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    return warp_mat


def top_down_affine(
    input_size: dict, bbox_scale: dict, bbox_center: dict, img: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    """Get the bbox image as the model input by affine transform.

    Args:
        input_size (dict): The input size of the model.
        bbox_scale (dict): The bbox scale of the img.
        bbox_center (dict): The bbox center of the img.
        img (np.ndarray): The original image.

    Returns:
        tuple: A tuple containing the transformed image and the bbox scale.
        - np.ndarray[float32]: img after affine transform.
        - np.ndarray[float32]: bbox scale after affine transform.
    """
    w, h = input_size
    warp_size = (int(w), int(h))

    # reshape bbox to fixed aspect ratio
    bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h)

    # get the affine matrix
    center = bbox_center
    scale = bbox_scale
    rot = 0
    warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h))

    # do affine transform
    img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR)

    return img, bbox_scale


def get_simcc_maximum(simcc_x: np.ndarray, simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Get maximum response location and value from simcc representations.

    Note:
        instance number: N
        num_keypoints: K
        heatmap height: H
        heatmap width: W

    Args:
        simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx)
        simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy)

    Returns:
        tuple:
        - locs (np.ndarray): locations of maximum heatmap responses in shape
          (K, 2) or (N, K, 2)
        - vals (np.ndarray): values of maximum heatmap responses in shape
          (K,) or (N, K)
    """
    N, K, Wx = simcc_x.shape
    simcc_x = simcc_x.reshape(N * K, -1)
    simcc_y = simcc_y.reshape(N * K, -1)

    # get maximum value locations
    x_locs = np.argmax(simcc_x, axis=1)
    y_locs = np.argmax(simcc_y, axis=1)
    locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32)
    max_val_x = np.amax(simcc_x, axis=1)
    max_val_y = np.amax(simcc_y, axis=1)

    # use the lower of the x- and y-axis maxima as the keypoint confidence
    mask = max_val_x > max_val_y
    max_val_x[mask] = max_val_y[mask]
    vals = max_val_x
    locs[vals <= 0.0] = -1

    # reshape
    locs = locs.reshape(N, K, 2)
    vals = vals.reshape(N, K)

    return locs, vals


def decode(simcc_x: np.ndarray, simcc_y: np.ndarray, simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]:
    """Decode keypoint locations and scores from SimCC representations.

    Args:
        simcc_x (np.ndarray[K, Wx]): model predicted simcc in x.
        simcc_y (np.ndarray[K, Wy]): model predicted simcc in y.
        simcc_split_ratio (int): The split ratio of simcc.

    Returns:
        tuple: A tuple containing keypoints and scores.
        - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2)
        - np.ndarray[float32]: scores in shape (K,) or (n, K)
    """
    keypoints, scores = get_simcc_maximum(simcc_x, simcc_y)
    keypoints /= simcc_split_ratio

    return keypoints, scores


def inference_pose(session, out_bbox, oriImg):
    h, w = session.get_inputs()[0].shape[2:]
    model_input_size = (w, h)
    resized_img, center, scale = preprocess(oriImg, out_bbox, model_input_size)
    outputs = inference(session, resized_img)
    keypoints, scores = postprocess(outputs, model_input_size, center, scale)

    return keypoints, scores
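For reference, the SimCC decode above is just a per-axis argmax followed by a rescale with the split ratio. A minimal sketch of calling decode on dummy logits; the shapes and the split ratio here are illustrative assumptions, not values read from the model:

import numpy as np

# N=1 person, K=17 keypoints; bin counts are assumed for illustration
simcc_x = np.random.rand(1, 17, 768).astype(np.float32)
simcc_y = np.random.rand(1, 17, 1024).astype(np.float32)

keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio=2.0)
print(keypoints.shape, scores.shape)  # (1, 17, 2) and (1, 17)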
invokeai/backend/image_util/dw_openpose/utils.py (new file, 155 lines)
@ -0,0 +1,155 @@
# Code from the original DWPose Implementation: https://github.com/IDEA-Research/DWPose

import math

import cv2
import matplotlib
import numpy as np

eps = 0.01


def draw_bodypose(canvas, candidate, subset):
    H, W, C = canvas.shape
    candidate = np.array(candidate)
    subset = np.array(subset)

    stickwidth = 4

    # 1-based joint index pairs for each limb
    limbSeq = [
        [2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
        [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
        [1, 16], [16, 18], [3, 17], [6, 18],
    ]

    colors = [
        [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
        [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],
        [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],
        [255, 0, 255], [255, 0, 170], [255, 0, 85],
    ]

    # draw limbs as filled ellipses between connected joints
    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            if -1 in index:
                continue
            Y = candidate[index.astype(int), 0] * float(W)
            X = candidate[index.astype(int), 1] * float(H)
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
            cv2.fillConvexPoly(canvas, polygon, colors[i])

    canvas = (canvas * 0.6).astype(np.uint8)

    # draw joints as filled circles
    for i in range(18):
        for n in range(len(subset)):
            index = int(subset[n][i])
            if index == -1:
                continue
            x, y = candidate[index][0:2]
            x = int(x * W)
            y = int(y * H)
            cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)

    return canvas


def draw_handpose(canvas, all_hand_peaks):
    H, W, C = canvas.shape

    edges = [
        [0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8],
        [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15],
        [15, 16], [0, 17], [17, 18], [18, 19], [19, 20],
    ]

    for peaks in all_hand_peaks:
        peaks = np.array(peaks)

        for ie, e in enumerate(edges):
            x1, y1 = peaks[e[0]]
            x2, y2 = peaks[e[1]]
            x1 = int(x1 * W)
            y1 = int(y1 * H)
            x2 = int(x2 * W)
            y2 = int(y2 * H)
            if x1 > eps and y1 > eps and x2 > eps and y2 > eps:
                cv2.line(
                    canvas,
                    (x1, y1),
                    (x2, y2),
                    matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255,
                    thickness=2,
                )

        for _, keypoint in enumerate(peaks):
            x, y = keypoint
            x = int(x * W)
            y = int(y * H)
            if x > eps and y > eps:
                cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
    return canvas


def draw_facepose(canvas, all_lmks):
    H, W, C = canvas.shape
    for lmks in all_lmks:
        lmks = np.array(lmks)
        for lmk in lmks:
            x, y = lmk
            x = int(x * W)
            y = int(y * H)
            if x > eps and y > eps:
                cv2.circle(canvas, (x, y), 3, (255, 255, 255), thickness=-1)
    return canvas
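The drawing helpers above expect keypoints normalized to [0, 1] relative to the canvas; the hand and face helpers skip points at or below eps, and the body helper skips joints indexed -1. A toy sketch of rendering a body pose onto a blank canvas (the candidate and subset values are synthetic, for illustration only):

import numpy as np

canvas = np.zeros((512, 512, 3), dtype=np.uint8)
candidate = np.random.rand(18, 2)  # 18 joints, normalized (x, y)
subset = np.arange(18, dtype=float).reshape(1, 18)  # one person, joint i -> candidate row i
canvas = draw_bodypose(canvas, candidate, subset)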
invokeai/backend/image_util/dw_openpose/wholebody.py (new file, 67 lines)
@ -0,0 +1,67 @@
# Code from the original DWPose Implementation: https://github.com/IDEA-Research/DWPose
# Modified pathing to suit Invoke

import pathlib

import numpy as np
import onnxruntime as ort

from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.util import download_with_progress_bar

from .onnxdet import inference_detector
from .onnxpose import inference_pose

DWPOSE_MODELS = {
    "yolox_l.onnx": {
        "local": "any/annotators/dwpose/yolox_l.onnx",
        "url": "https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx?download=true",
    },
    "dw-ll_ucoco_384.onnx": {
        "local": "any/annotators/dwpose/dw-ll_ucoco_384.onnx",
        "url": "https://huggingface.co/yzd-v/DWPose/resolve/main/dw-ll_ucoco_384.onnx?download=true",
    },
}

config = InvokeAIAppConfig.get_config()


class Wholebody:
    def __init__(self):
        device = choose_torch_device()

        providers = ["CUDAExecutionProvider"] if device == "cuda" else ["CPUExecutionProvider"]

        DET_MODEL_PATH = pathlib.Path(config.models_path / DWPOSE_MODELS["yolox_l.onnx"]["local"])
        if not DET_MODEL_PATH.exists():
            download_with_progress_bar(DWPOSE_MODELS["yolox_l.onnx"]["url"], DET_MODEL_PATH)

        POSE_MODEL_PATH = pathlib.Path(config.models_path / DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["local"])
        if not POSE_MODEL_PATH.exists():
            download_with_progress_bar(DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["url"], POSE_MODEL_PATH)

        onnx_det = DET_MODEL_PATH
        onnx_pose = POSE_MODEL_PATH

        self.session_det = ort.InferenceSession(path_or_bytes=onnx_det, providers=providers)
        self.session_pose = ort.InferenceSession(path_or_bytes=onnx_pose, providers=providers)

    def __call__(self, oriImg):
        det_result = inference_detector(self.session_det, oriImg)
        keypoints, scores = inference_pose(self.session_pose, det_result, oriImg)

        keypoints_info = np.concatenate((keypoints, scores[..., None]), axis=-1)
        # compute neck joint as the midpoint of the shoulders (COCO indices 5 and 6)
        neck = np.mean(keypoints_info[:, [5, 6]], axis=1)
        # neck score when visualizing pred
        neck[:, 2:4] = np.logical_and(keypoints_info[:, 5, 2:4] > 0.3, keypoints_info[:, 6, 2:4] > 0.3).astype(int)
        new_keypoints_info = np.insert(keypoints_info, 17, neck, axis=1)
        # remap from MMPose to OpenPose keypoint ordering
        mmpose_idx = [17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3]
        openpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17]
        new_keypoints_info[:, openpose_idx] = new_keypoints_info[:, mmpose_idx]
        keypoints_info = new_keypoints_info

        keypoints, scores = keypoints_info[..., :2], keypoints_info[..., 2]

        return keypoints, scores
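A hedged usage sketch: running the detector-plus-pose pipeline on a BGR image. This assumes an InvokeAI environment where config.models_path resolves, so the ONNX models can be found or downloaded; the input path is hypothetical, and the keypoint count in the comment is inferred from the neck insertion above:

import cv2

detector = Wholebody()
image = cv2.imread("person.jpg")  # BGR, as produced by OpenCV
keypoints, scores = detector(image)
# roughly (num_people, 134, 2) and (num_people, 134) after the neck joint is inserted
print(keypoints.shape, scores.shape)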
@ -7,10 +7,10 @@ import cv2
import numpy as np
import numpy.typing as npt
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from cv2.typing import MatLike
from tqdm import tqdm

from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
from invokeai.backend.util.devices import choose_torch_device

"""
@ -287,6 +287,14 @@ class ModelCache(object):
|
||||
if torch.device(source_device).type == torch.device(target_device).type:
|
||||
return
|
||||
|
||||
if target_device.type == "cuda":
|
||||
vram_device = (
|
||||
target_device if target_device.index is not None else torch.device(str(target_device), index=0)
|
||||
)
|
||||
free_mem, _ = torch.cuda.mem_get_info(torch.device(vram_device))
|
||||
if cache_entry.size > free_mem:
|
||||
raise torch.cuda.OutOfMemoryError
|
||||
|
||||
start_model_to_time = time.time()
|
||||
snapshot_before = self._capture_memory_snapshot()
|
||||
cache_entry.model.to(target_device)
|
||||
@ -356,6 +364,10 @@ class ModelCache(object):
|
||||
self.cache.logger.debug(f"Locking {self.key} in {self.cache.execution_device}")
|
||||
self.cache._print_cuda_stats()
|
||||
|
||||
except torch.cuda.OutOfMemoryError:
|
||||
self.cache.logger.warning("Out of GPU memory encountered.")
|
||||
self.cache_entry.unlock()
|
||||
raise
|
||||
except Exception:
|
||||
self.cache_entry.unlock()
|
||||
raise
|
||||
@ -524,7 +536,6 @@ class ModelCache(object):
|
||||
break
|
||||
if not cache_entry.locked and cache_entry.loaded:
|
||||
self._move_model_to_device(model_key, self.storage_device)
|
||||
|
||||
vram_in_use = torch.cuda.memory_allocated()
|
||||
self.logger.debug(f"{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB")
|
||||
|
||||
|
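The first hunk above adds a pre-flight check: before moving a model onto a CUDA device, compare the model's size against the free VRAM reported by the driver and raise immediately, rather than failing partway through the copy. The same check in isolation (the function name is illustrative, not Invoke's API):

import torch

def fits_in_vram(model_size_bytes: int, device: torch.device) -> bool:
    # mem_get_info needs an explicit device index; a bare "cuda" defaults to index 0
    if device.index is None:
        device = torch.device(device.type, index=0)
    free_mem, _total = torch.cuda.mem_get_info(device)
    return model_size_bytes <= free_mem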
@ -1,10 +1,11 @@
from __future__ import annotations

from contextlib import contextmanager
from typing import List, Union
from typing import Callable, List, Union

import torch.nn as nn
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel


def _conv_forward_asymmetric(self, input, weight, bias):
@ -26,70 +27,51 @@ def _conv_forward_asymmetric(self, input, weight, bias):

@contextmanager
def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]):
    # Callable: (input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor
    to_restore: list[tuple[nn.Conv2d | nn.ConvTranspose2d, Callable]] = []
    try:
        to_restore = []

        # Hard coded to skip down block layers, allowing for seamless tiling at the expense of prompt adherence
        skipped_layers = 1
        for m_name, m in model.named_modules():
            if isinstance(model, UNet2DConditionModel):
                if ".attentions." in m_name:
            if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                continue

            if isinstance(model, UNet2DConditionModel) and m_name.startswith("down_blocks.") and ".resnets." in m_name:
                # down_blocks.1.resnets.1.conv1
                _, block_num, _, resnet_num, submodule_name = m_name.split(".")
                block_num = int(block_num)
                resnet_num = int(resnet_num)

                if block_num >= len(model.down_blocks) - skipped_layers:
                    continue

                if ".resnets." in m_name:
                    if ".conv2" in m_name:
                        continue
                    if ".conv_shortcut" in m_name:
                        continue

            """
            if isinstance(model, UNet2DConditionModel):
                if False and ".upsamplers." in m_name:
                # Skip the second resnet (could be configurable)
                if resnet_num > 0:
                    continue

                if False and ".downsamplers." in m_name:
                # Skip Conv2d layers (could be configurable)
                if submodule_name == "conv2":
                    continue

                if True and ".resnets." in m_name:
                    if True and ".conv1" in m_name:
                        if False and "down_blocks" in m_name:
                            continue
                        if False and "mid_block" in m_name:
                            continue
                        if False and "up_blocks" in m_name:
                            continue
                        m.asymmetric_padding_mode = {}
                        m.asymmetric_padding = {}
                        m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
                        m.asymmetric_padding["x"] = (m._reversed_padding_repeated_twice[0], m._reversed_padding_repeated_twice[1], 0, 0)
                        m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
                        m.asymmetric_padding["y"] = (0, 0, m._reversed_padding_repeated_twice[2], m._reversed_padding_repeated_twice[3])

                    if True and ".conv2" in m_name:
                        continue

                    if True and ".conv_shortcut" in m_name:
                        continue

                if True and ".attentions." in m_name:
                    continue

                if False and m_name in ["conv_in", "conv_out"]:
                    continue
            """

            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                m.asymmetric_padding_mode = {}
                m.asymmetric_padding = {}
                m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
                m.asymmetric_padding["x"] = (m._reversed_padding_repeated_twice[0], m._reversed_padding_repeated_twice[1], 0, 0)
                m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
                m.asymmetric_padding["y"] = (0, 0, m._reversed_padding_repeated_twice[2], m._reversed_padding_repeated_twice[3])

                to_restore.append((m, m._conv_forward))
                m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
            to_restore.append((m, m._conv_forward))
            m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)

        yield
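The rewritten set_seamless keeps the same underlying trick: each qualifying Conv2d has its _conv_forward swapped for a version that pads circularly along the requested axes before convolving, and the originals saved in to_restore are meant to be put back when the context exits. A standalone sketch of that monkeypatch, independent of Invoke's module-filtering logic (the helper name is illustrative, and integer padding is assumed):

import torch.nn as nn
import torch.nn.functional as F

def make_seamless(conv: nn.Conv2d, axes: set) -> None:
    pad = conv._reversed_padding_repeated_twice  # (x_left, x_right, y_top, y_bottom)
    x_mode = "circular" if "x" in axes else "constant"
    y_mode = "circular" if "y" in axes else "constant"

    def forward(self, input, weight, bias):
        out = F.pad(input, (pad[0], pad[1], 0, 0), mode=x_mode)
        out = F.pad(out, (0, 0, pad[2], pad[3]), mode=y_mode)
        # all padding was applied above, so convolve with padding=0
        return F.conv2d(out, weight, bias, self.stride, 0, self.dilation, self.groups)

    conv._conv_forward = forward.__get__(conv, nn.Conv2d)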
@ -52,6 +52,7 @@
    "@chakra-ui/react-use-size": "^2.1.0",
    "@dagrejs/graphlib": "^2.1.13",
    "@dnd-kit/core": "^6.1.0",
    "@dnd-kit/sortable": "^8.0.0",
    "@dnd-kit/utilities": "^3.2.2",
    "@fontsource-variable/inter": "^5.0.16",
    "@invoke-ai/ui-library": "^0.0.18",
invokeai/frontend/web/pnpm-lock.yaml (generated, 15 lines)
@ -22,6 +22,9 @@ dependencies:
  '@dnd-kit/core':
    specifier: ^6.1.0
    version: 6.1.0(react-dom@18.2.0)(react@18.2.0)
  '@dnd-kit/sortable':
    specifier: ^8.0.0
    version: 8.0.0(@dnd-kit/core@6.1.0)(react@18.2.0)
  '@dnd-kit/utilities':
    specifier: ^3.2.2
    version: 3.2.2(react@18.2.0)
@ -2884,6 +2887,18 @@ packages:
      tslib: 2.6.2
    dev: false

  /@dnd-kit/sortable@8.0.0(@dnd-kit/core@6.1.0)(react@18.2.0):
    resolution: {integrity: sha512-U3jk5ebVXe1Lr7c2wU7SBZjcWdQP+j7peHJfCspnA81enlu88Mgd7CC8Q+pub9ubP7eKVETzJW+IBAhsqbSu/g==}
    peerDependencies:
      '@dnd-kit/core': ^6.1.0
      react: '>=16.8.0'
    dependencies:
      '@dnd-kit/core': 6.1.0(react-dom@18.2.0)(react@18.2.0)
      '@dnd-kit/utilities': 3.2.2(react@18.2.0)
      react: 18.2.0
      tslib: 2.6.2
    dev: false

  /@dnd-kit/utilities@3.2.2(react@18.2.0):
    resolution: {integrity: sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==}
    peerDependencies:
@ -56,7 +56,7 @@
    "nodeEditor": "Knoten Editor",
    "statusMergingModels": "Modelle zusammenführen",
    "ipAdapter": "IP Adapter",
    "controlAdapter": "Control Adapter",
    "controlAdapter": "Control-Adapter",
    "auto": "Automatisch",
    "controlNet": "ControlNet",
    "imageFailedToLoad": "Kann Bild nicht laden",
@ -69,19 +69,19 @@
    "random": "Zufall",
    "batch": "Stapel-Manager",
    "advanced": "Erweitert",
    "unifiedCanvas": "Einheitliche Leinwand",
    "unifiedCanvas": "Leinwand",
    "openInNewTab": "In einem neuem Tab öffnen",
    "statusProcessing": "wird bearbeitet",
    "linear": "Linear",
    "imagePrompt": "Bild Prompt",
    "checkpoint": "Checkpoint",
    "inpaint": "inpaint",
    "inpaint": "Inpaint",
    "simple": "Einfach",
    "template": "Vorlage",
    "outputs": "Ausgabe",
    "data": "Daten",
    "safetensors": "Safetensors",
    "outpaint": "Ausmalen",
    "safetensors": "Safe-Tensors",
    "outpaint": "Outpaint (Außen ausmalen)",
    "details": "Details",
    "format": "Format",
    "unknown": "Unbekannt",
@ -110,24 +110,25 @@
    "nextPage": "Nächste Seite",
    "unknownError": "Unbekannter Fehler",
    "unsaved": "Nicht gespeichert",
    "aboutDesc": "Verwenden Sie Invoke für die Arbeit? Dann siehe hier:",
    "aboutDesc": "Verwenden Sie Invoke für die Arbeit? Siehe hier:",
    "localSystem": "Lokales System",
    "orderBy": "Ordnen nach",
    "saveAs": "Speicher als",
    "saveAs": "Speichern als",
    "updated": "Aktualisiert",
    "copy": "Kopieren",
    "aboutHeading": "Nutzen Sie Ihre kreative Energie"
    "aboutHeading": "Nutzen Sie Ihre kreative Energie",
    "toResolve": "Lösen"
  },
  "gallery": {
    "generations": "Erzeugungen",
    "showGenerations": "Zeige Erzeugnisse",
    "showGenerations": "Zeige Ergebnisse",
    "uploads": "Uploads",
    "showUploads": "Zeige Uploads",
    "galleryImageSize": "Bildgröße",
    "galleryImageResetSize": "Größe zurücksetzen",
    "gallerySettings": "Galerie-Einstellungen",
    "maintainAspectRatio": "Seitenverhältnis beibehalten",
    "autoSwitchNewImages": "Automatisch zu neuen Bildern wechseln",
    "autoSwitchNewImages": "Auto-Wechsel zu neuen Bildern",
    "singleColumnLayout": "Einspaltiges Layout",
    "allImagesLoaded": "Alle Bilder geladen",
    "loadMore": "Mehr laden",
@ -150,9 +151,9 @@
    "problemDeletingImagesDesc": "Ein oder mehrere Bilder konnten nicht gelöscht werden",
    "starImage": "Bild markieren",
    "assets": "Ressourcen",
    "unstarImage": "Markierung Entfernen",
    "unstarImage": "Markierung entfernen",
    "image": "Bild",
    "deleteSelection": "Lösche markierte",
    "deleteSelection": "Lösche Auswahl",
    "dropToUpload": "$t(gallery.drop) zum hochladen",
    "dropOrUpload": "$t(gallery.drop) oder hochladen",
    "drop": "Ablegen",
@ -161,16 +162,16 @@
  "hotkeys": {
    "keyboardShortcuts": "Tastenkürzel",
    "appHotkeys": "App-Tastenkombinationen",
    "generalHotkeys": "Allgemeine Tastenkürzel",
    "galleryHotkeys": "Galerie Tastenkürzel",
    "unifiedCanvasHotkeys": "Unified Canvas Tastenkürzel",
    "generalHotkeys": "Allgemein",
    "galleryHotkeys": "Galerie",
    "unifiedCanvasHotkeys": "Leinwand",
    "invoke": {
      "desc": "Ein Bild erzeugen",
      "title": "Invoke"
    },
    "cancel": {
      "title": "Abbrechen",
      "desc": "Bilderzeugung abbrechen"
      "desc": "Aktuelle Bilderzeugung abbrechen"
    },
    "focusPrompt": {
      "title": "Fokussiere Prompt",
@ -226,7 +227,7 @@
    },
    "sendToImageToImage": {
      "title": "An Bild zu Bild senden",
      "desc": "Aktuelles Bild an Bild zu Bild senden"
      "desc": "Aktuelles Bild an Bild-zu-Bild senden"
    },
    "deleteImage": {
      "title": "Bild löschen",
@ -258,7 +259,7 @@
    },
    "selectEraser": {
      "title": "Radiergummi auswählen",
      "desc": "Wählt den Radiergummi für die Leinwand aus"
      "desc": "Wählt den Radiergummi aus"
    },
    "decreaseBrushSize": {
      "title": "Pinselgröße verkleinern",
@ -330,7 +331,7 @@
    },
    "downloadImage": {
      "title": "Bild herunterladen",
      "desc": "Aktuelle Leinwand herunterladen"
      "desc": "Aktuelles Bild herunterladen"
    },
    "undoStroke": {
      "title": "Pinselstrich rückgängig machen",
@ -356,7 +357,7 @@
      "title": "Staging-Bild akzeptieren",
      "desc": "Akzeptieren Sie das aktuelle Bild des Staging-Bereichs"
    },
    "nodesHotkeys": "Knoten Tastenkürzel",
    "nodesHotkeys": "Knoten",
    "addNodes": {
      "title": "Knotenpunkt hinzufügen",
      "desc": "Öffnet das Menü zum Hinzufügen von Knoten"
@ -399,7 +400,7 @@
    "vaeLocation": "VAE Ort",
    "vaeLocationValidationMsg": "Pfad zum Speicherort Ihres VAE.",
    "width": "Breite",
    "widthValidationMsg": "Standardbreite Ihres Models.",
    "widthValidationMsg": "Standardbreite Ihres Modells.",
    "height": "Höhe",
    "heightValidationMsg": "Standardbhöhe Ihres Models.",
    "addModel": "Modell hinzufügen",
@ -501,7 +502,7 @@
    "quickAdd": "Schnell hinzufügen",
    "simpleModelDesc": "Geben Sie einen Pfad zu einem lokalen Diffusers-Modell, einem lokalen Checkpoint-/Safetensors-Modell, einer HuggingFace-Repo-ID oder einer Checkpoint-/Diffusers-Modell-URL an.",
    "modelDeleted": "Modell gelöscht",
    "inpainting": "v1 Inpainting",
    "inpainting": "V1-Inpainting",
    "modelUpdateFailed": "Modellaktualisierung fehlgeschlagen",
    "useCustomConfig": "Benutzerdefinierte Konfiguration verwenden",
    "settings": "Einstellungen",
@ -518,7 +519,7 @@
    "interpolationType": "Interpolationstyp",
    "oliveModels": "Olives",
    "variant": "Variante",
    "loraModels": "LoRAs",
    "loraModels": "\"LoRAs\"",
    "modelDeleteFailed": "Modell konnte nicht gelöscht werden",
    "mergedModelName": "Zusammengeführter Modellname",
    "checkpointOrSafetensors": "$t(common.checkpoint) / $t(common.safetensors)",
@ -564,8 +565,8 @@
    "img2imgStrength": "Bild-zu-Bild-Stärke",
    "toggleLoopback": "Loopback umschalten",
    "sendTo": "Senden an",
    "sendToImg2Img": "Senden an Bild zu Bild",
    "sendToUnifiedCanvas": "Senden an Unified Canvas",
    "sendToImg2Img": "Senden an Bild-zu-Bild",
    "sendToUnifiedCanvas": "Senden an Leinwand",
    "copyImageToLink": "Bild-Link kopieren",
    "downloadImage": "Bild herunterladen",
    "openInViewer": "Im Viewer öffnen",
@ -590,10 +591,18 @@
    "general": "Allgemein",
    "hiresStrength": "High Res Stärke",
    "hidePreview": "Verstecke Vorschau",
    "showPreview": "Zeige Vorschau"
    "showPreview": "Zeige Vorschau",
    "aspect": "Seitenverhältnis",
    "aspectRatio": "Seitenverhältnis",
    "scheduler": "Planer",
    "aspectRatioFree": "Frei",
    "setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (kann zu groß sein)",
    "lockAspectRatio": "Seitenverhältnis sperren",
    "swapDimensions": "Seitenverhältnis umkehren",
    "setToOptimalSize": "Optimiere Größe für Modell"
  },
  "settings": {
    "displayInProgress": "Bilder in Bearbeitung anzeigen",
    "displayInProgress": "Zwischenbilder anzeigen",
    "saveSteps": "Speichern der Bilder alle n Schritte",
    "confirmOnDelete": "Bestätigen beim Löschen",
    "displayHelpIcons": "Hilfesymbole anzeigen",
@ -603,7 +612,37 @@
    "resetWebUIDesc2": "Wenn die Bilder nicht in der Galerie angezeigt werden oder etwas anderes nicht funktioniert, versuchen Sie bitte, die Einstellungen zurückzusetzen, bevor Sie einen Fehler auf GitHub melden.",
    "resetComplete": "Die Web-Oberfläche wurde zurückgesetzt.",
    "models": "Modelle",
    "useSlidersForAll": "Schieberegler für alle Optionen verwenden"
    "useSlidersForAll": "Schieberegler für alle Optionen verwenden",
    "showAdvancedOptions": "Erweiterte Optionen anzeigen",
    "alternateCanvasLayout": "Alternatives Leinwand-Layout",
    "clearIntermediatesDesc1": "Das Löschen der Zwischenbilder setzt Leinwand und ControlNet zurück.",
    "favoriteSchedulers": "Lieblings-Planer",
    "favoriteSchedulersPlaceholder": "Keine Planer favorisiert",
    "generation": "Erzeugung",
    "enableInformationalPopovers": "Info-Popouts anzeigen",
    "shouldLogToConsole": "Konsole loggen",
    "showProgressInViewer": "Zwischenbilder im Viewer anzeigen",
    "clearIntermediatesDesc3": "Ihre Bilder werden nicht gelöscht.",
    "clearIntermediatesWithCount_one": "Lösche {{count}} Zwischenbilder",
    "clearIntermediatesWithCount_other": "Lösche {{count}} Zwischenbilder",
    "reloadingIn": "Neuladen in",
    "enableNodesEditor": "Nodes Editor aktivieren",
    "autoChangeDimensions": "Breite/Höhe auf Modellstandard setzen",
    "experimental": "Experimentell",
    "intermediatesCleared_one": "{{count}} Zwischenbilder gelöscht",
    "intermediatesCleared_other": "{{count}} Zwischenbilder gelöscht",
    "enableInvisibleWatermark": "Unsichtbares Wasserzeichen aktivieren",
    "general": "Allgemein",
    "consoleLogLevel": "Protokollierungsstufe",
    "clearIntermediatesDisabled": "Warteschlange muss leer sein, um Zwischenbilder zu löschen",
    "developer": "Entwickler",
    "antialiasProgressImages": "Zwischenbilder mit Anti-Alias",
    "beta": "Beta",
    "ui": "Benutzeroberfläche",
    "clearIntermediatesDesc2": "Zwischenbilder sind Nebenprodukte der Erstellung. Sie zu löschen macht Festplattenspeicher frei.",
    "clearIntermediates": "Zwischenbilder löschen",
    "intermediatesClearedFailed": "Problem beim Löschen der Zwischenbilder",
    "enableNSFWChecker": "Auf unangemessene Inhalte prüfen"
  },
  "toast": {
    "tempFoldersEmptied": "Temp-Ordner geleert",
@ -617,7 +656,7 @@
    "imageSavedToGallery": "Bild in die Galerie gespeichert",
    "canvasMerged": "Leinwand zusammengeführt",
    "sentToImageToImage": "Gesendet an Bild zu Bild",
    "sentToUnifiedCanvas": "Gesendet an Unified Canvas",
    "sentToUnifiedCanvas": "Gesendet an Leinwand",
    "parametersSet": "Parameter festlegen",
    "parametersNotSet": "Parameter nicht festgelegt",
    "parametersNotSetDesc": "Keine Metadaten für dieses Bild gefunden.",
@ -634,7 +673,23 @@
    "metadataLoadFailed": "Metadaten konnten nicht geladen werden",
    "initialImageSet": "Ausgangsbild festgelegt",
    "initialImageNotSet": "Ausgangsbild nicht festgelegt",
    "initialImageNotSetDesc": "Ausgangsbild konnte nicht geladen werden"
    "initialImageNotSetDesc": "Ausgangsbild konnte nicht geladen werden",
    "setCanvasInitialImage": "Ausgangsbild setzen",
    "problemMergingCanvas": "Problem bei Verschmelzung der Leinwand",
    "canvasCopiedClipboard": "Leinwand in Zwischenablage kopiert",
    "canvasSentControlnetAssets": "Leinwand an ControlNet & Sammlung geschickt",
    "problemDownloadingCanvasDesc": "Kann Basis-Layer nicht exportieren",
    "canvasDownloaded": "Leinwand heruntergeladen",
    "problemSavingCanvasDesc": "Kann Basis-Layer nicht exportieren",
    "canvasSavedGallery": "Leinwand in Galerie gespeichert",
    "problemMergingCanvasDesc": "Kann Basis-Layer nicht exportieren",
    "problemSavingCanvas": "Problem beim Speichern der Leinwand",
    "problemCopyingCanvas": "Problem beim Kopieren der Leinwand",
    "problemCopyingCanvasDesc": "Kann Basis-Layer nicht exportieren",
    "problemDownloadingCanvas": "Problem beim Herunterladen der Leinwand",
    "setAsCanvasInitialImage": "Als Ausgangsbild gesetzt",
    "addedToBoard": "Dem Board hinzugefügt",
    "loadedWithWarnings": "Workflow mit Warnungen geladen"
  },
  "tooltip": {
    "feature": {
@ -646,8 +701,8 @@
      "upscale": "Verwenden Sie ESRGAN, um das Bild unmittelbar nach der Erzeugung zu vergrößern.",
      "faceCorrection": "Gesichtskorrektur mit GFPGAN oder Codeformer: Der Algorithmus erkennt Gesichter im Bild und korrigiert alle Fehler. Ein hoher Wert verändert das Bild stärker, was zu attraktiveren Gesichtern führt. Codeformer mit einer höheren Genauigkeit bewahrt das Originalbild auf Kosten einer stärkeren Gesichtskorrektur.",
      "imageToImage": "Bild zu Bild lädt ein beliebiges Bild als Ausgangsbild, aus dem dann zusammen mit dem Prompt ein neues Bild erzeugt wird. Je höher der Wert ist, desto stärker wird das Ergebnisbild verändert. Werte von 0,0 bis 1,0 sind möglich, der empfohlene Bereich ist .25-.75",
      "boundingBox": "Der Begrenzungsrahmen ist derselbe wie die Einstellungen für Breite und Höhe bei Text zu Bild oder Bild zu Bild. Es wird nur der Bereich innerhalb des Rahmens verarbeitet.",
      "seamCorrection": "Steuert die Behandlung von sichtbaren Übergängen, die zwischen den erzeugten Bildern auf der Leinwand auftreten.",
      "boundingBox": "Der Begrenzungsrahmen ist derselbe wie die Einstellungen für Breite und Höhe bei Text-zu-Bild oder Bild-zu-Bild. Es wird nur der Bereich innerhalb des Rahmens verarbeitet.",
      "seamCorrection": "Behandlung von sichtbaren Übergängen, die zwischen den erzeugten Bildern auftreten.",
      "infillAndScaling": "Verwalten Sie Infill-Methoden (für maskierte oder gelöschte Bereiche der Leinwand) und Skalierung (nützlich für kleine Begrenzungsrahmengrößen)."
    }
  },
@ -658,17 +713,17 @@
    "maskingOptions": "Maskierungsoptionen",
    "enableMask": "Maske aktivieren",
    "preserveMaskedArea": "Maskierten Bereich bewahren",
    "clearMask": "Maske löschen",
    "clearMask": "Maske löschen (Shift+C)",
    "brush": "Pinsel",
    "eraser": "Radierer",
    "fillBoundingBox": "Begrenzungsrahmen füllen",
    "eraseBoundingBox": "Begrenzungsrahmen löschen",
    "colorPicker": "Farbpipette",
    "colorPicker": "Pipette",
    "brushOptions": "Pinseloptionen",
    "brushSize": "Größe",
    "move": "Bewegen",
    "resetView": "Ansicht zurücksetzen",
    "mergeVisible": "Sichtbare Zusammenführen",
    "mergeVisible": "Sichtbare zusammenführen",
    "saveToGallery": "In Galerie speichern",
    "copyToClipboard": "In Zwischenablage kopieren",
    "downloadAsImage": "Als Bild herunterladen",
@ -682,15 +737,15 @@
    "darkenOutsideSelection": "Außerhalb der Auswahl verdunkeln",
    "autoSaveToGallery": "Automatisch in Galerie speichern",
    "saveBoxRegionOnly": "Nur Auswahlbox speichern",
    "limitStrokesToBox": "Striche auf Box beschränken",
    "showCanvasDebugInfo": "Zusätzliche Informationen zur Leinwand anzeigen",
    "limitStrokesToBox": "Striche auf Auswahl beschränken",
    "showCanvasDebugInfo": "Zusätzliche Informationen anzeigen",
    "clearCanvasHistory": "Leinwand-Verlauf löschen",
    "clearHistory": "Verlauf löschen",
    "clearCanvasHistoryMessage": "Wenn Sie den Verlauf der Leinwand löschen, bleibt die aktuelle Leinwand intakt, aber der Verlauf der Rückgängig- und Wiederherstellung wird unwiderruflich gelöscht.",
    "clearCanvasHistoryConfirm": "Sind Sie sicher, dass Sie den Verlauf der Leinwand löschen möchten?",
    "clearCanvasHistoryMessage": "Wenn Sie den Verlauf löschen, bleibt die aktuelle Leinwand intakt, aber der Verlauf der Rückgängig- und Wiederherstellung wird unwiderruflich gelöscht.",
    "clearCanvasHistoryConfirm": "Sind Sie sicher, dass Sie den Verlauf löschen möchten?",
    "emptyTempImageFolder": "Temp-Image Ordner leeren",
    "emptyFolder": "Leerer Ordner",
    "emptyTempImagesFolderMessage": "Wenn Sie den Ordner für temporäre Bilder leeren, wird auch der Unified Canvas vollständig zurückgesetzt. Dies umfasst den gesamten Verlauf der Rückgängig-/Wiederherstellungsvorgänge, die Bilder im Bereitstellungsbereich und die Leinwand-Basisebene.",
    "emptyTempImagesFolderMessage": "Wenn Sie den Ordner für temporäre Bilder leeren, wird die Leinwand zurückgesetzt. Dies umfasst den gesamten Verlauf der Rückgängig-/Wiederherstellungsvorgänge, die Bilder im Bereitstellungsbereich und die Leinwand-Basisebene.",
    "emptyTempImagesFolderConfirm": "Sind Sie sicher, dass Sie den temporären Ordner leeren wollen?",
    "activeLayer": "Aktive Ebene",
    "canvasScale": "Leinwand Maßstab",
@ -707,32 +762,32 @@
    "discardAll": "Alles verwerfen",
    "betaClear": "Löschen",
    "betaDarkenOutside": "Außen abdunkeln",
    "betaLimitToBox": "Begrenzung auf das Feld",
    "betaLimitToBox": "Auf Auswahl begrenzen",
    "betaPreserveMasked": "Maskiertes bewahren",
    "antialiasing": "Kantenglättung",
    "showResultsOn": "Zeige Ergebnisse (An)",
    "showResultsOff": "Zeige Ergebnisse (Aus)"
  },
  "accessibility": {
    "modelSelect": "Model Auswahl",
    "modelSelect": "Modell-Auswahl",
    "uploadImage": "Bild hochladen",
    "previousImage": "Voriges Bild",
    "previousImage": "Vorheriges Bild",
    "useThisParameter": "Benutze diesen Parameter",
    "copyMetadataJson": "Kopiere Metadaten JSON",
    "copyMetadataJson": "Kopiere JSON-Metadaten",
    "zoomIn": "Vergrößern",
    "rotateClockwise": "Im Uhrzeigersinn drehen",
    "flipHorizontally": "Horizontal drehen",
    "flipVertically": "Vertikal drehen",
    "modifyConfig": "Optionen einstellen",
    "toggleAutoscroll": "Auroscroll ein/ausschalten",
    "toggleLogViewer": "Log Betrachter ein/ausschalten",
    "showOptionsPanel": "Zeige Optionen",
    "toggleLogViewer": "Log-Betrachter ein/ausschalten",
    "showOptionsPanel": "Seitenpanel anzeigen",
    "reset": "Zurücksetzten",
    "nextImage": "Nächstes Bild",
    "zoomOut": "Verkleinern",
    "rotateCounterClockwise": "Gegen den Uhrzeigersinn verdrehen",
    "showGalleryPanel": "Galeriefenster anzeigen",
    "exitViewer": "Betrachten beenden",
    "rotateCounterClockwise": "Gegen den Uhrzeigersinn drehen",
    "showGalleryPanel": "Galerie-Panel anzeigen",
    "exitViewer": "Betrachter beenden",
    "menu": "Menü",
    "loadMore": "Mehr laden",
    "invokeProgressBar": "Invoke Fortschrittsanzeige",
@ -742,22 +797,22 @@
    "about": "Über"
  },
  "boards": {
    "autoAddBoard": "Automatisches Hinzufügen zum Ordner",
    "autoAddBoard": "Automatisches Hinzufügen zum Board",
    "topMessage": "Dieser Ordner enthält Bilder die in den folgenden Funktionen verwendet werden:",
    "move": "Bewegen",
    "menuItemAutoAdd": "Automatisches Hinzufügen zu diesem Ordner",
    "menuItemAutoAdd": "Auto-Hinzufügen zu diesem Ordner",
    "myBoard": "Meine Ordner",
    "searchBoard": "Ordner durchsuchen...",
    "noMatching": "Keine passenden Ordner",
    "selectBoard": "Ordner aussuchen",
    "cancel": "Abbrechen",
    "addBoard": "Ordner hinzufügen",
    "uncategorized": "Nicht kategorisiert",
    "addBoard": "Board hinzufügen",
    "uncategorized": "Ohne Kategorie",
    "downloadBoard": "Ordner runterladen",
    "changeBoard": "Ordner wechseln",
    "loading": "Laden...",
    "clearSearch": "Suche leeren",
    "bottomMessage": "Durch das Löschen dieses Ordners und seiner Bilder werden alle Funktionen zurückgesetzt, die sie derzeit verwenden.",
    "bottomMessage": "Löschen des Boards und seiner Bilder setzt alle Funktionen zurück, die sie gerade verwenden.",
    "deleteBoardOnly": "Nur Ordner löschen",
    "deleteBoard": "Löschen Ordner",
    "deleteBoardAndImages": "Löschen Ordner und Bilder",
@ -784,7 +839,7 @@
    "depthMidasDescription": "Tiefenmap erstellen mit Midas",
    "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))",
    "t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) ist aktiv, $t(common.controlNet) ist deaktiviert",
    "weight": "Breite",
    "weight": "Einfluss",
    "selectModel": "Wähle ein Modell",
    "depthMidas": "Tiefe (Midas)",
    "w": "W",
@ -809,14 +864,14 @@
    "controlAdapter_other": "Control Adapter",
    "colorMapTileSize": "Kachelgröße",
    "depthZoeDescription": "Tiefenmap erstellen mit Zoe",
    "setControlImageDimensions": "Setze Control Bild Auflösung auf Breite/Höhe",
    "setControlImageDimensions": "Setze Control-Bild Auflösung auf Breite/Höhe",
    "handAndFace": "Hand und Gesicht",
    "enableIPAdapter": "Aktiviere IP Adapter",
    "resize": "Größe ändern",
    "resetControlImage": "Zurücksetzen vom Referenz Bild",
    "balanced": "Ausgewogen",
    "prompt": "Prompt",
    "resizeMode": "Größenänderungsmodus",
    "resizeMode": "Größe",
    "processor": "Prozessor",
    "saveControlImage": "Speichere Referenz Bild",
    "safe": "Speichern",
@ -825,7 +880,6 @@
    "pidi": "PIDI",
    "normalBae": "Normales BAE",
    "mlsdDescription": "Minimalistischer Liniensegmentdetektor",
    "openPoseDescription": "Schätzung der menschlichen Pose mit Openpose",
    "control": "Kontrolle",
    "coarse": "Grob",
    "crop": "Zuschneiden",
@ -838,23 +892,26 @@
    "lineartAnimeDescription": "Lineart-Verarbeitung im Anime-Stil",
    "minConfidence": "Minimales Vertrauen",
    "megaControl": "Mega-Kontrolle",
    "autoConfigure": "Prozessor automatisch konfigurieren",
    "autoConfigure": "Prozessor Auto-konfig",
    "normalBaeDescription": "Normale BAE-Verarbeitung",
    "noneDescription": "Es wurde keine Verarbeitung angewendet",
    "openPose": "Openpose",
    "lineartAnime": "Lineart Anime",
    "lineartAnime": "Lineart Anime / \"Strichzeichnung Anime\"",
    "mediapipeFaceDescription": "Gesichtserkennung mit Mediapipe",
    "canny": "Canny",
    "canny": "\"Canny\"",
    "hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
    "scribble": "Scribble",
    "maxFaces": "Maximale Anzahl Gesichter",
    "resizeSimple": "Größe ändern (einfach)",
    "large": "Groß",
    "modelSize": "Modell Größe",
    "modelSize": "Modellgröße",
    "small": "Klein",
    "base": "Basis",
    "depthAnything": "Depth Anything",
    "depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth Anything-Technik"
    "depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth-Anything-Technik",
    "face": "Gesicht",
    "body": "Körper",
    "hands": "Hände",
    "dwOpenpose": "DW Openpose"
  },
  "queue": {
    "status": "Status",
@ -889,7 +946,7 @@
    "batchValues": "Stapel Werte",
    "queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
    "queuedCount": "{{pending}} wartenden Elemente",
    "clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
    "clearQueueAlertDialog": "\"Die Warteschlange leeren\" stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
    "completedIn": "Fertig in",
    "cancelBatchSucceeded": "Stapel abgebrochen",
    "cancelBatch": "Stapel stoppen",
@ -898,24 +955,26 @@
    "cancelBatchFailed": "Problem beim Abbruch vom Stapel",
    "clearQueueAlertDialog2": "Warteschlange wirklich leeren?",
    "pruneSucceeded": "{{item_count}} abgeschlossene Elemente aus der Warteschlange entfernt",
    "pauseSucceeded": "Prozessor angehalten",
    "pauseSucceeded": "Prozess angehalten",
    "cancelFailed": "Problem beim Stornieren des Auftrags",
    "pauseFailed": "Problem beim Anhalten des Prozessors",
    "pauseFailed": "Problem beim Anhalten des Prozesses",
    "front": "Vorne",
    "pruneTooltip": "Bereinigen Sie {{item_count}} abgeschlossene Aufträge",
    "resumeFailed": "Problem beim wieder aufnehmen von Prozessor",
    "resumeFailed": "Problem beim Fortsetzen des Prozesses",
    "pruneFailed": "Problem beim leeren der Warteschlange",
    "pauseTooltip": "Pause von Prozessor",
    "pauseTooltip": "Prozess anhalten",
    "back": "Hinten",
    "resumeSucceeded": "Prozessor wieder aufgenommen",
    "resumeTooltip": "Prozessor wieder aufnehmen",
    "resumeSucceeded": "Prozess wird fortgesetzt",
    "resumeTooltip": "Prozess wieder aufnehmen",
    "time": "Zeit",
    "batchQueuedDesc_one": "{{count}} Eintrag ans {{direction}} der Wartschlange hinzugefügt",
    "batchQueuedDesc_other": "{{count}} Einträge ans {{direction}} der Wartschlange hinzugefügt",
    "batchQueuedDesc_one": "{{count}} Eintrag an {{direction}} der Wartschlange hinzugefügt",
    "batchQueuedDesc_other": "{{count}} Einträge an {{direction}} der Wartschlange hinzugefügt",
    "openQueue": "Warteschlange öffnen",
    "batchFailedToQueue": "Fehler beim Einreihen in die Stapelverarbeitung",
    "batchFieldValues": "Stapelverarbeitungswerte",
    "batchQueued": "Stapelverarbeitung eingereiht"
    "batchQueued": "Stapelverarbeitung eingereiht",
    "graphQueued": "Graph eingereiht",
    "graphFailedToQueue": "Fehler beim Einreihen des Graphen"
  },
  "metadata": {
    "negativePrompt": "Negativ Beschreibung",
@ -936,46 +995,149 @@
    "generationMode": "Generierungsmodus",
    "Threshold": "Rauschen-Schwelle",
    "seed": "Seed",
    "perlin": "Perlin Noise",
    "perlin": "Perlin-Rauschen",
    "hiresFix": "Optimierung für hohe Auflösungen",
    "initImage": "Erstes Bild",
    "variations": "Samengewichtspaare",
    "variations": "Seed-Gewichtungs-Paare",
    "vae": "VAE",
    "workflow": "Arbeitsablauf",
    "workflow": "Workflow",
    "scheduler": "Planer",
    "noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden",
    "recallParameters": "Recall Parameters"
    "recallParameters": "Parameter wiederherstellen"
  },
  "popovers": {
    "noiseUseCPU": {
      "heading": "Nutze Prozessor rauschen"
      "heading": "Nutze CPU-Rauschen",
      "paragraphs": [
        "Entscheidet, ob auf der CPU oder GPU Rauschen erzeugt wird.",
        "Mit aktiviertem CPU-Rauschen wird ein bestimmter Seedwert das gleiche Bild auf jeder Maschine erzeugen.",
        "CPU-Rauschen einzuschalten beeinflusst nicht die Systemleistung."
      ]
    },
    "paramModel": {
      "heading": "Modell"
      "heading": "Modell",
      "paragraphs": [
        "Modell für die Entrauschungsschritte."
      ]
    },
    "paramIterations": {
      "heading": "Iterationen"
      "heading": "Iterationen",
      "paragraphs": [
        "Die Anzahl der Bilder, die erzeugt werden sollen.",
        "Wenn \"Dynamische Prompts\" aktiviert ist, wird jeder einzelne Prompt so oft generiert."
      ]
    },
    "paramCFGScale": {
      "heading": "CFG-Skala"
      "heading": "CFG-Skala",
      "paragraphs": [
        "Bestimmt, wie viel Ihr Prompt den Erzeugungsprozess beeinflusst."
      ]
    },
    "paramSteps": {
      "heading": "Schritte"
      "heading": "Schritte",
      "paragraphs": [
        "Anzahl der Schritte, die bei jeder Generierung durchgeführt werden.",
        "Höhere Schrittzahlen werden in der Regel bessere Bilder ergeben, aber mehr Zeit benötigen."
      ]
    },
    "lora": {
      "heading": "LoRA Gewichte"
      "heading": "LoRA Gewichte",
      "paragraphs": [
        "Höhere LoRA-Wichtungen führen zu größeren Auswirkungen auf das endgültige Bild."
      ]
    },
    "infillMethod": {
      "heading": "Füllmethode"
      "heading": "Füllmethode",
      "paragraphs": [
        "Infill-Methode für den ausgewählten Bereich."
      ]
    },
    "paramVAE": {
      "heading": "VAE"
      "heading": "VAE",
      "paragraphs": [
        "Verwendetes Modell, um den KI-Ausgang in das endgültige Bild zu übersetzen."
      ]
    },
    "paramRatio": {
      "heading": "Seitenverhältnis",
      "paragraphs": [
        "Das Seitenverhältnis des erzeugten Bildes.",
        "Für SD1.5-Modelle wird eine Bildgröße von 512x512 Pixel empfohlen, für SDXL-Modelle sind es 1024x1024 Pixel."
      ]
    },
    "paramDenoisingStrength": {
      "paragraphs": [
        "Wie viel Rauschen dem Eingabebild hinzugefügt wird.",
        "0 wird zu einem identischen Bild führen, während 1 zu einem völlig neuen Bild führt."
      ],
      "heading": "Stärke der Entrauschung"
    },
    "paramVAEPrecision": {
      "heading": "VAE-Präzision",
      "paragraphs": [
        "Die bei der VAE-Kodierung und Dekodierung verwendete Präzision. FP16/Halbpräzision ist effizienter, aber auf Kosten kleiner Bildvariationen."
      ]
    },
    "paramCFGRescaleMultiplier": {
      "heading": "CFG Rescale Multiplikator",
      "paragraphs": [
        "Rescale-Multiplikator für die CFG-Lenkung, der für Modelle verwendet wird, die mit dem zero-terminal SNR (ztsnr) trainiert wurden. Empfohlener Wert: 0,7."
      ]
    },
    "scaleBeforeProcessing": {
      "paragraphs": [
        "Skaliert den ausgewählten Bereich auf die Größe, die für das Modell am besten geeignet ist."
      ],
      "heading": "Skalieren vor der Verarbeitung"
    },
    "paramSeed": {
      "paragraphs": [
        "Kontrolliert das für die Erzeugung verwendete Startrauschen.",
        "Deaktivieren Sie “Random Seed”, um identische Ergebnisse mit den gleichen Generierungseinstellungen zu erzeugen."
      ],
      "heading": "Seed"
    },
    "dynamicPromptsMaxPrompts": {
      "paragraphs": [
        "Beschränkt die Anzahl der Prompts, die von \"Dynamic Prompts\" generiert werden können."
      ],
      "heading": "Maximale Prompts"
    },
    "dynamicPromptsSeedBehaviour": {
      "paragraphs": [
        "Bestimmt, wie der Seed-Wert beim Erzeugen von Prompts verwendet wird.",
        "Verwenden Sie dies, um schnelle Variationen eines einzigen Seeds zu erkunden.",
        "Wenn Sie z. B. 5 Prompts haben, wird jedes Bild den selben Seed-Wert verwenden.",
        "\"Per Bild\" wird einen einzigartigen Seed-Wert für jedes Bild verwenden. Dies bietet mehr Variationen."
      ],
      "heading": "Seed-Verhalten"
    },
    "dynamicPrompts": {
      "paragraphs": [
        "\"Dynamische Prompts\" übersetzt einen Prompt in mehrere.",
        "Die Ausgangs-Syntax ist \"ein {roter|grüner|blauer} ball\". Das generiert 3 Prompts: \"ein roter ball\", \"ein grüner ball\" und \"ein blauer ball\".",
        "Sie können die Syntax so oft verwenden, wie Sie in einem einzigen Prompt möchten, aber stellen Sie sicher, dass die Anzahl der Prompts zur Einstellung von \"Max Prompts\" passt."
      ],
      "heading": "Dynamische Prompts"
    },
    "controlNetWeight": {
      "paragraphs": [
        "Wie stark wird das ControlNet das generierte Bild beeinflussen wird."
      ],
      "heading": "Einfluss"
    },
    "paramScheduler": {
      "paragraphs": [
        "\"Planer\" definiert, wie iterativ Rauschen zu einem Bild hinzugefügt wird, oder wie ein Sample bei der Ausgabe eines Modells aktualisiert wird."
      ],
      "heading": "Planer"
    }
  },
  "ui": {
    "lockRatio": "Verhältnis sperren",
    "hideProgressImages": "Verstecke Prozess Bild",
    "showProgressImages": "Zeige Prozess Bild"
    "hideProgressImages": "Fortschrittsbilder verbergen",
    "showProgressImages": "Fortschrittsbilder anzeigen",
    "swapSizes": "Tausche Größen"
  },
  "invocationCache": {
    "disable": "Deaktivieren",
@ -989,7 +1151,7 @@
    "enableFailed": "Problem beim Aktivieren des Zwischenspeichers",
    "disableFailed": "Problem bei Deaktivierung des Cache",
    "enableSucceeded": "Zwischenspeicher aktiviert",
    "disableSucceeded": "Aufrufcache deaktiviert",
    "disableSucceeded": "Invocation-Cache deaktiviert",
    "clearSucceeded": "Zwischenspeicher gelöscht",
    "invocationCache": "Zwischenspeicher",
    "clearFailed": "Problem beim Löschen des Zwischenspeichers"
@ -1035,15 +1197,15 @@
    "collectionFieldType": "{{name}} Sammlung",
    "controlCollectionDescription": "Kontrollinformationen zwischen Knotenpunkten weitergegeben.",
    "connectionWouldCreateCycle": "Verbindung würde einen Kreislauf/cycle schaffen",
    "ipAdapterDescription": "Ein Adapter für die Bildabfrage (IP-Adapter) / Bilderprompt-Adapter.",
    "ipAdapterDescription": "Ein Adapter für die Bildabfrage (IP-Adapter) / Bildprompt-Adapter.",
    "controlField": "Kontrolle",
    "inputFields": "Eingabefelder",
    "imageField": "Bild",
    "inputMayOnlyHaveOneConnection": "Eingang darf nur eine Verbindung haben",
    "integerCollectionDescription": "Eine Sammlung ganzer Zahlen.",
    "integerDescription": "Das sind ganze Zahlen ohne Dezimalpunkt.",
    "integerDescription": "\"Integer\" sind ganze Zahlen ohne Dezimalpunkt.",
    "conditioningPolymorphic": "Konditionierung polymorphisch",
    "conditioningPolymorphicDescription": "Die Konditionierung kann zwischen den Knotenpunkten weitergegeben werden.",
    "conditioningPolymorphicDescription": "Die Konditionierung kann zwischen den Knoten weitergegeben werden.",
    "invalidOutputSchema": "Ungültiges Ausgabeschema",
    "ipAdapterModel": "IP-Adapter Modell",
    "conditioningFieldDescription": "Die Konditionierung kann zwischen den Knotenpunkten weitergegeben werden.",
@ -1065,10 +1227,129 @@
    "imageCollection": "Bildersammlung",
    "imageCollectionDescription": "Eine Sammlung von Bildern.",
    "denoiseMaskField": "Entrauschen-Maske",
    "ipAdapterCollection": "IP-Adapter Sammlung"
    "ipAdapterCollection": "IP-Adapter Sammlung",
    "newWorkflowDesc2": "Ihr aktueller Arbeitsablauf hat ungespeicherte Änderungen.",
    "problemSettingTitle": "Problem beim Einstellen des Titels",
    "noConnectionData": "Keine Verbindungsdaten",
    "outputField": "Ausgabefeld",
    "outputFieldInInput": "Ausgabefeld im Eingang",
    "problemReadingWorkflow": "Problem beim Lesen des Arbeitsablaufs vom Bild",
    "reloadNodeTemplates": "Knoten-Vorlagen neu laden",
    "newWorkflow": "Neuer Arbeitsablauf / Workflow",
    "newWorkflowDesc": "Einen neuen Arbeitsablauf erstellen?",
    "noFieldsLinearview": "Keine Felder zur linearen Ansicht hinzugefügt",
    "clearWorkflow": "Workflow löschen",
    "clearWorkflowDesc": "Diesen Arbeitsablauf löschen und neu starten?",
    "noConnectionInProgress": "Es besteht keine Verbindung",
    "notes": "Anmerkungen",
    "nodeVersion": "Knoten Version",
    "noOutputSchemaName": "Kein Name des Ausgabeschemas im ref-Objekt gefunden",
    "node": "Knoten",
    "nodeSearch": "Knoten suchen",
    "removeLinearView": "Entfernen aus Linear View",
    "nodeOutputs": "Knoten-Ausgänge",
    "nodeTemplate": "Knoten-Vorlage",
    "nodeType": "Knotentyp",
    "noFieldType": "Kein Feldtyp",
    "oNNXModelField": "ONNX-Modell",
    "noMatchingNodes": "Keine passenden Knoten",
    "noNodeSelected": "Kein Knoten gewählt",
    "noImageFoundState": "Kein Anfangsbild im Status gefunden",
    "nodeOpacity": "Knoten-Deckkraft",
    "noOutputRecorded": "Keine Ausgänge aufgezeichnet",
    "outputSchemaNotFound": "Ausgabeschema nicht gefunden",
    "oNNXModelFieldDescription": "ONNX-Modellfeld.",
    "outputNode": "Ausgabeknoten",
    "pickOne": "Eins auswählen",
    "problemReadingMetadata": "Problem beim Lesen von Metadaten aus dem Bild",
    "notesDescription": "Anmerkungen zum Arbeitsablauf hinzufügen",
    "outputFields": "Ausgabefelder",
    "sDXLRefinerModelField": "Refiner-Modell",
    "sDXLMainModelFieldDescription": "SDXL Modellfeld.",
    "clearWorkflowDesc2": "Ihr aktueller Arbeitsablauf hat ungespeicherte Änderungen.",
    "skipped": "Übersprungen",
    "schedulerDescription": "Zu erledigen",
    "scheduler": "Planer",
    "showGraphNodes": "Graph Overlay anzeigen",
    "showMinimapnodes": "MiniMap anzeigen",
    "sDXLMainModelField": "SDXL Modell",
    "skippedReservedInput": "Reserviertes Eingabefeld übersprungen",
    "sDXLRefinerModelFieldDescription": "Zu erledigen",
    "showLegendNodes": "Feldtyp-Legende anzeigen",
    "skippedReservedOutput": "Reserviertes Ausgangsfeld übersprungen",
    "skippingInputNoTemplate": "Überspringe Eingabefeld ohne Vorlage",
    "executionStateCompleted": "Erledigt",
    "denoiseMaskFieldDescription": "Denoise Maske kann zwischen Knoten weitergegeben werden",
    "downloadWorkflow": "Workflow JSON herunterladen",
    "executionStateInProgress": "In Bearbeitung",
    "snapToGridHelp": "Knoten am Gitternetz einrasten bei Bewegung",
    "controlCollection": "Control-Sammlung",
    "controlFieldDescription": "Control-Informationen zwischen Knotenpunkten weitergegeben.",
    "latentsField": "Latents",
    "mainModelFieldDescription": "Zu erledigen",
    "missingTemplate": "Ungültiger Knoten: Knoten {{node}} vom Typ {{type}} fehlt Vorlage (nicht installiert?)",
    "skippingUnknownInputType": "Überspringe unbekannten Eingabe-Feldtyp",
    "stringCollectionDescription": "Eine Sammlung von Zeichenfolgen.",
    "string": "Zeichenfolge",
    "stringCollection": "Sammlung von Zeichenfolgen",
    "stringDescription": "Zeichenfolgen (Strings) sind Text.",
    "fieldTypesMustMatch": "Feldtypen müssen übereinstimmen",
    "fitViewportNodes": "An Ansichtsgröße anpassen",
    "missingCanvaInitMaskImages": "Fehlende Startbilder und Masken auf der Leinwand",
    "missingCanvaInitImage": "Fehlendes Startbild auf der Leinwand",
    "ipAdapterModelDescription": "IP-Adapter-Modellfeld",
    "latentsPolymorphicDescription": "Zwischen Nodes können Latents weitergegeben werden.",
    "loadingNodes": "Lade Nodes...",
    "latentsCollectionDescription": "Zwischen Knoten können Latents weitergegeben werden.",
    "mismatchedVersion": "Ungültiger Knoten: Knoten {{node}} vom Typ {{type}} hat keine passende Version (Update versuchen?)",
    "colorCollectionDescription": "Zu erledigen",
    "ipAdapterPolymorphicDescription": "Eine Sammlung von IP-Adaptern.",
    "fullyContainNodesHelp": "Nodes müssen vollständig innerhalb der Auswahlbox sein, um ausgewählt werden zu können",
    "latentsFieldDescription": "Zwischen Nodes können Latents weitergegeben werden.",
    "noWorkflow": "Kein Workflow",
    "hideGraphNodes": "Graph Overlay verbergen",
    "sourceNode": "Quellknoten",
    "executionStateError": "Fehler",
    "latentsCollection": "Latents Sammlung",
    "maybeIncompatible": "Möglicherweise inkompatibel mit installierten",
    "nodePack": "Knoten-Pack",
    "skippingUnknownOutputType": "Überspringe unbekannten Ausgabe-Feldtyp",
    "loadWorkflow": "Lade Workflow",
    "snapToGrid": "Am Gitternetz einrasten",
    "skippingReservedFieldType": "Überspringe reservierten Feldtyp",
    "loRAModelField": "LoRA",
    "loRAModelFieldDescription": "Zu erledigen",
    "mainModelField": "Modell",
    "doesNotExist": "existiert nicht",
    "vaeField": "VAE",
    "unknownOutput": "Unbekannte Ausgabe: {{name}}",
    "updateNode": "Knoten updaten",
    "edge": "Rand / Kante",
    "sourceNodeDoesNotExist": "Ungültiger Rand: Quell- / Ausgabe-Knoten {{node}} existiert nicht",
    "updateAllNodes": "Update Knoten",
    "allNodesUpdated": "Alle Knoten aktualisiert",
    "unknownTemplate": "Unbekannte Vorlage",
    "floatDescription": "Floats sind Zahlen mit einem Dezimalpunkt.",
    "updateApp": "Update App",
    "vaeFieldDescription": "VAE Submodell.",
    "unknownInput": "Unbekannte Eingabe: {{name}}",
    "unknownNodeType": "Unbekannter Knotentyp",
    "float": "Kommazahlen",
    "latentsPolymorphic": "Latents Polymorph",
    "integerPolymorphicDescription": "Eine Sammlung von ganzen Zahlen.",
    "integerPolymorphic": "Ganze Zahl Polymorph",
    "ipAdapterPolymorphic": "IP-Adapter Polymorph",
    "floatPolymorphic": "Fließkommazahl Polymorph",
    "enumDescription": "Aufzählungen sind Werte, die eine von mehreren Optionen sein können.",
    "floatCollection": "Fließkommazahl Sammlung",
    "enum": "Aufzählung",
    "floatPolymorphicDescription": "Eine Sammlung von Fließkommazahlen",
    "fullyContainNodes": "Vollständig ausgewählte Nodes auswählen",
    "editMode": "Im Workflow-Editor bearbeiten",
    "floatCollectionDescription": "Eine Sammlung von Fließkommazahlen"
  },
  "hrf": {
    "enableHrf": "Aktivieren Sie die Korrektur für hohe Auflösungen",
    "enableHrf": "Korrektur für hohe Auflösungen",
    "upscaleMethod": "Vergrößerungsmethoden",
    "enableHrfTooltip": "Generieren Sie mit einer niedrigeren Anfangsauflösung, skalieren Sie auf die Basisauflösung hoch und führen Sie dann Image-to-Image aus.",
    "metadata": {
@ -1114,12 +1395,12 @@
    },
    "control": {
      "title": "Kontrolle",
      "controlAdaptersTab": "Kontroll Adapter",
      "ipTab": "Bild Beschreibung"
      "controlAdaptersTab": "Kontroll-Adapter",
      "ipTab": "Bild-Prompts"
    },
    "compositing": {
      "coherenceTab": "Kohärenzpass",
      "infillTab": "Füllung",
      "infillTab": "Füllung / Infill",
      "title": "Compositing"
    }
  },
@ -1127,7 +1408,7 @@
    "workflows": "Arbeitsabläufe",
    "noSystemWorkflows": "Keine System-Arbeitsabläufe",
    "workflowName": "Arbeitsablauf-Name",
    "workflowIsOpen": "Arbeitsablauf ist offen",
    "workflowIsOpen": "Arbeitsablauf ist geöffnet",
    "saveWorkflowAs": "Arbeitsablauf speichern als",
    "searchWorkflows": "Suche Arbeitsabläufe",
    "newWorkflowCreated": "Neuer Arbeitsablauf erstellt",
@ -1157,5 +1438,15 @@
  },
  "app": {
    "storeNotInitialized": "App-Store ist nicht initialisiert"
  },
  "sdxl": {
    "concatPromptStyle": "Verknüpfen von Prompt & Stil",
    "scheduler": "Planer",
    "steps": "Schritte",
    "useRefiner": "Refiner verwenden",
    "selectAModel": "Modell auswählen"
  },
  "dynamicPrompts": {
    "showDynamicPrompts": "Dynamische Prompts anzeigen"
  }
}
@ -175,6 +175,7 @@
|
||||
"statusUpscaling": "Upscaling",
|
||||
"statusUpscalingESRGAN": "Upscaling (ESRGAN)",
|
||||
"template": "Template",
|
||||
"toResolve": "To resolve",
|
||||
"training": "Training",
|
||||
"trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
|
||||
"trainingDesc2": "InvokeAI already supports training custom embeddourings using Textual Inversion using the main script.",
|
||||
@ -235,6 +236,9 @@
"fill": "Fill",
"h": "H",
"handAndFace": "Hand and Face",
"face": "Face",
"body": "Body",
"hands": "Hands",
"hed": "HED",
"hedDescription": "Holistically-Nested Edge Detection",
"hideAdvanced": "Hide Advanced",
@ -261,8 +265,8 @@
"noneDescription": "No processing applied",
"normalBae": "Normal BAE",
"normalBaeDescription": "Normal BAE processing",
"openPose": "Openpose",
"openPoseDescription": "Human pose estimation using Openpose",
"dwOpenpose": "DW Openpose",
"dwOpenposeDescription": "Human pose estimation using DW Openpose",
"pidi": "PIDI",
"pidiDescription": "PIDI image processing",
"processor": "Processor",
@ -897,6 +901,7 @@
"doesNotExist": "does not exist",
"downloadWorkflow": "Download Workflow JSON",
"edge": "Edge",
"editMode": "Edit in Workflow Editor",
"enum": "Enum",
"enumDescription": "Enums are values that may be one of a number of options.",
"executionStateCompleted": "Completed",
@ -992,8 +997,10 @@
"problemReadingMetadata": "Problem reading metadata from image",
"problemReadingWorkflow": "Problem reading workflow from image",
"problemSettingTitle": "Problem Setting Title",
"resetToDefaultValue": "Reset to default value",
"reloadNodeTemplates": "Reload Node Templates",
"removeLinearView": "Remove from Linear View",
"reorderLinearView": "Reorder Linear View",
"newWorkflow": "New Workflow",
"newWorkflowDesc": "Create a new workflow?",
"newWorkflowDesc2": "Your current workflow has unsaved changes.",
@ -1064,6 +1071,7 @@
"vaeModelFieldDescription": "TODO",
"validateConnections": "Validate Connections and Graph",
"validateConnectionsHelp": "Prevent invalid connections from being made, and invalid graphs from being invoked",
"viewMode": "Use in Linear View",
"unableToGetWorkflowVersion": "Unable to get workflow schema version",
"unrecognizedWorkflowVersion": "Unrecognized workflow schema version {{version}}",
"version": "Version",
@ -1416,9 +1424,8 @@
"clipSkip": {
"heading": "CLIP Skip",
"paragraphs": [
"Choose how many layers of the CLIP model to skip.",
"Some models work better with certain CLIP Skip settings.",
"A higher value typically results in a less detailed image."
"How many layers of the CLIP model to skip.",
"Certain models are better suited to be used with CLIP Skip."
]
},
"paramNegativeConditioning": {
@ -1438,7 +1445,8 @@
"paramScheduler": {
"heading": "Scheduler",
"paragraphs": [
"Scheduler defines how to iteratively add noise to an image or how to update a sample based on a model's output."
"Scheduler used during the generation process.",
"Each scheduler defines how to iteratively add noise to an image or how to update a sample based on a model's output."
]
},
"compositingBlur": {
@ -1455,47 +1463,52 @@
},
"compositingCoherenceMode": {
"heading": "Mode",
"paragraphs": ["The mode of the Coherence Pass."]
"paragraphs": ["Method used to create a coherent image with the newly generated masked area."]
},
"compositingCoherenceSteps": {
"heading": "Steps",
"paragraphs": ["Number of denoising steps used in the Coherence Pass.", "Same as the main Steps parameter."]
"paragraphs": ["Number of steps in the Coherence Pass.", "Similar to Generation Steps."]
},
"compositingStrength": {
"heading": "Strength",
"paragraphs": [
"Denoising strength for the Coherence Pass.",
"Same as the Image to Image Denoising Strength parameter."
]
"paragraphs": ["Amount of noise added for the Coherence Pass.", "Similar to Denoising Strength."]
},
"compositingMaskAdjustments": {
"heading": "Mask Adjustments",
"paragraphs": ["Adjust the mask."]
},
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
"paragraphs": [
"Which steps of the denoising process will have the ControlNet applied.",
"ControlNets applied at the beginning of the process guide composition, and ControlNets applied at the end guide details."
]
},
"controlNetControlMode": {
"heading": "Control Mode",
"paragraphs": ["Lends more weight to either the prompt or ControlNet."]
},
"controlNetResizeMode": {
"heading": "Resize Mode",
"paragraphs": ["How the ControlNet image will be fit to the image output size."]
},
"controlNet": {
"heading": "ControlNet",
"paragraphs": [
"ControlNets provide guidance to the generation process, helping create images with controlled composition, structure, or style, depending on the model selected."
]
},
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
"paragraphs": [
"The part of the of the denoising process that will have the Control Adapter applied.",
|
||||
"Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details."
|
||||
]
|
||||
},
|
||||
"controlNetControlMode": {
|
||||
"heading": "Control Mode",
|
||||
"paragraphs": ["Lend more weight to either the prompt or ControlNet."]
|
||||
},
|
||||
"controlNetProcessor": {
|
||||
"heading": "Processor",
|
||||
"paragraphs": [
|
||||
"Method of processing the input image to guide the generation process. Different processors will providedifferent effects or styles in your generated images."
|
||||
]
},
"controlNetResizeMode": {
"heading": "Resize Mode",
"paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
},
"controlNetWeight": {
"heading": "Weight",
"paragraphs": ["How strongly the ControlNet will impact the generated image."]
"paragraphs": [
"Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image."
]
},
"dynamicPrompts": {
"heading": "Dynamic Prompts",
@ -1518,13 +1531,23 @@
"Per Image will use a unique seed for each image. This provides more variation."
]
},
"imageFit": {
"heading": "Fit Initial Image to Output Size",
"paragraphs": [
"Resizes the initial image to the width and height of the output image. Recommended to enable."
]
},
"infillMethod": {
"heading": "Infill Method",
"paragraphs": ["Method to infill the selected area."]
"paragraphs": ["Method of infilling during the Outpainting or Inpainting process."]
},
"lora": {
"heading": "LoRA Weight",
"paragraphs": ["Higher LoRA weight will lead to larger impacts on the final image."]
"heading": "LoRA",
"paragraphs": ["Lightweight models that are used in conjunction with base models."]
},
"loraWeight": {
"heading": "Weight",
"paragraphs": ["Weight of the LoRA. Higher weight will lead to larger impacts on the final image."]
},
"noiseUseCPU": {
"heading": "Use CPU Noise",
@ -1534,14 +1557,25 @@
"There is no performance impact to enabling CPU Noise."
]
},
"paramAspect": {
"heading": "Aspect",
"paragraphs": [
"Aspect ratio of the generated image. Changing the ratio will update the Width and Height accordingly.",
"“Optimize” will set the Width and Height to optimal dimensions for the chosen model."
]
},
"paramCFGScale": {
"heading": "CFG Scale",
"paragraphs": ["Controls how much your prompt influences the generation process."]
"paragraphs": [
"Controls how much the prompt influences the generation process.",
"High CFG Scale values can result in over-saturation and distorted generation results. "
]
},
"paramCFGRescaleMultiplier": {
"heading": "CFG Rescale Multiplier",
"paragraphs": [
"Rescale multiplier for CFG guidance, used for models trained using zero-terminal SNR (ztsnr). Suggested value 0.7."
"Rescale multiplier for CFG guidance, used for models trained using zero-terminal SNR (ztsnr).",
"Suggested value of 0.7 for these models."
]
},
"paramDenoisingStrength": {
@ -1551,6 +1585,16 @@
"0 will result in an identical image, while 1 will result in a completely new image."
]
},
"paramHeight": {
"heading": "Height",
"paragraphs": ["Height of the generated image. Must be a multiple of 8."]
},
"paramHrf": {
"heading": "Enable High Resolution Fix",
"paragraphs": [
"Generate high quality images at a larger resolution than optimal for the model. Generally used to prevent duplication in the generated image."
]
},
"paramIterations": {
"heading": "Iterations",
"paragraphs": [
@ -1561,8 +1605,7 @@
"paramModel": {
"heading": "Model",
"paragraphs": [
"Model used for the denoising steps.",
"Different models are typically trained to specialize in producing particular aesthetic results and content."
"Model used for generation. Different models are trained to specialize in producing different aesthetic results and content."
]
},
"paramRatio": {
@ -1576,7 +1619,7 @@
"heading": "Seed",
"paragraphs": [
"Controls the starting noise used for generation.",
"Disable “Random Seed” to produce identical results with the same generation settings."
"Disable the “Random” option to produce identical results with the same generation settings."
]
},
"paramSteps": {
@ -1586,6 +1629,10 @@
"Higher step counts will typically create better images but will require more generation time."
]
},
"paramUpscaleMethod": {
"heading": "Upscale Method",
"paragraphs": ["Method used to upscale the image for High Resolution Fix."]
},
"paramVAE": {
"heading": "VAE",
"paragraphs": ["Model used for translating AI output into the final image."]
@ -1593,14 +1640,82 @@
"paramVAEPrecision": {
"heading": "VAE Precision",
"paragraphs": [
"The precision used during VAE encoding and decoding. FP16/half precision is more efficient, at the expense of minor image variations."
"The precision used during VAE encoding and decoding.",
"Fp16/Half precision is more efficient, at the expense of minor image variations."
]
},
"paramWidth": {
"heading": "Width",
"paragraphs": ["Width of the generated image. Must be a multiple of 8."]
},
"patchmatchDownScaleSize": {
"heading": "Downscale",
"paragraphs": [
"How much downscaling occurs before infilling.",
"Higher downscaling will improve performance and reduce quality."
]
},
"refinerModel": {
"heading": "Refiner Model",
"paragraphs": [
"Model used during the refiner portion of the generation process.",
"Similar to the Generation Model."
]
},
"refinerPositiveAestheticScore": {
"heading": "Positive Aesthetic Score",
"paragraphs": [
"Weight generations to be more similar to images with a high aesthetic score, based on the training data."
]
},
"refinerNegativeAestheticScore": {
"heading": "Negative Aesthetic Score",
"paragraphs": [
"Weight generations to be more similar to images with a low aesthetic score, based on the training data."
]
},
"refinerScheduler": {
"heading": "Scheduler",
"paragraphs": [
"Scheduler used during the refiner portion of the generation process.",
"Similar to the Generation Scheduler."
]
},
"refinerStart": {
"heading": "Refiner Start",
"paragraphs": [
"Where in the generation process the refiner will start to be used.",
"0 means the refiner will be used for the entire generation process, 0.8 means the refiner will be used for the last 20% of the generation process."
]
},
"refinerSteps": {
"heading": "Steps",
"paragraphs": [
"Number of steps that will be performed during the refiner portion of the generation process.",
"Similar to the Generation Steps."
]
},
"refinerCfgScale": {
"heading": "CFG Scale",
"paragraphs": [
"Controls how much the prompt influences the generation process.",
"Similar to the Generation CFG Scale."
]
},
"scaleBeforeProcessing": {
"heading": "Scale Before Processing",
"paragraphs": [
"Scales the selected area to the size best suited for the model before the image generation process."
"“Auto” scales the selected area to the size best suited for the model before the image generation process.",
"“Manual” allows you to choose the width and height the selected area will be scaled to before the image generation process."
]
},
"seamlessTilingXAxis": {
"heading": "Seamless Tiling X Axis",
"paragraphs": ["Seamlessly tile an image along the horizontal axis."]
},
"seamlessTilingYAxis": {
"heading": "Seamless Tiling Y Axis",
"paragraphs": ["Seamlessly tile an image along the vertical axis."]
}
},
"ui": {
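Each popover entry above pairs a heading string with a paragraphs array. For reference, a minimal sketch (hook name and key paths are assumptions, not taken from this diff) of how such keys can be read with react-i18next; i18next returns the array itself when returnObjects is set:

// Hypothetical consumer of the locale structure above (sketch, not repo code).
import { useTranslation } from 'react-i18next';

const usePopoverText = (feature: string) => {
  const { t } = useTranslation();
  const heading = t(`popovers.${feature}.heading`);
  // returnObjects lets i18next hand back the paragraphs array as-is.
  const paragraphs = t(`popovers.${feature}.paragraphs`, { returnObjects: true }) as string[];
  return { heading, paragraphs };
};
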
@ -795,7 +795,8 @@
"workflowDeleted": "Flusso di lavoro eliminato",
"problemRetrievingWorkflow": "Problema nel recupero del flusso di lavoro",
"resetInitialImage": "Reimposta l'immagine iniziale",
"uploadInitialImage": "Carica l'immagine iniziale"
"uploadInitialImage": "Carica l'immagine iniziale",
"problemDownloadingImage": "Impossibile scaricare l'immagine"
},
"tooltip": {
"feature": {
@ -1134,7 +1135,14 @@
"newWorkflow": "Nuovo flusso di lavoro",
"newWorkflowDesc": "Creare un nuovo flusso di lavoro?",
"newWorkflowDesc2": "Il flusso di lavoro attuale presenta modifiche non salvate.",
"unsupportedAnyOfLength": "unione di troppi elementi ({{count}})"
"unsupportedAnyOfLength": "unione di troppi elementi ({{count}})",
"clearWorkflowDesc": "Cancellare questo flusso di lavoro e avviarne uno nuovo?",
"clearWorkflow": "Cancella il flusso di lavoro",
"clearWorkflowDesc2": "Il tuo flusso di lavoro attuale presenta modifiche non salvate.",
"viewMode": "Utilizzare nella vista lineare",
"reorderLinearView": "Riordina la vista lineare",
"editMode": "Modifica nell'editor del flusso di lavoro",
"resetToDefaultValue": "Ripristina il valore predefinito"
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
@ -1191,7 +1199,6 @@
"f": "F",
"h": "A",
"prompt": "Prompt",
"openPoseDescription": "Stima della posa umana utilizzando Openpose",
"resizeMode": "Ridimensionamento",
"weight": "Peso",
"selectModel": "Seleziona un modello",
@ -1238,7 +1245,11 @@
"large": "Grande",
"small": "Piccolo",
"depthAnythingDescription": "Generazione di mappe di profondità utilizzando la tecnica Depth Anything",
"modelSize": "Dimensioni del modello"
"modelSize": "Dimensioni del modello",
"dwOpenposeDescription": "Stima della posa umana utilizzando DW Openpose",
"face": "Viso",
"body": "Corpo",
"hands": "Mani"
},
"queue": {
"queueFront": "Aggiungi all'inizio della coda",
@ -1368,7 +1379,8 @@
"popovers": {
"paramScheduler": {
"paragraphs": [
"Il campionatore definisce come aggiungere in modo iterativo il rumore a un'immagine o come aggiornare un campione in base all'output di un modello."
"Il campionatore utilizzato durante il processo di generazione.",
"Ciascun campionatore definisce come aggiungere in modo iterativo il rumore a un'immagine o come aggiornare un campione in base all'output di un modello."
],
"heading": "Campionatore"
},
@ -1381,8 +1393,8 @@
"compositingCoherenceSteps": {
"heading": "Passi",
"paragraphs": [
"Numero di passi di riduzione del rumore utilizzati nel Passaggio di Coerenza.",
"Uguale al parametro principale Passi."
"Numero di passi utilizzati nel Passaggio di Coerenza.",
"Simile ai passi di generazione."
]
},
"compositingBlur": {
@ -1394,14 +1406,13 @@
"compositingCoherenceMode": {
"heading": "Modalità",
"paragraphs": [
"La modalità del Passaggio di Coerenza."
"Metodo utilizzato per creare un'immagine coerente con l'area mascherata appena generata."
]
},
"clipSkip": {
"paragraphs": [
"Scegli quanti livelli del modello CLIP saltare.",
"Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip.",
"Un valore più alto in genere produce un'immagine meno dettagliata."
"Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip."
]
},
"compositingCoherencePass": {
@ -1413,8 +1424,8 @@
"compositingStrength": {
"heading": "Forza",
"paragraphs": [
"Intensità di riduzione del rumore per il passaggio di coerenza.",
"Uguale al parametro intensità di riduzione del rumore da immagine a immagine."
"Quantità di rumore aggiunta per il Passaggio di Coerenza.",
"Simile alla forza di riduzione del rumore."
]
},
"paramNegativeConditioning": {
@ -1440,8 +1451,8 @@
"controlNetBeginEnd": {
"heading": "Percentuale passi Inizio / Fine",
"paragraphs": [
"A quali passi del processo di rimozione del rumore verrà applicato ControlNet.",
"I ControlNet applicati all'inizio del processo guidano la composizione, mentre i ControlNet applicati alla fine guidano i dettagli."
"La parte del processo di rimozione del rumore in cui verrà applicato l'adattatore di controllo.",
"In genere, gli adattatori di controllo applicati all'inizio del processo guidano la composizione, mentre quelli applicati alla fine guidano i dettagli."
]
},
"noiseUseCPU": {
@ -1454,7 +1465,7 @@
},
"scaleBeforeProcessing": {
"paragraphs": [
"Ridimensiona l'area selezionata alla dimensione più adatta al modello prima del processo di generazione dell'immagine."
"\"Auto\" ridimensiona l'area selezionata alla dimensione più adatta al modello prima del processo di generazione dell'immagine."
],
"heading": "Scala prima dell'elaborazione"
},
@ -1489,20 +1500,21 @@
"paramVAEPrecision": {
"heading": "Precisione VAE",
"paragraphs": [
"La precisione utilizzata durante la codifica e decodifica VAE. FP16/mezza precisione è più efficiente, a scapito di minori variazioni dell'immagine."
"La precisione utilizzata durante la codifica e decodifica VAE.",
"Fp16/Mezza precisione è più efficiente, a scapito di minori variazioni dell'immagine."
]
},
"paramSeed": {
"paragraphs": [
"Controlla il rumore iniziale utilizzato per la generazione.",
"Disabilita seme \"Casuale\" per produrre risultati identici con le stesse impostazioni di generazione."
"Disabilita l'opzione \"Casuale\" per produrre risultati identici con le stesse impostazioni di generazione."
],
"heading": "Seme"
},
"controlNetResizeMode": {
"heading": "Modalità ridimensionamento",
"paragraphs": [
"Come l'immagine ControlNet verrà adattata alle dimensioni di output dell'immagine."
"Metodo per adattare le dimensioni dell'immagine in ingresso dell'adattatore di controllo alle dimensioni della generazione di output."
]
},
"dynamicPromptsSeedBehaviour": {
@ -1517,8 +1529,7 @@
"paramModel": {
"heading": "Modello",
"paragraphs": [
"Modello utilizzato per i passaggi di riduzione del rumore.",
"Diversi modelli sono generalmente addestrati per specializzarsi nella produzione di particolari risultati e contenuti estetici."
"Modello utilizzato per la generazione. Diversi modelli vengono addestrati per specializzarsi nella produzione di risultati e contenuti estetici diversi."
]
},
"paramDenoisingStrength": {
@ -1536,25 +1547,26 @@
},
"infillMethod": {
"paragraphs": [
"Metodo per riempire l'area selezionata."
"Metodo di riempimento durante il processo di Outpainting o Inpainting."
],
"heading": "Metodo di riempimento"
},
"controlNetWeight": {
"heading": "Peso",
"paragraphs": [
"Quanto forte sarà l'impatto di ControlNet sull'immagine generata."
"Peso dell'adattatore di controllo. Un peso maggiore porterà a impatti maggiori sull'immagine finale."
]
},
"paramCFGScale": {
"heading": "Scala CFG",
"paragraphs": [
"Controlla quanto il tuo prompt influenza il processo di generazione."
"Controlla quanto il prompt influenza il processo di generazione.",
"Valori elevati della scala CFG possono provocare una saturazione eccessiva e distorsioni nei risultati della generazione. "
]
},
"controlNetControlMode": {
"paragraphs": [
"Attribuisce più peso al prompt o a ControlNet."
"Attribuisce più peso al prompt oppure a ControlNet."
],
"heading": "Modalità di controllo"
},
@ -1566,9 +1578,9 @@
]
},
"lora": {
"heading": "Peso LoRA",
"heading": "LoRA",
"paragraphs": [
"Un peso LoRA più elevato porterà a impatti maggiori sull'immagine finale."
"Modelli leggeri utilizzati insieme ai modelli base."
]
},
"controlNet": {
@ -1580,8 +1592,65 @@
"paramCFGRescaleMultiplier": {
"heading": "Moltiplicatore di riscala CFG",
"paragraphs": [
"Moltiplicatore di riscala per la guida CFG, utilizzato per modelli addestrati utilizzando SNR a terminale zero (ztsnr). Valore suggerito 0.7."
"Moltiplicatore di riscala per la guida CFG, utilizzato per modelli addestrati utilizzando SNR a terminale zero (ztsnr).",
"Valore suggerito di 0.7 per questi modelli."
]
},
"controlNetProcessor": {
"heading": "Processore",
"paragraphs": [
"Metodo di elaborazione dell'immagine di input per guidare il processo di generazione. Processori diversi forniranno effetti o stili diversi nelle immagini generate."
]
},
"imageFit": {
"heading": "Adatta l'immagine iniziale alle dimensioni di output",
"paragraphs": [
"Ridimensiona l'immagine iniziale in base alla larghezza e all'altezza dell'immagine di output. Si consiglia di abilitarlo."
]
},
"loraWeight": {
"heading": "Peso",
"paragraphs": [
"Peso del LoRA. Un peso maggiore comporterà un impatto maggiore sull'immagine finale."
]
},
"paramAspect": {
"heading": "Aspetto",
"paragraphs": [
"Proporzioni dell'immagine generata. La modifica del rapporto aggiornerà di conseguenza la larghezza e l'altezza.",
"\"Ottimizza\" imposterà la larghezza e l'altezza alle dimensioni ottimali per il modello scelto."
]
},
"paramHeight": {
"heading": "Altezza",
"paragraphs": [
"Altezza dell'immagine generata. Deve essere un multiplo di 8."
]
},
"paramHrf": {
"heading": "Abilita correzione alta risoluzione",
"paragraphs": [
"Genera immagini di alta qualità con una risoluzione maggiore di quella ottimale per il modello. Generalmente utilizzato per impedire la duplicazione nell'immagine generata."
]
},
"paramUpscaleMethod": {
"heading": "Metodo di ampliamento",
"paragraphs": [
"Metodo utilizzato per eseguire l'ampliamento dell'immagine per la correzione ad alta risoluzione."
]
},
"patchmatchDownScaleSize": {
"heading": "Ridimensiona",
"paragraphs": [
"Quanto ridimensionamento avviene prima del riempimento.",
"Un ridimensionamento più elevato migliorerà le prestazioni e ridurrà la qualità."
]
},
"paramWidth": {
"paragraphs": [
"Larghezza dell'immagine generata. Deve essere un multiplo di 8."
],
"heading": "Larghezza"
}
},
"sdxl": {
@ -1672,7 +1741,9 @@
"downloadWorkflow": "Salva su file",
"uploadWorkflow": "Carica da file",
"projectWorkflows": "Flussi di lavoro del progetto",
"noWorkflows": "Nessun flusso di lavoro"
"noWorkflows": "Nessun flusso di lavoro",
"workflowCleared": "Flusso di lavoro cancellato",
"saveWorkflowToProject": "Salva flusso di lavoro nel progetto"
},
"app": {
"storeNotInitialized": "Il negozio non è inizializzato"

@ -555,7 +555,6 @@
"balanced": "バランス",
"prompt": "プロンプト",
"depthMidasDescription": "Midasを使用して深度マップを生成",
"openPoseDescription": "Openposeを使用してポーズを推定",
"control": "コントロール",
"resizeMode": "リサイズモード",
"weight": "重み",

@ -333,7 +333,6 @@
"h": "H",
"prompt": "프롬프트",
"depthMidasDescription": "Midas를 사용하여 Depth map 생성하기",
"openPoseDescription": "Openpose를 이용한 사람 포즈 추정",
"control": "Control",
"resizeMode": "크기 조정 모드",
"t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) 사용 가능,$t(common.controlNet) 사용 불가능",
@ -370,7 +369,6 @@
"normalBaeDescription": "Normal BAE 처리",
"noneDescription": "처리되지 않음",
"saveControlImage": "Control Image 저장",
"openPose": "Openpose",
"toggleControlNet": "해당 ControlNet으로 전환",
"delete": "삭제",
"controlAdapter_other": "Control Adapter(s)",

@ -1033,7 +1033,6 @@
"prompt": "Prompt",
"depthMidasDescription": "Genereer diepteblad via Midas",
"controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))",
"openPoseDescription": "Menselijke pose-benadering via Openpose",
"control": "Controle",
"resizeMode": "Modus schaling",
"t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) ingeschakeld, $t(common.controlNet)s uitgeschakeld",
@ -1072,7 +1071,6 @@
"normalBaeDescription": "Normale BAE-verwerking",
"noneDescription": "Geen verwerking toegepast",
"saveControlImage": "Bewaar controle-afbeelding",
"openPose": "Openpose",
"toggleControlNet": "Zet deze ControlNet aan/uit",
"delete": "Verwijder",
"controlAdapter_one": "Control-adapter",
@ -1219,16 +1217,14 @@
"clipSkip": {
"paragraphs": [
"Kies hoeveel CLIP-modellagen je wilt overslaan.",
"Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen.",
"Een hogere waarde geeft meestal een minder gedetailleerde afbeelding."
"Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen."
],
"heading": "Overslaan CLIP"
},
"paramModel": {
"heading": "Model",
"paragraphs": [
"Model gebruikt voor de ontruisingsstappen.",
"Verschillende modellen zijn meestal getraind om zich te specialiseren in het maken van bepaalde esthetische resultaten en materiaal."
"Model gebruikt voor de ontruisingsstappen."
]
},
"compositingCoherencePass": {

@ -1155,7 +1155,6 @@
"resetControlImage": "Сбросить контрольное изображение",
"prompt": "Запрос",
"controlnet": "$t(controlnet.controlAdapter_one) №{{number}} $t(common.controlNet)",
"openPoseDescription": "Оценка позы человека с помощью Openpose",
"resizeMode": "Режим изменения размера",
"t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) включен, $t(common.controlNet)s отключен",
"weight": "Вес",
@ -1354,16 +1353,14 @@
"clipSkip": {
"paragraphs": [
"Выберите, сколько слоев модели CLIP нужно пропустить.",
"Некоторые модели работают лучше с определенными настройками пропуска CLIP.",
"Более высокое значение обычно приводит к менее детализированному изображению."
"Некоторые модели работают лучше с определенными настройками пропуска CLIP."
],
"heading": "CLIP пропуск"
},
"paramModel": {
"heading": "Модель",
"paragraphs": [
"Модель, используемая для шагов шумоподавления.",
"Различные модели обычно обучаются, чтобы специализироваться на достижении определенных эстетических результатов и содержания."
"Модель, используемая для шагов шумоподавления."
]
},
"compositingCoherencePass": {

@ -259,7 +259,6 @@
"mediapipeFace": "Mediapipe Yüz",
"megaControl": "Aşırı Yönetim",
"mlsd": "M-LSD",
"openPoseDescription": "Openpose kullanarak poz belirleme",
"setControlImageDimensions": "Yönetim Görseli Boyutlarını En/Boydan Al",
"pidi": "PIDI",
"scribble": "çiziktirme",
@ -273,7 +272,6 @@
"mlsdDescription": "Minimalist Line Segment Detector (Kolay Çizgi Parçası Algılama)",
"normalBae": "Normal BAE",
"normalBaeDescription": "Normal BAE işleme",
"openPose": "Openpose",
"resetControlImage": "Yönetim Görselini Kaldır",
"enableIPAdapter": "IP Aracını Etkinleştir",
"lineart": "Çizim",

@ -1143,7 +1143,6 @@
"balanced": "平衡",
"prompt": "Prompt (提示词控制)",
"depthMidasDescription": "使用 Midas 生成深度图",
"openPoseDescription": "使用 Openpose 进行人体姿态估计",
"resizeMode": "缩放模式",
"weight": "权重",
"selectModel": "选择一个模型",
@ -1207,7 +1206,6 @@
"megaControl": "Mega Control (超级控制)",
"depthZoe": "Depth (Zoe)",
"colorMap": "Color",
"openPose": "Openpose",
"controlAdapter_other": "Control Adapters",
"lineartAnime": "Lineart Anime",
"canny": "Canny",
@ -1446,16 +1444,14 @@
"clipSkip": {
"paragraphs": [
"选择要跳过 CLIP 模型多少层。",
"部分模型跳过特定数值的层时效果会更好。",
"较高的数值通常会导致图像细节更少。"
"部分模型跳过特定数值的层时效果会更好。"
],
"heading": "CLIP 跳过层"
},
"paramModel": {
"heading": "模型",
"paragraphs": [
"用于去噪过程的模型。",
"不同的模型一般会通过接受训练来专门产生特定的美学内容和结果。"
"用于去噪过程的模型。"
]
},
"paramIterations": {

@ -2,7 +2,7 @@ import type { UnknownAction } from '@reduxjs/toolkit';
import { isAnyGraphBuilt } from 'features/nodes/store/actions';
import { nodeTemplatesBuilt } from 'features/nodes/store/nodeTemplatesSlice';
import { cloneDeep } from 'lodash-es';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import type { Graph } from 'services/api/types';
import { socketGeneratorProgress } from 'services/events/actions';

@ -18,7 +18,7 @@ export const actionSanitizer = <A extends UnknownAction>(action: A): A => {
    }
  }

  if (receivedOpenAPISchema.fulfilled.match(action)) {
  if (appInfoApi.endpoints.getOpenAPISchema.matchFulfilled(action)) {
    return {
      ...action,
      payload: '<OpenAPI schema omitted>',
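The hunk above swaps the async-thunk matcher for the RTK Query endpoint matcher while keeping the DevTools payload sanitization. A minimal standalone sketch of that sanitization pattern (the action type string is illustrative, not taken from the diff):

import type { UnknownAction } from '@reduxjs/toolkit';

// Replace an oversized payload with a placeholder so Redux DevTools stays responsive.
const sanitizeLargePayload = <A extends UnknownAction>(action: A): A => {
  if (action.type === 'api/executeQuery/fulfilled' && 'payload' in action) {
    return { ...action, payload: '<large payload omitted>' } as A;
  }
  return action;
};
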
@ -23,6 +23,7 @@ import { addControlNetImageProcessedListener } from './listeners/controlNetImage
import { addEnqueueRequestedCanvasListener } from './listeners/enqueueRequestedCanvas';
import { addEnqueueRequestedLinear } from './listeners/enqueueRequestedLinear';
import { addEnqueueRequestedNodes } from './listeners/enqueueRequestedNodes';
import { addGetOpenAPISchemaListener } from './listeners/getOpenAPISchema';
import {
  addImageAddedToBoardFulfilledListener,
  addImageAddedToBoardRejectedListener,
@ -47,7 +48,6 @@ import { addInitialImageSelectedListener } from './listeners/initialImageSelecte
import { addModelSelectedListener } from './listeners/modelSelected';
import { addModelsLoadedListener } from './listeners/modelsLoaded';
import { addDynamicPromptsListener } from './listeners/promptChanged';
import { addReceivedOpenAPISchemaListener } from './listeners/receivedOpenAPISchema';
import { addSocketConnectedEventListener as addSocketConnectedListener } from './listeners/socketio/socketConnected';
import { addSocketDisconnectedEventListener as addSocketDisconnectedListener } from './listeners/socketio/socketDisconnected';
import { addGeneratorProgressEventListener as addGeneratorProgressListener } from './listeners/socketio/socketGeneratorProgress';
@ -150,7 +150,7 @@ addImageRemovedFromBoardRejectedListener();
addBoardIdSelectedListener();

// Node schemas
addReceivedOpenAPISchemaListener();
addGetOpenAPISchemaListener();

// Workflows
addWorkflowLoadRequestedListener();
@ -3,18 +3,18 @@ import { parseify } from 'common/util/serialize';
import { nodeTemplatesBuilt } from 'features/nodes/store/nodeTemplatesSlice';
import { parseSchema } from 'features/nodes/util/schema/parseSchema';
import { size } from 'lodash-es';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import { appInfoApi } from 'services/api/endpoints/appInfo';

import { startAppListening } from '..';

export const addReceivedOpenAPISchemaListener = () => {
export const addGetOpenAPISchemaListener = () => {
  startAppListening({
    actionCreator: receivedOpenAPISchema.fulfilled,
    matcher: appInfoApi.endpoints.getOpenAPISchema.matchFulfilled,
    effect: (action, { dispatch, getState }) => {
      const log = logger('system');
      const schemaJSON = action.payload;

      log.debug({ schemaJSON }, 'Received OpenAPI schema');
      log.debug({ schemaJSON: parseify(schemaJSON) }, 'Received OpenAPI schema');
      const { nodesAllowlist, nodesDenylist } = getState().config;

      const nodeTemplates = parseSchema(schemaJSON, nodesAllowlist, nodesDenylist);
@ -26,10 +26,14 @@ export const addReceivedOpenAPISchemaListener = () => {
  });

  startAppListening({
    actionCreator: receivedOpenAPISchema.rejected,
    matcher: appInfoApi.endpoints.getOpenAPISchema.matchRejected,
    effect: (action) => {
      const log = logger('system');
      log.error({ error: parseify(action.error) }, 'Problem retrieving OpenAPI Schema');
      // If action.meta.condition === true, the request was canceled/skipped because another request was in flight or
      // the value was already in the cache. We don't want to log these errors.
      if (!action.meta.condition) {
        const log = logger('system');
        log.error({ error: parseify(action.error) }, 'Problem retrieving OpenAPI Schema');
      }
    },
  });
};
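The new rejected-action guard relies on createAsyncThunk's condition semantics, which RTK Query uses internally. A minimal sketch of where meta.condition comes from (the thunk name and URL are assumptions for illustration):

import { createAsyncThunk } from '@reduxjs/toolkit';

const fetchSchema = createAsyncThunk(
  'schema/fetch',
  async () => (await fetch('/openapi.json')).json(),
  {
    // If condition() returns false the run is skipped; with
    // dispatchConditionRejection set, a rejected action still fires and carries
    // meta.condition === true, which is exactly what the listener above filters out.
    condition: () => false,
    dispatchConditionRejection: true,
  }
);
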
@ -1,10 +1,9 @@
import { logger } from 'app/logging/logger';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { isEqual, size } from 'lodash-es';
import { isEqual } from 'lodash-es';
import { atom } from 'nanostores';
import { api } from 'services/api';
import { queueApi, selectQueueStatus } from 'services/api/endpoints/queue';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import { socketConnected } from 'services/events/actions';

import { startAppListening } from '../..';
@ -77,17 +76,4 @@ export const addSocketConnectedEventListener = () => {
      }
    },
  });

  startAppListening({
    actionCreator: socketConnected,
    effect: async (action, { dispatch, getState }) => {
      const { nodeTemplates, config } = getState();
      // We only want to re-fetch the schema if we don't have any node templates
      if (!size(nodeTemplates.templates) && !config.disabledTabs.includes('nodes')) {
        // This request is a createAsyncThunk - resetting API state as in the above listener
        // will not trigger this request, so we need to manually do it.
        dispatch(receivedOpenAPISchema());
      }
    },
  });
};

@ -6,7 +6,6 @@ import { WorkflowMigrationError, WorkflowVersionError } from 'features/nodes/typ
import { validateWorkflow } from 'features/nodes/util/workflow/validateWorkflow';
import { addToast } from 'features/system/store/systemSlice';
import { makeToast } from 'features/system/util/makeToast';
import { setActiveTab } from 'features/ui/store/uiSlice';
import { t } from 'i18next';
import { z } from 'zod';
import { fromZodError } from 'zod-validation-error';
@ -53,7 +52,6 @@ export const addWorkflowLoadRequestedListener = () => {
      });
    }

    dispatch(setActiveTab('nodes'));
    requestAnimationFrame(() => {
      $flow.get()?.fitView();
    });
@ -13,28 +13,46 @@ export type Feature =
  | 'compositingCoherenceSteps'
  | 'compositingStrength'
  | 'compositingMaskAdjustments'
  | 'controlNet'
  | 'controlNetBeginEnd'
  | 'controlNetControlMode'
  | 'controlNetProcessor'
  | 'controlNetResizeMode'
  | 'controlNet'
  | 'controlNetWeight'
  | 'dynamicPrompts'
  | 'dynamicPromptsMaxPrompts'
  | 'dynamicPromptsSeedBehaviour'
  | 'imageFit'
  | 'infillMethod'
  | 'lora'
  | 'loraWeight'
  | 'noiseUseCPU'
  | 'paramAspect'
  | 'paramCFGScale'
  | 'paramCFGRescaleMultiplier'
  | 'paramDenoisingStrength'
  | 'paramHeight'
  | 'paramHrf'
  | 'paramIterations'
  | 'paramModel'
  | 'paramRatio'
  | 'paramSeed'
  | 'paramSteps'
  | 'paramUpscaleMethod'
  | 'paramVAE'
  | 'paramVAEPrecision'
  | 'scaleBeforeProcessing';
  | 'paramWidth'
  | 'patchmatchDownScaleSize'
  | 'refinerModel'
  | 'refinerNegativeAestheticScore'
  | 'refinerPositiveAestheticScore'
  | 'refinerScheduler'
  | 'refinerStart'
  | 'refinerSteps'
  | 'refinerCfgScale'
  | 'scaleBeforeProcessing'
  | 'seamlessTilingXAxis'
  | 'seamlessTilingYAxis';

export type PopoverData = PopoverProps & {
  image?: string;
@ -46,21 +64,57 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
  paramNegativeConditioning: {
    placement: 'right',
  },
  clipSkip: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
  },
  controlNet: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000105880',
  },
  controlNetBeginEnd: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178148',
  },
  controlNetWeight: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178148',
  },
  lora: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000159072',
  },
  loraWeight: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000159072-concepts-low-rank-adaptations-loras-',
  },
  compositingBlur: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
  },
  compositingBlurMethod: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
  },
  compositingCoherenceMode: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000158838',
    href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
  },
  compositingCoherenceSteps: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
  },
  compositingStrength: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
  },
  infillMethod: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000158841',
    href: 'https://support.invoke.ai/support/solutions/articles/151000158841-infill-and-scaling',
  },
  scaleBeforeProcessing: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000158841',
  },
  paramCFGScale: {
    href: 'https://www.youtube.com/watch?v=1OeHEJrsTpI',
  },
  paramCFGRescaleMultiplier: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
  },
  paramDenoisingStrength: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000094998-image-to-image',
  },
  paramHrf: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000096700-how-can-i-get-larger-images-what-does-upscaling-do-',
  },
  paramIterations: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000159073',
  },
@ -70,7 +124,10 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
  },
  paramScheduler: {
    placement: 'right',
    href: 'https://support.invoke.ai/support/solutions/articles/151000159073',
    href: 'https://www.youtube.com/watch?v=1OeHEJrsTpI',
  },
  paramSeed: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000096684-what-is-a-seed-how-do-i-use-it-to-recreate-the-same-image-',
  },
  paramModel: {
    placement: 'right',
@ -81,15 +138,53 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
  },
  controlNetControlMode: {
    placement: 'right',
    href: 'https://support.invoke.ai/support/solutions/articles/151000178148',
  },
  controlNetProcessor: {
    placement: 'right',
    href: 'https://support.invoke.ai/support/solutions/articles/151000105880-using-controlnet',
  },
  controlNetResizeMode: {
    placement: 'right',
    href: 'https://support.invoke.ai/support/solutions/articles/151000178148',
  },
  paramVAE: {
    placement: 'right',
    href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
  },
  paramVAEPrecision: {
    placement: 'right',
    href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
  },
  paramUpscaleMethod: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000096700-how-can-i-get-larger-images-what-does-upscaling-do-',
  },
  refinerModel: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
  },
  refinerNegativeAestheticScore: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
  },
  refinerPositiveAestheticScore: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
  },
  refinerScheduler: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
  },
  refinerStart: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
  },
  refinerSteps: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
  },
  refinerCfgScale: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner',
  },
  seamlessTilingXAxis: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
  },
  seamlessTilingYAxis: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
  },
} as const;
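For reference, a minimal sketch of how a component might resolve these entries by feature key (the consumer and its import path are assumptions, not taken from the diff); entries are optional, so absent features fall back to default popover props:

import { POPOVER_DATA, type Feature, type PopoverData } from './constants'; // path assumed

const getPopoverData = (feature: Feature): PopoverData | undefined => POPOVER_DATA[feature];
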
@ -1,22 +1,28 @@
import { useStore } from '@nanostores/react';
import { useAppToaster } from 'app/components/Toaster';
import { $authToken } from 'app/store/nanostores/authToken';
import { useAppDispatch } from 'app/store/storeHooks';
import { imageDownloaded } from 'features/gallery/store/actions';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import { useImageUrlToBlob } from './useImageUrlToBlob';

export const useDownloadImage = () => {
  const toaster = useAppToaster();
  const { t } = useTranslation();
  const imageUrlToBlob = useImageUrlToBlob();
  const dispatch = useAppDispatch();
  const authToken = useStore($authToken);

  const downloadImage = useCallback(
    async (image_url: string, image_name: string) => {
      try {
        const blob = await imageUrlToBlob(image_url);

        const requestOpts = authToken
          ? {
              headers: {
                Authorization: `Bearer ${authToken}`,
              },
            }
          : {};
        const blob = await fetch(image_url, requestOpts).then((resp) => resp.blob());
        if (!blob) {
          throw new Error('Unable to create Blob');
        }
@ -40,7 +46,7 @@ export const useDownloadImage = () => {
        });
      }
    },
    [t, toaster, imageUrlToBlob, dispatch]
    [t, toaster, dispatch, authToken]
  );

  return { downloadImage };
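The hook now fetches the blob directly so an Authorization header can be attached when an auth token is present. The actual anchor-click download step falls in the elided part of the hunk; a standard sketch of that step for context (not copied from the repo):

// Typical browser download helper: create an object URL, click a temporary
// anchor, then revoke the URL to free memory.
const saveBlob = (blob: Blob, imageName: string) => {
  const url = URL.createObjectURL(blob);
  const a = document.createElement('a');
  a.href = url;
  a.download = imageName;
  document.body.appendChild(a);
  a.click();
  document.body.removeChild(a);
  URL.revokeObjectURL(url);
};
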
@ -5,6 +5,7 @@ import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice';
import { useControlAdapterControlImage } from 'features/controlAdapters/hooks/useControlAdapterControlImage';
import { useControlAdapterProcessedControlImage } from 'features/controlAdapters/hooks/useControlAdapterProcessedControlImage';
@ -91,19 +92,14 @@ const ControlAdapterImagePreview = ({ isSmall, id }: Props) => {
      return;
    }

    const width = roundToMultiple(controlImage.width, 8);
    const height = roundToMultiple(controlImage.height, 8);

    if (activeTabName === 'unifiedCanvas') {
      dispatch(
        setBoundingBoxDimensions(
          {
            width: controlImage.width,
            height: controlImage.height,
          },
          optimalDimension
        )
      );
      dispatch(setBoundingBoxDimensions({ width, height }, optimalDimension));
    } else {
      dispatch(widthChanged(controlImage.width));
      dispatch(heightChanged(controlImage.height));
      dispatch(widthChanged(width));
      dispatch(heightChanged(height));
    }
  }, [controlImage, activeTabName, dispatch, optimalDimension]);
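The preview now snaps control-image dimensions to multiples of 8 before dispatching them, matching the constraint stated in the locale strings above ("Must be a multiple of 8"). A plausible shape for the helper, assumed rather than copied from common/util/roundDownToMultiple:

// Assumed implementation: round to the nearest multiple (e.g. 8 for SD latents).
const roundToMultiple = (value: number, multiple: number): number =>
  Math.round(value / multiple) * multiple;

// e.g. roundToMultiple(513, 8) === 512; roundToMultiple(517, 8) === 520
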
@ -6,6 +6,7 @@ import CannyProcessor from './processors/CannyProcessor';
|
||||
import ColorMapProcessor from './processors/ColorMapProcessor';
|
||||
import ContentShuffleProcessor from './processors/ContentShuffleProcessor';
|
||||
import DepthAnyThingProcessor from './processors/DepthAnyThingProcessor';
|
||||
import DWOpenposeProcessor from './processors/DWOpenposeProcessor';
|
||||
import HedProcessor from './processors/HedProcessor';
|
||||
import LineartAnimeProcessor from './processors/LineartAnimeProcessor';
|
||||
import LineartProcessor from './processors/LineartProcessor';
|
||||
@ -13,7 +14,6 @@ import MediapipeFaceProcessor from './processors/MediapipeFaceProcessor';
|
||||
import MidasDepthProcessor from './processors/MidasDepthProcessor';
|
||||
import MlsdImageProcessor from './processors/MlsdImageProcessor';
|
||||
import NormalBaeProcessor from './processors/NormalBaeProcessor';
|
||||
import OpenposeProcessor from './processors/OpenposeProcessor';
|
||||
import PidiProcessor from './processors/PidiProcessor';
|
||||
import ZoeDepthProcessor from './processors/ZoeDepthProcessor';
|
||||
|
||||
@ -73,8 +73,8 @@ const ControlAdapterProcessorComponent = ({ id }: Props) => {
|
||||
return <NormalBaeProcessor controlNetId={id} processorNode={processorNode} isEnabled={isEnabled} />;
|
||||
}
|
||||
|
||||
if (processorNode.type === 'openpose_image_processor') {
|
||||
return <OpenposeProcessor controlNetId={id} processorNode={processorNode} isEnabled={isEnabled} />;
|
||||
if (processorNode.type === 'dw_openpose_image_processor') {
|
||||
return <DWOpenposeProcessor controlNetId={id} processorNode={processorNode} isEnabled={isEnabled} />;
|
||||
}
|
||||
|
||||
if (processorNode.type === 'pidi_image_processor') {
|
||||
|
@ -1,5 +1,6 @@
import { CompositeRangeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useControlAdapterBeginEndStepPct } from 'features/controlAdapters/hooks/useControlAdapterBeginEndStepPct';
import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled';
import {
@ -61,12 +62,10 @@ export const ParamControlAdapterBeginEnd = memo(({ id }: Props) => {
  }

  return (
    <FormControl
      isDisabled={!isEnabled}
      // feature="controlNetBeginEnd"
      orientation="vertical"
    >
      <FormLabel>{t('controlnet.beginEndStepPercent')}</FormLabel>
    <FormControl isDisabled={!isEnabled} orientation="vertical">
      <InformationalPopover feature="controlNetBeginEnd">
        <FormLabel>{t('controlnet.beginEndStepPercent')}</FormLabel>
      </InformationalPopover>
      <CompositeRangeSlider
        aria-label={ariaLabel}
        value={value}
@ -2,6 +2,7 @@ import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled';
import { useControlAdapterProcessorNode } from 'features/controlAdapters/hooks/useControlAdapterProcessorNode';
import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
@ -58,7 +59,9 @@ const ParamControlAdapterProcessorSelect = ({ id }: Props) => {
  }
  return (
    <FormControl isDisabled={!isEnabled}>
      <FormLabel>{t('controlnet.processor')}</FormLabel>
      <InformationalPopover feature="controlNetProcessor">
        <FormLabel>{t('controlnet.processor')}</FormLabel>
      </InformationalPopover>
      <Combobox value={value} options={options} onChange={onChange} />
    </FormControl>
  );
@ -0,0 +1,92 @@
|
||||
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
|
||||
import { useProcessorNodeChanged } from 'features/controlAdapters/components/hooks/useProcessorNodeChanged';
|
||||
import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
|
||||
import type { RequiredDWOpenposeImageProcessorInvocation } from 'features/controlAdapters/store/types';
|
||||
import type { ChangeEvent } from 'react';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
import ProcessorWrapper from './common/ProcessorWrapper';
|
||||
|
||||
const DEFAULTS = CONTROLNET_PROCESSORS.dw_openpose_image_processor
|
||||
.default as RequiredDWOpenposeImageProcessorInvocation;
|
||||
|
||||
type Props = {
|
||||
controlNetId: string;
|
||||
processorNode: RequiredDWOpenposeImageProcessorInvocation;
|
||||
isEnabled: boolean;
|
||||
};
|
||||
|
||||
const DWOpenposeProcessor = (props: Props) => {
|
||||
const { controlNetId, processorNode, isEnabled } = props;
|
||||
const { image_resolution, draw_body, draw_face, draw_hands } = processorNode;
|
||||
const processorChanged = useProcessorNodeChanged();
|
||||
const { t } = useTranslation();
|
||||
|
||||
const handleDrawBodyChanged = useCallback(
|
||||
(e: ChangeEvent<HTMLInputElement>) => {
|
||||
processorChanged(controlNetId, { draw_body: e.target.checked });
|
||||
},
|
||||
[controlNetId, processorChanged]
|
||||
);
|
||||
|
||||
const handleDrawFaceChanged = useCallback(
|
||||
(e: ChangeEvent<HTMLInputElement>) => {
|
||||
processorChanged(controlNetId, { draw_face: e.target.checked });
|
||||
},
|
||||
[controlNetId, processorChanged]
|
||||
);
|
||||
|
||||
const handleDrawHandsChanged = useCallback(
|
||||
(e: ChangeEvent<HTMLInputElement>) => {
|
||||
processorChanged(controlNetId, { draw_hands: e.target.checked });
|
||||
},
|
||||
[controlNetId, processorChanged]
|
||||
);
|
||||
|
||||
const handleImageResolutionChanged = useCallback(
|
||||
(v: number) => {
|
||||
processorChanged(controlNetId, { image_resolution: v });
|
||||
},
|
||||
[controlNetId, processorChanged]
|
||||
);
|
||||
|
||||
return (
|
||||
<ProcessorWrapper>
|
||||
<Flex sx={{ flexDir: 'row', gap: 6 }}>
|
||||
<FormControl isDisabled={!isEnabled} w="max-content">
|
||||
<FormLabel>{t('controlnet.body')}</FormLabel>
|
||||
<Switch defaultChecked={DEFAULTS.draw_body} isChecked={draw_body} onChange={handleDrawBodyChanged} />
|
||||
</FormControl>
|
||||
<FormControl isDisabled={!isEnabled} w="max-content">
|
||||
<FormLabel>{t('controlnet.face')}</FormLabel>
|
||||
<Switch defaultChecked={DEFAULTS.draw_face} isChecked={draw_face} onChange={handleDrawFaceChanged} />
|
||||
</FormControl>
|
||||
<FormControl isDisabled={!isEnabled} w="max-content">
|
||||
<FormLabel>{t('controlnet.hands')}</FormLabel>
|
||||
<Switch defaultChecked={DEFAULTS.draw_hands} isChecked={draw_hands} onChange={handleDrawHandsChanged} />
|
||||
</FormControl>
|
||||
</Flex>
|
||||
<FormControl isDisabled={!isEnabled}>
|
||||
<FormLabel>{t('controlnet.imageResolution')}</FormLabel>
|
||||
<CompositeSlider
|
||||
value={image_resolution}
|
||||
onChange={handleImageResolutionChanged}
|
||||
defaultValue={DEFAULTS.image_resolution}
|
||||
min={0}
|
||||
max={4096}
|
||||
marks
|
||||
/>
|
||||
<CompositeNumberInput
|
||||
value={image_resolution}
|
||||
onChange={handleImageResolutionChanged}
|
||||
defaultValue={DEFAULTS.image_resolution}
|
||||
min={0}
|
||||
max={4096}
|
||||
/>
|
||||
</FormControl>
|
||||
</ProcessorWrapper>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(DWOpenposeProcessor);
|
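A minimal usage sketch for the new component. The import path and the wrapping component are guesses from the feature layout visible in this diff, and the processor node literal simply mirrors the defaults added to constants.ts below; treat it as illustration, not the repo's actual wiring.

import DWOpenposeProcessor from 'features/controlAdapters/components/processors/DWOpenposeProcessor';
import type { RequiredDWOpenposeImageProcessorInvocation } from 'features/controlAdapters/store/types';

// Hypothetical processor node, mirroring the dw_openpose defaults below.
const processorNode = {
  id: 'dw_openpose_image_processor',
  type: 'dw_openpose_image_processor',
  image_resolution: 512,
  draw_body: true,
  draw_face: false,
  draw_hands: false,
} as RequiredDWOpenposeImageProcessorInvocation;

export const Example = () => (
  <DWOpenposeProcessor controlNetId="my-controlnet" processorNode={processorNode} isEnabled />
);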
@@ -1,92 +0,0 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import { useProcessorNodeChanged } from 'features/controlAdapters/components/hooks/useProcessorNodeChanged';
import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
import type { RequiredOpenposeImageProcessorInvocation } from 'features/controlAdapters/store/types';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './common/ProcessorWrapper';

const DEFAULTS = CONTROLNET_PROCESSORS.openpose_image_processor.default as RequiredOpenposeImageProcessorInvocation;

type Props = {
  controlNetId: string;
  processorNode: RequiredOpenposeImageProcessorInvocation;
  isEnabled: boolean;
};

const OpenposeProcessor = (props: Props) => {
  const { controlNetId, processorNode, isEnabled } = props;
  const { image_resolution, detect_resolution, hand_and_face } = processorNode;
  const processorChanged = useProcessorNodeChanged();
  const { t } = useTranslation();

  const handleDetectResolutionChanged = useCallback(
    (v: number) => {
      processorChanged(controlNetId, { detect_resolution: v });
    },
    [controlNetId, processorChanged]
  );

  const handleImageResolutionChanged = useCallback(
    (v: number) => {
      processorChanged(controlNetId, { image_resolution: v });
    },
    [controlNetId, processorChanged]
  );

  const handleHandAndFaceChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      processorChanged(controlNetId, { hand_and_face: e.target.checked });
    },
    [controlNetId, processorChanged]
  );

  return (
    <ProcessorWrapper>
      <FormControl isDisabled={!isEnabled}>
        <FormLabel>{t('controlnet.detectResolution')}</FormLabel>
        <CompositeSlider
          value={detect_resolution}
          onChange={handleDetectResolutionChanged}
          defaultValue={DEFAULTS.detect_resolution}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput
          value={detect_resolution}
          onChange={handleDetectResolutionChanged}
          defaultValue={DEFAULTS.detect_resolution}
          min={0}
          max={4096}
        />
      </FormControl>
      <FormControl isDisabled={!isEnabled}>
        <FormLabel>{t('controlnet.imageResolution')}</FormLabel>
        <CompositeSlider
          value={image_resolution}
          onChange={handleImageResolutionChanged}
          defaultValue={DEFAULTS.image_resolution}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput
          value={image_resolution}
          onChange={handleImageResolutionChanged}
          defaultValue={DEFAULTS.image_resolution}
          min={0}
          max={4096}
        />
      </FormControl>
      <FormControl isDisabled={!isEnabled}>
        <FormLabel>{t('controlnet.handAndFace')}</FormLabel>
        <Switch isChecked={hand_and_face} onChange={handleHandAndFaceChanged} />
      </FormControl>
    </ProcessorWrapper>
  );
};

export default memo(OpenposeProcessor);

@@ -205,20 +205,21 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
      image_resolution: 512,
    },
  },
  openpose_image_processor: {
    type: 'openpose_image_processor',
  dw_openpose_image_processor: {
    type: 'dw_openpose_image_processor',
    get label() {
      return i18n.t('controlnet.openPose');
      return i18n.t('controlnet.dwOpenpose');
    },
    get description() {
      return i18n.t('controlnet.openPoseDescription');
      return i18n.t('controlnet.dwOpenposeDescription');
    },
    default: {
      id: 'openpose_image_processor',
      type: 'openpose_image_processor',
      detect_resolution: 512,
      id: 'dw_openpose_image_processor',
      type: 'dw_openpose_image_processor',
      image_resolution: 512,
      hand_and_face: false,
      draw_body: true,
      draw_face: false,
      draw_hands: false,
    },
  },
  pidi_image_processor: {

@@ -266,7 +267,7 @@ export const CONTROLNET_MODEL_DEFAULT_PROCESSORS: {
  lineart_anime: 'lineart_anime_image_processor',
  softedge: 'hed_image_processor',
  shuffle: 'content_shuffle_image_processor',
  openpose: 'openpose_image_processor',
  openpose: 'dw_openpose_image_processor',
  mediapipe: 'mediapipe_face_processor',
  pidi: 'pidi_image_processor',
  zoe: 'zoe_depth_image_processor',
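The mapping table above is what makes DW Openpose the new default for openpose-flavoured ControlNet models. A hedged sketch of how such a table could be consulted; the helper is hypothetical, only the table itself appears in the diff:

import { CONTROLNET_MODEL_DEFAULT_PROCESSORS } from 'features/controlAdapters/store/constants';

// Hypothetical helper: pick a default processor for a model by substring match.
const guessProcessor = (modelName: string): string | undefined => {
  const table = CONTROLNET_MODEL_DEFAULT_PROCESSORS as Record<string, string>;
  const key = Object.keys(table).find((k) => modelName.includes(k));
  return key ? table[key] : undefined;
};

guessProcessor('controlnet-openpose-sd15'); // 'dw_openpose_image_processor' after this change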
@@ -11,6 +11,7 @@ import type {
  ColorMapImageProcessorInvocation,
  ContentShuffleImageProcessorInvocation,
  DepthAnythingImageProcessorInvocation,
  DWOpenposeImageProcessorInvocation,
  HedImageProcessorInvocation,
  LineartAnimeImageProcessorInvocation,
  LineartImageProcessorInvocation,

@@ -18,7 +19,6 @@ import type {
  MidasDepthImageProcessorInvocation,
  MlsdImageProcessorInvocation,
  NormalbaeImageProcessorInvocation,
  OpenposeImageProcessorInvocation,
  PidiImageProcessorInvocation,
  ZoeDepthImageProcessorInvocation,
} from 'services/api/types';

@@ -40,7 +40,7 @@ export type ControlAdapterProcessorNode =
  | MidasDepthImageProcessorInvocation
  | MlsdImageProcessorInvocation
  | NormalbaeImageProcessorInvocation
  | OpenposeImageProcessorInvocation
  | DWOpenposeImageProcessorInvocation
  | PidiImageProcessorInvocation
  | ZoeDepthImageProcessorInvocation;

@@ -143,11 +143,11 @@ export type RequiredNormalbaeImageProcessorInvocation = O.Required<
>;

/**
 * The Openpose processor node, with parameters flagged as required
 * The DW Openpose processor node, with parameters flagged as required
 */
export type RequiredOpenposeImageProcessorInvocation = O.Required<
  OpenposeImageProcessorInvocation,
  'type' | 'detect_resolution' | 'image_resolution' | 'hand_and_face'
export type RequiredDWOpenposeImageProcessorInvocation = O.Required<
  DWOpenposeImageProcessorInvocation,
  'type' | 'image_resolution' | 'draw_body' | 'draw_face' | 'draw_hands'
>;

/**

@@ -179,7 +179,7 @@ export type RequiredControlAdapterProcessorNode =
    | RequiredMidasDepthImageProcessorInvocation
    | RequiredMlsdImageProcessorInvocation
    | RequiredNormalbaeImageProcessorInvocation
    | RequiredOpenposeImageProcessorInvocation
    | RequiredDWOpenposeImageProcessorInvocation
    | RequiredPidiImageProcessorInvocation
    | RequiredZoeDepthImageProcessorInvocation,
  'id'

@@ -299,10 +299,10 @@ export const isNormalbaeImageProcessorInvocation = (obj: unknown): obj is Normal
};

/**
 * Type guard for OpenposeImageProcessorInvocation
 * Type guard for DWOpenposeImageProcessorInvocation
 */
export const isOpenposeImageProcessorInvocation = (obj: unknown): obj is OpenposeImageProcessorInvocation => {
  if (isObject(obj) && 'type' in obj && obj.type === 'openpose_image_processor') {
export const isDWOpenposeImageProcessorInvocation = (obj: unknown): obj is DWOpenposeImageProcessorInvocation => {
  if (isObject(obj) && 'type' in obj && obj.type === 'dw_openpose_image_processor') {
    return true;
  }
  return false;
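A small sketch of the new type guard narrowing the processor union; hedged, since only the guard itself appears in the diff and the optionality of the draw_* fields is assumed:

import type { ControlAdapterProcessorNode } from 'features/controlAdapters/store/types';
import { isDWOpenposeImageProcessorInvocation } from 'features/controlAdapters/store/types';

const describe = (node: ControlAdapterProcessorNode): string => {
  if (isDWOpenposeImageProcessorInvocation(node)) {
    // Narrowed: draw_body / draw_face / draw_hands are now accessible.
    return `dw_openpose (body=${node.draw_body ?? true})`;
  }
  return node.type;
};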
@@ -0,0 +1,23 @@
import type { DragEndEvent } from '@dnd-kit/core';
import { SortableContext, verticalListSortingStrategy } from '@dnd-kit/sortable';
import type { PropsWithChildren } from 'react';
import { memo } from 'react';

import { DndContextTypesafe } from './DndContextTypesafe';

type Props = PropsWithChildren & {
  items: string[];
  onDragEnd(event: DragEndEvent): void;
};

const DndSortable = (props: Props) => {
  return (
    <DndContextTypesafe onDragEnd={props.onDragEnd}>
      <SortableContext items={props.items} strategy={verticalListSortingStrategy}>
        {props.children}
      </SortableContext>
    </DndContextTypesafe>
  );
};

export default memo(DndSortable);
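A hedged usage sketch for the wrapper (the import path matches the one used by WorkflowLinearTab later in this diff). Real item components would call useSortable() themselves, as LinearViewField does below; plain divs stand in here for brevity:

import type { DragEndEvent } from '@dnd-kit/core';
import DndSortable from 'features/dnd/components/DndSortable';

const ids = ['node1.field_a', 'node2.field_b'];

const Example = () => {
  const onDragEnd = (event: DragEndEvent) => {
    console.log(event.active.id, '->', event.over?.id);
  };
  return (
    <DndSortable items={ids} onDragEnd={onDragEnd}>
      {ids.map((id) => (
        <div key={id}>{id}</div>
      ))}
    </DndSortable>
  );
};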
@@ -1,6 +1,7 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { setHrfMethod } from 'features/hrf/store/hrfSlice';
import { isParameterHRFMethod } from 'features/parameters/types/parameterSchemas';
import { memo, useCallback, useMemo } from 'react';

@@ -30,7 +31,9 @@ const ParamHrfMethodSelect = () => {

  return (
    <FormControl>
      <FormLabel>{t('hrf.upscaleMethod')}</FormLabel>
      <InformationalPopover feature="paramUpscaleMethod">
        <FormLabel>{t('hrf.upscaleMethod')}</FormLabel>
      </InformationalPopover>
      <Combobox value={value} options={options} onChange={onChange} />
    </FormControl>
  );

@@ -1,5 +1,6 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { setHrfStrength } from 'features/hrf/store/hrfSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

@@ -25,7 +26,9 @@ const ParamHrfStrength = () => {

  return (
    <FormControl>
      <FormLabel>{t('parameters.denoisingStrength')}</FormLabel>
      <InformationalPopover feature="paramDenoisingStrength">
        <FormLabel>{`${t('parameters.denoisingStrength')}`}</FormLabel>
      </InformationalPopover>
      <CompositeSlider
        min={sliderMin}
        max={sliderMax}

@@ -1,5 +1,6 @@
import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { setHrfEnabled } from 'features/hrf/store/hrfSlice';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';

@@ -18,7 +19,9 @@ const ParamHrfToggle = () => {

  return (
    <FormControl w="full">
      <FormLabel flexGrow={1}>{t('hrf.enableHrf')}</FormLabel>
      <InformationalPopover feature="paramHrf">
        <FormLabel flexGrow={1}>{t('hrf.enableHrf')}</FormLabel>
      </InformationalPopover>
      <Switch isChecked={hrfEnabled} onChange={handleHrfEnabled} />
    </FormControl>
  );

@@ -10,6 +10,7 @@ import {
  Text,
} from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { LoRA } from 'features/lora/store/loraSlice';
import { loraIsEnabledChanged, loraRemoved, loraWeightChanged } from 'features/lora/store/loraSlice';
import { memo, useCallback } from 'react';

@@ -57,29 +58,31 @@ export const LoRACard = memo((props: LoRACardProps) => {
          </Flex>
        </Flex>
      </CardHeader>
      <CardBody>
        <CompositeSlider
          value={lora.weight}
          onChange={handleChange}
          min={-1}
          max={2}
          step={0.01}
          marks={marks}
          defaultValue={0.75}
          isDisabled={!lora.isEnabled}
        />
        <CompositeNumberInput
          value={lora.weight}
          onChange={handleChange}
          min={-5}
          max={5}
          step={0.01}
          w={20}
          flexShrink={0}
          defaultValue={0.75}
          isDisabled={!lora.isEnabled}
        />
      </CardBody>
      <InformationalPopover feature="loraWeight">
        <CardBody>
          <CompositeSlider
            value={lora.weight}
            onChange={handleChange}
            min={-1}
            max={2}
            step={0.01}
            marks={marks}
            defaultValue={0.75}
            isDisabled={!lora.isEnabled}
          />
          <CompositeNumberInput
            value={lora.weight}
            onChange={handleChange}
            min={-5}
            max={5}
            step={0.01}
            w={20}
            flexShrink={0}
            defaultValue={0.75}
            isDisabled={!lora.isEnabled}
          />
        </CardBody>
      </InformationalPopover>
    </Card>
  );
});

@@ -2,6 +2,7 @@ import type { ChakraProps } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { loraAdded, selectLoraSlice } from 'features/lora/store/loraSlice';
import { memo, useCallback, useMemo } from 'react';

@@ -57,7 +58,9 @@ const LoRASelect = () => {

  return (
    <FormControl isDisabled={!options.length}>
      <FormLabel>{t('models.lora')} </FormLabel>
      <InformationalPopover feature="lora">
        <FormLabel>{t('models.lora')} </FormLabel>
      </InformationalPopover>
      <Combobox
        placeholder={placeholder}
        value={null}

@@ -35,7 +35,7 @@ export const loraSlice = createSlice({
    },
    loraRecalled: (state, action: PayloadAction<LoRAModelConfigEntity & { weight: number }>) => {
      const { model_name, id, base_model, weight } = action.payload;
      state.loras[id] = { id, model_name, base_model, weight };
      state.loras[id] = { id, model_name, base_model, weight, isEnabled: true };
    },
    loraRemoved: (state, action: PayloadAction<string>) => {
      const id = action.payload;
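The loraRecalled fix matters because LoRA entries carry an isEnabled flag that the UI (e.g. LoRACard above) reads; a recalled LoRA without it would render as disabled. A hedged sketch of the reducer behavior, with the payload trimmed to the fields the reducer reads and cast to keep the sketch compact:

import { loraRecalled, loraSlice } from 'features/lora/store/loraSlice';

const recalled = loraRecalled({
  id: 'lora1',                 // hypothetical values for illustration
  model_name: 'detail-tweaker',
  base_model: 'sd-1',
  weight: 0.75,
} as Parameters<typeof loraRecalled>[0]);

const state = loraSlice.reducer(undefined, recalled);
console.log(state.loras['lora1']?.isEnabled); // true after this fix; previously undefined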
@@ -1,7 +1,6 @@
import 'reactflow/dist/style.css';

import { Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import TopPanel from 'features/nodes/components/flow/panels/TopPanel/TopPanel';
import { SaveWorkflowAsDialog } from 'features/workflowLibrary/components/SaveWorkflowAsDialog/SaveWorkflowAsDialog';

@@ -11,6 +10,7 @@ import type { CSSProperties } from 'react';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { MdDeviceHub } from 'react-icons/md';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';

import AddNodePopover from './flow/AddNodePopover/AddNodePopover';
import { Flow } from './flow/Flow';

@@ -40,7 +40,7 @@ const exit: AnimationProps['exit'] = {
};

const NodeEditor = () => {
  const isReady = useAppSelector((s) => s.nodes.isReady);
  const { data, isLoading } = useGetOpenAPISchemaQuery();
  const { t } = useTranslation();
  return (
    <Flex

@@ -53,7 +53,7 @@ const NodeEditor = () => {
      justifyContent="center"
    >
      <AnimatePresence>
        {isReady && (
        {data && (
          <motion.div initial={initial} animate={animate} exit={exit} style={isReadyMotionStyles}>
            <Flow />
            <AddNodePopover />

@@ -65,7 +65,7 @@ const NodeEditor = () => {
        )}
      </AnimatePresence>
      <AnimatePresence>
        {!isReady && (
        {isLoading && (
          <motion.div initial={initial} animate={animate} exit={exit} style={notIsReadyMotionStyles}>
            <Flex
              layerStyle="first"

@@ -1,84 +0,0 @@
import type { ContextMenuProps } from '@invoke-ai/ui-library';
import { ContextMenu, MenuGroup, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useFieldInputKind } from 'features/nodes/hooks/useFieldInputKind';
import { useFieldLabel } from 'features/nodes/hooks/useFieldLabel';
import { useFieldTemplateTitle } from 'features/nodes/hooks/useFieldTemplateTitle';
import {
  selectWorkflowSlice,
  workflowExposedFieldAdded,
  workflowExposedFieldRemoved,
} from 'features/nodes/store/workflowSlice';
import type { ReactNode } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiMinusBold, PiPlusBold } from 'react-icons/pi';

type Props = {
  nodeId: string;
  fieldName: string;
  kind: 'input' | 'output';
  children: ContextMenuProps<HTMLDivElement>['children'];
};

const FieldContextMenu = ({ nodeId, fieldName, kind, children }: Props) => {
  const dispatch = useAppDispatch();
  const label = useFieldLabel(nodeId, fieldName);
  const fieldTemplateTitle = useFieldTemplateTitle(nodeId, fieldName, kind);
  const input = useFieldInputKind(nodeId, fieldName);
  const { t } = useTranslation();

  const selectIsExposed = useMemo(
    () =>
      createSelector(selectWorkflowSlice, (workflow) => {
        return Boolean(workflow.exposedFields.find((f) => f.nodeId === nodeId && f.fieldName === fieldName));
      }),
    [fieldName, nodeId]
  );

  const mayExpose = useMemo(() => input && ['any', 'direct'].includes(input), [input]);

  const isExposed = useAppSelector(selectIsExposed);

  const handleExposeField = useCallback(() => {
    dispatch(workflowExposedFieldAdded({ nodeId, fieldName }));
  }, [dispatch, fieldName, nodeId]);

  const handleUnexposeField = useCallback(() => {
    dispatch(workflowExposedFieldRemoved({ nodeId, fieldName }));
  }, [dispatch, fieldName, nodeId]);

  const menuItems = useMemo(() => {
    const menuItems: ReactNode[] = [];
    if (mayExpose && !isExposed) {
      menuItems.push(
        <MenuItem key={`${nodeId}.${fieldName}.expose-field`} icon={<PiPlusBold />} onClick={handleExposeField}>
          {t('nodes.addLinearView')}
        </MenuItem>
      );
    }
    if (mayExpose && isExposed) {
      menuItems.push(
        <MenuItem key={`${nodeId}.${fieldName}.unexpose-field`} icon={<PiMinusBold />} onClick={handleUnexposeField}>
          {t('nodes.removeLinearView')}
        </MenuItem>
      );
    }
    return menuItems;
  }, [fieldName, handleExposeField, handleUnexposeField, isExposed, mayExpose, nodeId, t]);

  const renderMenuFunc = useCallback(
    () =>
      !menuItems.length ? null : (
        <MenuList visibility="visible">
          <MenuGroup title={label || fieldTemplateTitle || t('nodes.unknownField')}>{menuItems}</MenuGroup>
        </MenuList>
      ),
    [fieldTemplateTitle, label, menuItems, t]
  );

  return <ContextMenu renderMenu={renderMenuFunc}>{children}</ContextMenu>;
};

export default memo(FieldContextMenu);

@@ -0,0 +1,68 @@
import { IconButton } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useFieldValue } from 'features/nodes/hooks/useFieldValue';
import {
  selectWorkflowSlice,
  workflowExposedFieldAdded,
  workflowExposedFieldRemoved,
} from 'features/nodes/store/workflowSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiMinusBold, PiPlusBold } from 'react-icons/pi';

type Props = {
  nodeId: string;
  fieldName: string;
};

const FieldLinearViewToggle = ({ nodeId, fieldName }: Props) => {
  const dispatch = useAppDispatch();
  const { t } = useTranslation();
  const value = useFieldValue(nodeId, fieldName);
  const selectIsExposed = useMemo(
    () =>
      createSelector(selectWorkflowSlice, (workflow) => {
        return Boolean(workflow.exposedFields.find((f) => f.nodeId === nodeId && f.fieldName === fieldName));
      }),
    [fieldName, nodeId]
  );

  const isExposed = useAppSelector(selectIsExposed);

  const handleExposeField = useCallback(() => {
    dispatch(workflowExposedFieldAdded({ nodeId, fieldName, value }));
  }, [dispatch, fieldName, nodeId, value]);

  const handleUnexposeField = useCallback(() => {
    dispatch(workflowExposedFieldRemoved({ nodeId, fieldName }));
  }, [dispatch, fieldName, nodeId]);

  if (!isExposed) {
    return (
      <IconButton
        variant="ghost"
        tooltip={t('nodes.addLinearView')}
        aria-label={t('nodes.addLinearView')}
        icon={<PiPlusBold />}
        onClick={handleExposeField}
        pointerEvents="auto"
        size="xs"
      />
    );
  } else {
    return (
      <IconButton
        variant="ghost"
        tooltip={t('nodes.removeLinearView')}
        aria-label={t('nodes.removeLinearView')}
        icon={<PiMinusBold />}
        onClick={handleUnexposeField}
        pointerEvents="auto"
        size="xs"
      />
    );
  }
};

export default memo(FieldLinearViewToggle);
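Note that the toggle now dispatches workflowExposedFieldAdded with the field's current value. A plausible reading (hedged, since the reducer body is outside this diff) is that the workflow slice snapshots it into originalExposedFieldValues, which the reset button introduced below compares against:

import { workflowExposedFieldAdded } from 'features/nodes/store/workflowSlice';

// Sketch of the expected action shape; ids are hypothetical.
const action = workflowExposedFieldAdded({
  nodeId: 'noise_node',
  fieldName: 'width',
  value: 512,
});
// action.payload.value is what useFieldOriginalValue() later compares against.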
@@ -4,12 +4,12 @@ import { useDoesInputHaveValue } from 'features/nodes/hooks/useDoesInputHaveValu
import { useFieldInputInstance } from 'features/nodes/hooks/useFieldInputInstance';
import { useFieldInputTemplate } from 'features/nodes/hooks/useFieldInputTemplate';
import type { PropsWithChildren } from 'react';
import { memo, useMemo } from 'react';
import { memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';

import EditableFieldTitle from './EditableFieldTitle';
import FieldContextMenu from './FieldContextMenu';
import FieldHandle from './FieldHandle';
import FieldLinearViewToggle from './FieldLinearViewToggle';
import InputFieldRenderer from './InputFieldRenderer';

interface Props {

@@ -22,6 +22,7 @@ const InputField = ({ nodeId, fieldName }: Props) => {
  const fieldTemplate = useFieldInputTemplate(nodeId, fieldName);
  const fieldInstance = useFieldInputInstance(nodeId, fieldName);
  const doesFieldHaveValue = useDoesInputHaveValue(nodeId, fieldName);
  const [isHovered, setIsHovered] = useState(false);

  const { isConnected, isConnectionInProgress, isConnectionStartField, connectionError, shouldDim } =
    useConnectionState({ nodeId, fieldName, kind: 'input' });

@@ -46,6 +47,14 @@ const InputField = ({ nodeId, fieldName }: Props) => {
    return false;
  }, [fieldTemplate, isConnected, doesFieldHaveValue]);

  const onMouseEnter = useCallback(() => {
    setIsHovered(true);
  }, []);

  const onMouseLeave = useCallback(() => {
    setIsHovered(false);
  }, []);

  if (!fieldTemplate || !fieldInstance) {
    return (
      <InputFieldWrapper shouldDim={shouldDim}>

@@ -87,19 +96,17 @@ const InputField = ({ nodeId, fieldName }: Props) => {
  return (
    <InputFieldWrapper shouldDim={shouldDim}>
      <FormControl isInvalid={isMissingInput} isDisabled={isConnected} orientation="vertical" px={2}>
        <Flex flexDir="column" w="full" gap={1}>
          <FieldContextMenu nodeId={nodeId} fieldName={fieldName} kind="input">
            {(ref) => (
              <EditableFieldTitle
                ref={ref}
                nodeId={nodeId}
                fieldName={fieldName}
                kind="input"
                isMissingInput={isMissingInput}
                withTooltip
              />
            )}
          </FieldContextMenu>
        <Flex flexDir="column" w="full" gap={1} onMouseEnter={onMouseEnter} onMouseLeave={onMouseLeave}>
          <Flex>
            <EditableFieldTitle
              nodeId={nodeId}
              fieldName={fieldName}
              kind="input"
              isMissingInput={isMissingInput}
              withTooltip
            />
            {isHovered && <FieldLinearViewToggle nodeId={nodeId} fieldName={fieldName} />}
          </Flex>
          <InputFieldRenderer nodeId={nodeId} fieldName={fieldName} />
        </Flex>
      </FormControl>

@@ -1,12 +1,15 @@
import { useSortable } from '@dnd-kit/sortable';
import { CSS } from '@dnd-kit/utilities';
import { Flex, Icon, IconButton, Spacer, Tooltip } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import NodeSelectionOverlay from 'common/components/NodeSelectionOverlay';
import { useFieldOriginalValue } from 'features/nodes/hooks/useFieldOriginalValue';
import { useMouseOverNode } from 'features/nodes/hooks/useMouseOverNode';
import { workflowExposedFieldRemoved } from 'features/nodes/store/workflowSlice';
import { HANDLE_TOOLTIP_OPEN_DELAY } from 'features/nodes/types/constants';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiInfoBold, PiTrashSimpleBold } from 'react-icons/pi';
import { PiArrowCounterClockwiseBold, PiDotsSixVerticalBold, PiInfoBold, PiTrashSimpleBold } from 'react-icons/pi';

import EditableFieldTitle from './EditableFieldTitle';
import FieldTooltipContent from './FieldTooltipContent';

@@ -19,46 +22,79 @@ type Props = {

const LinearViewField = ({ nodeId, fieldName }: Props) => {
  const dispatch = useAppDispatch();
  const { isValueChanged, onReset } = useFieldOriginalValue(nodeId, fieldName);
  const { isMouseOverNode, handleMouseOut, handleMouseOver } = useMouseOverNode(nodeId);
  const { t } = useTranslation();

  const handleRemoveField = useCallback(() => {
    dispatch(workflowExposedFieldRemoved({ nodeId, fieldName }));
  }, [dispatch, fieldName, nodeId]);

  const { attributes, listeners, setNodeRef, transform, transition } = useSortable({ id: `${nodeId}.${fieldName}` });

  const style = {
    transform: CSS.Translate.toString(transform),
    transition,
  };

  return (
    <Flex
      onMouseEnter={handleMouseOver}
      onMouseLeave={handleMouseOut}
      layerStyle="second"
      alignItems="center"
      position="relative"
      borderRadius="base"
      w="full"
      p={4}
      flexDir="column"
      paddingLeft={0}
      ref={setNodeRef}
      style={style}
    >
      <Flex>
        <EditableFieldTitle nodeId={nodeId} fieldName={fieldName} kind="input" />
        <Spacer />
        <Tooltip
          label={<FieldTooltipContent nodeId={nodeId} fieldName={fieldName} kind="input" />}
          openDelay={HANDLE_TOOLTIP_OPEN_DELAY}
          placement="top"
        >
          <Flex h="full" alignItems="center">
            <Icon fontSize="sm" color="base.300" as={PiInfoBold} />
          </Flex>
        </Tooltip>
        <IconButton
          aria-label={t('nodes.removeLinearView')}
          tooltip={t('nodes.removeLinearView')}
          variant="ghost"
          size="sm"
          onClick={handleRemoveField}
          icon={<PiTrashSimpleBold />}
        />
      <IconButton
        aria-label={t('nodes.reorderLinearView')}
        variant="ghost"
        icon={<PiDotsSixVerticalBold />}
        {...listeners}
        {...attributes}
        mx={2}
        height="full"
      />
      <Flex flexDir="column" w="full">
        <Flex alignItems="center">
          <EditableFieldTitle nodeId={nodeId} fieldName={fieldName} kind="input" />
          <Spacer />
          {isValueChanged && (
            <IconButton
              aria-label={t('nodes.resetToDefaultValue')}
              tooltip={t('nodes.resetToDefaultValue')}
              variant="ghost"
              size="sm"
              onClick={onReset}
              icon={<PiArrowCounterClockwiseBold />}
            />
          )}
          <Tooltip
            label={<FieldTooltipContent nodeId={nodeId} fieldName={fieldName} kind="input" />}
            openDelay={HANDLE_TOOLTIP_OPEN_DELAY}
            placement="top"
          >
            <Flex h="full" alignItems="center">
              <Icon fontSize="sm" color="base.300" as={PiInfoBold} />
            </Flex>
          </Tooltip>
          <IconButton
            aria-label={t('nodes.removeLinearView')}
            tooltip={t('nodes.removeLinearView')}
            variant="ghost"
            size="sm"
            onClick={handleRemoveField}
            icon={<PiTrashSimpleBold />}
          />
        </Flex>
        <InputFieldRenderer nodeId={nodeId} fieldName={fieldName} />
        <NodeSelectionOverlay isSelected={false} isHovered={isMouseOverNode} />
      </Flex>
      <InputFieldRenderer nodeId={nodeId} fieldName={fieldName} />
      <NodeSelectionOverlay isSelected={false} isHovered={isMouseOverNode} />
    </Flex>
  );
};

@@ -1,25 +1,23 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import AddNodeButton from 'features/nodes/components/flow/panels/TopPanel/AddNodeButton';
import ClearFlowButton from 'features/nodes/components/flow/panels/TopPanel/ClearFlowButton';
import SaveWorkflowButton from 'features/nodes/components/flow/panels/TopPanel/SaveWorkflowButton';
import UpdateNodesButton from 'features/nodes/components/flow/panels/TopPanel/UpdateNodesButton';
import WorkflowName from 'features/nodes/components/flow/panels/TopPanel/WorkflowName';
import WorkflowLibraryButton from 'features/workflowLibrary/components/WorkflowLibraryButton';
import { WorkflowName } from 'features/nodes/components/sidePanel/WorkflowName';
import WorkflowLibraryMenu from 'features/workflowLibrary/components/WorkflowLibraryMenu/WorkflowLibraryMenu';
import { memo } from 'react';

const TopCenterPanel = () => {
  const name = useAppSelector((s) => s.workflow.name);
  return (
    <Flex gap={2} top={2} left={2} right={2} position="absolute" alignItems="flex-start" pointerEvents="none">
      <Flex flexDir="column" gap="2">
        <Flex gap="2">
          <AddNodeButton />
          <WorkflowLibraryButton />
        </Flex>
      <Flex gap="2">
        <AddNodeButton />
        <UpdateNodesButton />
      </Flex>
      <Spacer />
      <WorkflowName />
      {!!name.length && <WorkflowName />}
      <Spacer />
      <ClearFlowButton />
      <SaveWorkflowButton />

@@ -25,6 +25,7 @@ const UpdateNodesButton = () => {
      icon={<PiWarningBold />}
      onClick={handleClickUpdateNodes}
      pointerEvents="auto"
      colorScheme="warning"
    />
  );
};

@@ -1,15 +0,0 @@
import { Text } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { memo } from 'react';

const TopCenterPanel = () => {
  const name = useAppSelector((s) => s.workflow.name);

  return (
    <Text m={2} fontSize="lg" userSelect="none" noOfLines={1} wordBreak="break-all" fontWeight="semibold" opacity={0.8}>
      {name}
    </Text>
  );
};

export default memo(TopCenterPanel);

@@ -1,17 +1,16 @@
import { Button } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsClockwiseBold } from 'react-icons/pi';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import { useLazyGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';

const ReloadNodeTemplatesButton = () => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const [_getOpenAPISchema] = useLazyGetOpenAPISchemaQuery();

  const handleReloadSchema = useCallback(() => {
    dispatch(receivedOpenAPISchema());
  }, [dispatch]);
    _getOpenAPISchema();
  }, [_getOpenAPISchema]);

  return (
    <Button
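The reload button illustrates the RTK Query pattern this PR migrates to: a lazy query hands back a trigger function instead of firing on mount. A generic sketch, with the endpoint name taken from the diff and the surrounding hook assumed:

import { useLazyGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';

// Hypothetical wrapper hook showing the trigger/result split.
const useReloadSchema = () => {
  const [trigger, result] = useLazyGetOpenAPISchemaQuery();
  // trigger() starts the request on demand; result.isFetching / result.data
  // follow the usual RTK Query lifecycle.
  return { reload: () => trigger(), isFetching: result.isFetching };
};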
@@ -1,15 +0,0 @@
import { Flex } from '@invoke-ai/ui-library';
import WorkflowLibraryButton from 'features/workflowLibrary/components/WorkflowLibraryButton';
import WorkflowLibraryMenu from 'features/workflowLibrary/components/WorkflowLibraryMenu/WorkflowLibraryMenu';
import { memo } from 'react';

const TopRightPanel = () => {
  return (
    <Flex gap={2} position="absolute" top={2} insetInlineEnd={2}>
      <WorkflowLibraryButton />
      <WorkflowLibraryMenu />
    </Flex>
  );
};

export default memo(TopRightPanel);

@@ -0,0 +1,43 @@
import { Flex, IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { workflowModeChanged } from 'features/nodes/store/workflowSlice';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiEyeBold, PiPencilBold } from 'react-icons/pi';

export const ModeToggle = () => {
  const dispatch = useAppDispatch();
  const mode = useAppSelector((s) => s.workflow.mode);
  const { t } = useTranslation();

  const onClickEdit = useCallback(() => {
    dispatch(workflowModeChanged('edit'));
  }, [dispatch]);

  const onClickView = useCallback(() => {
    dispatch(workflowModeChanged('view'));
  }, [dispatch]);

  return (
    <Flex justifyContent="flex-end">
      {mode === 'view' && (
        <IconButton
          aria-label={t('nodes.editMode')}
          tooltip={t('nodes.editMode')}
          onClick={onClickEdit}
          icon={<PiPencilBold />}
          colorScheme="invokeBlue"
        />
      )}
      {mode === 'edit' && (
        <IconButton
          aria-label={t('nodes.viewMode')}
          tooltip={t('nodes.viewMode')}
          onClick={onClickView}
          icon={<PiEyeBold />}
          colorScheme="invokeBlue"
        />
      )}
    </Flex>
  );
};
@@ -1,22 +1,37 @@
import 'reactflow/dist/style.css';

import { Flex } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { selectWorkflowSlice } from 'features/nodes/store/workflowSlice';
import QueueControls from 'features/queue/components/QueueControls';
import ResizeHandle from 'features/ui/components/tabs/ResizeHandle';
import { usePanelStorage } from 'features/ui/hooks/usePanelStorage';
import WorkflowLibraryButton from 'features/workflowLibrary/components/WorkflowLibraryButton';
import type { CSSProperties } from 'react';
import { memo, useCallback, useRef } from 'react';
import type { ImperativePanelGroupHandle } from 'react-resizable-panels';
import { Panel, PanelGroup } from 'react-resizable-panels';

import InspectorPanel from './inspector/InspectorPanel';
import { WorkflowViewMode } from './viewMode/WorkflowViewMode';
import WorkflowPanel from './workflow/WorkflowPanel';
import { WorkflowMenu } from './WorkflowMenu';
import { WorkflowName } from './WorkflowName';

const panelGroupStyles: CSSProperties = { height: '100%', width: '100%' };

const selector = createMemoizedSelector(selectWorkflowSlice, (workflow) => {
  return {
    mode: workflow.mode,
  };
});

const NodeEditorPanelGroup = () => {
  const { mode } = useAppSelector(selector);
  const panelGroupRef = useRef<ImperativePanelGroupHandle>(null);
  const panelStorage = usePanelStorage();

  const handleDoubleClickHandle = useCallback(() => {
    if (!panelGroupRef.current) {
      return;

@@ -27,22 +42,33 @@ const NodeEditorPanelGroup = () => {
  return (
    <Flex w="full" h="full" gap={2} flexDir="column">
      <QueueControls />
      <PanelGroup
        ref={panelGroupRef}
        id="workflow-panel-group"
        autoSaveId="workflow-panel-group"
        direction="vertical"
        style={panelGroupStyles}
        storage={panelStorage}
      >
        <Panel id="workflow" collapsible minSize={25}>
          <WorkflowPanel />
        </Panel>
        <ResizeHandle orientation="horizontal" onDoubleClick={handleDoubleClickHandle} />
        <Panel id="inspector" collapsible minSize={25}>
          <InspectorPanel />
        </Panel>
      </PanelGroup>
      <Flex w="full" justifyContent="space-between" alignItems="center" gap="4" padding={1}>
        <Flex justifyContent="space-between" alignItems="center" gap="4">
          <WorkflowLibraryButton />
          <WorkflowName />
        </Flex>
        <WorkflowMenu />
      </Flex>

      {mode === 'view' && <WorkflowViewMode />}
      {mode === 'edit' && (
        <PanelGroup
          ref={panelGroupRef}
          id="workflow-panel-group"
          autoSaveId="workflow-panel-group"
          direction="vertical"
          style={panelGroupStyles}
          storage={panelStorage}
        >
          <Panel id="workflow" collapsible minSize={25}>
            <WorkflowPanel />
          </Panel>
          <ResizeHandle orientation="horizontal" onDoubleClick={handleDoubleClickHandle} />
          <Panel id="inspector" collapsible minSize={25}>
            <InspectorPanel />
          </Panel>
        </PanelGroup>
      )}
    </Flex>
  );
};

@@ -0,0 +1,26 @@
import { Flex } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import SaveWorkflowButton from 'features/nodes/components/flow/panels/TopPanel/SaveWorkflowButton';
import { selectWorkflowSlice } from 'features/nodes/store/workflowSlice';
import { NewWorkflowButton } from 'features/workflowLibrary/components/NewWorkflowButton';

import { ModeToggle } from './ModeToggle';

const selector = createMemoizedSelector(selectWorkflowSlice, (workflow) => {
  return {
    mode: workflow.mode,
  };
});

export const WorkflowMenu = () => {
  const { mode } = useAppSelector(selector);

  return (
    <Flex gap="2" alignItems="center">
      {mode === 'edit' && <SaveWorkflowButton />}
      <NewWorkflowButton />
      <ModeToggle />
    </Flex>
  );
};
@@ -0,0 +1,37 @@
import { Flex, Icon, Text, Tooltip } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useTranslation } from 'react-i18next';
import { PiDotOutlineFill } from 'react-icons/pi';

import WorkflowInfoTooltipContent from './viewMode/WorkflowInfoTooltipContent';
import { WorkflowWarning } from './viewMode/WorkflowWarning';

export const WorkflowName = () => {
  const { name, isTouched, mode } = useAppSelector((s) => s.workflow);
  const { t } = useTranslation();

  return (
    <Flex gap="1" alignItems="center">
      {name.length ? (
        <Tooltip label={<WorkflowInfoTooltipContent />} placement="top">
          <Text fontSize="lg" userSelect="none" noOfLines={1} wordBreak="break-all" fontWeight="semibold">
            {name}
          </Text>
        </Tooltip>
      ) : (
        <Text fontSize="lg" fontStyle="italic" fontWeight="semibold">
          {t('workflows.unnamedWorkflow')}
        </Text>
      )}

      {isTouched && mode === 'edit' && (
        <Tooltip label="Workflow has unsaved changes">
          <Flex>
            <Icon as={PiDotOutlineFill} boxSize="20px" sx={{ color: 'invokeYellow.500' }} />
          </Flex>
        </Tooltip>
      )}
      <WorkflowWarning />
    </Flex>
  );
};

@@ -0,0 +1,53 @@
import { Flex, FormLabel, Icon, IconButton, Spacer, Tooltip } from '@invoke-ai/ui-library';
import FieldTooltipContent from 'features/nodes/components/flow/nodes/Invocation/fields/FieldTooltipContent';
import InputFieldRenderer from 'features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer';
import { useFieldLabel } from 'features/nodes/hooks/useFieldLabel';
import { useFieldOriginalValue } from 'features/nodes/hooks/useFieldOriginalValue';
import { useFieldTemplateTitle } from 'features/nodes/hooks/useFieldTemplateTitle';
import { HANDLE_TOOLTIP_OPEN_DELAY } from 'features/nodes/types/constants';
import { t } from 'i18next';
import { memo } from 'react';
import { PiArrowCounterClockwiseBold, PiInfoBold } from 'react-icons/pi';

type Props = {
  nodeId: string;
  fieldName: string;
};

const WorkflowField = ({ nodeId, fieldName }: Props) => {
  const label = useFieldLabel(nodeId, fieldName);
  const fieldTemplateTitle = useFieldTemplateTitle(nodeId, fieldName, 'input');
  const { isValueChanged, onReset } = useFieldOriginalValue(nodeId, fieldName);

  return (
    <Flex layerStyle="second" position="relative" borderRadius="base" w="full" p={4} gap="2" flexDir="column">
      <Flex alignItems="center">
        <FormLabel fontSize="sm">{label || fieldTemplateTitle}</FormLabel>

        <Spacer />
        {isValueChanged && (
          <IconButton
            aria-label={t('nodes.resetToDefaultValue')}
            tooltip={t('nodes.resetToDefaultValue')}
            variant="ghost"
            size="sm"
            onClick={onReset}
            icon={<PiArrowCounterClockwiseBold />}
          />
        )}
        <Tooltip
          label={<FieldTooltipContent nodeId={nodeId} fieldName={fieldName} kind="input" />}
          openDelay={HANDLE_TOOLTIP_OPEN_DELAY}
          placement="top"
        >
          <Flex h="24px" alignItems="center">
            <Icon fontSize="md" color="base.300" as={PiInfoBold} />
          </Flex>
        </Tooltip>
      </Flex>
      <InputFieldRenderer nodeId={nodeId} fieldName={fieldName} />
    </Flex>
  );
};

export default memo(WorkflowField);

@@ -0,0 +1,68 @@
import { Box, Flex, Text } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { selectWorkflowSlice } from 'features/nodes/store/workflowSlice';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

const selector = createMemoizedSelector(selectWorkflowSlice, (workflow) => {
  return {
    name: workflow.name,
    description: workflow.description,
    notes: workflow.notes,
    author: workflow.author,
    tags: workflow.tags,
  };
});

const WorkflowInfoTooltipContent = () => {
  const { name, description, notes, author, tags } = useAppSelector(selector);
  const { t } = useTranslation();

  return (
    <Flex flexDir="column" gap="2">
      {!!name.length && (
        <Box>
          <Text fontWeight="semibold">{t('nodes.workflowName')}</Text>
          <Text opacity={0.7} fontStyle="oblique 5deg">
            {name}
          </Text>
        </Box>
      )}
      {!!author.length && (
        <Box>
          <Text fontWeight="semibold">{t('nodes.workflowAuthor')}</Text>
          <Text opacity={0.7} fontStyle="oblique 5deg">
            {author}
          </Text>
        </Box>
      )}
      {!!tags.length && (
        <Box>
          <Text fontWeight="semibold">{t('nodes.workflowTags')}</Text>
          <Text opacity={0.7} fontStyle="oblique 5deg">
            {tags}
          </Text>
        </Box>
      )}
      {!!description.length && (
        <Box>
          <Text fontWeight="semibold">{t('nodes.workflowDescription')}</Text>
          <Text opacity={0.7} fontStyle="oblique 5deg">
            {description}
          </Text>
        </Box>
      )}
      {!!notes.length && (
        <Box>
          <Text fontWeight="semibold">{t('nodes.workflowNotes')}</Text>
          <Text opacity={0.7} fontStyle="oblique 5deg">
            {notes}
          </Text>
        </Box>
      )}
    </Flex>
  );
};

export default memo(WorkflowInfoTooltipContent);
@@ -0,0 +1,39 @@
import { Box, Flex } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { selectWorkflowSlice } from 'features/nodes/store/workflowSlice';
import { t } from 'i18next';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';

import WorkflowField from './WorkflowField';

const selector = createMemoizedSelector(selectWorkflowSlice, (workflow) => {
  return {
    fields: workflow.exposedFields,
    name: workflow.name,
  };
});

export const WorkflowViewMode = () => {
  const { isLoading } = useGetOpenAPISchemaQuery();
  const { fields } = useAppSelector(selector);
  return (
    <Box position="relative" w="full" h="full">
      <ScrollableContent>
        <Flex position="relative" flexDir="column" alignItems="flex-start" p={1} gap={2} h="full" w="full">
          {isLoading ? (
            <IAINoContentFallback label={t('nodes.loadingNodes')} icon={null} />
          ) : fields.length ? (
            fields.map(({ nodeId, fieldName }) => (
              <WorkflowField key={`${nodeId}.${fieldName}`} nodeId={nodeId} fieldName={fieldName} />
            ))
          ) : (
            <IAINoContentFallback label={t('nodes.noFieldsLinearview')} icon={null} />
          )}
        </Flex>
      </ScrollableContent>
    </Box>
  );
};

@@ -0,0 +1,21 @@
import { Flex, Icon, Tooltip } from '@invoke-ai/ui-library';
import { useGetNodesNeedUpdate } from 'features/nodes/hooks/useGetNodesNeedUpdate';
import { PiWarningBold } from 'react-icons/pi';

import { WorkflowWarningTooltip } from './WorkflowWarningTooltip';

export const WorkflowWarning = () => {
  const nodesNeedUpdate = useGetNodesNeedUpdate();

  if (!nodesNeedUpdate) {
    return <></>;
  }

  return (
    <Tooltip label={<WorkflowWarningTooltip />}>
      <Flex h="full" alignItems="center" gap="2">
        <Icon color="warning.400" as={PiWarningBold} />
      </Flex>
    </Tooltip>
  );
};

@@ -0,0 +1,20 @@
import { Flex, Text } from '@invoke-ai/ui-library';
import { useTranslation } from 'react-i18next';

export const WorkflowWarningTooltip = () => {
  const { t } = useTranslation();

  return (
    <Flex flexDir="column" gap="2">
      <Flex flexDir="column" gap="2">
        <Text fontWeight="semibold">{t('toast.loadedWithWarnings')}</Text>
        <Flex flexDir="column">
          <Text>{t('common.toResolve')}:</Text>
          <Text>
            {t('nodes.editMode')} >> {t('nodes.updateAllNodes')} >> {t('common.save')}
          </Text>
        </Flex>
      </Flex>
    </Flex>
  );
};
@@ -1,31 +1,61 @@
import { arrayMove } from '@dnd-kit/sortable';
import { Box, Flex } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import DndSortable from 'features/dnd/components/DndSortable';
import type { DragEndEvent } from 'features/dnd/types';
import LinearViewField from 'features/nodes/components/flow/nodes/Invocation/fields/LinearViewField';
import { selectWorkflowSlice } from 'features/nodes/store/workflowSlice';
import { memo } from 'react';
import { selectWorkflowSlice, workflowExposedFieldsReordered } from 'features/nodes/store/workflowSlice';
import type { FieldIdentifier } from 'features/nodes/types/field';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';

const selector = createMemoizedSelector(selectWorkflowSlice, (workflow) => workflow.exposedFields);

const WorkflowLinearTab = () => {
  const fields = useAppSelector(selector);
  const { isLoading } = useGetOpenAPISchemaQuery();
  const { t } = useTranslation();
  const dispatch = useAppDispatch();

  const handleDragEnd = useCallback(
    (event: DragEndEvent) => {
      const { active, over } = event;
      const fieldsStrings = fields.map((field) => `${field.nodeId}.${field.fieldName}`);

      if (over && active.id !== over.id) {
        const oldIndex = fieldsStrings.indexOf(active.id as string);
        const newIndex = fieldsStrings.indexOf(over.id as string);

        const newFields = arrayMove(fieldsStrings, oldIndex, newIndex)
          .map((field) => fields.find((obj) => `${obj.nodeId}.${obj.fieldName}` === field))
          .filter((field) => field) as FieldIdentifier[];

        dispatch(workflowExposedFieldsReordered(newFields));
      }
    },
    [dispatch, fields]
  );

  return (
    <Box position="relative" w="full" h="full">
      <ScrollableContent>
        <Flex position="relative" flexDir="column" alignItems="flex-start" p={1} gap={2} h="full" w="full">
          {fields.length ? (
            fields.map(({ nodeId, fieldName }) => (
              <LinearViewField key={`${nodeId}.${fieldName}`} nodeId={nodeId} fieldName={fieldName} />
            ))
          ) : (
            <IAINoContentFallback label={t('nodes.noFieldsLinearview')} icon={null} />
          )}
        </Flex>
        <DndSortable onDragEnd={handleDragEnd} items={fields.map((field) => `${field.nodeId}.${field.fieldName}`)}>
          <Flex position="relative" flexDir="column" alignItems="flex-start" p={1} gap={2} h="full" w="full">
            {isLoading ? (
              <IAINoContentFallback label={t('nodes.loadingNodes')} icon={null} />
            ) : fields.length ? (
              fields.map(({ nodeId, fieldName }) => (
                <LinearViewField key={`${nodeId}.${fieldName}`} nodeId={nodeId} fieldName={fieldName} />
              ))
            ) : (
              <IAINoContentFallback label={t('nodes.noFieldsLinearview')} icon={null} />
            )}
          </Flex>
        </DndSortable>
      </ScrollableContent>
    </Box>
  );
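The reorder handler above hinges on arrayMove from '@dnd-kit/sortable'; a minimal standalone illustration of the index math:

import { arrayMove } from '@dnd-kit/sortable';

const ids = ['a.x', 'b.y', 'c.z'];
// Dragging 'a.x' onto 'c.z' moves index 0 to index 2:
arrayMove(ids, ids.indexOf('a.x'), ids.indexOf('c.z')); // ['b.y', 'c.z', 'a.x']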
|
@ -12,17 +12,17 @@ const WorkflowPanel = () => {
|
||||
<Flex layerStyle="first" flexDir="column" w="full" h="full" borderRadius="base" p={2} gap={2}>
|
||||
<Tabs variant="line" display="flex" w="full" h="full" flexDir="column">
|
||||
<TabList>
|
||||
<Tab>{t('common.linear')}</Tab>
|
||||
<Tab>{t('common.details')}</Tab>
|
||||
<Tab>{t('common.linear')}</Tab>
|
||||
<Tab>JSON</Tab>
|
||||
</TabList>
|
||||
|
||||
<TabPanels>
|
||||
<TabPanel>
|
||||
<WorkflowLinearTab />
|
||||
<WorkflowGeneralTab />
|
||||
</TabPanel>
|
||||
<TabPanel>
|
||||
<WorkflowGeneralTab />
|
||||
<WorkflowLinearTab />
|
||||
</TabPanel>
|
||||
<TabPanel>
|
||||
<WorkflowJSONTab />
|
||||
|
@ -0,0 +1,28 @@
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useFieldValue } from 'features/nodes/hooks/useFieldValue';
|
||||
import { fieldValueReset } from 'features/nodes/store/nodesSlice';
|
||||
import { selectWorkflowSlice } from 'features/nodes/store/workflowSlice';
|
||||
import { isEqual } from 'lodash-es';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
|
||||
export const useFieldOriginalValue = (nodeId: string, fieldName: string) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const selectOriginalExposedFieldValues = useMemo(
|
||||
() =>
|
||||
createSelector(
|
||||
selectWorkflowSlice,
|
||||
(workflow) =>
|
||||
workflow.originalExposedFieldValues.find((v) => v.nodeId === nodeId && v.fieldName === fieldName)?.value
|
||||
),
|
||||
[nodeId, fieldName]
|
||||
);
|
||||
const originalValue = useAppSelector(selectOriginalExposedFieldValues);
|
||||
const value = useFieldValue(nodeId, fieldName);
|
||||
const isValueChanged = useMemo(() => !isEqual(value, originalValue), [value, originalValue]);
|
||||
const onReset = useCallback(() => {
|
||||
dispatch(fieldValueReset({ nodeId, fieldName, value: originalValue }));
|
||||
}, [dispatch, fieldName, nodeId, originalValue]);
|
||||
|
||||
return { originalValue, isValueChanged, onReset };
|
||||
};
|
@ -0,0 +1,23 @@
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { selectNodesSlice } from 'features/nodes/store/nodesSlice';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { useMemo } from 'react';
|
||||
|
||||
export const useFieldValue = (nodeId: string, fieldName: string) => {
|
||||
const selector = useMemo(
|
||||
() =>
|
||||
createMemoizedSelector(selectNodesSlice, (nodes) => {
|
||||
const node = nodes.nodes.find((node) => node.id === nodeId);
|
||||
if (!isInvocationNode(node)) {
|
||||
return;
|
||||
}
|
||||
return node?.data.inputs[fieldName]?.value;
|
||||
}),
|
||||
[fieldName, nodeId]
|
||||
);
|
||||
|
||||
const value = useAppSelector(selector);
|
||||
|
||||
return value;
|
||||
};
|
@ -2,7 +2,6 @@ import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice, isAnyOf } from '@reduxjs/toolkit';
import type { PersistConfig, RootState } from 'app/store/store';
import { workflowLoaded } from 'features/nodes/store/actions';
import { nodeTemplatesBuilt } from 'features/nodes/store/nodeTemplatesSlice';
import { SHARED_NODE_PROPERTIES } from 'features/nodes/types/constants';
import type {
  BoardFieldValue,
@ -19,6 +18,7 @@ import type {
  MainModelFieldValue,
  SchedulerFieldValue,
  SDXLRefinerModelFieldValue,
  StatefulFieldValue,
  StringFieldValue,
  T2IAdapterModelFieldValue,
  VAEModelFieldValue,
@ -37,6 +37,7 @@ import {
  zMainModelFieldValue,
  zSchedulerFieldValue,
  zSDXLRefinerModelFieldValue,
  zStatefulFieldValue,
  zStringFieldValue,
  zT2IAdapterModelFieldValue,
  zVAEModelFieldValue,
@ -65,7 +66,6 @@ import {
  SelectionMode,
  updateEdge,
} from 'reactflow';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import {
  socketGeneratorProgress,
  socketInvocationComplete,
@ -92,7 +92,6 @@ export const initialNodesState: NodesState = {
  _version: 1,
  nodes: [],
  edges: [],
  isReady: false,
  connectionStartParams: null,
  connectionStartFieldType: null,
  connectionMade: false,
@ -481,6 +480,9 @@ export const nodesSlice = createSlice({
    selectedEdgesChanged: (state, action: PayloadAction<string[]>) => {
      state.selectedEdges = action.payload;
    },
    fieldValueReset: (state, action: FieldValueAction<StatefulFieldValue>) => {
      fieldValueReducer(state, action, zStatefulFieldValue);
    },
    fieldStringValueChanged: (state, action: FieldValueAction<StringFieldValue>) => {
      fieldValueReducer(state, action, zStringFieldValue);
    },
@ -677,10 +679,6 @@ export const nodesSlice = createSlice({
    },
  },
  extraReducers: (builder) => {
    builder.addCase(receivedOpenAPISchema.pending, (state) => {
      state.isReady = false;
    });

    builder.addCase(workflowLoaded, (state, action) => {
      const { nodes, edges } = action.payload;
      state.nodes = applyNodeChanges(
@ -752,9 +750,6 @@ export const nodesSlice = createSlice({
        });
      }
    });
    builder.addCase(nodeTemplatesBuilt, (state) => {
      state.isReady = true;
    });
  },
});

@ -770,6 +765,7 @@ export const {
  edgesChanged,
  edgesDeleted,
  edgeUpdated,
  fieldValueReset,
  fieldBoardValueChanged,
  fieldBooleanValueChanged,
  fieldColorValueChanged,
@ -844,7 +840,6 @@ export const isAnyNodeOrEdgeMutation = isAnyOf(
  nodeIsOpenChanged,
  nodeLabelChanged,
  nodeNotesChanged,
  nodesChanged,
  nodesDeleted,
  nodeUseCacheChanged,
  notesNodeValueChanged,
@ -871,7 +866,6 @@ export const nodesPersistConfig: PersistConfig<NodesState> = {
  'connectionStartFieldType',
  'selectedNodes',
  'selectedEdges',
  'isReady',
  'nodesToCopy',
  'edgesToCopy',
  'connectionMade',
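The new `fieldValueReset` reducer reuses the same `fieldValueReducer` helper as the existing `field*ValueChanged` reducers. A simplified sketch of that pattern, with abbreviated state and field types that are assumptions rather than the real implementation:

```ts
import type { PayloadAction } from '@reduxjs/toolkit';
import type { z } from 'zod';

// Sketch of the shared field-value reducer pattern: the incoming value is
// validated against the field's zod schema before being written to the input.
type FieldValueAction<T> = PayloadAction<{ nodeId: string; fieldName: string; value: T }>;
type SketchState = { nodes: { id: string; data: { inputs: Record<string, { value?: unknown }> } }[] };

const fieldValueReducerSketch = <T>(state: SketchState, action: FieldValueAction<T>, schema: z.ZodTypeAny): void => {
  const { nodeId, fieldName, value } = action.payload;
  const input = state.nodes.find((n) => n.id === nodeId)?.data.inputs[fieldName];
  const result = schema.safeParse(value);
  if (!input || !result.success) {
    return; // silently ignore values that don't match the field's schema
  }
  input.value = result.data;
};
```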
@ -1,4 +1,4 @@
import type { FieldType } from 'features/nodes/types/field';
import type { FieldIdentifier, FieldType, StatefulFieldValue } from 'features/nodes/types/field';
import type {
  AnyNode,
  InvocationNodeEdge,
@ -26,7 +26,6 @@ export type NodesState = {
  selectedEdges: string[];
  nodeExecutionStates: Record<string, NodeExecutionState>;
  viewport: Viewport;
  isReady: boolean;
  nodesToCopy: AnyNode[];
  edgesToCopy: InvocationNodeEdge[];
  isAddNodePopoverOpen: boolean;
@ -34,9 +33,16 @@ export type NodesState = {
  selectionMode: SelectionMode;
};

export type WorkflowMode = 'edit' | 'view';
export type FieldIdentifierWithValue = FieldIdentifier & {
  value: StatefulFieldValue;
};

export type WorkflowsState = Omit<WorkflowV2, 'nodes' | 'edges'> & {
  _version: 1;
  isTouched: boolean;
  mode: WorkflowMode;
  originalExposedFieldValues: FieldIdentifierWithValue[];
};

export type NodeTemplatesState = {
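For illustration only, a `FieldIdentifierWithValue` pairs a field's address in the graph with the value it held when the workflow was loaded; the ids and value below are hypothetical:

```ts
import type { FieldIdentifierWithValue } from 'features/nodes/store/types';

const original: FieldIdentifierWithValue = {
  nodeId: '8d2a1f3c', // hypothetical node id
  fieldName: 'cfg_scale',
  value: 7.5, // numeric fields are one member of the StatefulFieldValue union
};
```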
@ -2,11 +2,16 @@ import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import type { PersistConfig, RootState } from 'app/store/store';
import { workflowLoaded } from 'features/nodes/store/actions';
import { isAnyNodeOrEdgeMutation, nodeEditorReset, nodesDeleted } from 'features/nodes/store/nodesSlice';
import type { WorkflowsState as WorkflowState } from 'features/nodes/store/types';
import { isAnyNodeOrEdgeMutation, nodeEditorReset, nodesChanged, nodesDeleted } from 'features/nodes/store/nodesSlice';
import type {
  FieldIdentifierWithValue,
  WorkflowMode,
  WorkflowsState as WorkflowState,
} from 'features/nodes/store/types';
import type { FieldIdentifier } from 'features/nodes/types/field';
import { isInvocationNode } from 'features/nodes/types/invocation';
import type { WorkflowCategory, WorkflowV2 } from 'features/nodes/types/workflow';
import { cloneDeep, isEqual, uniqBy } from 'lodash-es';
import { cloneDeep, isEqual, omit, uniqBy } from 'lodash-es';

export const blankWorkflow: Omit<WorkflowV2, 'nodes' | 'edges'> = {
  name: '',
@ -23,7 +28,9 @@ export const blankWorkflow: Omit<WorkflowV2, 'nodes' | 'edges'> = {

export const initialWorkflowState: WorkflowState = {
  _version: 1,
  isTouched: true,
  isTouched: false,
  mode: 'view',
  originalExposedFieldValues: [],
  ...blankWorkflow,
};

@ -31,15 +38,29 @@ export const workflowSlice = createSlice({
  name: 'workflow',
  initialState: initialWorkflowState,
  reducers: {
    workflowExposedFieldAdded: (state, action: PayloadAction<FieldIdentifier>) => {
    workflowModeChanged: (state, action: PayloadAction<WorkflowMode>) => {
      state.mode = action.payload;
    },
    workflowExposedFieldAdded: (state, action: PayloadAction<FieldIdentifierWithValue>) => {
      state.exposedFields = uniqBy(
        state.exposedFields.concat(action.payload),
        state.exposedFields.concat(omit(action.payload, 'value')),
        (field) => `${field.nodeId}-${field.fieldName}`
      );
      state.originalExposedFieldValues = uniqBy(
        state.originalExposedFieldValues.concat(action.payload),
        (field) => `${field.nodeId}-${field.fieldName}`
      );
      state.isTouched = true;
    },
    workflowExposedFieldRemoved: (state, action: PayloadAction<FieldIdentifier>) => {
      state.exposedFields = state.exposedFields.filter((field) => !isEqual(field, action.payload));
      state.originalExposedFieldValues = state.originalExposedFieldValues.filter(
        (field) => !isEqual(omit(field, 'value'), action.payload)
      );
      state.isTouched = true;
    },
    workflowExposedFieldsReordered: (state, action: PayloadAction<FieldIdentifier[]>) => {
      state.exposedFields = action.payload;
      state.isTouched = true;
    },
    workflowNameChanged: (state, action: PayloadAction<string>) => {
@ -78,15 +99,43 @@ export const workflowSlice = createSlice({
    workflowIDChanged: (state, action: PayloadAction<string>) => {
      state.id = action.payload;
    },
    workflowReset: () => cloneDeep(initialWorkflowState),
    workflowSaved: (state) => {
      state.isTouched = false;
    },
  },
  extraReducers: (builder) => {
    builder.addCase(workflowLoaded, (state, action) => {
      const { nodes: _nodes, edges: _edges, ...workflowExtra } = action.payload;
      return { ...initialWorkflowState, ...cloneDeep(workflowExtra) };
      const { nodes, edges: _edges, ...workflowExtra } = action.payload;

      const originalExposedFieldValues: FieldIdentifierWithValue[] = [];

      workflowExtra.exposedFields.forEach((field) => {
        const node = nodes.find((n) => n.id === field.nodeId);

        if (!isInvocationNode(node)) {
          return;
        }

        const input = node.data.inputs[field.fieldName];

        if (!input) {
          return;
        }

        const originalExposedFieldValue = {
          nodeId: field.nodeId,
          fieldName: field.fieldName,
          value: input.value,
        };
        originalExposedFieldValues.push(originalExposedFieldValue);
      });

      return {
        ...cloneDeep(initialWorkflowState),
        ...cloneDeep(workflowExtra),
        originalExposedFieldValues,
        mode: state.mode,
      };
    });

    builder.addCase(nodesDeleted, (state, action) => {
@ -97,6 +146,29 @@ export const workflowSlice = createSlice({

    builder.addCase(nodeEditorReset, () => cloneDeep(initialWorkflowState));

    builder.addCase(nodesChanged, (state, action) => {
      // Not all changes to nodes should result in the workflow being marked touched
      const filteredChanges = action.payload.filter((change) => {
        // We always want to mark the workflow as touched if a node is added, removed, or reset
        if (['add', 'remove', 'reset'].includes(change.type)) {
          return true;
        }

        // Position changes can change the position and the dragging status of the node - ignore if the change doesn't
        // affect the position
        if (change.type === 'position' && (change.position || change.positionAbsolute)) {
          return true;
        }

        // This change isn't relevant
        return false;
      });

      if (filteredChanges.length > 0) {
        state.isTouched = true;
      }
    });

    builder.addMatcher(isAnyNodeOrEdgeMutation, (state) => {
      state.isTouched = true;
    });
@ -104,8 +176,10 @@ export const workflowSlice = createSlice({
});

export const {
  workflowModeChanged,
  workflowExposedFieldAdded,
  workflowExposedFieldRemoved,
  workflowExposedFieldsReordered,
  workflowNameChanged,
  workflowCategoryChanged,
  workflowDescriptionChanged,
@ -115,7 +189,6 @@ export const {
  workflowVersionChanged,
  workflowContactChanged,
  workflowIDChanged,
  workflowReset,
  workflowSaved,
} = workflowSlice.actions;
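The `nodesChanged` case above is the interesting part of this slice change: reactflow emits position changes for drag start and stop even when nothing moved, so not every change should dirty the workflow. The same check, sketched as a standalone predicate over reactflow's `NodeChange` union:

```ts
import type { NodeChange } from 'reactflow';

// True only for changes that should mark the workflow as touched.
const isTouchingChange = (change: NodeChange): boolean => {
  // Structural changes always count.
  if (change.type === 'add' || change.type === 'remove' || change.type === 'reset') {
    return true;
  }
  // 'position' changes also fire for dragging-status updates with no movement;
  // only count them when a position is actually present.
  if (change.type === 'position' && (change.position || change.positionAbsolute)) {
    return true;
  }
  return false;
};

// e.g.: if (changes.some(isTouchingChange)) { state.isTouched = true; }
```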
@ -23,6 +23,7 @@ import {
  NOISE,
  NOISE_HRF,
  RESIZE_HRF,
  SEAMLESS,
  VAE_LOADER,
} from './constants';
import { setMetadataReceivingNode, upsertMetadata } from './metadata';
@ -30,7 +31,6 @@ import { setMetadataReceivingNode, upsertMetadata } from './metadata';
// Copy certain connections from previous DENOISE_LATENTS to new DENOISE_LATENTS_HRF.
function copyConnectionsToDenoiseLatentsHrf(graph: NonNullableGraph): void {
  const destinationFields = [
    'vae',
    'control',
    'ip_adapter',
    'metadata',
@ -107,9 +107,10 @@ export const addHrfToGraph = (state: RootState, graph: NonNullableGraph): void =
  }
  const log = logger('txt2img');

  const { vae } = state.generation;
  const { vae, seamlessXAxis, seamlessYAxis } = state.generation;
  const { hrfStrength, hrfEnabled, hrfMethod } = state.hrf;
  const isAutoVae = !vae;
  const isSeamlessEnabled = seamlessXAxis || seamlessYAxis;
  const width = state.generation.width;
  const height = state.generation.height;
  const optimalDimension = selectOptimalDimension(state);
@ -158,7 +159,7 @@ export const addHrfToGraph = (state: RootState, graph: NonNullableGraph): void =
    },
    {
      source: {
        node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
        node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
        field: 'vae',
      },
      destination: {
@ -259,7 +260,7 @@ export const addHrfToGraph = (state: RootState, graph: NonNullableGraph): void =
  graph.edges.push(
    {
      source: {
        node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
        node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
        field: 'vae',
      },
      destination: {
@ -322,7 +323,7 @@ export const addHrfToGraph = (state: RootState, graph: NonNullableGraph): void =
  graph.edges.push(
    {
      source: {
        node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
        node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
        field: 'vae',
      },
      destination: {
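Each edit in this file replaces the same inline ternary. A sketch of that decision factored into a helper (the constants are the ones imported from './constants' above; the helper itself is not in the diff):

```ts
import { MAIN_MODEL_LOADER, SEAMLESS, VAE_LOADER } from './constants';

// Where the HRF graph should pull its VAE from: the SEAMLESS node wraps the
// VAE when seamless tiling is active, so it takes precedence; otherwise a
// custom VAE loader wins over the main model loader.
const getVaeSourceNodeId = (isSeamlessEnabled: boolean, isAutoVae: boolean): string =>
  isSeamlessEnabled ? SEAMLESS : isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER;
```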
@ -1,7 +1,7 @@
import type { RootState } from 'app/store/store';
import type { LoRAMetadataItem } from 'features/nodes/types/metadata';
import { zLoRAMetadataItem } from 'features/nodes/types/metadata';
import { forEach, size } from 'lodash-es';
import { filter, size } from 'lodash-es';
import type { NonNullableGraph, SDXLLoraLoaderInvocation } from 'services/api/types';

import {
@ -31,8 +31,8 @@ export const addSDXLLoRAsToGraph = (
   * So we need to inject a LoRA chain into the graph.
   */

  const { loras } = state.lora;
  const loraCount = size(loras);
  const enabledLoRAs = filter(state.lora.loras, (l) => l.isEnabled ?? false);
  const loraCount = size(enabledLoRAs);

  if (loraCount === 0) {
    return;
@ -59,7 +59,7 @@ export const addSDXLLoRAsToGraph = (
  let lastLoraNodeId = '';
  let currentLoraIndex = 0;

  forEach(loras, (lora) => {
  enabledLoRAs.forEach((lora) => {
    const { model_name, base_model, weight } = lora;
    const currentLoraNodeId = `${LORA_LOADER}_${model_name.replace('.', '_')}`;
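The switch from `forEach(loras, ...)` to filtering first means disabled LoRAs no longer get loader nodes at all. A small demonstration of the lodash `filter`-over-a-map semantics the change relies on; the example entries are hypothetical:

```ts
import { filter, size } from 'lodash-es';

type LoRA = { model_name: string; weight: number; isEnabled?: boolean };

// The slice appears to store LoRAs keyed by model; lodash filter over an
// object yields an array of its values, dropping disabled entries.
const loras: Record<string, LoRA> = {
  'detail-tweaker.safetensors': { model_name: 'detail-tweaker.safetensors', weight: 0.75, isEnabled: true },
  'old-style.safetensors': { model_name: 'old-style.safetensors', weight: 1, isEnabled: false },
};

const enabledLoRAs = filter(loras, (l) => l.isEnabled ?? false);
console.log(size(enabledLoRAs)); // 1 - only the enabled LoRA is chained into the graph
```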
@ -14,6 +14,7 @@ import {
  SDXL_IMAGE_TO_IMAGE_GRAPH,
  SDXL_TEXT_TO_IMAGE_GRAPH,
  SEAMLESS,
  VAE_LOADER,
} from './constants';
import { upsertMetadata } from './metadata';

@ -23,7 +24,8 @@ export const addSeamlessToLinearGraph = (
  modelLoaderNodeId: string
): void => {
  // Remove Existing UNet Connections
  const { seamlessXAxis, seamlessYAxis } = state.generation;
  const { seamlessXAxis, seamlessYAxis, vae } = state.generation;
  const isAutoVae = !vae;

  graph.nodes[SEAMLESS] = {
    id: SEAMLESS,
@ -32,6 +34,15 @@
    seamless_y: seamlessYAxis,
  } as SeamlessModeInvocation;

  if (!isAutoVae) {
    graph.nodes[VAE_LOADER] = {
      type: 'vae_loader',
      id: VAE_LOADER,
      is_intermediate: true,
      vae_model: vae,
    };
  }

  if (seamlessXAxis) {
    upsertMetadata(graph, {
      seamless_x: seamlessXAxis,
@ -75,7 +86,7 @@
    },
    {
      source: {
        node_id: modelLoaderNodeId,
        node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        field: 'vae',
      },
      destination: {
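The fix here is that a custom VAE previously had no loader node when seamless was enabled, leaving the SEAMLESS node with nothing to wrap. A condensed sketch of the new behavior, with loosened types and placeholder node ids (the real code uses the `VAE_LOADER` constant):

```ts
type SketchGraph = { nodes: Record<string, unknown> };

// Returns the node id the SEAMLESS node should read 'vae' from.
const addVaeLoaderForSeamless = (
  graph: SketchGraph,
  vae: { model_name: string } | null,
  modelLoaderNodeId: string
): string => {
  const isAutoVae = !vae;
  if (!isAutoVae) {
    // Create the custom VAE loader here so seamless has a source to wrap.
    graph.nodes['vae_loader'] = { type: 'vae_loader', id: 'vae_loader', is_intermediate: true, vae_model: vae };
  }
  return isAutoVae ? modelLoaderNodeId : 'vae_loader';
};
```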
@ -21,6 +21,7 @@ import {
  SDXL_IMAGE_TO_IMAGE_GRAPH,
  SDXL_REFINER_INPAINT_CREATE_MASK,
  SDXL_TEXT_TO_IMAGE_GRAPH,
  SEAMLESS,
  TEXT_TO_IMAGE_GRAPH,
  VAE_LOADER,
} from './constants';
@ -31,15 +32,16 @@ export const addVAEToGraph = (
  graph: NonNullableGraph,
  modelLoaderNodeId: string = MAIN_MODEL_LOADER
): void => {
  const { vae, canvasCoherenceMode } = state.generation;
  const { vae, canvasCoherenceMode, seamlessXAxis, seamlessYAxis } = state.generation;
  const { boundingBoxScaleMethod } = state.canvas;
  const { refinerModel } = state.sdxl;

  const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);

  const isAutoVae = !vae;
  const isSeamlessEnabled = seamlessXAxis || seamlessYAxis;

  if (!isAutoVae) {
  if (!isAutoVae && !isSeamlessEnabled) {
    graph.nodes[VAE_LOADER] = {
      type: 'vae_loader',
      id: VAE_LOADER,
@ -56,7 +58,7 @@ export const addVAEToGraph = (
  ) {
    graph.edges.push({
      source: {
        node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        field: 'vae',
      },
      destination: {
@ -74,7 +76,7 @@ export const addVAEToGraph = (
  ) {
    graph.edges.push({
      source: {
        node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        field: 'vae',
      },
      destination: {
@ -92,7 +94,7 @@ export const addVAEToGraph = (
  ) {
    graph.edges.push({
      source: {
        node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        field: 'vae',
      },
      destination: {
@ -111,7 +113,7 @@ export const addVAEToGraph = (
    graph.edges.push(
      {
        source: {
          node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
          node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? modelLoaderNodeId : VAE_LOADER,
          field: 'vae',
        },
        destination: {
@ -121,7 +123,7 @@ export const addVAEToGraph = (
      },
      {
        source: {
          node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
          node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? modelLoaderNodeId : VAE_LOADER,
          field: 'vae',
        },
        destination: {
@ -131,7 +133,7 @@ export const addVAEToGraph = (
      },
      {
        source: {
          node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
          node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? modelLoaderNodeId : VAE_LOADER,
          field: 'vae',
        },
        destination: {
@ -145,7 +147,7 @@ export const addVAEToGraph = (
  if (canvasCoherenceMode !== 'unmasked') {
    graph.edges.push({
      source: {
        node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        field: 'vae',
      },
      destination: {
@ -160,7 +162,7 @@ export const addVAEToGraph = (
  if (graph.id === SDXL_CANVAS_INPAINT_GRAPH || graph.id === SDXL_CANVAS_OUTPAINT_GRAPH) {
    graph.edges.push({
      source: {
        node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        node_id: isSeamlessEnabled ? SEAMLESS : isAutoVae ? modelLoaderNodeId : VAE_LOADER,
        field: 'vae',
      },
      destination: {
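All of the hunks in this file repeat one nested ternary. A sketch of the edge-pushing step with the selection factored out; the function and its parameter shape are assumptions, not part of the change:

```ts
import type { NonNullableGraph } from 'services/api/types';

// Precedence made explicit: seamless first, then custom VAE, then the model loader.
const pushVaeEdge = (
  graph: NonNullableGraph,
  ids: { seamless: string; vaeLoader: string; modelLoader: string; destination: string },
  flags: { isSeamlessEnabled: boolean; isAutoVae: boolean }
): void => {
  const sourceNodeId = flags.isSeamlessEnabled ? ids.seamless : flags.isAutoVae ? ids.modelLoader : ids.vaeLoader;
  graph.edges.push({
    source: { node_id: sourceNodeId, field: 'vae' },
    destination: { node_id: ids.destination, field: 'vae' },
  });
};
```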
@ -123,6 +123,7 @@ export const buildCanvasImageToImageGraph = (state: RootState, initialImage: Ima
    id: DENOISE_LATENTS,
    is_intermediate,
    cfg_scale,
    cfg_rescale_multiplier,
    scheduler,
    steps,
    denoising_start: 1 - strength,

@ -58,6 +58,7 @@ export const buildCanvasInpaintGraph = (
    negativePrompt,
    model,
    cfgScale: cfg_scale,
    cfgRescaleMultiplier: cfg_rescale_multiplier,
    scheduler,
    steps,
    img2imgStrength: strength,
@ -152,6 +153,7 @@ export const buildCanvasInpaintGraph = (
      is_intermediate,
      steps: steps,
      cfg_scale: cfg_scale,
      cfg_rescale_multiplier,
      scheduler: scheduler,
      denoising_start: 1 - strength,
      denoising_end: 1,
@ -175,6 +177,7 @@ export const buildCanvasInpaintGraph = (
      is_intermediate,
      steps: canvasCoherenceSteps,
      cfg_scale: cfg_scale,
      cfg_rescale_multiplier,
      scheduler: scheduler,
      denoising_start: 1 - canvasCoherenceStrength,
      denoising_end: 1,

@ -60,6 +60,7 @@ export const buildCanvasOutpaintGraph = (
    negativePrompt,
    model,
    cfgScale: cfg_scale,
    cfgRescaleMultiplier: cfg_rescale_multiplier,
    scheduler,
    steps,
    img2imgStrength: strength,
@ -161,6 +162,7 @@ export const buildCanvasOutpaintGraph = (
      is_intermediate,
      steps: steps,
      cfg_scale: cfg_scale,
      cfg_rescale_multiplier,
      scheduler: scheduler,
      denoising_start: 1 - strength,
      denoising_end: 1,
@ -184,6 +186,7 @@ export const buildCanvasOutpaintGraph = (
      is_intermediate,
      steps: canvasCoherenceSteps,
      cfg_scale: cfg_scale,
      cfg_rescale_multiplier,
      scheduler: scheduler,
      denoising_start: 1 - canvasCoherenceStrength,
      denoising_end: 1,

@ -124,6 +124,7 @@ export const buildCanvasSDXLImageToImageGraph = (state: RootState, initialImage:
    id: SDXL_DENOISE_LATENTS,
    is_intermediate,
    cfg_scale,
    cfg_rescale_multiplier,
    scheduler,
    steps,
    denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,

@ -60,6 +60,7 @@ export const buildCanvasSDXLInpaintGraph = (
    negativePrompt,
    model,
    cfgScale: cfg_scale,
    cfgRescaleMultiplier: cfg_rescale_multiplier,
    scheduler,
    steps,
    seed,
@ -151,6 +152,7 @@ export const buildCanvasSDXLInpaintGraph = (
      is_intermediate,
      steps: steps,
      cfg_scale: cfg_scale,
      cfg_rescale_multiplier,
      scheduler: scheduler,
      denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,
      denoising_end: refinerModel ? refinerStart : 1,
@ -174,6 +176,7 @@ export const buildCanvasSDXLInpaintGraph = (
      is_intermediate,
      steps: canvasCoherenceSteps,
      cfg_scale: cfg_scale,
      cfg_rescale_multiplier,
      scheduler: scheduler,
      denoising_start: 1 - canvasCoherenceStrength,
      denoising_end: 1,

@ -62,6 +62,7 @@ export const buildCanvasSDXLOutpaintGraph = (
    negativePrompt,
    model,
    cfgScale: cfg_scale,
    cfgRescaleMultiplier: cfg_rescale_multiplier,
    scheduler,
    steps,
    seed,
@ -160,6 +161,7 @@ export const buildCanvasSDXLOutpaintGraph = (
      is_intermediate,
      steps: steps,
      cfg_scale: cfg_scale,
      cfg_rescale_multiplier,
      scheduler: scheduler,
      denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,
      denoising_end: refinerModel ? refinerStart : 1,
@ -183,6 +185,7 @@ export const buildCanvasSDXLOutpaintGraph = (
      is_intermediate,
      steps: canvasCoherenceSteps,
      cfg_scale: cfg_scale,
      cfg_rescale_multiplier,
      scheduler: scheduler,
      denoising_start: 1 - canvasCoherenceStrength,
      denoising_end: 1,

@ -117,6 +117,7 @@ export const buildCanvasSDXLTextToImageGraph = (state: RootState): NonNullableGr
    id: SDXL_DENOISE_LATENTS,
    is_intermediate,
    cfg_scale,
    cfg_rescale_multiplier,
    scheduler,
    steps,
    denoising_start: 0,

@ -115,6 +115,7 @@ export const buildCanvasTextToImageGraph = (state: RootState): NonNullableGraph
    id: DENOISE_LATENTS,
    is_intermediate,
    cfg_scale,
    cfg_rescale_multiplier,
    scheduler,
    steps,
    denoising_start: 0,

@ -123,6 +123,7 @@ export const buildLinearImageToImageGraph = (state: RootState): NonNullableGraph
    type: 'denoise_latents',
    id: DENOISE_LATENTS,
    cfg_scale,
    cfg_rescale_multiplier,
    scheduler,
    steps,
    denoising_start: 1 - strength,

@ -126,6 +126,7 @@ export const buildLinearSDXLImageToImageGraph = (state: RootState): NonNullableG
    type: 'denoise_latents',
    id: SDXL_DENOISE_LATENTS,
    cfg_scale,
    cfg_rescale_multiplier,
    scheduler,
    steps,
    denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,

@ -109,6 +109,7 @@ export const buildLinearSDXLTextToImageGraph = (state: RootState): NonNullableGr
    type: 'denoise_latents',
    id: SDXL_DENOISE_LATENTS,
    cfg_scale,
    cfg_rescale_multiplier,
    scheduler,
    steps,
    denoising_start: 0,
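The hunks above all thread the new `cfg_rescale_multiplier` setting into each builder's denoise node. For orientation, a denoise node as these builders now assemble it, with illustrative values only:

```ts
const denoiseNode = {
  type: 'denoise_latents',
  id: 'denoise_latents', // DENOISE_LATENTS or SDXL_DENOISE_LATENTS in the real builders
  is_intermediate: true,
  cfg_scale: 7.5,
  cfg_rescale_multiplier: 0.7, // new field; CFG rescale damps the oversaturation high guidance can cause
  scheduler: 'euler',
  steps: 30,
  denoising_start: 0,
  denoising_end: 1,
};
```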
Some files were not shown because too many files have changed in this diff.