Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00.

Compare commits: bugfix/cli ... test/test- (87 commits)
Commit SHAs:

cd528eda32
4a27daa149
9eafec720d
3d3775c962
8366cd2a00
ab1ec3720a
71e298b722
89a039460d
a342e64772
90a038c685
520ccdb0a9
1c7ea57492
6494e8e551
513fceac82
99a8ebe3a0
3a136420d5
43f2398e14
d0cf98d7f6
8111dd6cc5
99e4b87fae
884ec0b5df
9ccfa34e04
d5aa74623d
d63a614b8b
cbc905a4d6
6001d3d71d
b9f607be56
8831d1ee41
a0be83e370
8702a63197
d7f0a7919f
356b5a41a9
e56a6d85a9
e22a091d76
141d02939a
5cb372e9d0
f95fe68753
6d33893844
fc53112d8e
41f7aa6ab4
9bec755198
2570497d83
5d735a714d
6aa87f973e
f793fdf3d4
2e404b7cca
a760bdae9f
4cfd55936c
5c3a27aac6
d573a23090
351abd2ca2
9733cd4199
9976bc6908
c68db6e40f
3a50798a52
a98426d2c6
504f426f0a
840cbc1d39
014d6187ab
9fb15fae87
a07336a020
0718cc2392
935e4632c2
a83d8810c4
76b3f8956b
ff8a8a1963
cb6d0c8851
67f2616d5a
f8f1740668
e66d0f7372
546aaedbe4
7f650d00de
4e6b579526
6334c4adf5
66b2366efc
e147379aa7
5a821384d3
584b513038
55ad4feb5c
b7555ddae8
8afc47018b
a97ec88e06
282d36b640
14e25bf277
001bba1719
9db152bf75
15b33ad501
.github/workflows/pyflakes.yml (vendored, 20 changed lines)
@@ -1,20 +0,0 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - main
-      - development
-      - 'release-candidate-*'
-
-jobs:
-  pyflakes:
-    name: runner / pyflakes
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: pyflakes
-        uses: reviewdog/action-pyflakes@v1
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          reporter: github-pr-review
.github/workflows/style-checks.yml (vendored, 7 changed lines)
@@ -18,8 +18,7 @@ jobs:

       - name: Install dependencies with pip
         run: |
-          pip install black flake8 Flake8-pyproject isort
+          pip install ruff

-      - run: isort --check-only .
-      - run: black --check .
-      - run: flake8
+      - run: ruff check --output-format=github .
+      - run: ruff format --check .
@@ -11,5 +11,5 @@ INVOKEAI_ROOT=
 # HUGGING_FACE_HUB_TOKEN=

 ## optional variables specific to the docker setup.
-# GPU_DRIVER=cuda
+# GPU_DRIVER=cuda # or rocm
 # CONTAINER_UID=1000
@@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai
 ENV VIRTUAL_ENV=/opt/venv/invokeai

 ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-ARG TORCH_VERSION=2.0.1
-ARG TORCHVISION_VERSION=0.15.2
+ARG TORCH_VERSION=2.1.0
+ARG TORCHVISION_VERSION=0.16
 ARG GPU_DRIVER=cuda
 ARG TARGETPLATFORM="linux/amd64"
 # unused but available
@@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
     elif [ "$GPU_DRIVER" = "rocm" ]; then \
-        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \
+        extra_index_url_arg="--index-url https://download.pytorch.org/whl/rocm5.6"; \
    else \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
    fi &&\
@@ -15,6 +15,10 @@ services:
           - driver: nvidia
             count: 1
             capabilities: [gpu]
+    # For AMD support, comment out the deploy section above and uncomment the devices section below:
+    #devices:
+    #  - /dev/kfd:/dev/kfd
+    #  - /dev/dri:/dev/dri
     build:
       context: ..
       dockerfile: docker/Dockerfile
@@ -7,5 +7,5 @@ set -e
 SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1

-docker compose up --build -d
+docker compose up -d
 docker compose logs -f
@@ -198,6 +198,7 @@ The list of schedulers has been completely revamped and brought up to date:
 | **dpmpp_2m** | DPMSolverMultistepScheduler | original noise schedule |
 | **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
 | **unipc** | UniPCMultistepScheduler | CPU only |
+| **lcm** | LCMScheduler | |

 Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
@@ -137,7 +137,7 @@ def dest_path(dest=None) -> Path:
    path_completer = PathCompleter(
        only_directories=True,
        expanduser=True,
-        get_paths=lambda: [browse_start],
+        get_paths=lambda: [browse_start],  # noqa: B023
        # get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
    )

@@ -149,7 +149,7 @@ def dest_path(dest=None) -> Path:
        completer=path_completer,
        default=str(browse_start) + os.sep,
        vi_mode=True,
-        complete_while_typing=True
+        complete_while_typing=True,
        # Test that this is not needed on Windows
        # complete_style=CompleteStyle.READLINE_LIKE,
    )
@@ -28,7 +28,7 @@ class FastAPIEventService(EventServiceBase):
        self.__queue.put(None)

    def dispatch(self, event_name: str, payload: Any) -> None:
-        self.__queue.put(dict(event_name=event_name, payload=payload))
+        self.__queue.put({"event_name": event_name, "payload": payload})

    async def __dispatch_from_queue(self, stop_event: threading.Event):
        """Get events from the queue and dispatch them, from the correct thread"""
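A recurring pattern in this comparison is replacing `dict(...)` and `list()` constructor calls with literal syntax, which ruff's flake8-comprehensions rules flag (mapping this to rule C408 is an assumption; the repo's exact ruff rule selection is not shown on this page). A minimal sketch of the difference:

```python
# Constructor call: looks up the name `dict` at runtime and only accepts
# keyword arguments that are valid Python identifiers.
payload_old = dict(event_name="progress", payload={"step": 1})

# Literal: no name lookup, any hashable key, and the form linters prefer.
payload_new = {"event_name": "progress", "payload": {"step": 1}}

assert payload_old == payload_new
```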
@@ -55,7 +55,7 @@ async def list_models(
 ) -> ModelsList:
    """Gets a list of models"""
    if base_models and len(base_models) > 0:
-        models_raw = list()
+        models_raw = []
        for base_model in base_models:
            models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type))
    else:
@@ -34,4 +34,4 @@ class SocketIO:

    async def _handle_unsub_queue(self, sid, data, *args, **kwargs):
        if "queue_id" in data:
-            await self.__sio.enter_room(sid, data["queue_id"])
+            await self.__sio.leave_room(sid, data["queue_id"])
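The `_handle_unsub_queue` change is a behavioral bugfix: the unsubscribe handler was (re-)entering the Socket.IO room instead of leaving it, so clients could never actually stop receiving queue events. A minimal sketch of the intended pairing with python-socketio, mirroring how the server code above awaits these calls (the event names here are illustrative, not the app's real ones):

```python
import socketio

sio = socketio.AsyncServer(async_mode="asgi")

@sio.on("subscribe_queue")
async def handle_sub(sid: str, data: dict) -> None:
    if "queue_id" in data:
        await sio.enter_room(sid, data["queue_id"])  # start receiving room events

@sio.on("unsubscribe_queue")
async def handle_unsub(sid: str, data: dict) -> None:
    if "queue_id" in data:
        await sio.leave_room(sid, data["queue_id"])  # stop receiving room events
```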
@@ -130,7 +130,7 @@ def custom_openapi() -> dict[str, Any]:
    # Add all outputs
    all_invocations = BaseInvocation.get_invocations()
    output_types = set()
-    output_type_titles = dict()
+    output_type_titles = {}
    for invoker in all_invocations:
        output_type = signature(invoker.invoke).return_annotation
        output_types.add(output_type)
@@ -171,12 +171,12 @@ def custom_openapi() -> dict[str, Any]:
            # print(f"Config with name {name} already defined")
            continue

-        openapi_schema["components"]["schemas"][name] = dict(
-            title=name,
-            description="An enumeration.",
-            type="string",
-            enum=list(v.value for v in model_config_format_enum),
-        )
+        openapi_schema["components"]["schemas"][name] = {
+            "title": name,
+            "description": "An enumeration.",
+            "type": "string",
+            "enum": [v.value for v in model_config_format_enum],
+        }

    app.openapi_schema = openapi_schema
    return app.openapi_schema
@@ -25,4 +25,4 @@ spec.loader.exec_module(module)

 # add core nodes to __all__
 python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.glob("*.py"))
-__all__ = list(f.stem for f in python_files)  # type: ignore
+__all__ = [f.stem for f in python_files]  # type: ignore
@@ -16,6 +16,7 @@ from pydantic.fields import FieldInfo, _Unset
 from pydantic_core import PydanticUndefined

 from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.app.util.misc import uuid_string

 if TYPE_CHECKING:
@@ -30,70 +31,6 @@ class InvalidFieldError(TypeError):
    pass


-class FieldDescriptions:
-    denoising_start = "When to start denoising, expressed as a percentage of total steps"
-    denoising_end = "When to stop denoising, expressed as a percentage of total steps"
-    cfg_scale = "Classifier-Free Guidance scale"
-    scheduler = "Scheduler to use during inference"
-    positive_cond = "Positive conditioning tensor"
-    negative_cond = "Negative conditioning tensor"
-    noise = "Noise tensor"
-    clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
-    unet = "UNet (scheduler, LoRAs)"
-    vae = "VAE"
-    cond = "Conditioning tensor"
-    controlnet_model = "ControlNet model to load"
-    vae_model = "VAE model to load"
-    lora_model = "LoRA model to load"
-    main_model = "Main model (UNet, VAE, CLIP) to load"
-    sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
-    sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
-    onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
-    lora_weight = "The weight at which the LoRA is applied to each model"
-    compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
-    raw_prompt = "Raw prompt text (no parsing)"
-    sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
-    skipped_layers = "Number of layers to skip in text encoder"
-    seed = "Seed for random number generation"
-    steps = "Number of steps to run"
-    width = "Width of output (px)"
-    height = "Height of output (px)"
-    control = "ControlNet(s) to apply"
-    ip_adapter = "IP-Adapter to apply"
-    t2i_adapter = "T2I-Adapter(s) to apply"
-    denoised_latents = "Denoised latents tensor"
-    latents = "Latents tensor"
-    strength = "Strength of denoising (proportional to steps)"
-    metadata = "Optional metadata to be saved with the image"
-    metadata_collection = "Collection of Metadata"
-    metadata_item_polymorphic = "A single metadata item or collection of metadata items"
-    metadata_item_label = "Label for this metadata item"
-    metadata_item_value = "The value for this metadata item (may be any type)"
-    workflow = "Optional workflow to be saved with the image"
-    interp_mode = "Interpolation mode"
-    torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
-    fp32 = "Whether or not to use full float32 precision"
-    precision = "Precision to use"
-    tiled = "Processing using overlapping tiles (reduce memory consumption)"
-    detect_res = "Pixel resolution for detection"
-    image_res = "Pixel resolution for output image"
-    safe_mode = "Whether or not to use safe mode"
-    scribble_mode = "Whether or not to use scribble mode"
-    scale_factor = "The factor by which to scale"
-    blend_alpha = (
-        "Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
-    )
-    num_1 = "The first number"
-    num_2 = "The second number"
-    mask = "The mask to use for the operation"
-    board = "The board to save the image to"
-    image = "The image to process"
-    tile_size = "Tile size"
-    inclusive_low = "The inclusive low value"
-    exclusive_high = "The exclusive high value"
-    decimal_places = "The number of decimal places to round to"
-
-
 class Input(str, Enum):
    """
    The type of input a field accepts.
@@ -299,35 +236,35 @@ def InputField(
        Ignored for non-collection fields.
    """

-    json_schema_extra_: dict[str, Any] = dict(
-        input=input,
-        ui_type=ui_type,
-        ui_component=ui_component,
-        ui_hidden=ui_hidden,
-        ui_order=ui_order,
-        item_default=item_default,
-        ui_choice_labels=ui_choice_labels,
-        _field_kind="input",
-    )
+    json_schema_extra_: dict[str, Any] = {
+        "input": input,
+        "ui_type": ui_type,
+        "ui_component": ui_component,
+        "ui_hidden": ui_hidden,
+        "ui_order": ui_order,
+        "item_default": item_default,
+        "ui_choice_labels": ui_choice_labels,
+        "_field_kind": "input",
+    }

-    field_args = dict(
-        default=default,
-        default_factory=default_factory,
-        title=title,
-        description=description,
-        pattern=pattern,
-        strict=strict,
-        gt=gt,
-        ge=ge,
-        lt=lt,
-        le=le,
-        multiple_of=multiple_of,
-        allow_inf_nan=allow_inf_nan,
-        max_digits=max_digits,
-        decimal_places=decimal_places,
-        min_length=min_length,
-        max_length=max_length,
-    )
+    field_args = {
+        "default": default,
+        "default_factory": default_factory,
+        "title": title,
+        "description": description,
+        "pattern": pattern,
+        "strict": strict,
+        "gt": gt,
+        "ge": ge,
+        "lt": lt,
+        "le": le,
+        "multiple_of": multiple_of,
+        "allow_inf_nan": allow_inf_nan,
+        "max_digits": max_digits,
+        "decimal_places": decimal_places,
+        "min_length": min_length,
+        "max_length": max_length,
+    }

    """
    Invocation definitions have their fields typed correctly for their `invoke()` functions.
@@ -362,24 +299,24 @@ def InputField(

    # because we are manually making fields optional, we need to store the original required bool for reference later
    if default is PydanticUndefined and default_factory is PydanticUndefined:
-        json_schema_extra_.update(dict(orig_required=True))
+        json_schema_extra_.update({"orig_required": True})
    else:
-        json_schema_extra_.update(dict(orig_required=False))
+        json_schema_extra_.update({"orig_required": False})

    # make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one
    if (input is Input.Any or input is Input.Connection) and default_factory is PydanticUndefined:
        default_ = None if default is PydanticUndefined else default
-        provided_args.update(dict(default=default_))
+        provided_args.update({"default": default_})
        if default is not PydanticUndefined:
            # before invoking, we'll grab the original default value and set it on the field if the field wasn't provided a value
-            json_schema_extra_.update(dict(default=default))
-            json_schema_extra_.update(dict(orig_default=default))
+            json_schema_extra_.update({"default": default})
+            json_schema_extra_.update({"orig_default": default})
    elif default is not PydanticUndefined and default_factory is PydanticUndefined:
        default_ = default
-        provided_args.update(dict(default=default_))
-        json_schema_extra_.update(dict(orig_default=default_))
+        provided_args.update({"default": default_})
+        json_schema_extra_.update({"orig_default": default_})
    elif default_factory is not PydanticUndefined:
-        provided_args.update(dict(default_factory=default_factory))
+        provided_args.update({"default_factory": default_factory})
        # TODO: cannot serialize default_factory...
        # json_schema_extra_.update(dict(orig_default_factory=default_factory))
@@ -446,12 +383,12 @@ def OutputField(
        decimal_places=decimal_places,
        min_length=min_length,
        max_length=max_length,
-        json_schema_extra=dict(
-            ui_type=ui_type,
-            ui_hidden=ui_hidden,
-            ui_order=ui_order,
-            _field_kind="output",
-        ),
+        json_schema_extra={
+            "ui_type": ui_type,
+            "ui_hidden": ui_hidden,
+            "ui_order": ui_order,
+            "_field_kind": "output",
+        },
    )
@@ -523,14 +460,14 @@ class BaseInvocationOutput(BaseModel):

    @classmethod
    def get_output_types(cls) -> Iterable[str]:
-        return map(lambda i: get_type(i), BaseInvocationOutput.get_outputs())
+        return (get_type(i) for i in BaseInvocationOutput.get_outputs())

    @staticmethod
    def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
        # Because we use a pydantic Literal field with default value for the invocation type,
        # it will be typed as optional in the OpenAPI schema. Make it required manually.
        if "required" not in schema or not isinstance(schema["required"], list):
-            schema["required"] = list()
+            schema["required"] = []
        schema["required"].extend(["type"])

    model_config = ConfigDict(
@@ -590,16 +527,11 @@ class BaseInvocation(ABC, BaseModel):
    @classmethod
    def get_invocations_map(cls) -> dict[str, BaseInvocation]:
        # Get the type strings out of the literals and into a dictionary
-        return dict(
-            map(
-                lambda i: (get_type(i), i),
-                BaseInvocation.get_invocations(),
-            )
-        )
+        return {get_type(i): i for i in BaseInvocation.get_invocations()}

    @classmethod
    def get_invocation_types(cls) -> Iterable[str]:
-        return map(lambda i: get_type(i), BaseInvocation.get_invocations())
+        return (get_type(i) for i in BaseInvocation.get_invocations())

    @classmethod
    def get_output_type(cls) -> BaseInvocationOutput:
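Several hunks in this file swap `map`/`lambda` and `dict(map(...))` constructions for comprehensions and generator expressions, which read more directly and avoid the lambda indirection. A small standalone sketch of the shapes involved (the `get_type` helper here is a stand-in, not the repo's real one):

```python
def get_type(i: type) -> str:
    return i.__name__.lower()  # stand-in for the real helper

classes = [int, str, float]

# map/lambda versions:
names_map = map(lambda i: get_type(i), classes)
by_name_map = dict(map(lambda i: (get_type(i), i), classes))

# comprehension/generator versions, as adopted by the diff:
names_gen = (get_type(i) for i in classes)
by_name = {get_type(i): i for i in classes}

assert list(names_map) == list(names_gen)
assert by_name_map == by_name
```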
@@ -618,7 +550,7 @@ class BaseInvocation(ABC, BaseModel):
        if uiconfig and hasattr(uiconfig, "version"):
            schema["version"] = uiconfig.version
        if "required" not in schema or not isinstance(schema["required"], list):
-            schema["required"] = list()
+            schema["required"] = []
        schema["required"].extend(["type", "id"])

    @abstractmethod
@@ -672,15 +604,15 @@ class BaseInvocation(ABC, BaseModel):
    id: str = Field(
        default_factory=uuid_string,
        description="The id of this instance of an invocation. Must be unique among all instances of invocations.",
-        json_schema_extra=dict(_field_kind="internal"),
+        json_schema_extra={"_field_kind": "internal"},
    )
    is_intermediate: bool = Field(
        default=False,
        description="Whether or not this is an intermediate invocation.",
-        json_schema_extra=dict(ui_type=UIType.IsIntermediate, _field_kind="internal"),
+        json_schema_extra={"ui_type": UIType.IsIntermediate, "_field_kind": "internal"},
    )
    use_cache: bool = Field(
-        default=True, description="Whether or not to use the cache", json_schema_extra=dict(_field_kind="internal")
+        default=True, description="Whether or not to use the cache", json_schema_extra={"_field_kind": "internal"}
    )

    UIConfig: ClassVar[Type[UIConfigBase]]
@@ -714,7 +646,7 @@ class _Model(BaseModel):


 # Get all pydantic model attrs, methods, etc
-RESERVED_PYDANTIC_FIELD_NAMES = set(map(lambda m: m[0], inspect.getmembers(_Model())))
+RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}


 def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:
@@ -729,9 +661,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:

        field_kind = (
            # _field_kind is defined via InputField(), OutputField() or by one of the internal fields defined in this file
-            field.json_schema_extra.get("_field_kind", None)
-            if field.json_schema_extra
-            else None
+            field.json_schema_extra.get("_field_kind", None) if field.json_schema_extra else None
        )

        # must have a field_kind
@@ -792,7 +722,7 @@ def invocation(
        # Add OpenAPI schema extras
        uiconf_name = cls.__qualname__ + ".UIConfig"
        if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name:
-            cls.UIConfig = type(uiconf_name, (UIConfigBase,), dict())
+            cls.UIConfig = type(uiconf_name, (UIConfigBase,), {})
        if title is not None:
            cls.UIConfig.title = title
        if tags is not None:
|
||||
|
||||
invocation_type_annotation = Literal[invocation_type] # type: ignore
|
||||
invocation_type_field = Field(
|
||||
title="type", default=invocation_type, json_schema_extra=dict(_field_kind="internal")
|
||||
title="type", default=invocation_type, json_schema_extra={"_field_kind": "internal"}
|
||||
)
|
||||
|
||||
docstring = cls.__doc__
|
||||
@ -865,7 +795,7 @@ def invocation_output(
|
||||
# Add the output type to the model.
|
||||
|
||||
output_type_annotation = Literal[output_type] # type: ignore
|
||||
output_type_field = Field(title="type", default=output_type, json_schema_extra=dict(_field_kind="internal"))
|
||||
output_type_field = Field(title="type", default=output_type, json_schema_extra={"_field_kind": "internal"})
|
||||
|
||||
docstring = cls.__doc__
|
||||
cls = create_model(
|
||||
@ -897,7 +827,7 @@ WorkflowFieldValidator = TypeAdapter(WorkflowField)
|
||||
|
||||
class WithWorkflow(BaseModel):
|
||||
workflow: Optional[WorkflowField] = Field(
|
||||
default=None, description=FieldDescriptions.workflow, json_schema_extra=dict(_field_kind="internal")
|
||||
default=None, description=FieldDescriptions.workflow, json_schema_extra={"_field_kind": "internal"}
|
||||
)
|
||||
|
||||
|
||||
@@ -915,5 +845,5 @@ MetadataFieldValidator = TypeAdapter(MetadataField)

 class WithMetadata(BaseModel):
    metadata: Optional[MetadataField] = Field(
-        default=None, description=FieldDescriptions.metadata, json_schema_extra=dict(_field_kind="internal")
+        default=None, description=FieldDescriptions.metadata, json_schema_extra={"_field_kind": "internal"}
    )
@@ -7,6 +7,7 @@ from compel import Compel, ReturnedEmbeddingsType
 from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment

 from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput
+from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
     BasicConditioningInfo,
     ExtraConditioningInfo,
@@ -19,7 +20,6 @@ from ...backend.util.devices import torch_dtype
 from .baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     Input,
     InputField,
     InvocationContext,
@@ -28,12 +28,12 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator

 from invokeai.app.invocations.primitives import ImageField, ImageOutput
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
+from invokeai.app.shared.fields import FieldDescriptions

 from ...backend.model_management import BaseModelType
 from .baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     Input,
     InputField,
     InvocationContext,
@@ -131,7 +131,7 @@ def prepare_faces_list(
    deduped_faces: list[FaceResultData] = []

    if len(face_result_list) == 0:
-        return list()
+        return []

    for candidate in face_result_list:
        should_add = True
@@ -210,7 +210,7 @@ def generate_face_box_mask(
    # Check if any face is detected.
    if results.multi_face_landmarks:  # type: ignore # these are via protobuf and not typed
        # Search for the face_id in the detected faces.
-        for face_id, face_landmarks in enumerate(results.multi_face_landmarks):  # type: ignore # these are via protobuf and not typed
+        for _face_id, face_landmarks in enumerate(results.multi_face_landmarks):  # type: ignore # these are via protobuf and not typed
            # Get the bounding box of the face mesh.
            x_coordinates = [landmark.x for landmark in face_landmarks.landmark]
            y_coordinates = [landmark.y for landmark in face_landmarks.landmark]
@@ -9,19 +9,11 @@ from PIL import Image, ImageChops, ImageFilter, ImageOps

 from invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
+from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
 from invokeai.backend.image_util.safety_checker import SafetyChecker

-from .baseinvocation import (
-    BaseInvocation,
-    FieldDescriptions,
-    Input,
-    InputField,
-    InvocationContext,
-    WithMetadata,
-    WithWorkflow,
-    invocation,
-)
+from .baseinvocation import BaseInvocation, Input, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation


 @invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0")
@@ -7,7 +7,6 @@ from pydantic import BaseModel, ConfigDict, Field
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     Input,
     InputField,
     InvocationContext,
@@ -17,6 +16,7 @@ from invokeai.app.invocations.baseinvocation import (
     invocation_output,
 )
 from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.model_management.models.base import BaseModelType, ModelType
 from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id
@@ -10,7 +10,7 @@ import torch
 import torchvision.transforms as T
 from diffusers import AutoencoderKL, AutoencoderTiny
 from diffusers.image_processor import VaeImageProcessor
-from diffusers.models.adapter import FullAdapterXL, T2IAdapter
+from diffusers.models.adapter import T2IAdapter
 from diffusers.models.attention_processor import (
     AttnProcessor2_0,
     LoRAAttnProcessor2_0,
@@ -34,6 +34,7 @@ from invokeai.app.invocations.primitives import (
 )
 from invokeai.app.invocations.t2i_adapter import T2IAdapterField
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
+from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.app.util.controlnet_utils import prepare_control_image
 from invokeai.app.util.step_callback import stable_diffusion_step_callback
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
@@ -57,7 +58,6 @@ from ...backend.util.devices import choose_precision, choose_torch_device
 from .baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     Input,
     InputField,
     InvocationContext,
@@ -77,7 +77,7 @@ if choose_torch_device() == torch.device("mps"):

 DEFAULT_PRECISION = choose_precision(choose_torch_device())

-SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))]
+SAMPLER_NAME_VALUES = Literal[tuple(SCHEDULER_MAP.keys())]


 @invocation_output("scheduler_output")
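The `SAMPLER_NAME_VALUES` hunk (and the matching `PRECISION_VALUES` and `EASING_FUNCTION_KEYS` hunks later in this comparison) drops a redundant `list()` wrapper: `tuple()` accepts any iterable, so materializing the keys as a list first does nothing. A one-liner to convince yourself (the mapping is an illustrative stand-in):

```python
SCHEDULER_MAP = {"ddim": None, "euler": None}  # illustrative stand-in mapping

# Both expressions produce the same tuple; the inner list() is pure overhead.
assert tuple(list(SCHEDULER_MAP.keys())) == tuple(SCHEDULER_MAP.keys())
```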
@@ -562,10 +562,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
            t2i_adapter_model: T2IAdapter
            with t2i_adapter_model_info as t2i_adapter_model:
                total_downscale_factor = t2i_adapter_model.total_downscale_factor
-                if isinstance(t2i_adapter_model.adapter, FullAdapterXL):
-                    # HACK(ryand): Work around a bug in FullAdapterXL. This is being addressed upstream in diffusers by
-                    # this PR: https://github.com/huggingface/diffusers/pull/5134.
-                    total_downscale_factor = total_downscale_factor // 2

                # Resize the T2I-Adapter input image.
                # We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is applied, the
@@ -710,6 +706,8 @@ class DenoiseLatentsInvocation(BaseInvocation):
        )
        with (
            ExitStack() as exit_stack,
+            ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()),
+            ModelPatcher.apply_freeu(unet_info.context.model, self.unet.freeu_config),
            set_seamless(unet_info.context.model, self.unet.seamless_axes),
            unet_info as unet,
            # Apply the LoRA after unet has been moved to its target device for faster patching.
@@ -1107,7 +1105,7 @@ class BlendLatentsInvocation(BaseInvocation):
        latents_b = context.services.latents.get(self.latents_b.latents_name)

        if latents_a.shape != latents_b.shape:
-            raise "Latents to blend must be the same size."
+            raise Exception("Latents to blend must be the same size.")

        # TODO:
        device = choose_torch_device()
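The `BlendLatentsInvocation` hunk is a genuine bugfix: raising a bare string has been illegal since Python 3, so the old line would have produced a `TypeError` instead of the intended error message. A minimal reproduction:

```python
try:
    raise "Latents to blend must be the same size."  # the old, broken form
except TypeError as err:
    print(err)  # "exceptions must derive from BaseException"

try:
    raise Exception("Latents to blend must be the same size.")  # the fixed form
except Exception as err:
    print(err)
```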
@@ -6,8 +6,9 @@ import numpy as np
 from pydantic import ValidationInfo, field_validator

 from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput
+from invokeai.app.shared.fields import FieldDescriptions

-from .baseinvocation import BaseInvocation, FieldDescriptions, InputField, InvocationContext, invocation
+from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation


 @invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.0")
@@ -144,17 +145,17 @@ INTEGER_OPERATIONS = Literal[
 ]


-INTEGER_OPERATIONS_LABELS = dict(
-    ADD="Add A+B",
-    SUB="Subtract A-B",
-    MUL="Multiply A*B",
-    DIV="Divide A/B",
-    EXP="Exponentiate A^B",
-    MOD="Modulus A%B",
-    ABS="Absolute Value of A",
-    MIN="Minimum(A,B)",
-    MAX="Maximum(A,B)",
-)
+INTEGER_OPERATIONS_LABELS = {
+    "ADD": "Add A+B",
+    "SUB": "Subtract A-B",
+    "MUL": "Multiply A*B",
+    "DIV": "Divide A/B",
+    "EXP": "Exponentiate A^B",
+    "MOD": "Modulus A%B",
+    "ABS": "Absolute Value of A",
+    "MIN": "Minimum(A,B)",
+    "MAX": "Maximum(A,B)",
+}


 @invocation(
@@ -182,8 +183,8 @@ class IntegerMathInvocation(BaseInvocation):
    operation: INTEGER_OPERATIONS = InputField(
        default="ADD", description="The operation to perform", ui_choice_labels=INTEGER_OPERATIONS_LABELS
    )
-    a: int = InputField(default=0, description=FieldDescriptions.num_1)
-    b: int = InputField(default=0, description=FieldDescriptions.num_2)
+    a: int = InputField(default=1, description=FieldDescriptions.num_1)
+    b: int = InputField(default=1, description=FieldDescriptions.num_2)

    @field_validator("b")
    def no_unrepresentable_results(cls, v: int, info: ValidationInfo):
@@ -230,17 +231,17 @@ FLOAT_OPERATIONS = Literal[
 ]


-FLOAT_OPERATIONS_LABELS = dict(
-    ADD="Add A+B",
-    SUB="Subtract A-B",
-    MUL="Multiply A*B",
-    DIV="Divide A/B",
-    EXP="Exponentiate A^B",
-    ABS="Absolute Value of A",
-    SQRT="Square Root of A",
-    MIN="Minimum(A,B)",
-    MAX="Maximum(A,B)",
-)
+FLOAT_OPERATIONS_LABELS = {
+    "ADD": "Add A+B",
+    "SUB": "Subtract A-B",
+    "MUL": "Multiply A*B",
+    "DIV": "Divide A/B",
+    "EXP": "Exponentiate A^B",
+    "ABS": "Absolute Value of A",
+    "SQRT": "Square Root of A",
+    "MIN": "Minimum(A,B)",
+    "MAX": "Maximum(A,B)",
+}


 @invocation(
@@ -256,8 +257,8 @@ class FloatMathInvocation(BaseInvocation):
    operation: FLOAT_OPERATIONS = InputField(
        default="ADD", description="The operation to perform", ui_choice_labels=FLOAT_OPERATIONS_LABELS
    )
-    a: float = InputField(default=0, description=FieldDescriptions.num_1)
-    b: float = InputField(default=0, description=FieldDescriptions.num_2)
+    a: float = InputField(default=1, description=FieldDescriptions.num_1)
+    b: float = InputField(default=1, description=FieldDescriptions.num_2)

    @field_validator("b")
    def no_unrepresentable_results(cls, v: float, info: ValidationInfo):
@@ -265,7 +266,7 @@ class FloatMathInvocation(BaseInvocation):
            raise ValueError("Cannot divide by zero")
        elif info.data["operation"] == "EXP" and info.data["a"] == 0 and v < 0:
            raise ValueError("Cannot raise zero to a negative power")
-        elif info.data["operation"] == "EXP" and type(info.data["a"] ** v) is complex:
+        elif info.data["operation"] == "EXP" and isinstance(info.data["a"] ** v, complex):
            raise ValueError("Root operation resulted in a complex number")
        return v
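The validator change from `type(x) is complex` to `isinstance(x, complex)` follows the usual lint guidance (ruff's E721): `isinstance` also accepts subclasses and reads as a capability check. For this validator the two behave the same on real exponentiation results, but the idiom difference is easy to demonstrate:

```python
class FancyFloat(float):
    pass

x = FancyFloat(2.0)

print(type(x) is float)      # False: exact-class comparison rejects subclasses
print(isinstance(x, float))  # True: isinstance accepts subclasses

# The case the validator guards against: a root of a negative base is complex.
print(isinstance((-8) ** 0.5, complex))  # True
```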
@@ -5,7 +5,6 @@ from pydantic import BaseModel, ConfigDict, Field
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     InputField,
     InvocationContext,
     MetadataField,
@@ -19,6 +18,7 @@ from invokeai.app.invocations.ip_adapter import IPAdapterModelField
 from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField
 from invokeai.app.invocations.primitives import ImageField
 from invokeai.app.invocations.t2i_adapter import T2IAdapterField
+from invokeai.app.shared.fields import FieldDescriptions

 from ...version import __version__
@@ -160,13 +160,14 @@ class CoreMetadataInvocation(BaseInvocation):
    )

    # High resolution fix metadata.
-    hrf_width: Optional[int] = InputField(
+    hrf_enabled: Optional[float] = InputField(
        default=None,
-        description="The high resolution fix height and width multiplier.",
+        description="Whether or not high resolution fix was enabled.",
    )
-    hrf_height: Optional[int] = InputField(
+    # TODO: should this be stricter or do we just let the UI handle it?
+    hrf_method: Optional[str] = InputField(
        default=None,
-        description="The high resolution fix height and width multiplier.",
+        description="The high resolution fix upscale method.",
    )
    hrf_strength: Optional[float] = InputField(
        default=None,
@@ -3,11 +3,13 @@ from typing import List, Optional

 from pydantic import BaseModel, ConfigDict, Field

+from invokeai.app.shared.fields import FieldDescriptions
+from invokeai.app.shared.models import FreeUConfig
+
 from ...backend.model_management import BaseModelType, ModelType, SubModelType
 from .baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     Input,
     InputField,
     InvocationContext,
@@ -36,6 +38,7 @@ class UNetField(BaseModel):
    scheduler: ModelInfo = Field(description="Info to load scheduler submodel")
    loras: List[LoraInfo] = Field(description="Loras to apply on model loading")
    seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless')
+    freeu_config: Optional[FreeUConfig] = Field(default=None, description="FreeU configuration")


 class ClipField(BaseModel):
@@ -51,15 +54,34 @@ class VaeField(BaseModel):
    seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless')


-@invocation_output("model_loader_output")
-class ModelLoaderOutput(BaseInvocationOutput):
-    """Model loader output"""
+@invocation_output("unet_output")
+class UNetOutput(BaseInvocationOutput):
+    """Base class for invocations that output a UNet field"""

    unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
-    clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP")
-    vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE")
+
+
+@invocation_output("vae_output")
+class VAEOutput(BaseInvocationOutput):
+    """Base class for invocations that output a VAE field"""
+
+    vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE")
+
+
+@invocation_output("clip_output")
+class CLIPOutput(BaseInvocationOutput):
+    """Base class for invocations that output a CLIP field"""
+
+    clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP")
+
+
+@invocation_output("model_loader_output")
+class ModelLoaderOutput(UNetOutput, CLIPOutput, VAEOutput):
+    """Model loader output"""
+
+    pass


 class MainModelField(BaseModel):
    """Main model field"""
@@ -366,13 +388,6 @@ class VAEModelField(BaseModel):
    model_config = ConfigDict(protected_namespaces=())


-@invocation_output("vae_loader_output")
-class VaeLoaderOutput(BaseInvocationOutput):
-    """VAE output"""
-
-    vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE")
-
-
 @invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.0")
 class VaeLoaderInvocation(BaseInvocation):
    """Loads a VAE model, outputting a VaeLoaderOutput"""
@@ -384,7 +399,7 @@ class VaeLoaderInvocation(BaseInvocation):
        title="VAE",
    )

-    def invoke(self, context: InvocationContext) -> VaeLoaderOutput:
+    def invoke(self, context: InvocationContext) -> VAEOutput:
        base_model = self.vae_model.base_model
        model_name = self.vae_model.model_name
        model_type = ModelType.Vae
@@ -395,7 +410,7 @@ class VaeLoaderInvocation(BaseInvocation):
            model_type=model_type,
        ):
            raise Exception(f"Unknown vae name: {model_name}!")
-        return VaeLoaderOutput(
+        return VAEOutput(
            vae=VaeField(
                vae=ModelInfo(
                    model_name=model_name,
@@ -457,3 +472,24 @@ class SeamlessModeInvocation(BaseInvocation):
        vae.seamless_axes = seamless_axes_list

        return SeamlessModeOutput(unet=unet, vae=vae)
+
+
+@invocation("freeu", title="FreeU", tags=["freeu"], category="unet", version="1.0.0")
+class FreeUInvocation(BaseInvocation):
+    """
+    Applies FreeU to the UNet. Suggested values (b1/b2/s1/s2):
+
+    SD1.5: 1.2/1.4/0.9/0.2,
+    SD2: 1.1/1.2/0.9/0.2,
+    SDXL: 1.1/1.2/0.6/0.4,
+    """
+
+    unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection, title="UNet")
+    b1: float = InputField(default=1.2, ge=-1, le=3, description=FieldDescriptions.freeu_b1)
+    b2: float = InputField(default=1.4, ge=-1, le=3, description=FieldDescriptions.freeu_b2)
+    s1: float = InputField(default=0.9, ge=-1, le=3, description=FieldDescriptions.freeu_s1)
+    s2: float = InputField(default=0.2, ge=-1, le=3, description=FieldDescriptions.freeu_s2)
+
+    def invoke(self, context: InvocationContext) -> UNetOutput:
+        self.unet.freeu_config = FreeUConfig(s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2)
+        return UNetOutput(unet=self.unet)
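For reference, diffusers exposes the same four FreeU knobs directly on its pipelines via `enable_freeu` (present in recent diffusers releases; treating 0.21 as the version floor is an assumption). A sketch applying the SD1.5 values from the docstring above:

```python
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# b1/b2 boost backbone features, s1/s2 scale skip connections (SD1.5 values).
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
```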
@@ -5,13 +5,13 @@ import torch
 from pydantic import field_validator

 from invokeai.app.invocations.latent import LatentsField
+from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.app.util.misc import SEED_MAX, get_random_seed

 from ...backend.util.devices import choose_torch_device, torch_dtype
 from .baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     InputField,
     InvocationContext,
     OutputField,
@@ -14,6 +14,7 @@ from tqdm import tqdm

 from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
+from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.app.util.step_callback import stable_diffusion_step_callback
 from invokeai.backend import BaseModelType, ModelType, SubModelType

@@ -23,7 +24,6 @@ from ...backend.util import choose_torch_device
 from .baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     Input,
     InputField,
     InvocationContext,
@@ -54,7 +54,7 @@ ORT_TO_NP_TYPE = {
     "tensor(double)": np.float64,
 }

-PRECISION_VALUES = Literal[tuple(list(ORT_TO_NP_TYPE.keys()))]
+PRECISION_VALUES = Literal[tuple(ORT_TO_NP_TYPE.keys())]


 @invocation("prompt_onnx", title="ONNX Prompt (Raw)", tags=["prompt", "onnx"], category="conditioning", version="1.0.0")
@@ -252,7 +252,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation):
        scheduler.set_timesteps(self.steps)
        latents = latents * np.float64(scheduler.init_noise_sigma)

-        extra_step_kwargs = dict()
+        extra_step_kwargs = {}
        if "eta" in set(inspect.signature(scheduler.step).parameters.keys()):
            extra_step_kwargs.update(
                eta=0.0,
@@ -100,7 +100,7 @@ EASING_FUNCTIONS_MAP = {
     "BounceInOut": BounceEaseInOut,
 }

-EASING_FUNCTION_KEYS = Literal[tuple(list(EASING_FUNCTIONS_MAP.keys()))]
+EASING_FUNCTION_KEYS = Literal[tuple(EASING_FUNCTIONS_MAP.keys())]


 # actually I think for now could just use CollectionOutput (which is list[Any]
@@ -161,7 +161,7 @@ class StepParamEasingInvocation(BaseInvocation):
        easing_class = EASING_FUNCTIONS_MAP[self.easing]
        if log_diagnostics:
            context.services.logger.debug("easing class: " + str(easing_class))
-        easing_list = list()
+        easing_list = []
        if self.mirror:  # "expected" mirroring
            # if number of steps is even, squeeze duration down to (number_of_steps)/2
            # and create reverse copy of list to append
@@ -178,7 +178,7 @@ class StepParamEasingInvocation(BaseInvocation):
            end=self.end_value,
            duration=base_easing_duration - 1,
        )
-        base_easing_vals = list()
+        base_easing_vals = []
        for step_index in range(base_easing_duration):
            easing_val = easing_function.ease(step_index)
            base_easing_vals.append(easing_val)
@@ -5,10 +5,11 @@ from typing import Optional, Tuple
 import torch
 from pydantic import BaseModel, Field

+from invokeai.app.shared.fields import FieldDescriptions
+
 from .baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     Input,
     InputField,
     InvocationContext,
@@ -1,8 +1,9 @@
+from invokeai.app.shared.fields import FieldDescriptions
+
 from ...backend.model_management import ModelType, SubModelType
 from .baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     Input,
     InputField,
     InvocationContext,
@@ -5,7 +5,6 @@ from pydantic import BaseModel, ConfigDict, Field
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    FieldDescriptions,
     Input,
     InputField,
     InvocationContext,
@@ -16,6 +15,7 @@ from invokeai.app.invocations.baseinvocation import (
 )
 from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
 from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.model_management.models.base import BaseModelType
@@ -139,7 +139,7 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
                (board_id,),
            )
            result = cast(list[sqlite3.Row], self._cursor.fetchall())
-            images = list(map(lambda r: deserialize_image_record(dict(r)), result))
+            images = [deserialize_image_record(dict(r)) for r in result]

            self._cursor.execute(
                """--sql
@@ -167,7 +167,7 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
                (board_id,),
            )
            result = cast(list[sqlite3.Row], self._cursor.fetchall())
-            image_names = list(map(lambda r: r[0], result))
+            image_names = [r[0] for r in result]
            return image_names
        except sqlite3.Error as e:
            self._conn.rollback()
@@ -199,7 +199,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
            )

            result = cast(list[sqlite3.Row], self._cursor.fetchall())
-            boards = list(map(lambda r: deserialize_board_record(dict(r)), result))
+            boards = [deserialize_board_record(dict(r)) for r in result]

            # Get the total number of boards
            self._cursor.execute(
@@ -236,7 +236,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
            )

            result = cast(list[sqlite3.Row], self._cursor.fetchall())
-            boards = list(map(lambda r: deserialize_board_record(dict(r)), result))
+            boards = [deserialize_board_record(dict(r)) for r in result]

            return boards
@@ -55,7 +55,7 @@ class InvokeAISettings(BaseSettings):
        """
        cls = self.__class__
        type = get_args(get_type_hints(cls)["type"])[0]
-        field_dict = dict({type: dict()})
+        field_dict = {type: {}}
        for name, field in self.model_fields.items():
            if name in cls._excluded_from_yaml():
                continue
@@ -64,7 +64,7 @@ class InvokeAISettings(BaseSettings):
            )
            value = getattr(self, name)
            if category not in field_dict[type]:
-                field_dict[type][category] = dict()
+                field_dict[type][category] = {}
            # keep paths as strings to make it easier to read
            field_dict[type][category][name] = str(value) if isinstance(value, Path) else value
        conf = OmegaConf.create(field_dict)
@@ -89,7 +89,7 @@ class InvokeAISettings(BaseSettings):
        # create an upcase version of the environment in
        # order to achieve case-insensitive environment
        # variables (the way Windows does)
-        upcase_environ = dict()
+        upcase_environ = {}
        for key, value in os.environ.items():
            upcase_environ[key.upper()] = value
@@ -188,18 +188,18 @@ DEFAULT_MAX_VRAM = 0.5


 class Categories(object):
-    WebServer = dict(category="Web Server")
-    Features = dict(category="Features")
-    Paths = dict(category="Paths")
-    Logging = dict(category="Logging")
-    Development = dict(category="Development")
-    Other = dict(category="Other")
-    ModelCache = dict(category="Model Cache")
-    Device = dict(category="Device")
-    Generation = dict(category="Generation")
-    Queue = dict(category="Queue")
-    Nodes = dict(category="Nodes")
-    MemoryPerformance = dict(category="Memory/Performance")
+    WebServer = {"category": "Web Server"}
+    Features = {"category": "Features"}
+    Paths = {"category": "Paths"}
+    Logging = {"category": "Logging"}
+    Development = {"category": "Development"}
+    Other = {"category": "Other"}
+    ModelCache = {"category": "Model Cache"}
+    Device = {"category": "Device"}
+    Generation = {"category": "Generation"}
+    Queue = {"category": "Queue"}
+    Nodes = {"category": "Nodes"}
+    MemoryPerformance = {"category": "Memory/Performance"}


 class InvokeAIAppConfig(InvokeAISettings):
@@ -482,7 +482,7 @@ def _find_root() -> Path:
    venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
    if os.environ.get("INVOKEAI_ROOT"):
        root = Path(os.environ["INVOKEAI_ROOT"])
-    elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]]):
+    elif any((venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]):
        root = (venv.parent).resolve()
    else:
        root = Path("~/invokeai").expanduser().resolve()
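The `_find_root` hunk removes the list brackets inside `any(...)` (ruff's C419 from flake8-comprehensions): passing a generator lets `any` stop at the first truthy element instead of building the whole list first. A quick illustration with a hypothetical check function:

```python
def exists(name: str) -> bool:
    print(f"checking {name}")  # visible side effect to show evaluation order
    return name == "invokeai.yaml"

names = ["invokeai.yaml", "invokeai.init"]

# Generator form: short-circuits after the first hit, so only one check runs.
print(any(exists(n) for n in names))

# List form: evaluates every element before any() sees the first result.
print(any([exists(n) for n in names]))
```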
@@ -27,7 +27,7 @@ class EventServiceBase:
        payload["timestamp"] = get_timestamp()
        self.dispatch(
            event_name=EventServiceBase.queue_event,
-            payload=dict(event=event_name, data=payload),
+            payload={"event": event_name, "data": payload},
        )

    # Define events here for every event in the system.
@@ -48,18 +48,18 @@ class EventServiceBase:
        """Emitted when there is generation progress"""
        self.__emit_queue_event(
            event_name="generator_progress",
-            payload=dict(
-                queue_id=queue_id,
-                queue_item_id=queue_item_id,
-                queue_batch_id=queue_batch_id,
-                graph_execution_state_id=graph_execution_state_id,
-                node_id=node.get("id"),
-                source_node_id=source_node_id,
-                progress_image=progress_image.model_dump() if progress_image is not None else None,
-                step=step,
-                order=order,
-                total_steps=total_steps,
-            ),
+            payload={
+                "queue_id": queue_id,
+                "queue_item_id": queue_item_id,
+                "queue_batch_id": queue_batch_id,
+                "graph_execution_state_id": graph_execution_state_id,
+                "node_id": node.get("id"),
+                "source_node_id": source_node_id,
+                "progress_image": progress_image.model_dump() if progress_image is not None else None,
+                "step": step,
+                "order": order,
+                "total_steps": total_steps,
+            },
        )

    def emit_invocation_complete(
@@ -75,15 +75,15 @@ class EventServiceBase:
        """Emitted when an invocation has completed"""
        self.__emit_queue_event(
            event_name="invocation_complete",
-            payload=dict(
-                queue_id=queue_id,
-                queue_item_id=queue_item_id,
-                queue_batch_id=queue_batch_id,
-                graph_execution_state_id=graph_execution_state_id,
-                node=node,
-                source_node_id=source_node_id,
-                result=result,
-            ),
+            payload={
+                "queue_id": queue_id,
+                "queue_item_id": queue_item_id,
+                "queue_batch_id": queue_batch_id,
+                "graph_execution_state_id": graph_execution_state_id,
+                "node": node,
+                "source_node_id": source_node_id,
+                "result": result,
+            },
        )

    def emit_invocation_error(
@@ -100,16 +100,16 @@ class EventServiceBase:
        """Emitted when an invocation has completed"""
        self.__emit_queue_event(
            event_name="invocation_error",
-            payload=dict(
-                queue_id=queue_id,
-                queue_item_id=queue_item_id,
-                queue_batch_id=queue_batch_id,
-                graph_execution_state_id=graph_execution_state_id,
-                node=node,
-                source_node_id=source_node_id,
-                error_type=error_type,
-                error=error,
-            ),
+            payload={
+                "queue_id": queue_id,
+                "queue_item_id": queue_item_id,
+                "queue_batch_id": queue_batch_id,
+                "graph_execution_state_id": graph_execution_state_id,
+                "node": node,
+                "source_node_id": source_node_id,
+                "error_type": error_type,
+                "error": error,
+            },
        )

    def emit_invocation_started(
@@ -124,14 +124,14 @@ class EventServiceBase:
        """Emitted when an invocation has started"""
        self.__emit_queue_event(
            event_name="invocation_started",
-            payload=dict(
-                queue_id=queue_id,
-                queue_item_id=queue_item_id,
-                queue_batch_id=queue_batch_id,
-                graph_execution_state_id=graph_execution_state_id,
-                node=node,
-                source_node_id=source_node_id,
-            ),
+            payload={
+                "queue_id": queue_id,
+                "queue_item_id": queue_item_id,
+                "queue_batch_id": queue_batch_id,
+                "graph_execution_state_id": graph_execution_state_id,
+                "node": node,
+                "source_node_id": source_node_id,
+            },
        )

    def emit_graph_execution_complete(
@@ -140,12 +140,12 @@ class EventServiceBase:
        """Emitted when a session has completed all invocations"""
        self.__emit_queue_event(
            event_name="graph_execution_state_complete",
-            payload=dict(
-                queue_id=queue_id,
-                queue_item_id=queue_item_id,
-                queue_batch_id=queue_batch_id,
-                graph_execution_state_id=graph_execution_state_id,
-            ),
+            payload={
+                "queue_id": queue_id,
+                "queue_item_id": queue_item_id,
+                "queue_batch_id": queue_batch_id,
+                "graph_execution_state_id": graph_execution_state_id,
+            },
        )

    def emit_model_load_started(
@@ -162,16 +162,16 @@ class EventServiceBase:
        """Emitted when a model is requested"""
        self.__emit_queue_event(
            event_name="model_load_started",
-            payload=dict(
-                queue_id=queue_id,
-                queue_item_id=queue_item_id,
-                queue_batch_id=queue_batch_id,
-                graph_execution_state_id=graph_execution_state_id,
-                model_name=model_name,
-                base_model=base_model,
-                model_type=model_type,
-                submodel=submodel,
-            ),
+            payload={
+                "queue_id": queue_id,
+                "queue_item_id": queue_item_id,
+                "queue_batch_id": queue_batch_id,
+                "graph_execution_state_id": graph_execution_state_id,
+                "model_name": model_name,
+                "base_model": base_model,
+                "model_type": model_type,
+                "submodel": submodel,
+            },
        )

    def emit_model_load_completed(
@@ -189,19 +189,19 @@ class EventServiceBase:
        """Emitted when a model is correctly loaded (returns model info)"""
        self.__emit_queue_event(
            event_name="model_load_completed",
-            payload=dict(
-                queue_id=queue_id,
-                queue_item_id=queue_item_id,
-                queue_batch_id=queue_batch_id,
-                graph_execution_state_id=graph_execution_state_id,
-                model_name=model_name,
-                base_model=base_model,
-                model_type=model_type,
-                submodel=submodel,
-                hash=model_info.hash,
-                location=str(model_info.location),
-                precision=str(model_info.precision),
-            ),
+            payload={
+                "queue_id": queue_id,
+                "queue_item_id": queue_item_id,
+                "queue_batch_id": queue_batch_id,
+                "graph_execution_state_id": graph_execution_state_id,
+                "model_name": model_name,
+                "base_model": base_model,
+                "model_type": model_type,
+                "submodel": submodel,
+                "hash": model_info.hash,
+                "location": str(model_info.location),
+                "precision": str(model_info.precision),
+            },
        )

    def emit_session_retrieval_error(
@@ -216,14 +216,14 @@ class EventServiceBase:
        """Emitted when session retrieval fails"""
        self.__emit_queue_event(
            event_name="session_retrieval_error",
-            payload=dict(
-                queue_id=queue_id,
-                queue_item_id=queue_item_id,
-                queue_batch_id=queue_batch_id,
-                graph_execution_state_id=graph_execution_state_id,
-                error_type=error_type,
-                error=error,
-            ),
+            payload={
+                "queue_id": queue_id,
+                "queue_item_id": queue_item_id,
+                "queue_batch_id": queue_batch_id,
+                "graph_execution_state_id": graph_execution_state_id,
+                "error_type": error_type,
+                "error": error,
+            },
        )

    def emit_invocation_retrieval_error(
@@ -239,15 +239,15 @@ class EventServiceBase:
        """Emitted when invocation retrieval fails"""
        self.__emit_queue_event(
            event_name="invocation_retrieval_error",
-            payload=dict(
-                queue_id=queue_id,
-                queue_item_id=queue_item_id,
-                queue_batch_id=queue_batch_id,
-                graph_execution_state_id=graph_execution_state_id,
-                node_id=node_id,
-                error_type=error_type,
-                error=error,
-            ),
+            payload={
+                "queue_id": queue_id,
+                "queue_item_id": queue_item_id,
+                "queue_batch_id": queue_batch_id,
+                "graph_execution_state_id": graph_execution_state_id,
+                "node_id": node_id,
+                "error_type": error_type,
+                "error": error,
+            },
        )

    def emit_session_canceled(
@ -260,12 +260,12 @@ class EventServiceBase:
|
||||
"""Emitted when a session is canceled"""
|
||||
self.__emit_queue_event(
|
||||
event_name="session_canceled",
|
||||
payload=dict(
|
||||
queue_id=queue_id,
|
||||
queue_item_id=queue_item_id,
|
||||
queue_batch_id=queue_batch_id,
|
||||
graph_execution_state_id=graph_execution_state_id,
|
||||
),
|
||||
payload={
|
||||
"queue_id": queue_id,
|
||||
"queue_item_id": queue_item_id,
|
||||
"queue_batch_id": queue_batch_id,
|
||||
"graph_execution_state_id": graph_execution_state_id,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_queue_item_status_changed(
|
||||
@ -277,39 +277,39 @@ class EventServiceBase:
|
||||
"""Emitted when a queue item's status changes"""
|
||||
self.__emit_queue_event(
|
||||
event_name="queue_item_status_changed",
|
||||
payload=dict(
|
||||
queue_id=queue_status.queue_id,
|
||||
queue_item=dict(
|
||||
queue_id=session_queue_item.queue_id,
|
||||
item_id=session_queue_item.item_id,
|
||||
status=session_queue_item.status,
|
||||
batch_id=session_queue_item.batch_id,
|
||||
session_id=session_queue_item.session_id,
|
||||
error=session_queue_item.error,
|
||||
created_at=str(session_queue_item.created_at) if session_queue_item.created_at else None,
|
||||
updated_at=str(session_queue_item.updated_at) if session_queue_item.updated_at else None,
|
||||
started_at=str(session_queue_item.started_at) if session_queue_item.started_at else None,
|
||||
completed_at=str(session_queue_item.completed_at) if session_queue_item.completed_at else None,
|
||||
),
|
||||
batch_status=batch_status.model_dump(),
|
||||
queue_status=queue_status.model_dump(),
|
||||
),
|
||||
payload={
|
||||
"queue_id": queue_status.queue_id,
|
||||
"queue_item": {
|
||||
"queue_id": session_queue_item.queue_id,
|
||||
"item_id": session_queue_item.item_id,
|
||||
"status": session_queue_item.status,
|
||||
"batch_id": session_queue_item.batch_id,
|
||||
"session_id": session_queue_item.session_id,
|
||||
"error": session_queue_item.error,
|
||||
"created_at": str(session_queue_item.created_at) if session_queue_item.created_at else None,
|
||||
"updated_at": str(session_queue_item.updated_at) if session_queue_item.updated_at else None,
|
||||
"started_at": str(session_queue_item.started_at) if session_queue_item.started_at else None,
|
||||
"completed_at": str(session_queue_item.completed_at) if session_queue_item.completed_at else None,
|
||||
},
|
||||
"batch_status": batch_status.model_dump(),
|
||||
"queue_status": queue_status.model_dump(),
|
||||
},
|
||||
)
|
||||
|
||||
def emit_batch_enqueued(self, enqueue_result: EnqueueBatchResult) -> None:
|
||||
"""Emitted when a batch is enqueued"""
|
||||
self.__emit_queue_event(
|
||||
event_name="batch_enqueued",
|
||||
payload=dict(
|
||||
queue_id=enqueue_result.queue_id,
|
||||
batch_id=enqueue_result.batch.batch_id,
|
||||
enqueued=enqueue_result.enqueued,
|
||||
),
|
||||
payload={
|
||||
"queue_id": enqueue_result.queue_id,
|
||||
"batch_id": enqueue_result.batch.batch_id,
|
||||
"enqueued": enqueue_result.enqueued,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_queue_cleared(self, queue_id: str) -> None:
|
||||
"""Emitted when the queue is cleared"""
|
||||
self.__emit_queue_event(
|
||||
event_name="queue_cleared",
|
||||
payload=dict(queue_id=queue_id),
|
||||
payload={"queue_id": queue_id},
|
||||
)
|
||||
|
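The pattern across these event hunks is a mechanical swap of `dict(...)` constructor calls for dict literals. A minimal sketch of the equivalence (plain Python semantics, not part of this diff): the literal builds the same mapping without a global name lookup and cannot be broken by a shadowed `dict` name.

```python
payload_a = dict(queue_id="default", item_id=42)    # old style
payload_b = {"queue_id": "default", "item_id": 42}  # new style
assert payload_a == payload_b  # identical contents
```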
@ -25,7 +25,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
    __invoker: Invoker

    def __init__(self, output_folder: Union[str, Path]):
        self.__cache = dict()
        self.__cache = {}
        self.__cache_ids = Queue()
        self.__max_cache_size = 10  # TODO: get this from config

@ -90,25 +90,23 @@ class ImageRecordDeleteException(Exception):


IMAGE_DTO_COLS = ", ".join(
    list(
        map(
            lambda c: "images." + c,
            [
                "image_name",
                "image_origin",
                "image_category",
                "width",
                "height",
                "session_id",
                "node_id",
                "is_intermediate",
                "created_at",
                "updated_at",
                "deleted_at",
                "starred",
            ],
        )
    )
    [
        "images." + c
        for c in [
            "image_name",
            "image_origin",
            "image_category",
            "width",
            "height",
            "session_id",
            "node_id",
            "is_intermediate",
            "created_at",
            "updated_at",
            "deleted_at",
            "starred",
        ]
    ]
)

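For reference, a short sketch of what the comprehension above evaluates to, using a truncated column list (illustrative values, not from this diff):

```python
cols = ["image_name", "width", "height"]
image_dto_cols = ", ".join(["images." + c for c in cols])
# The joined string becomes the SELECT column list:
assert image_dto_cols == "images.image_name, images.width, images.height"
```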
@ -263,7 +263,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):

        if categories is not None:
            # Convert the enum values to unique list of strings
            category_strings = list(map(lambda c: c.value, set(categories)))
            category_strings = [c.value for c in set(categories)]
            # Create the correct length of placeholders
            placeholders = ",".join("?" * len(category_strings))

@ -307,7 +307,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        # Build the list of images, deserializing each row
        self._cursor.execute(images_query, images_params)
        result = cast(list[sqlite3.Row], self._cursor.fetchall())
        images = list(map(lambda r: deserialize_image_record(dict(r)), result))
        images = [deserialize_image_record(dict(r)) for r in result]

        # Set up and execute the count query, without pagination
        count_query += query_conditions + ";"
@ -386,7 +386,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
            """
        )
        result = cast(list[sqlite3.Row], self._cursor.fetchall())
        image_names = list(map(lambda r: r[0], result))
        image_names = [r[0] for r in result]
        self._cursor.execute(
            """--sql
            DELETE FROM images
@ -21,8 +21,8 @@ class ImageServiceABC(ABC):
    _on_deleted_callbacks: list[Callable[[str], None]]

    def __init__(self) -> None:
        self._on_changed_callbacks = list()
        self._on_deleted_callbacks = list()
        self._on_changed_callbacks = []
        self._on_deleted_callbacks = []

    def on_changed(self, on_changed: Callable[[ImageDTO], None]) -> None:
        """Register a callback for when an image is changed"""

@ -217,18 +217,16 @@ class ImageService(ImageServiceABC):
                board_id,
            )

            image_dtos = list(
                map(
                    lambda r: image_record_to_dto(
                        image_record=r,
                        image_url=self.__invoker.services.urls.get_image_url(r.image_name),
                        thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True),
                        board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name),
                        workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name),
                    ),
                    results.items,
                )
            )
            image_dtos = [
                image_record_to_dto(
                    image_record=r,
                    image_url=self.__invoker.services.urls.get_image_url(r.image_name),
                    thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True),
                    board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name),
                    workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name),
                )
                for r in results.items
            ]

            return OffsetPaginatedResults[ImageDTO](
                items=image_dtos,
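The `list(map(lambda ...))` to list-comprehension rewrite above preserves behavior exactly; a minimal sketch with stand-in data:

```python
records = [1, 2, 3]
via_map = list(map(lambda r: r * 2, records))  # old form
via_comprehension = [r * 2 for r in records]   # new form
assert via_map == via_comprehension == [2, 4, 6]
```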
@ -1,5 +1,5 @@
from abc import ABC


class InvocationProcessorABC(ABC):
class InvocationProcessorABC(ABC):  # noqa: B024
    pass

@ -26,7 +26,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
        self.__invoker_thread = Thread(
            name="invoker_processor",
            target=self.__process,
            kwargs=dict(stop_event=self.__stop_event),
            kwargs={"stop_event": self.__stop_event},
        )
        self.__invoker_thread.daemon = True  # TODO: make async and do not use threads
        self.__invoker_thread.start()

@ -14,7 +14,7 @@ class MemoryInvocationQueue(InvocationQueueABC):

    def __init__(self):
        self.__queue = Queue()
        self.__cancellations = dict()
        self.__cancellations = {}

    def get(self) -> InvocationQueueItem:
        item = self.__queue.get()
@ -122,7 +122,7 @@ class InvocationStatsService(InvocationStatsServiceBase):
    def log_stats(self):
        completed = set()
        errored = set()
        for graph_id, node_log in self._stats.items():
        for graph_id, _node_log in self._stats.items():
            try:
                current_graph_state = self._invoker.services.graph_execution_manager.get(graph_id)
            except Exception:
@ -142,7 +142,7 @@ class InvocationStatsService(InvocationStatsServiceBase):
            cache_stats = self._cache_stats[graph_id]
            hwm = cache_stats.high_watermark / GIG
            tot = cache_stats.cache_size / GIG
            loaded = sum([v for v in cache_stats.loaded_model_sizes.values()]) / GIG
            loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GIG

            logger.info(f"TOTAL GRAPH EXECUTION TIME: {total_time:7.3f}s")
            logger.info("RAM used by InvokeAI process: " + "%4.2fG" % self.ram_used + f" ({self.ram_changed:+5.3f}G)")
@ -15,8 +15,8 @@ class ItemStorageABC(ABC, Generic[T]):
    _on_deleted_callbacks: list[Callable[[str], None]]

    def __init__(self) -> None:
        self._on_changed_callbacks = list()
        self._on_deleted_callbacks = list()
        self._on_changed_callbacks = []
        self._on_deleted_callbacks = []

    """Base item storage class"""


@ -112,7 +112,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
        )
        result = self._cursor.fetchall()

        items = list(map(lambda r: self._parse_item(r[0]), result))
        items = [self._parse_item(r[0]) for r in result]

        self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
        count = self._cursor.fetchone()[0]
@ -132,7 +132,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
        )
        result = self._cursor.fetchall()

        items = list(map(lambda r: self._parse_item(r[0]), result))
        items = [self._parse_item(r[0]) for r in result]

        self._cursor.execute(
            f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",

@ -13,8 +13,8 @@ class LatentsStorageBase(ABC):
    _on_deleted_callbacks: list[Callable[[str], None]]

    def __init__(self) -> None:
        self._on_changed_callbacks = list()
        self._on_deleted_callbacks = list()
        self._on_changed_callbacks = []
        self._on_deleted_callbacks = []

    @abstractmethod
    def get(self, name: str) -> torch.Tensor:

@ -19,7 +19,7 @@ class ForwardCacheLatentsStorage(LatentsStorageBase):
    def __init__(self, underlying_storage: LatentsStorageBase, max_cache_size: int = 20):
        super().__init__()
        self.__underlying_storage = underlying_storage
        self.__cache = dict()
        self.__cache = {}
        self.__cache_ids = Queue()
        self.__max_cache_size = max_cache_size

@ -33,9 +33,11 @@ class DefaultSessionProcessor(SessionProcessorBase):
        self.__thread = Thread(
            name="session_processor",
            target=self.__process,
            kwargs=dict(
                stop_event=self.__stop_event, poll_now_event=self.__poll_now_event, resume_event=self.__resume_event
            ),
            kwargs={
                "stop_event": self.__stop_event,
                "poll_now_event": self.__poll_now_event,
                "resume_event": self.__resume_event,
            },
        )
        self.__thread.start()

@ -129,12 +129,12 @@ class Batch(BaseModel):
        return v

    model_config = ConfigDict(
        json_schema_extra=dict(
            required=[
        json_schema_extra={
            "required": [
                "graph",
                "runs",
            ]
        )
        }
    )


@ -191,8 +191,8 @@ class SessionQueueItemWithoutGraph(BaseModel):
        return SessionQueueItemDTO(**queue_item_dict)

    model_config = ConfigDict(
        json_schema_extra=dict(
            required=[
        json_schema_extra={
            "required": [
                "item_id",
                "status",
                "batch_id",
@ -203,7 +203,7 @@ class SessionQueueItemWithoutGraph(BaseModel):
                "created_at",
                "updated_at",
            ]
        )
        }
    )


@ -222,8 +222,8 @@ class SessionQueueItem(SessionQueueItemWithoutGraph):
        return SessionQueueItem(**queue_item_dict)

    model_config = ConfigDict(
        json_schema_extra=dict(
            required=[
        json_schema_extra={
            "required": [
                "item_id",
                "status",
                "batch_id",
@ -235,7 +235,7 @@ class SessionQueueItem(SessionQueueItemWithoutGraph):
                "created_at",
                "updated_at",
            ]
        )
        }
    )

@ -355,7 +355,7 @@ def create_session_nfv_tuples(
                for item in batch_datum.items
            ]
            node_field_values_to_zip.append(node_field_values)
        data.append(list(zip(*node_field_values_to_zip)))  # type: ignore [arg-type]
        data.append(list(zip(*node_field_values_to_zip, strict=True)))  # type: ignore [arg-type]

    # create generator to yield session,nfv tuples
    count = 0
@ -383,7 +383,7 @@ def calc_session_count(batch: Batch) -> int:
        for batch_datum in batch_datum_list:
            batch_data_items = range(len(batch_datum.items))
            to_zip.append(batch_data_items)
        data.append(list(zip(*to_zip)))
        data.append(list(zip(*to_zip, strict=True)))
    data_product = list(product(*data))
    return len(data_product) * batch.runs

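`strict=True` (Python 3.10+) makes `zip` raise instead of silently truncating when its inputs have unequal lengths, which is the safety net these call sites gain:

```python
list(zip([1, 2], ["a", "b"], strict=True))  # [(1, 'a'), (2, 'b')]
try:
    list(zip([1, 2, 3], ["a", "b"], strict=True))
except ValueError:
    pass  # mismatched lengths now fail loudly
```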
@ -78,7 +78,7 @@ def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[Li
    """Creates the default system graphs, or adds new versions if the old ones don't match"""

    # TODO: Uncomment this when we are ready to fix this up to prevent breaking changes
    graphs: list[LibraryGraph] = list()
    graphs: list[LibraryGraph] = []

    text_to_image = graph_library.get(default_text_to_image_graph_id)


@ -352,7 +352,7 @@ class Graph(BaseModel):

        # Validate that all node ids are unique
        node_ids = [n.id for n in self.nodes.values()]
        duplicate_node_ids = set([node_id for node_id in node_ids if node_ids.count(node_id) >= 2])
        duplicate_node_ids = {node_id for node_id in node_ids if node_ids.count(node_id) >= 2}
        if duplicate_node_ids:
            raise DuplicateNodeIdError(f"Node ids must be unique, found duplicates {duplicate_node_ids}")
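The set comprehension adopted above builds the set directly instead of materializing an intermediate list; a minimal sketch:

```python
node_ids = ["a", "b", "a"]
duplicates = {n for n in node_ids if node_ids.count(n) >= 2}
assert duplicates == {"a"}
```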
@ -616,7 +616,7 @@ class Graph(BaseModel):
        self, node_path: str, prefix: Optional[str] = None
    ) -> list[tuple["Graph", Union[str, None], Edge]]:
        """Gets all input edges for a node along with the graph they are in and the graph's path"""
        edges = list()
        edges = []

        # Return any input edges that appear in this graph
        edges.extend([(self, prefix, e) for e in self.edges if e.destination.node_id == node_path])
@ -658,7 +658,7 @@ class Graph(BaseModel):
        self, node_path: str, prefix: Optional[str] = None
    ) -> list[tuple["Graph", Union[str, None], Edge]]:
        """Gets all output edges for a node along with the graph they are in and the graph's path"""
        edges = list()
        edges = []

        # Return any input edges that appear in this graph
        edges.extend([(self, prefix, e) for e in self.edges if e.source.node_id == node_path])
@ -680,8 +680,8 @@ class Graph(BaseModel):
        new_input: Optional[EdgeConnection] = None,
        new_output: Optional[EdgeConnection] = None,
    ) -> bool:
        inputs = list([e.source for e in self._get_input_edges(node_path, "collection")])
        outputs = list([e.destination for e in self._get_output_edges(node_path, "item")])
        inputs = [e.source for e in self._get_input_edges(node_path, "collection")]
        outputs = [e.destination for e in self._get_output_edges(node_path, "item")]

        if new_input is not None:
            inputs.append(new_input)
@ -694,7 +694,7 @@ class Graph(BaseModel):

        # Get input and output fields (the fields linked to the iterator's input/output)
        input_field = get_output_field(self.get_node(inputs[0].node_id), inputs[0].field)
        output_fields = list([get_input_field(self.get_node(e.node_id), e.field) for e in outputs])
        output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]

        # Input type must be a list
        if get_origin(input_field) != list:
@ -713,8 +713,8 @@ class Graph(BaseModel):
        new_input: Optional[EdgeConnection] = None,
        new_output: Optional[EdgeConnection] = None,
    ) -> bool:
        inputs = list([e.source for e in self._get_input_edges(node_path, "item")])
        outputs = list([e.destination for e in self._get_output_edges(node_path, "collection")])
        inputs = [e.source for e in self._get_input_edges(node_path, "item")]
        outputs = [e.destination for e in self._get_output_edges(node_path, "collection")]

        if new_input is not None:
            inputs.append(new_input)
@ -722,18 +722,16 @@ class Graph(BaseModel):
            outputs.append(new_output)

        # Get input and output fields (the fields linked to the iterator's input/output)
        input_fields = list([get_output_field(self.get_node(e.node_id), e.field) for e in inputs])
        output_fields = list([get_input_field(self.get_node(e.node_id), e.field) for e in outputs])
        input_fields = [get_output_field(self.get_node(e.node_id), e.field) for e in inputs]
        output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]

        # Validate that all inputs are derived from or match a single type
        input_field_types = set(
            [
                t
                for input_field in input_fields
                for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
                if t != NoneType
            ]
        )  # Get unique types
        input_field_types = {
            t
            for input_field in input_fields
            for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
            if t != NoneType
        }  # Get unique types
        type_tree = nx.DiGraph()
        type_tree.add_nodes_from(input_field_types)
        type_tree.add_edges_from([e for e in itertools.permutations(input_field_types, 2) if issubclass(e[1], e[0])])
@ -761,15 +759,15 @@ class Graph(BaseModel):
        """Returns a NetworkX DiGraph representing the layout of this graph"""
        # TODO: Cache this?
        g = nx.DiGraph()
        g.add_nodes_from([n for n in self.nodes.keys()])
        g.add_edges_from(set([(e.source.node_id, e.destination.node_id) for e in self.edges]))
        g.add_nodes_from(list(self.nodes.keys()))
        g.add_edges_from({(e.source.node_id, e.destination.node_id) for e in self.edges})
        return g

    def nx_graph_with_data(self) -> nx.DiGraph:
        """Returns a NetworkX DiGraph representing the data and layout of this graph"""
        g = nx.DiGraph()
        g.add_nodes_from([n for n in self.nodes.items()])
        g.add_edges_from(set([(e.source.node_id, e.destination.node_id) for e in self.edges]))
        g.add_nodes_from(list(self.nodes.items()))
        g.add_edges_from({(e.source.node_id, e.destination.node_id) for e in self.edges})
        return g

    def nx_graph_flat(self, nx_graph: Optional[nx.DiGraph] = None, prefix: Optional[str] = None) -> nx.DiGraph:
@ -791,7 +789,7 @@ class Graph(BaseModel):

        # TODO: figure out if iteration nodes need to be expanded

        unique_edges = set([(e.source.node_id, e.destination.node_id) for e in self.edges])
        unique_edges = {(e.source.node_id, e.destination.node_id) for e in self.edges}
        g.add_edges_from([(self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix)) for e in unique_edges])
        return g

@ -843,8 +841,8 @@ class GraphExecutionState(BaseModel):
        return v

    model_config = ConfigDict(
        json_schema_extra=dict(
            required=[
        json_schema_extra={
            "required": [
                "id",
                "graph",
                "execution_graph",
@ -855,7 +853,7 @@ class GraphExecutionState(BaseModel):
                "prepared_source_mapping",
                "source_prepared_mapping",
            ]
        )
        }
    )

    def next(self) -> Optional[BaseInvocation]:
@ -895,7 +893,7 @@ class GraphExecutionState(BaseModel):
            source_node = self.prepared_source_mapping[node_id]
            prepared_nodes = self.source_prepared_mapping[source_node]

            if all([n in self.executed for n in prepared_nodes]):
            if all(n in self.executed for n in prepared_nodes):
                self.executed.add(source_node)
                self.executed_history.append(source_node)

@ -930,7 +928,7 @@ class GraphExecutionState(BaseModel):
        input_collection = getattr(input_collection_prepared_node_output, input_collection_edge.source.field)
        self_iteration_count = len(input_collection)

        new_nodes: list[str] = list()
        new_nodes: list[str] = []
        if self_iteration_count == 0:
            # TODO: should this raise a warning? It might just happen if an empty collection is input, and should be valid.
            return new_nodes
@ -940,7 +938,7 @@ class GraphExecutionState(BaseModel):

        # Create new edges for this iteration
        # For collect nodes, this may contain multiple inputs to the same field
        new_edges: list[Edge] = list()
        new_edges: list[Edge] = []
        for edge in input_edges:
            for input_node_id in (n[1] for n in iteration_node_map if n[0] == edge.source.node_id):
                new_edge = Edge(
@ -1034,7 +1032,7 @@ class GraphExecutionState(BaseModel):

        # Create execution nodes
        next_node = self.graph.get_node(next_node_id)
        new_node_ids = list()
        new_node_ids = []
        if isinstance(next_node, CollectInvocation):
            # Collapse all iterator input mappings and create a single execution node for the collect invocation
            all_iteration_mappings = list(
@ -1055,7 +1053,10 @@ class GraphExecutionState(BaseModel):
            # For every iterator, the parent must either not be a child of that iterator, or must match the prepared iteration for that iterator
            # TODO: Handle a node mapping to none
            eg = self.execution_graph.nx_graph_flat()
            prepared_parent_mappings = [[(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents] for it in iterator_node_prepared_combinations]  # type: ignore
            prepared_parent_mappings = [
                [(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents]
                for it in iterator_node_prepared_combinations
            ]  # type: ignore

            # Create execution node for each iteration
            for iteration_mappings in prepared_parent_mappings:
@ -1121,7 +1122,7 @@ class GraphExecutionState(BaseModel):
                for edge in input_edges
                if edge.destination.field == "item"
            ]
            setattr(node, "collection", output_collection)
            node.collection = output_collection
        else:
            for edge in input_edges:
                output_value = getattr(self.results[edge.source.node_id], edge.source.field)
@ -1201,7 +1202,7 @@ class LibraryGraph(BaseModel):

    @field_validator("exposed_inputs", "exposed_outputs")
    def validate_exposed_aliases(cls, v: list[Union[ExposedNodeInput, ExposedNodeOutput]]):
        if len(v) != len(set(i.alias for i in v)):
        if len(v) != len({i.alias for i in v}):
            raise ValueError("Duplicate exposed alias")
        return v

invokeai/app/shared/__init__.py (Normal file, +5 lines)
@ -0,0 +1,5 @@
"""
This module contains various classes, functions and models which are shared across the app, particularly by invocations.

Lifting these classes, functions and models into this shared module helps to reduce circular imports.
"""
invokeai/app/shared/fields.py (Normal file, +66 lines)
@ -0,0 +1,66 @@
class FieldDescriptions:
    denoising_start = "When to start denoising, expressed as a percentage of total steps"
    denoising_end = "When to stop denoising, expressed as a percentage of total steps"
    cfg_scale = "Classifier-Free Guidance scale"
    scheduler = "Scheduler to use during inference"
    positive_cond = "Positive conditioning tensor"
    negative_cond = "Negative conditioning tensor"
    noise = "Noise tensor"
    clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
    unet = "UNet (scheduler, LoRAs)"
    vae = "VAE"
    cond = "Conditioning tensor"
    controlnet_model = "ControlNet model to load"
    vae_model = "VAE model to load"
    lora_model = "LoRA model to load"
    main_model = "Main model (UNet, VAE, CLIP) to load"
    sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
    sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
    onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
    lora_weight = "The weight at which the LoRA is applied to each model"
    compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
    raw_prompt = "Raw prompt text (no parsing)"
    sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
    skipped_layers = "Number of layers to skip in text encoder"
    seed = "Seed for random number generation"
    steps = "Number of steps to run"
    width = "Width of output (px)"
    height = "Height of output (px)"
    control = "ControlNet(s) to apply"
    ip_adapter = "IP-Adapter to apply"
    t2i_adapter = "T2I-Adapter(s) to apply"
    denoised_latents = "Denoised latents tensor"
    latents = "Latents tensor"
    strength = "Strength of denoising (proportional to steps)"
    metadata = "Optional metadata to be saved with the image"
    metadata_collection = "Collection of Metadata"
    metadata_item_polymorphic = "A single metadata item or collection of metadata items"
    metadata_item_label = "Label for this metadata item"
    metadata_item_value = "The value for this metadata item (may be any type)"
    workflow = "Optional workflow to be saved with the image"
    interp_mode = "Interpolation mode"
    torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
    fp32 = "Whether or not to use full float32 precision"
    precision = "Precision to use"
    tiled = "Processing using overlapping tiles (reduce memory consumption)"
    detect_res = "Pixel resolution for detection"
    image_res = "Pixel resolution for output image"
    safe_mode = "Whether or not to use safe mode"
    scribble_mode = "Whether or not to use scribble mode"
    scale_factor = "The factor by which to scale"
    blend_alpha = (
        "Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
    )
    num_1 = "The first number"
    num_2 = "The second number"
    mask = "The mask to use for the operation"
    board = "The board to save the image to"
    image = "The image to process"
    tile_size = "Tile size"
    inclusive_low = "The inclusive low value"
    exclusive_high = "The exclusive high value"
    decimal_places = "The number of decimal places to round to"
    freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
    freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
    freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
    freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."
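A hypothetical sketch of how these shared descriptions are meant to be consumed; the `NoiseArgs` model below is illustrative and not part of this diff:

```python
from pydantic import BaseModel, Field

from invokeai.app.shared.fields import FieldDescriptions


class NoiseArgs(BaseModel):  # illustrative model, not in the codebase
    seed: int = Field(default=0, description=FieldDescriptions.seed)
    width: int = Field(default=512, description=FieldDescriptions.width)
```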
invokeai/app/shared/models.py (Normal file, +16 lines)
@ -0,0 +1,16 @@
from pydantic import BaseModel, Field

from invokeai.app.shared.fields import FieldDescriptions


class FreeUConfig(BaseModel):
    """
    Configuration for the FreeU hyperparameters.
    - https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu
    - https://github.com/ChenyangSi/FreeU
    """

    s1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s1)
    s2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s2)
    b1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b1)
    b2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b2)
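A hypothetical construction of the model above; the values shown are assumed example settings in the range the FreeU authors suggest for SD1.x, and they satisfy the `ge=-1, le=3` bounds:

```python
from invokeai.app.shared.models import FreeUConfig

config = FreeUConfig(s1=0.9, s2=0.2, b1=1.2, b2=1.4)  # assumed example values
```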
@ -59,7 +59,7 @@ def thin_one_time(x, kernels):

def lvmin_thin(x, prunings=True):
    y = x
    for i in range(32):
    for _i in range(32):
        y, is_done = thin_one_time(y, lvmin_kernels)
        if is_done:
            break

@ -21,11 +21,11 @@ def get_metadata_graph_from_raw_session(session_raw: str) -> Optional[dict]:

    # sanity check: make sure the graph is at least reasonably shaped
    if (
        type(graph) is not dict
        not isinstance(graph, dict)
        or "nodes" not in graph
        or type(graph["nodes"]) is not dict
        or not isinstance(graph["nodes"], dict)
        or "edges" not in graph
        or type(graph["edges"]) is not list
        or not isinstance(graph["edges"], list)
    ):
        # something has gone terribly awry; return None
        return None
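The `type(x) is not dict` to `not isinstance(x, dict)` change is not purely cosmetic: `isinstance` accepts subclasses, which the identity check rejects. A minimal sketch:

```python
from collections import OrderedDict

graph = OrderedDict(nodes={})   # a dict subclass
assert isinstance(graph, dict)  # passes the new check
assert type(graph) is not dict  # would have failed the old one
```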
@ -88,7 +88,7 @@ class PromptFormatter:
        t2i = self.t2i
        opt = self.opt

        switches = list()
        switches = []
        switches.append(f'"{opt.prompt}"')
        switches.append(f"-s{opt.steps or t2i.steps}")
        switches.append(f"-W{opt.width or t2i.width}")

@ -88,7 +88,7 @@ class Txt2Mask(object):
        provided image and returns a SegmentedGrayscale object in which the brighter
        pixels indicate where the object is inferred to be.
        """
        if type(image) is str:
        if isinstance(image, str):
            image = Image.open(image).convert("RGB")

        image = ImageOps.exif_transpose(image)

@ -40,7 +40,7 @@ class InitImageResizer:
        (rw, rh) = (int(scale * im.width), int(scale * im.height))

        # round everything to multiples of 64
        width, height, rw, rh = map(lambda x: x - x % 64, (width, height, rw, rh))
        width, height, rw, rh = (x - x % 64 for x in (width, height, rw, rh))

        # no resize necessary, but return a copy
        if im.width == width and im.height == height:
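For reference, the `x - x % 64` expression rounds each dimension down to the nearest multiple of 64; a minimal sketch with sample sizes:

```python
dims = (513, 640, 767)
rounded = tuple(x - x % 64 for x in dims)
assert rounded == (512, 640, 704)
```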
@ -32,7 +32,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
from huggingface_hub import HfFolder
from huggingface_hub import login as hf_hub_login
from omegaconf import OmegaConf
from pydantic.error_wrappers import ValidationError
from pydantic import ValidationError
from tqdm import tqdm
from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer

@ -197,7 +197,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th

def download_conversion_models():
    target_dir = config.models_path / "core/convert"
    kwargs = dict()  # for future use
    kwargs = {}  # for future use
    try:
        logger.info("Downloading core tokenizers and text encoders")

@ -252,26 +252,26 @@ def download_conversion_models():
def download_realesrgan():
    logger.info("Installing ESRGAN Upscaling models...")
    URLs = [
        dict(
            url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
            dest="core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
            description="RealESRGAN_x4plus.pth",
        ),
        dict(
            url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
            dest="core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
            description="RealESRGAN_x4plus_anime_6B.pth",
        ),
        dict(
            url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
            dest="core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
            description="ESRGAN_SRx4_DF2KOST_official.pth",
        ),
        dict(
            url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
            dest="core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
            description="RealESRGAN_x2plus.pth",
        ),
        {
            "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
            "dest": "core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
            "description": "RealESRGAN_x4plus.pth",
        },
        {
            "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
            "dest": "core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
            "description": "RealESRGAN_x4plus_anime_6B.pth",
        },
        {
            "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
            "dest": "core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
            "description": "ESRGAN_SRx4_DF2KOST_official.pth",
        },
        {
            "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
            "dest": "core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
            "description": "RealESRGAN_x2plus.pth",
        },
    ]
    for model in URLs:
        download_with_progress_bar(model["url"], config.models_path / model["dest"], model["description"])
@ -680,7 +680,7 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
        if program_opts.default_only
        else [models[x].path or models[x].repo_id for x in installer.recommended_models()]
        if program_opts.yes_to_all
        else list(),
        else [],
    )

@ -38,6 +38,7 @@ SAMPLER_CHOICES = [
    "k_heun",
    "k_lms",
    "plms",
    "lcm",
]

PRECISION_CHOICES = [
@ -123,8 +123,6 @@ class MigrateTo3(object):
                logger.error(str(e))
        except KeyboardInterrupt:
            raise
        except Exception as e:
            logger.error(str(e))
        for f in files:
            # don't copy raw learned_embeds.bin or pytorch_lora_weights.bin
            # let them be copied as part of a tree copy operation
@ -143,8 +141,6 @@ class MigrateTo3(object):
                logger.error(str(e))
        except KeyboardInterrupt:
            raise
        except Exception as e:
            logger.error(str(e))

    def migrate_support_models(self):
        """
@ -182,10 +178,10 @@ class MigrateTo3(object):
        """

        dest_directory = self.dest_models
        kwargs = dict(
            cache_dir=self.root_directory / "models/hub",
        kwargs = {
            "cache_dir": self.root_directory / "models/hub",
            # local_files_only = True
        )
        }
        try:
            logger.info("Migrating core tokenizers and text encoders")
            target_dir = dest_directory / "core" / "convert"
@ -316,11 +312,11 @@ class MigrateTo3(object):
        dest_dir = self.dest_models

        cache = self.root_directory / "models/hub"
        kwargs = dict(
            cache_dir=cache,
            safety_checker=None,
        kwargs = {
            "cache_dir": cache,
            "safety_checker": None,
            # local_files_only = True,
        )
        }

        owner, repo_name = repo_id.split("/")
        model_name = model_name or repo_name

@ -120,7 +120,7 @@ class ModelInstall(object):
        be treated uniformly. It also sorts the models alphabetically
        by their name, to improve the display somewhat.
        """
        model_dict = dict()
        model_dict = {}

        # first populate with the entries in INITIAL_MODELS.yaml
        for key, value in self.datasets.items():
@ -134,7 +134,7 @@ class ModelInstall(object):
            model_dict[key] = model_info

        # supplement with entries in models.yaml
        installed_models = [x for x in self.mgr.list_models()]
        installed_models = list(self.mgr.list_models())

        for md in installed_models:
            base = md["base_model"]
@ -176,7 +176,7 @@ class ModelInstall(object):
    # logic here a little reversed to maintain backward compatibility
    def starter_models(self, all_models: bool = False) -> Set[str]:
        models = set()
        for key, value in self.datasets.items():
        for key, _value in self.datasets.items():
            name, base, model_type = ModelManager.parse_key(key)
            if all_models or model_type in [ModelType.Main, ModelType.Vae]:
                models.add(key)
@ -184,7 +184,7 @@ class ModelInstall(object):

    def recommended_models(self) -> Set[str]:
        starters = self.starter_models(all_models=True)
        return set([x for x in starters if self.datasets[x].get("recommended", False)])
        return {x for x in starters if self.datasets[x].get("recommended", False)}

    def default_model(self) -> str:
        starters = self.starter_models()
@ -234,7 +234,7 @@ class ModelInstall(object):
        """

        if not models_installed:
            models_installed = dict()
            models_installed = {}

        model_path_id_or_url = str(model_path_id_or_url).strip("\"' ")

@ -252,10 +252,14 @@ class ModelInstall(object):

        # folders style or similar
        elif path.is_dir() and any(
            [
                (path / x).exists()
                for x in {"config.json", "model_index.json", "learned_embeds.bin", "pytorch_lora_weights.bin"}
            ]
            (path / x).exists()
            for x in {
                "config.json",
                "model_index.json",
                "learned_embeds.bin",
                "pytorch_lora_weights.bin",
                "pytorch_lora_weights.safetensors",
            }
        ):
            models_installed.update({str(model_path_id_or_url): self._install_path(path)})

@ -357,7 +361,7 @@ class ModelInstall(object):
            for suffix in ["safetensors", "bin"]:
                if f"{prefix}pytorch_lora_weights.{suffix}" in files:
                    location = self._download_hf_model(
                        repo_id, ["pytorch_lora_weights.bin"], staging, subfolder=subfolder
                        repo_id, [f"pytorch_lora_weights.{suffix}"], staging, subfolder=subfolder
                    )  # LoRA
                    break
        elif (
@ -427,17 +431,17 @@ class ModelInstall(object):

        rel_path = self.relative_to_root(path, self.config.models_path)

        attributes = dict(
            path=str(rel_path),
            description=str(description),
            model_format=info.format,
        )
        attributes = {
            "path": str(rel_path),
            "description": str(description),
            "model_format": info.format,
        }
        legacy_conf = None
        if info.model_type == ModelType.Main or info.model_type == ModelType.ONNX:
            attributes.update(
                dict(
                    variant=info.variant_type,
                )
                {
                    "variant": info.variant_type,
                }
            )
            if info.format == "checkpoint":
                try:
@ -468,7 +472,7 @@ class ModelInstall(object):
            )

        if legacy_conf:
            attributes.update(dict(config=str(legacy_conf)))
            attributes.update({"config": str(legacy_conf)})
        return attributes

    def relative_to_root(self, path: Path, root: Optional[Path] = None) -> Path:
@ -513,7 +517,7 @@ class ModelInstall(object):
    def _download_hf_model(self, repo_id: str, files: List[str], staging: Path, subfolder: None) -> Path:
        _, name = repo_id.split("/")
        location = staging / name
        paths = list()
        paths = []
        for filename in files:
            filePath = Path(filename)
            p = hf_download_with_resume(
@ -130,7 +130,9 @@ class IPAttnProcessor2_0(torch.nn.Module):
            assert ip_adapter_image_prompt_embeds is not None
            assert len(ip_adapter_image_prompt_embeds) == len(self._weights)

            for ipa_embed, ipa_weights, scale in zip(ip_adapter_image_prompt_embeds, self._weights, self._scales):
            for ipa_embed, ipa_weights, scale in zip(
                ip_adapter_image_prompt_embeds, self._weights, self._scales, strict=True
            ):
                # The batch dimensions should match.
                assert ipa_embed.shape[0] == encoder_hidden_states.shape[0]
                # The token_len dimensions should match.
@ -56,7 +56,7 @@ class PerceiverAttention(nn.Module):
        x = self.norm1(x)
        latents = self.norm2(latents)

        b, l, _ = latents.shape
        b, L, _ = latents.shape

        q = self.to_q(latents)
        kv_input = torch.cat((x, latents), dim=-2)
@ -72,7 +72,7 @@ class PerceiverAttention(nn.Module):
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = weight @ v

        out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
        out = out.permute(0, 2, 1, 3).reshape(b, L, -1)

        return self.to_out(out)

@ -269,7 +269,7 @@ def create_unet_diffusers_config(original_config, image_size: int, controlnet=Fa
        resolution *= 2

    up_block_types = []
    for i in range(len(block_out_channels)):
    for _i in range(len(block_out_channels)):
        block_type = "CrossAttnUpBlock2D" if resolution in unet_params.attention_resolutions else "UpBlock2D"
        up_block_types.append(block_type)
        resolution //= 2
@ -1223,7 +1223,7 @@ def download_from_original_stable_diffusion_ckpt(
        # scan model
        scan_result = scan_file_path(checkpoint_path)
        if scan_result.infected_files != 0:
            raise "The model {checkpoint_path} is potentially infected by malware. Aborting import."
            raise Exception(f"The model {checkpoint_path} is potentially infected by malware. Aborting import.")
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        checkpoint = torch.load(checkpoint_path, map_location=device)
@ -1664,7 +1664,7 @@ def download_controlnet_from_original_ckpt(
    # scan model
    scan_result = scan_file_path(checkpoint_path)
    if scan_result.infected_files != 0:
        raise "The model {checkpoint_path} is potentially infected by malware. Aborting import."
        raise Exception(f"The model {checkpoint_path} is potentially infected by malware. Aborting import.")
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    checkpoint = torch.load(checkpoint_path, map_location=device)

@ -12,6 +12,8 @@ from diffusers.models import UNet2DConditionModel
from safetensors.torch import load_file
from transformers import CLIPTextModel, CLIPTokenizer

from invokeai.app.shared.models import FreeUConfig

from .models.lora import LoRAModel

"""
@ -102,7 +104,7 @@ class ModelPatcher:
        loras: List[Tuple[LoRAModel, float]],
        prefix: str,
    ):
        original_weights = dict()
        original_weights = {}
        try:
            with torch.no_grad():
                for lora, lora_weight in loras:
@ -164,6 +166,15 @@ class ModelPatcher:
        init_tokens_count = None
        new_tokens_added = None

        # TODO: This is required since Transformers 4.32, see
        # https://github.com/huggingface/transformers/pull/25088
        # More information from NVIDIA:
        # https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
        # This value might need to be changed in the future and take the GPU model into account, as there seem
        # to be ideal values for different GPUs. This value is temporary!
        # For references to the current discussion, please see https://github.com/invoke-ai/InvokeAI/pull/4817
        pad_to_multiple_of = 8

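A sketch of the rounding implied by `pad_to_multiple_of = 8` (the helper name and vocab figure below are illustrative; CLIP's base vocabulary is 49408 tokens):

```python
def padded_vocab(n_tokens: int, multiple: int = 8) -> int:
    # Round up to the next multiple so the embedding matmul can use tensor cores.
    return ((n_tokens + multiple - 1) // multiple) * multiple

assert padded_vocab(49409) == 49416  # 49408-token vocab plus one trigger token
```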
        try:
            # HACK: The CLIPTokenizer API does not include a way to remove tokens after calling add_tokens(...). As a
            # workaround, we create a full copy of `tokenizer` so that its original behavior can be restored after
@ -173,7 +184,7 @@ class ModelPatcher:
            # but a pickle roundtrip was found to be much faster (1 sec vs. 0.05 secs).
            ti_tokenizer = pickle.loads(pickle.dumps(tokenizer))
            ti_manager = TextualInversionManager(ti_tokenizer)
            init_tokens_count = text_encoder.resize_token_embeddings(None).num_embeddings
            init_tokens_count = text_encoder.resize_token_embeddings(None, pad_to_multiple_of).num_embeddings

            def _get_trigger(ti_name, index):
                trigger = ti_name
@ -188,7 +199,7 @@ class ModelPatcher:
                    new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))

            # modify text_encoder
            text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added)
            text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added, pad_to_multiple_of)
            model_embeddings = text_encoder.get_input_embeddings()

            for ti_name, ti in ti_list:
@ -220,7 +231,7 @@ class ModelPatcher:

        finally:
            if init_tokens_count and new_tokens_added:
                text_encoder.resize_token_embeddings(init_tokens_count)
                text_encoder.resize_token_embeddings(init_tokens_count, pad_to_multiple_of)

    @classmethod
    @contextmanager
@ -231,7 +242,7 @@ class ModelPatcher:
    ):
        skipped_layers = []
        try:
            for i in range(clip_skip):
            for _i in range(clip_skip):
                skipped_layers.append(text_encoder.text_model.encoder.layers.pop(-1))

            yield
@ -240,6 +251,25 @@ class ModelPatcher:
            while len(skipped_layers) > 0:
                text_encoder.text_model.encoder.layers.append(skipped_layers.pop())

    @classmethod
    @contextmanager
    def apply_freeu(
        cls,
        unet: UNet2DConditionModel,
        freeu_config: Optional[FreeUConfig] = None,
    ):
        did_apply_freeu = False
        try:
            if freeu_config is not None:
                unet.enable_freeu(b1=freeu_config.b1, b2=freeu_config.b2, s1=freeu_config.s1, s2=freeu_config.s2)
                did_apply_freeu = True

            yield

        finally:
            if did_apply_freeu:
                unet.disable_freeu()

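A hypothetical use of the new context manager; `unet` is assumed to be a loaded diffusers `UNet2DConditionModel` and `config` a `FreeUConfig`:

```python
with ModelPatcher.apply_freeu(unet, freeu_config=config):
    ...  # run denoising with FreeU's backbone/skip scaling active
# on exit, disable_freeu() has restored the stock UNet behavior
```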
class TextualInversionModel:
    embedding: torch.Tensor  # [n, 768]|[n, 1280]
@ -294,7 +324,7 @@ class TextualInversionManager(BaseTextualInversionManager):
    tokenizer: CLIPTokenizer

    def __init__(self, tokenizer: CLIPTokenizer):
        self.pad_tokens = dict()
        self.pad_tokens = {}
        self.tokenizer = tokenizer

    def expand_textual_inversion_token_ids_if_necessary(self, token_ids: list[int]) -> list[int]:
@ -355,10 +385,10 @@ class ONNXModelPatcher:
        if not isinstance(model, IAIOnnxRuntimeModel):
            raise Exception("Only IAIOnnxRuntimeModel models supported")

        orig_weights = dict()
        orig_weights = {}

        try:
            blended_loras = dict()
            blended_loras = {}

            for lora, lora_weight in loras:
                for layer_key, layer in lora.layers.items():
@ -374,7 +404,7 @@ class ONNXModelPatcher:
                else:
                    blended_loras[layer_key] = layer_weight

            node_names = dict()
            node_names = {}
            for node in model.nodes.values():
                node_names[node.name.replace("/", "_").replace(".", "_").lstrip("_")] = node.name

@ -66,11 +66,13 @@ class CacheStats(object):

class ModelLocker(object):
    "Forward declaration"

    pass


class ModelCache(object):
    "Forward declaration"

    pass


@ -132,7 +134,7 @@ class ModelCache(object):
        snapshots, so it is recommended to disable this feature unless you are actively inspecting the model cache's
        behaviour.
        """
        self.model_infos: Dict[str, ModelBase] = dict()
        self.model_infos: Dict[str, ModelBase] = {}
        # allow lazy offloading only when vram cache enabled
        self.lazy_offloading = lazy_offloading and max_vram_cache_size > 0
        self.precision: torch.dtype = precision
@ -147,8 +149,8 @@ class ModelCache(object):
        # used for stats collection
        self.stats = None

        self._cached_models = dict()
        self._cache_stack = list()
        self._cached_models = {}
        self._cache_stack = []

    def _capture_memory_snapshot(self) -> Optional[MemorySnapshot]:
        if self._log_memory_usage:
@ -26,5 +26,5 @@ def skip_torch_weight_init():

        yield None
    finally:
        for torch_module, saved_function in zip(torch_modules, saved_functions):
        for torch_module, saved_function in zip(torch_modules, saved_functions, strict=True):
            torch_module.reset_parameters = saved_function

@ -363,7 +363,7 @@ class ModelManager(object):
        else:
            return

        self.models = dict()
        self.models = {}
        for model_key, model_config in config.items():
            if model_key.startswith("_"):
                continue
@ -374,7 +374,7 @@ class ModelManager(object):
            self.models[model_key] = model_class.create_config(**model_config)

        # check config version number and update on disk/RAM if necessary
        self.cache_keys = dict()
        self.cache_keys = {}

        # add controlnet, lora and textual_inversion models from disk
        self.scan_models_directory()
@ -655,7 +655,7 @@ class ModelManager(object):
        """
        # TODO: redo
        for model_dict in self.list_models():
            for model_name, model_info in model_dict.items():
            for _model_name, model_info in model_dict.items():
                line = f'{model_info["name"]:25s} {model_info["type"]:10s} {model_info["description"]}'
                print(line)

@ -902,7 +902,7 @@ class ModelManager(object):
        """
        Write current configuration out to the indicated file.
        """
        data_to_save = dict()
        data_to_save = {}
        data_to_save["__metadata__"] = self.config_meta.model_dump()

        for model_key, model_config in self.models.items():
@ -1034,7 +1034,7 @@ class ModelManager(object):
        self.ignore = ignore

    def on_search_started(self):
        self.new_models_found = dict()
        self.new_models_found = {}

    def on_model_found(self, model: Path):
        if model not in self.ignore:
@ -1106,7 +1106,7 @@ class ModelManager(object):
        # avoid circular import here
        from invokeai.backend.install.model_install_backend import ModelInstall

        successfully_installed = dict()
        successfully_installed = {}

        installer = ModelInstall(
            config=self.app_config, prediction_type_helper=prediction_type_helper, model_manager=self
@ -92,7 +92,7 @@ class ModelMerger(object):
        **kwargs - the default DiffusionPipeline.get_config_dict kwargs:
             cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
        """
        model_paths = list()
        model_paths = []
        config = self.manager.app_config
        base_model = BaseModelType(base_model)
        vae = None
@ -124,13 +124,13 @@ class ModelMerger(object):
        dump_path = (dump_path / merged_model_name).as_posix()

        merged_pipe.save_pretrained(dump_path, safe_serialization=True)
        attributes = dict(
            path=dump_path,
            description=f"Merge of models {', '.join(model_names)}",
            model_format="diffusers",
            variant=ModelVariantType.Normal.value,
            vae=vae,
        )
        attributes = {
            "path": dump_path,
            "description": f"Merge of models {', '.join(model_names)}",
            "model_format": "diffusers",
            "variant": ModelVariantType.Normal.value,
            "vae": vae,
        }
        return self.manager.add_model(
            merged_model_name,
            base_model=base_model,

@ -183,12 +183,13 @@ class ModelProbe(object):
        if model:
            class_name = model.__class__.__name__
        else:
            for suffix in ["bin", "safetensors"]:
                if (folder_path / f"learned_embeds.{suffix}").exists():
                    return ModelType.TextualInversion
                if (folder_path / f"pytorch_lora_weights.{suffix}").exists():
                    return ModelType.Lora
            if (folder_path / "unet/model.onnx").exists():
                return ModelType.ONNX
            if (folder_path / "learned_embeds.bin").exists():
                return ModelType.TextualInversion
            if (folder_path / "pytorch_lora_weights.bin").exists():
                return ModelType.Lora
            if (folder_path / "image_encoder.txt").exists():
                return ModelType.IPAdapter

@ -236,7 +237,7 @@ class ModelProbe(object):
        # scan model
        scan_result = scan_file_path(checkpoint)
        if scan_result.infected_files != 0:
            raise "The model {model_name} is potentially infected by malware. Aborting import."
            raise Exception(f"The model {model_name} is potentially infected by malware. Aborting import.")


# ##################################################3

@ -59,7 +59,7 @@ class ModelSearch(ABC):
        for root, dirs, files in os.walk(path, followlinks=True):
            if str(Path(root).name).startswith("."):
                self._pruned_paths.add(root)
            if any([Path(root).is_relative_to(x) for x in self._pruned_paths]):
            if any(Path(root).is_relative_to(x) for x in self._pruned_paths):
                continue

            self._items_scanned += len(dirs) + len(files)
@ -69,16 +69,14 @@ class ModelSearch(ABC):
                    self._scanned_dirs.add(path)
                    continue
                if any(
                    [
                        (path / x).exists()
                        for x in {
                            "config.json",
                            "model_index.json",
                            "learned_embeds.bin",
                            "pytorch_lora_weights.bin",
                            "image_encoder.txt",
                        }
                    ]
                    (path / x).exists()
                    for x in {
                        "config.json",
                        "model_index.json",
                        "learned_embeds.bin",
                        "pytorch_lora_weights.bin",
                        "image_encoder.txt",
                    }
                ):
                    try:
                        self.on_model_found(path)
@ -97,8 +97,8 @@ MODEL_CLASSES = {
    # },
}

MODEL_CONFIGS = list()
OPENAPI_MODEL_CONFIGS = list()
MODEL_CONFIGS = []
OPENAPI_MODEL_CONFIGS = []


class OpenAPIModelInfoBase(BaseModel):
@ -109,7 +109,7 @@ class OpenAPIModelInfoBase(BaseModel):
    model_config = ConfigDict(protected_namespaces=())


for base_model, models in MODEL_CLASSES.items():
for _base_model, models in MODEL_CLASSES.items():
    for model_type, model_class in models.items():
        model_configs = set(model_class._get_configs().values())
        model_configs.discard(None)
@ -133,7 +133,7 @@ for base_model, models in MODEL_CLASSES.items():


def get_model_config_enums():
    enums = list()
    enums = []

    for model_config in MODEL_CONFIGS:
        if hasattr(inspect, "get_annotations"):
@ -153,7 +153,7 @@ class ModelBase(metaclass=ABCMeta):

        else:
            res_type = sys.modules["diffusers"]
            res_type = getattr(res_type, "pipelines")
            res_type = res_type.pipelines

        for subtype in subtypes:
            res_type = getattr(res_type, subtype)
@ -164,7 +164,7 @@ class ModelBase(metaclass=ABCMeta):
        with suppress(Exception):
            return cls.__configs

        configs = dict()
        configs = {}
        for name in dir(cls):
            if name.startswith("__"):
                continue
@ -246,8 +246,8 @@ class DiffusersModel(ModelBase):
    def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType):
        super().__init__(model_path, base_model, model_type)

        self.child_types: Dict[str, Type] = dict()
        self.child_sizes: Dict[str, int] = dict()
        self.child_types: Dict[str, Type] = {}
        self.child_sizes: Dict[str, int] = {}

        try:
            config_data = DiffusionPipeline.load_config(self.model_path)
@ -326,8 +326,8 @@ def calc_model_size_by_fs(model_path: str, subfolder: Optional[str] = None, vari
|
||||
all_files = os.listdir(model_path)
|
||||
all_files = [f for f in all_files if os.path.isfile(os.path.join(model_path, f))]
|
||||
|
||||
fp16_files = set([f for f in all_files if ".fp16." in f or ".fp16-" in f])
|
||||
bit8_files = set([f for f in all_files if ".8bit." in f or ".8bit-" in f])
|
||||
fp16_files = {f for f in all_files if ".fp16." in f or ".fp16-" in f}
|
||||
bit8_files = {f for f in all_files if ".8bit." in f or ".8bit-" in f}
|
||||
other_files = set(all_files) - fp16_files - bit8_files
|
||||
|
||||
if variant is None:
|
||||
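`set([...])` → `{... for ...}` is the set-comprehension variant of the same cleanup: the comprehension builds the set in one pass with no throwaway list. A minimal sketch with toy file names:

    all_files = ["model.fp16.safetensors", "model.safetensors", "vae.8bit.bin"]

    # Builds the set directly, no intermediate list:
    fp16_files = {f for f in all_files if ".fp16." in f or ".fp16-" in f}
    print(fp16_files)  # {'model.fp16.safetensors'}
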
@@ -413,7 +413,7 @@ def _calc_onnx_model_by_data(model) -> int:


def _fast_safetensors_reader(path: str):
-   checkpoint = dict()
+   checkpoint = {}
    device = torch.device("meta")
    with open(path, "rb") as f:
        definition_len = int.from_bytes(f.read(8), "little")
@@ -483,7 +483,7 @@ class IAIOnnxRuntimeModel:
    class _tensor_access:
        def __init__(self, model):
            self.model = model
-           self.indexes = dict()
+           self.indexes = {}
            for idx, obj in enumerate(self.model.proto.graph.initializer):
                self.indexes[obj.name] = idx

@@ -524,7 +524,7 @@ class IAIOnnxRuntimeModel:

    class _access_helper:
        def __init__(self, raw_proto):
-           self.indexes = dict()
+           self.indexes = {}
            self.raw_proto = raw_proto
            for idx, obj in enumerate(raw_proto):
                self.indexes[obj.name] = idx
@@ -549,7 +549,7 @@ class IAIOnnxRuntimeModel:
            return self.indexes.keys()

        def values(self):
-           return [obj for obj in self.raw_proto]
+           return list(self.raw_proto)

    def __init__(self, model_path: str, provider: Optional[str]):
        self.path = model_path

@@ -104,7 +104,7 @@ class ControlNetModel(ModelBase):
            return ControlNetModelFormat.Diffusers

        if os.path.isfile(path):
-           if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]]):
+           if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]):
                return ControlNetModelFormat.Checkpoint

        raise InvalidModelException(f"Not a valid model: {path}")

@@ -68,11 +68,12 @@ class LoRAModel(ModelBase):
            raise ModelNotFoundException()

        if os.path.isdir(path):
-           if os.path.exists(os.path.join(path, "pytorch_lora_weights.bin")):
-               return LoRAModelFormat.Diffusers
+           for ext in ["safetensors", "bin"]:
+               if os.path.exists(os.path.join(path, f"pytorch_lora_weights.{ext}")):
+                   return LoRAModelFormat.Diffusers

        if os.path.isfile(path):
-           if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+           if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
                return LoRAModelFormat.LyCORIS

        raise InvalidModelException(f"Not a valid model: {path}")
@@ -86,8 +87,10 @@ class LoRAModel(ModelBase):
        base_model: BaseModelType,
    ) -> str:
        if cls.detect_format(model_path) == LoRAModelFormat.Diffusers:
-           # TODO: add diffusers lora when it stabilizes a bit
-           raise NotImplementedError("Diffusers lora not supported")
+           for ext in ["safetensors", "bin"]:  # return path to the safetensors file inside the folder
+               path = Path(model_path, f"pytorch_lora_weights.{ext}")
+               if path.exists():
+                   return path
        else:
            return model_path

@@ -459,7 +462,7 @@ class LoRAModelRaw:  # (torch.nn.Module):
        dtype: Optional[torch.dtype] = None,
    ):
        # TODO: try revert if exception?
-       for key, layer in self.layers.items():
+       for _key, layer in self.layers.items():
            layer.to(device=device, dtype=dtype)

    def calc_size(self) -> int:
@@ -496,7 +499,7 @@ class LoRAModelRaw:  # (torch.nn.Module):
        stability_unet_keys = list(SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP)
        stability_unet_keys.sort()

-       new_state_dict = dict()
+       new_state_dict = {}
        for full_key, value in state_dict.items():
            if full_key.startswith("lora_unet_"):
                search_key = full_key.replace("lora_unet_", "")
@@ -542,7 +545,7 @@ class LoRAModelRaw:  # (torch.nn.Module):

        model = cls(
            name=file_path.stem,  # TODO:
-           layers=dict(),
+           layers={},
        )

        if file_path.suffix == ".safetensors":
@@ -590,12 +593,12 @@ class LoRAModelRaw:  # (torch.nn.Module):

    @staticmethod
    def _group_state(state_dict: dict):
-       state_dict_groupped = dict()
+       state_dict_groupped = {}

        for key, value in state_dict.items():
            stem, leaf = key.split(".", 1)
            if stem not in state_dict_groupped:
-               state_dict_groupped[stem] = dict()
+               state_dict_groupped[stem] = {}
            state_dict_groupped[stem][leaf] = value

        return state_dict_groupped
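`_group_state` splits each flat checkpoint key on the first dot and buckets the leaves under their stem. A standalone sketch of the same grouping, using `setdefault` in place of the explicit membership check (an equivalent idiom, shown here only for illustration):

    def group_state(state_dict: dict) -> dict:
        grouped = {}
        for key, value in state_dict.items():
            stem, leaf = key.split(".", 1)
            grouped.setdefault(stem, {})[leaf] = value
        return grouped

    print(group_state({"lora_a.up.weight": 1, "lora_a.down.weight": 2, "lora_b.alpha": 3}))
    # {'lora_a': {'up.weight': 1, 'down.weight': 2}, 'lora_b': {'alpha': 3}}
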
@@ -110,7 +110,7 @@ class StableDiffusion1Model(DiffusersModel):
            return StableDiffusion1ModelFormat.Diffusers

        if os.path.isfile(model_path):
-           if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+           if any(model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
                return StableDiffusion1ModelFormat.Checkpoint

        raise InvalidModelException(f"Not a valid model: {model_path}")
@@ -221,7 +221,7 @@ class StableDiffusion2Model(DiffusersModel):
            return StableDiffusion2ModelFormat.Diffusers

        if os.path.isfile(model_path):
-           if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+           if any(model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
                return StableDiffusion2ModelFormat.Checkpoint

        raise InvalidModelException(f"Not a valid model: {model_path}")

@@ -71,7 +71,7 @@ class TextualInversionModel(ModelBase):
            return None  # diffusers-ti

        if os.path.isfile(path):
-           if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "bin"]]):
+           if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "bin"]):
                return None

        raise InvalidModelException(f"Not a valid model: {path}")

@@ -89,7 +89,7 @@ class VaeModel(ModelBase):
            return VaeModelFormat.Diffusers

        if os.path.isfile(path):
-           if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+           if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
                return VaeModelFormat.Checkpoint

        raise InvalidModelException(f"Not a valid model: {path}")

@@ -193,6 +193,7 @@ class InvokeAIStableDiffusionPipelineOutput(StableDiffusionPipelineOutput):
        attention_map_saver (`AttentionMapSaver`): Object containing attention maps that can be displayed to the user
            after generation completes. Optional.
    """

    attention_map_saver: Optional[AttentionMapSaver]

@@ -546,11 +547,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
        # Handle ControlNet(s) and T2I-Adapter(s)
        down_block_additional_residuals = None
        mid_block_additional_residual = None
-       if control_data is not None and t2i_adapter_data is not None:
-           # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility
-           # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers.
-           raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).")
-       elif control_data is not None:
+       down_intrablock_additional_residuals = None
+       # if control_data is not None and t2i_adapter_data is not None:
+       #     TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility
+       #     between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers.
+       #     raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).")
+       # elif control_data is not None:
+       if control_data is not None:
            down_block_additional_residuals, mid_block_additional_residual = self.invokeai_diffuser.do_controlnet_step(
                control_data=control_data,
                sample=latent_model_input,
@@ -559,7 +562,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
                total_step_count=total_step_count,
                conditioning_data=conditioning_data,
            )
-       elif t2i_adapter_data is not None:
+       # elif t2i_adapter_data is not None:
+       if t2i_adapter_data is not None:
            accum_adapter_state = None
            for single_t2i_adapter_data in t2i_adapter_data:
                # Determine the T2I-Adapter weights for the current denoising step.
@@ -584,7 +588,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
                for idx, value in enumerate(single_t2i_adapter_data.adapter_state):
                    accum_adapter_state[idx] += value * t2i_adapter_weight

-           down_block_additional_residuals = accum_adapter_state
+           # down_block_additional_residuals = accum_adapter_state
+           down_intrablock_additional_residuals = accum_adapter_state

        uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step(
            sample=latent_model_input,
@@ -593,8 +598,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
            total_step_count=total_step_count,
            conditioning_data=conditioning_data,
-           # extra:
-           down_block_additional_residuals=down_block_additional_residuals,
-           mid_block_additional_residual=mid_block_additional_residual,
+           down_block_additional_residuals=down_block_additional_residuals,  # for ControlNet
+           mid_block_additional_residual=mid_block_additional_residual,  # for ControlNet
+           down_intrablock_additional_residuals=down_intrablock_additional_residuals,  # for T2I-Adapter
        )

        guidance_scale = conditioning_data.guidance_scale

@@ -54,13 +54,13 @@ class Context:
        self.clear_requests(cleanup=True)

    def register_cross_attention_modules(self, model):
-       for name, module in get_cross_attention_modules(model, CrossAttentionType.SELF):
+       for name, _module in get_cross_attention_modules(model, CrossAttentionType.SELF):
            if name in self.self_cross_attention_module_identifiers:
-               assert False, f"name {name} cannot appear more than once"
+               raise AssertionError(f"name {name} cannot appear more than once")
            self.self_cross_attention_module_identifiers.append(name)
-       for name, module in get_cross_attention_modules(model, CrossAttentionType.TOKENS):
+       for name, _module in get_cross_attention_modules(model, CrossAttentionType.TOKENS):
            if name in self.tokens_cross_attention_module_identifiers:
-               assert False, f"name {name} cannot appear more than once"
+               raise AssertionError(f"name {name} cannot appear more than once")
            self.tokens_cross_attention_module_identifiers.append(name)

    def request_save_attention_maps(self, cross_attention_type: CrossAttentionType):
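`assert False, msg` → `raise AssertionError(msg)` (ruff B011) is not cosmetic: asserts are compiled out under `python -O`, so the duplicate-name guard would silently vanish in optimized runs, while the explicit raise always fires. A minimal sketch:

    # Run with `python -O this_file.py` and the assert disappears,
    # while the explicit raise keeps guarding:
    def check_assert(name, seen):
        assert name not in seen, f"name {name} cannot appear more than once"

    def check_raise(name, seen):
        if name in seen:
            raise AssertionError(f"name {name} cannot appear more than once")
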
@@ -170,7 +170,7 @@ class Context:
        self.saved_cross_attention_maps = {}

    def offload_saved_attention_slices_to_cpu(self):
-       for key, map_dict in self.saved_cross_attention_maps.items():
+       for _key, map_dict in self.saved_cross_attention_maps.items():
            for offset, slice in map_dict["slices"].items():
                map_dict[offset] = slice.to("cpu")

@@ -433,7 +433,7 @@ def inject_attention_function(unet, context: Context):
        module.identifier = identifier
        try:
            module.set_attention_slice_wrangler(attention_slice_wrangler)
-           module.set_slicing_strategy_getter(lambda module: context.get_slicing_strategy(identifier))
+           module.set_slicing_strategy_getter(lambda module: context.get_slicing_strategy(identifier))  # noqa: B023
        except AttributeError as e:
            if is_attribute_error_about(e, "set_attention_slice_wrangler"):
                print(f"TODO: implement set_attention_slice_wrangler for {type(module)}")  # TODO
@@ -445,7 +445,7 @@ def remove_attention_function(unet):
    cross_attention_modules = get_cross_attention_modules(
        unet, CrossAttentionType.TOKENS
    ) + get_cross_attention_modules(unet, CrossAttentionType.SELF)
-   for identifier, module in cross_attention_modules:
+   for _identifier, module in cross_attention_modules:
        try:
            # clear wrangler callback
            module.set_attention_slice_wrangler(None)
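The `# noqa: B023` acknowledges rather than fixes ruff's function-defined-in-loop warning: a lambda created inside a loop captures loop variables by reference, so every lambda sees the final value. A minimal sketch of the hazard and the usual default-argument fix:

    # All three lambdas see the final value of i (late binding):
    fns = [lambda: i for i in range(3)]
    print([f() for f in fns])  # [2, 2, 2]

    # Binding i as a default argument captures the value at definition time:
    fns = [lambda i=i: i for i in range(3)]
    print([f() for f in fns])  # [0, 1, 2]
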
@@ -56,7 +56,7 @@ class AttentionMapSaver:

        merged = None

-       for key, maps in self.collated_maps.items():
+       for _key, maps in self.collated_maps.items():
            # maps has shape [(H*W), N] for N tokens
            # but we want [N, H, W]
            this_scale_factor = math.sqrt(maps.shape[0] / (latents_width * latents_height))

@@ -123,7 +123,7 @@ class InvokeAIDiffuserComponent:
        # control_data should be type List[ControlNetData]
        # this loop covers both ControlNet (one ControlNetData in list)
        # and MultiControlNet (multiple ControlNetData in list)
-       for i, control_datum in enumerate(control_data):
+       for _i, control_datum in enumerate(control_data):
            control_mode = control_datum.control_mode
            # soft_injection and cfg_injection are the two ControlNet control_mode booleans
            # that are combined at higher level to make control_mode enum
@@ -214,7 +214,7 @@ class InvokeAIDiffuserComponent:
                # add controlnet outputs together if have multiple controlnets
                down_block_res_samples = [
                    samples_prev + samples_curr
-                   for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
+                   for samples_prev, samples_curr in zip(down_block_res_samples, down_samples, strict=True)
                ]
                mid_block_res_sample += mid_sample

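`zip(..., strict=True)` (Python 3.10+, ruff B905) turns a silent truncation into a loud `ValueError` if the two residual lists ever drift out of length, which is exactly the failure worth surfacing when summing multi-ControlNet outputs. A minimal sketch:

    a = [1, 2, 3]
    b = [10, 20]

    print(list(zip(a, b)))  # [(1, 10), (2, 20)] -- third element silently dropped

    try:
        list(zip(a, b, strict=True))  # Python 3.10+
    except ValueError as err:
        print(err)  # zip() argument 2 is shorter than argument 1
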
@@ -260,7 +260,6 @@ class InvokeAIDiffuserComponent:
                conditioning_data,
                **kwargs,
            )
-
        else:
            (
                unconditioned_next_x,
@@ -410,6 +409,15 @@ class InvokeAIDiffuserComponent:
                uncond_down_block.append(_uncond_down)
                cond_down_block.append(_cond_down)

+       uncond_down_intrablock, cond_down_intrablock = None, None
+       down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None)
+       if down_intrablock_additional_residuals is not None:
+           uncond_down_intrablock, cond_down_intrablock = [], []
+           for down_intrablock in down_intrablock_additional_residuals:
+               _uncond_down, _cond_down = down_intrablock.chunk(2)
+               uncond_down_intrablock.append(_uncond_down)
+               cond_down_intrablock.append(_cond_down)
+
        uncond_mid_block, cond_mid_block = None, None
        mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None)
        if mid_block_additional_residual is not None:
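The added block splits each T2I-Adapter residual into its unconditioned and conditioned halves: the tensors arrive with both halves concatenated along the batch dimension, and `Tensor.chunk(2)` splits them back apart. A minimal sketch (toy shapes, assuming torch is installed):

    import torch

    # A batch holding the unconditioned half followed by the conditioned half:
    residual = torch.cat([torch.zeros(2, 4), torch.ones(2, 4)], dim=0)  # shape [4, 4]

    uncond, cond = residual.chunk(2)  # two views, each shape [2, 4]
    print(uncond.shape, cond.shape)   # torch.Size([2, 4]) torch.Size([2, 4])
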
@@ -441,6 +449,7 @@ class InvokeAIDiffuserComponent:
            cross_attention_kwargs=cross_attention_kwargs,
            down_block_additional_residuals=uncond_down_block,
            mid_block_additional_residual=uncond_mid_block,
+           down_intrablock_additional_residuals=uncond_down_intrablock,
            added_cond_kwargs=added_cond_kwargs,
            **kwargs,
        )
@@ -470,6 +479,7 @@ class InvokeAIDiffuserComponent:
            cross_attention_kwargs=cross_attention_kwargs,
            down_block_additional_residuals=cond_down_block,
            mid_block_additional_residual=cond_mid_block,
+           down_intrablock_additional_residuals=cond_down_intrablock,
            added_cond_kwargs=added_cond_kwargs,
            **kwargs,
        )
@@ -494,6 +504,15 @@ class InvokeAIDiffuserComponent:
                uncond_down_block.append(_uncond_down)
                cond_down_block.append(_cond_down)

+       uncond_down_intrablock, cond_down_intrablock = None, None
+       down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None)
+       if down_intrablock_additional_residuals is not None:
+           uncond_down_intrablock, cond_down_intrablock = [], []
+           for down_intrablock in down_intrablock_additional_residuals:
+               _uncond_down, _cond_down = down_intrablock.chunk(2)
+               uncond_down_intrablock.append(_uncond_down)
+               cond_down_intrablock.append(_cond_down)
+
        uncond_mid_block, cond_mid_block = None, None
        mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None)
        if mid_block_additional_residual is not None:
@@ -522,6 +541,7 @@ class InvokeAIDiffuserComponent:
            {"swap_cross_attn_context": cross_attn_processor_context},
            down_block_additional_residuals=uncond_down_block,
            mid_block_additional_residual=uncond_mid_block,
+           down_intrablock_additional_residuals=uncond_down_intrablock,
            added_cond_kwargs=added_cond_kwargs,
            **kwargs,
        )
@@ -541,6 +561,7 @@ class InvokeAIDiffuserComponent:
            {"swap_cross_attn_context": cross_attn_processor_context},
            down_block_additional_residuals=cond_down_block,
            mid_block_additional_residual=cond_mid_block,
+           down_intrablock_additional_residuals=cond_down_intrablock,
            added_cond_kwargs=added_cond_kwargs,
            **kwargs,
        )
@@ -621,7 +642,9 @@ class InvokeAIDiffuserComponent:

        deltas = None
        uncond_latents = None
-       weighted_cond_list = c_or_weighted_c_list if type(c_or_weighted_c_list) is list else [(c_or_weighted_c_list, 1)]
+       weighted_cond_list = (
+           c_or_weighted_c_list if isinstance(c_or_weighted_c_list, list) else [(c_or_weighted_c_list, 1)]
+       )

        # below is fugly omg
        conditionings = [uc] + [c for c, weight in weighted_cond_list]

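`type(x) is list` → `isinstance(x, list)` also changes behavior slightly: `isinstance` accepts subclasses where the exact-type check does not, which is almost always what a branch like this wants. A minimal sketch:

    class WeightedList(list):
        pass

    conds = WeightedList([("prompt", 1.0)])

    print(type(conds) is list)      # False -- exact-type check rejects the subclass
    print(isinstance(conds, list))  # True  -- isinstance accepts it
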
@@ -10,32 +10,34 @@ from diffusers import (
    HeunDiscreteScheduler,
    KDPM2AncestralDiscreteScheduler,
    KDPM2DiscreteScheduler,
+   LCMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UniPCMultistepScheduler,
)

-SCHEDULER_MAP = dict(
-   ddim=(DDIMScheduler, dict()),
-   ddpm=(DDPMScheduler, dict()),
-   deis=(DEISMultistepScheduler, dict()),
-   lms=(LMSDiscreteScheduler, dict(use_karras_sigmas=False)),
-   lms_k=(LMSDiscreteScheduler, dict(use_karras_sigmas=True)),
-   pndm=(PNDMScheduler, dict()),
-   heun=(HeunDiscreteScheduler, dict(use_karras_sigmas=False)),
-   heun_k=(HeunDiscreteScheduler, dict(use_karras_sigmas=True)),
-   euler=(EulerDiscreteScheduler, dict(use_karras_sigmas=False)),
-   euler_k=(EulerDiscreteScheduler, dict(use_karras_sigmas=True)),
-   euler_a=(EulerAncestralDiscreteScheduler, dict()),
-   kdpm_2=(KDPM2DiscreteScheduler, dict()),
-   kdpm_2_a=(KDPM2AncestralDiscreteScheduler, dict()),
-   dpmpp_2s=(DPMSolverSinglestepScheduler, dict(use_karras_sigmas=False)),
-   dpmpp_2s_k=(DPMSolverSinglestepScheduler, dict(use_karras_sigmas=True)),
-   dpmpp_2m=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=False)),
-   dpmpp_2m_k=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=True)),
-   dpmpp_2m_sde=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=False, algorithm_type="sde-dpmsolver++")),
-   dpmpp_2m_sde_k=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=True, algorithm_type="sde-dpmsolver++")),
-   dpmpp_sde=(DPMSolverSDEScheduler, dict(use_karras_sigmas=False, noise_sampler_seed=0)),
-   dpmpp_sde_k=(DPMSolverSDEScheduler, dict(use_karras_sigmas=True, noise_sampler_seed=0)),
-   unipc=(UniPCMultistepScheduler, dict(cpu_only=True)),
-)
+SCHEDULER_MAP = {
+   "ddim": (DDIMScheduler, {}),
+   "ddpm": (DDPMScheduler, {}),
+   "deis": (DEISMultistepScheduler, {}),
+   "lms": (LMSDiscreteScheduler, {"use_karras_sigmas": False}),
+   "lms_k": (LMSDiscreteScheduler, {"use_karras_sigmas": True}),
+   "pndm": (PNDMScheduler, {}),
+   "heun": (HeunDiscreteScheduler, {"use_karras_sigmas": False}),
+   "heun_k": (HeunDiscreteScheduler, {"use_karras_sigmas": True}),
+   "euler": (EulerDiscreteScheduler, {"use_karras_sigmas": False}),
+   "euler_k": (EulerDiscreteScheduler, {"use_karras_sigmas": True}),
+   "euler_a": (EulerAncestralDiscreteScheduler, {}),
+   "kdpm_2": (KDPM2DiscreteScheduler, {}),
+   "kdpm_2_a": (KDPM2AncestralDiscreteScheduler, {}),
+   "dpmpp_2s": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": False}),
+   "dpmpp_2s_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True}),
+   "dpmpp_2m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False}),
+   "dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True}),
+   "dpmpp_2m_sde": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "algorithm_type": "sde-dpmsolver++"}),
+   "dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "algorithm_type": "sde-dpmsolver++"}),
+   "dpmpp_sde": (DPMSolverSDEScheduler, {"use_karras_sigmas": False, "noise_sampler_seed": 0}),
+   "dpmpp_sde_k": (DPMSolverSDEScheduler, {"use_karras_sigmas": True, "noise_sampler_seed": 0}),
+   "unipc": (UniPCMultistepScheduler, {"cpu_only": True}),
+   "lcm": (LCMScheduler, {}),
+}

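Beyond the literal-syntax cleanup, the `{...}` form allows arbitrary string keys, whereas `dict(...)` keywords must be valid identifiers. Consumers of a map like this look up the scheduler class together with its extra constructor kwargs and rebuild the scheduler from an existing config. A hedged usage sketch (two-entry toy map; the toy config stands in for a real `pipe.scheduler.config`; assumes diffusers is installed):

    from diffusers import DDIMScheduler, EulerDiscreteScheduler

    scheduler_map = {
        "ddim": (DDIMScheduler, {}),
        "euler_k": (EulerDiscreteScheduler, {"use_karras_sigmas": True}),
    }

    scheduler_class, extra_kwargs = scheduler_map["euler_k"]
    scheduler = scheduler_class.from_config({"num_train_timesteps": 1000}, **extra_kwargs)
    print(type(scheduler).__name__)  # EulerDiscreteScheduler
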
@@ -615,7 +615,7 @@ def do_textual_inversion_training(
    vae_info = model_manager.get_model(*model_meta, submodel=SubModelType.Vae)
    unet_info = model_manager.get_model(*model_meta, submodel=SubModelType.UNet)

-   pipeline_args = dict(local_files_only=True)
+   pipeline_args = {"local_files_only": True}
    if tokenizer_name:
        tokenizer = CLIPTokenizer.from_pretrained(tokenizer_name, **pipeline_args)
    else:

@@ -732,7 +732,9 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):

        controlnet_down_block_res_samples = ()

-       for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
+       for down_block_res_sample, controlnet_block in zip(
+           down_block_res_samples, self.controlnet_down_blocks, strict=True
+       ):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)

@@ -745,7 +747,9 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
            scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device)  # 0.1 to 1.0

            scales = scales * conditioning_scale
-           down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
+           down_block_res_samples = [
+               sample * scale for sample, scale in zip(down_block_res_samples, scales, strict=True)
+           ]
            mid_block_res_sample = mid_block_res_sample * scales[-1]  # last one
        else:
            down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]

@@ -225,34 +225,34 @@ def basicConfig(**kwargs):


_FACILITY_MAP = (
-   dict(
-       LOG_KERN=syslog.LOG_KERN,
-       LOG_USER=syslog.LOG_USER,
-       LOG_MAIL=syslog.LOG_MAIL,
-       LOG_DAEMON=syslog.LOG_DAEMON,
-       LOG_AUTH=syslog.LOG_AUTH,
-       LOG_LPR=syslog.LOG_LPR,
-       LOG_NEWS=syslog.LOG_NEWS,
-       LOG_UUCP=syslog.LOG_UUCP,
-       LOG_CRON=syslog.LOG_CRON,
-       LOG_SYSLOG=syslog.LOG_SYSLOG,
-       LOG_LOCAL0=syslog.LOG_LOCAL0,
-       LOG_LOCAL1=syslog.LOG_LOCAL1,
-       LOG_LOCAL2=syslog.LOG_LOCAL2,
-       LOG_LOCAL3=syslog.LOG_LOCAL3,
-       LOG_LOCAL4=syslog.LOG_LOCAL4,
-       LOG_LOCAL5=syslog.LOG_LOCAL5,
-       LOG_LOCAL6=syslog.LOG_LOCAL6,
-       LOG_LOCAL7=syslog.LOG_LOCAL7,
-   )
+   {
+       "LOG_KERN": syslog.LOG_KERN,
+       "LOG_USER": syslog.LOG_USER,
+       "LOG_MAIL": syslog.LOG_MAIL,
+       "LOG_DAEMON": syslog.LOG_DAEMON,
+       "LOG_AUTH": syslog.LOG_AUTH,
+       "LOG_LPR": syslog.LOG_LPR,
+       "LOG_NEWS": syslog.LOG_NEWS,
+       "LOG_UUCP": syslog.LOG_UUCP,
+       "LOG_CRON": syslog.LOG_CRON,
+       "LOG_SYSLOG": syslog.LOG_SYSLOG,
+       "LOG_LOCAL0": syslog.LOG_LOCAL0,
+       "LOG_LOCAL1": syslog.LOG_LOCAL1,
+       "LOG_LOCAL2": syslog.LOG_LOCAL2,
+       "LOG_LOCAL3": syslog.LOG_LOCAL3,
+       "LOG_LOCAL4": syslog.LOG_LOCAL4,
+       "LOG_LOCAL5": syslog.LOG_LOCAL5,
+       "LOG_LOCAL6": syslog.LOG_LOCAL6,
+       "LOG_LOCAL7": syslog.LOG_LOCAL7,
+   }
    if SYSLOG_AVAILABLE
-   else dict()
+   else {}
)

-_SOCK_MAP = dict(
-   SOCK_STREAM=socket.SOCK_STREAM,
-   SOCK_DGRAM=socket.SOCK_DGRAM,
-)
+_SOCK_MAP = {
+   "SOCK_STREAM": socket.SOCK_STREAM,
+   "SOCK_DGRAM": socket.SOCK_DGRAM,
+}


class InvokeAIFormatter(logging.Formatter):
@@ -344,7 +344,7 @@ LOG_FORMATTERS = {


class InvokeAILogger(object):
-   loggers = dict()
+   loggers = {}

    @classmethod
    def get_logger(
@@ -364,7 +364,7 @@ class InvokeAILogger(object):
    @classmethod
    def get_loggers(cls, config: InvokeAIAppConfig) -> list[logging.Handler]:
        handler_strs = config.log_handlers
-       handlers = list()
+       handlers = []
        for handler in handler_strs:
            handler_name, *args = handler.split("=", 2)
            args = args[0] if len(args) > 0 else None
@@ -398,7 +398,7 @@ class InvokeAILogger(object):
            raise ValueError("syslog is not available on this system")
        if not args:
            args = "/dev/log" if Path("/dev/log").exists() else "address:localhost:514"
-       syslog_args = dict()
+       syslog_args = {}
        try:
            for a in args.split(","):
                arg_name, *arg_value = a.split(":", 2)
@@ -434,7 +434,7 @@ class InvokeAILogger(object):
            path = url.path
            port = url.port or 80

-           syslog_args = dict()
+           syslog_args = {}
            for a in arg_list:
                arg_name, *arg_value = a.split(":", 2)
                if arg_name == "method":

@@ -26,7 +26,7 @@ def log_txt_as_img(wh, xc, size=10):
    # wh a tuple of (width, height)
    # xc a list of captions to plot
    b = len(xc)
-   txts = list()
+   txts = []
    for bi in range(b):
        txt = Image.new("RGB", wh, color="white")
        draw = ImageDraw.Draw(txt)
@@ -90,7 +90,7 @@ def instantiate_from_config(config, **kwargs):
        elif config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
-   return get_obj_from_str(config["target"])(**config.get("params", dict()), **kwargs)
+   return get_obj_from_str(config["target"])(**config.get("params", {}), **kwargs)


def get_obj_from_str(string, reload=False):
@@ -228,11 +228,12 @@ def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10
    angles = 2 * math.pi * rand_val
    gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1).to(device)

-   tile_grads = (
-       lambda slice1, slice2: gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]]
-       .repeat_interleave(d[0], 0)
-       .repeat_interleave(d[1], 1)
-   )
+   def tile_grads(slice1, slice2):
+       return (
+           gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]]
+           .repeat_interleave(d[0], 0)
+           .repeat_interleave(d[1], 1)
+       )

    def dot(grad, shift):
        return (

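Converting the assigned lambda to a `def` (ruff E731) gives the function a real name, so tracebacks and profiler output report `tile_grads` instead of `<lambda>`. A minimal sketch:

    square_lambda = lambda x: x * x  # E731 flags exactly this pattern
    def square(x):
        return x * x

    print(square_lambda.__name__)  # <lambda>
    print(square.__name__)         # square
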
@@ -117,9 +117,6 @@ sd-1/embedding/EasyNegative:
  recommended: True
sd-1/embedding/ahx-beta-453407d:
  repo_id: sd-concepts-library/ahx-beta-453407d
-sd-1/lora/LowRA:
-  path: https://civitai.com/api/download/models/63006
-  recommended: True
sd-1/lora/Ink scenery:
  path: https://civitai.com/api/download/models/83390
sd-1/ip_adapter/ip_adapter_sd15:

@@ -341,19 +341,19 @@ class InvokeAIMetadataParser:
        # this was more elegant as a case statement, but that's not available in python 3.9
        if old_scheduler is None:
            return None
-       scheduler_map = dict(
-           ddim="ddim",
-           plms="pnmd",
-           k_lms="lms",
-           k_dpm_2="kdpm_2",
-           k_dpm_2_a="kdpm_2_a",
-           dpmpp_2="dpmpp_2s",
-           k_dpmpp_2="dpmpp_2m",
-           k_dpmpp_2_a=None,  # invalid, in 2.3.x, selecting this sample would just fallback to last run or plms if new session
-           k_euler="euler",
-           k_euler_a="euler_a",
-           k_heun="heun",
-       )
+       scheduler_map = {
+           "ddim": "ddim",
+           "plms": "pnmd",
+           "k_lms": "lms",
+           "k_dpm_2": "kdpm_2",
+           "k_dpm_2_a": "kdpm_2_a",
+           "dpmpp_2": "dpmpp_2s",
+           "k_dpmpp_2": "dpmpp_2m",
+           "k_dpmpp_2_a": None,  # invalid, in 2.3.x, selecting this sample would just fallback to last run or plms if new session
+           "k_euler": "euler",
+           "k_euler_a": "euler_a",
+           "k_heun": "heun",
+       }
        return scheduler_map.get(old_scheduler)

    def split_prompt(self, raw_prompt: str):

@@ -63,8 +63,8 @@ def welcome(latest_release: str, latest_prerelease: str):
        yield "[bold yellow]Options:"
        yield f"""[1] Update to the latest [bold]official release[/bold] ([italic]{latest_release}[/italic])
-[2] Manually enter the [bold]tag name[/bold] for the version you wish to update to
-[3] Manually enter the [bold]branch name[/bold] for the version you wish to update to"""
+[2] Update to the latest [bold]pre-release[/bold] (may be buggy; caveat emptor!) ([italic]{latest_prerelease}[/italic])
+[3] Manually enter the [bold]tag name[/bold] for the version you wish to update to
+[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to"""

    console.rule()
    print(

@@ -72,7 +72,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
    def __init__(self, parentApp, name, multipage=False, *args, **keywords):
        self.multipage = multipage
        self.subprocess = None
-       super().__init__(parentApp=parentApp, name=name, *args, **keywords)
+       super().__init__(parentApp=parentApp, name=name, *args, **keywords)  # noqa: B026 # TODO: maybe this is bad?

    def create(self):
        self.keypress_timeout = 10
@@ -203,14 +203,14 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
        )

        # This restores the selected page on return from an installation
-       for i in range(1, self.current_tab + 1):
+       for _i in range(1, self.current_tab + 1):
            self.tabs.h_cursor_line_down(1)
        self._toggle_tables([self.current_tab])

    ############# diffusers tab ##########
    def add_starter_pipelines(self) -> dict[str, npyscreen.widget]:
        """Add widgets responsible for selecting diffusers models"""
-       widgets = dict()
+       widgets = {}
        models = self.all_models
        starters = self.starter_models
        starter_model_labels = self.model_labels
@@ -258,10 +258,12 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
        model_type: ModelType,
        window_width: int = 120,
        install_prompt: str = None,
-       exclude: set = set(),
+       exclude: set = None,
    ) -> dict[str, npyscreen.widget]:
        """Generic code to create model selection widgets"""
-       widgets = dict()
+       if exclude is None:
+           exclude = set()
+       widgets = {}
        model_list = [x for x in self.all_models if self.all_models[x].model_type == model_type and x not in exclude]
        model_labels = [self.model_labels[x] for x in model_list]

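`exclude: set = set()` was a mutable-default bug (ruff B006): the default set is created once at function definition and shared by every call, so exclusions would leak between invocations. The `None` sentinel allocates a fresh set per call; the same fix appears again below for the npyscreen widget constructors. A minimal sketch:

    def broken(item, seen=set()):  # one shared set for all calls
        seen.add(item)
        return seen

    def fixed(item, seen=None):    # fresh set per call
        if seen is None:
            seen = set()
        seen.add(item)
        return seen

    print(broken("a"), broken("b"))  # {'a', 'b'} {'a', 'b'} -- state leaks (set order may vary)
    print(fixed("a"), fixed("b"))    # {'a'} {'b'}
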
@@ -366,13 +368,13 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
        ]

        for group in widgets:
-           for k, v in group.items():
+           for _k, v in group.items():
                try:
                    v.hidden = True
                    v.editable = False
                except Exception:
                    pass
-       for k, v in widgets[selected_tab].items():
+       for _k, v in widgets[selected_tab].items():
            try:
                v.hidden = False
                if not isinstance(v, (npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
@@ -391,7 +393,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
        label_width = max([len(models[x].name) for x in models])
        description_width = window_width - label_width - checkbox_width - spacing_width

-       result = dict()
+       result = {}
        for x in models.keys():
            description = models[x].description
            description = (
@@ -433,11 +435,11 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
        parent_conn, child_conn = Pipe()
        p = Process(
            target=process_and_execute,
-           kwargs=dict(
-               opt=app.program_opts,
-               selections=app.install_selections,
-               conn_out=child_conn,
-           ),
+           kwargs={
+               "opt": app.program_opts,
+               "selections": app.install_selections,
+               "conn_out": child_conn,
+           },
        )
        p.start()
        child_conn.close()
@@ -558,7 +560,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
        for section in ui_sections:
            if "models_selected" not in section:
                continue
-           selected = set([section["models"][x] for x in section["models_selected"].value])
+           selected = {section["models"][x] for x in section["models_selected"].value}
            models_to_install = [x for x in selected if not self.all_models[x].installed]
            models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed]
            selections.remove_models.extend(models_to_remove)

@@ -11,6 +11,7 @@ import sys
import textwrap
from curses import BUTTON2_CLICKED, BUTTON3_CLICKED
from shutil import get_terminal_size
+from typing import Optional

import npyscreen
import npyscreen.wgmultiline as wgmultiline
@@ -243,7 +244,9 @@ class SelectColumnBase:


class MultiSelectColumns(SelectColumnBase, npyscreen.MultiSelect):
-   def __init__(self, screen, columns: int = 1, values: list = [], **keywords):
+   def __init__(self, screen, columns: int = 1, values: Optional[list] = None, **keywords):
+       if values is None:
+           values = []
        self.columns = columns
        self.value_cnt = len(values)
        self.rows = math.ceil(self.value_cnt / self.columns)
@@ -267,7 +270,9 @@ class SingleSelectWithChanged(npyscreen.SelectOne):
class SingleSelectColumnsSimple(SelectColumnBase, SingleSelectWithChanged):
    """Row of radio buttons. Spacebar to select."""

-   def __init__(self, screen, columns: int = 1, values: list = [], **keywords):
+   def __init__(self, screen, columns: int = 1, values: list = None, **keywords):
+       if values is None:
+           values = []
        self.columns = columns
        self.value_cnt = len(values)
        self.rows = math.ceil(self.value_cnt / self.columns)

@@ -274,14 +274,15 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
        else:
            interp = self.interpolations[self.merge_method.value[0]]

-       args = dict(
-           model_names=models,
-           base_model=tuple(BaseModelType)[self.base_select.value[0]],
-           alpha=self.alpha.value,
-           interp=interp,
-           force=self.force.value,
-           merged_model_name=self.merged_model_name.value,
-       )
+       bases = ["sd-1", "sd-2", "sdxl"]
+       args = {
+           "model_names": models,
+           "base_model": BaseModelType(bases[self.base_select.value[0]]),
+           "alpha": self.alpha.value,
+           "interp": interp,
+           "force": self.force.value,
+           "merged_model_name": self.merged_model_name.value,
+       }
        return args

    def check_for_overwrite(self) -> bool:
@@ -296,7 +297,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
    def validate_field_values(self) -> bool:
        bad_fields = []
        model_names = self.model_names
-       selected_models = set((model_names[self.model1.value[0]], model_names[self.model2.value[0]]))
+       selected_models = {model_names[self.model1.value[0]], model_names[self.model2.value[0]]}
        if self.model3.value[0] > 0:
            selected_models.add(model_names[self.model3.value[0] - 1])
        if len(selected_models) < 2:

@@ -276,13 +276,13 @@ class textualInversionForm(npyscreen.FormMultiPageAction):

    def get_model_names(self) -> Tuple[List[str], int]:
        conf = OmegaConf.load(config.root_dir / "configs/models.yaml")
-       model_names = [idx for idx in sorted(list(conf.keys())) if conf[idx].get("format", None) == "diffusers"]
+       model_names = [idx for idx in sorted(conf.keys()) if conf[idx].get("format", None) == "diffusers"]
        defaults = [idx for idx in range(len(model_names)) if "default" in conf[model_names[idx]]]
        default = defaults[0] if len(defaults) > 0 else 0
        return (model_names, default)

    def marshall_arguments(self) -> dict:
-       args = dict()
+       args = {}

        # the choices
        args.update(

@@ -54,42 +54,35 @@
    ]
  },
  "dependencies": {
-   "@chakra-ui/anatomy": "^2.2.1",
+   "@chakra-ui/anatomy": "^2.2.2",
    "@chakra-ui/icons": "^2.1.1",
-   "@chakra-ui/react": "^2.8.1",
-   "@chakra-ui/styled-system": "^2.9.1",
-   "@chakra-ui/theme-tools": "^2.1.1",
+   "@chakra-ui/react": "^2.8.2",
+   "@chakra-ui/styled-system": "^2.9.2",
+   "@chakra-ui/theme-tools": "^2.1.2",
    "@dagrejs/graphlib": "^2.1.13",
-   "@dnd-kit/core": "^6.0.8",
-   "@dnd-kit/modifiers": "^6.0.1",
-   "@dnd-kit/utilities": "^3.2.1",
+   "@dnd-kit/core": "^6.1.0",
+   "@dnd-kit/utilities": "^3.2.2",
    "@emotion/react": "^11.11.1",
    "@emotion/styled": "^11.11.0",
    "@floating-ui/react-dom": "^2.0.2",
-   "@fontsource-variable/inter": "^5.0.13",
-   "@fontsource/inter": "^5.0.13",
+   "@fontsource-variable/inter": "^5.0.15",
    "@mantine/core": "^6.0.19",
    "@mantine/form": "^6.0.19",
    "@mantine/hooks": "^6.0.19",
    "@nanostores/react": "^0.7.1",
    "@reduxjs/toolkit": "^1.9.7",
    "@roarr/browser-log-writer": "^1.3.0",
    "@stevebel/png": "^1.5.1",
    "compare-versions": "^6.1.0",
    "dateformat": "^5.0.3",
    "formik": "^2.4.5",
    "framer-motion": "^10.16.4",
    "fuse.js": "^6.6.2",
-   "i18next": "^23.5.1",
-   "i18next-browser-languagedetector": "^7.0.2",
-   "i18next-http-backend": "^2.2.2",
-   "konva": "^9.2.2",
+   "i18next": "^23.6.0",
+   "i18next-http-backend": "^2.3.1",
+   "konva": "^9.2.3",
    "lodash-es": "^4.17.21",
-   "nanostores": "^0.9.2",
+   "nanostores": "^0.9.4",
    "new-github-issue-url": "^1.0.0",
-   "openapi-fetch": "^0.7.10",
-   "overlayscrollbars": "^2.3.2",
-   "overlayscrollbars-react": "^0.5.2",
+   "openapi-fetch": "^0.8.1",
+   "overlayscrollbars": "^2.4.4",
+   "overlayscrollbars-react": "^0.5.3",
    "patch-package": "^8.0.0",
    "query-string": "^8.1.0",
    "react": "^18.2.0",
@@ -98,26 +91,25 @@
    "react-dropzone": "^14.2.3",
    "react-error-boundary": "^4.0.11",
    "react-hotkeys-hook": "4.4.1",
-   "react-i18next": "^13.3.0",
+   "react-i18next": "^13.3.1",
    "react-icons": "^4.11.0",
    "react-konva": "^18.2.10",
    "react-redux": "^8.1.3",
    "react-resizable-panels": "^0.0.55",
    "react-use": "^17.4.0",
-   "react-virtuoso": "^4.6.1",
-   "react-zoom-pan-pinch": "^3.2.0",
-   "reactflow": "^11.9.3",
+   "react-virtuoso": "^4.6.2",
+   "reactflow": "^11.9.4",
    "redux-dynamic-middlewares": "^2.2.0",
    "redux-remember": "^4.0.4",
-   "roarr": "^7.15.1",
+   "roarr": "^7.18.3",
    "serialize-error": "^11.0.2",
    "socket.io-client": "^4.7.2",
-   "type-fest": "^4.4.0",
-   "use-debounce": "^9.0.4",
+   "type-fest": "^4.7.1",
+   "use-debounce": "^10.0.0",
    "use-image": "^1.1.1",
    "uuid": "^9.0.1",
    "zod": "^3.22.4",
-   "zod-validation-error": "^1.5.0"
+   "zod-validation-error": "^2.1.0"
  },
  "peerDependencies": {
    "@chakra-ui/cli": "^2.4.0",
@@ -128,39 +120,33 @@
  },
  "devDependencies": {
    "@chakra-ui/cli": "^2.4.1",
-   "@types/dateformat": "^5.0.0",
-   "@types/lodash-es": "^4.17.9",
-   "@types/node": "^20.8.6",
-   "@types/react": "^18.2.28",
-   "@types/react-dom": "^18.2.13",
-   "@types/react-redux": "^7.1.27",
-   "@types/react-transition-group": "^4.4.7",
-   "@types/uuid": "^9.0.5",
-   "@typescript-eslint/eslint-plugin": "^6.7.5",
-   "@typescript-eslint/parser": "^6.7.5",
-   "@vitejs/plugin-react-swc": "^3.4.0",
-   "axios": "^1.5.1",
-   "babel-plugin-transform-imports": "^2.0.0",
-   "concurrently": "^8.2.1",
-   "eslint": "^8.51.0",
+   "@types/dateformat": "^5.0.2",
+   "@types/lodash-es": "^4.17.11",
+   "@types/node": "^20.9.0",
+   "@types/react": "^18.2.37",
+   "@types/react-dom": "^18.2.15",
+   "@types/react-redux": "^7.1.30",
+   "@types/uuid": "^9.0.7",
+   "@typescript-eslint/eslint-plugin": "^6.10.0",
+   "@typescript-eslint/parser": "^6.10.0",
+   "@vitejs/plugin-react-swc": "^3.4.1",
+   "concurrently": "^8.2.2",
+   "eslint": "^8.53.0",
    "eslint-config-prettier": "^9.0.0",
    "eslint-plugin-prettier": "^5.0.1",
    "eslint-plugin-react": "^7.33.2",
    "eslint-plugin-react-hooks": "^4.6.0",
    "form-data": "^4.0.0",
    "husky": "^8.0.3",
-   "lint-staged": "^15.0.1",
+   "lint-staged": "^15.0.2",
    "madge": "^6.1.0",
    "openapi-types": "^12.1.3",
    "openapi-typescript": "^6.7.0",
    "postinstall-postinstall": "^2.1.0",
    "prettier": "^3.0.3",
    "rollup-plugin-visualizer": "^5.9.2",
    "ts-toolbelt": "^9.6.0",
    "typescript": "^5.2.2",
-   "vite": "^4.4.11",
+   "vite": "^4.5.0",
    "vite-plugin-css-injected-by-js": "^3.3.0",
-   "vite-plugin-dts": "^3.6.0",
+   "vite-plugin-dts": "^3.6.3",
    "vite-plugin-eslint": "^1.8.1",
    "vite-tsconfig-paths": "^4.2.1",
    "yarn": "^1.22.19"

@@ -83,13 +83,14 @@
    "darkMode": "Dunkler Modus",
    "loading": "Lade",
    "random": "Zufall",
-   "batch": "Batch-Manager",
+   "batch": "Stapel-Manager",
    "advanced": "Erweitert",
    "langBrPortuguese": "Portugiesisch (Brasilien)",
    "unifiedCanvas": "Einheitliche Leinwand",
    "openInNewTab": "In einem neuem Tab öffnen",
    "statusProcessing": "wird bearbeitet",
-   "linear": "Linear"
+   "linear": "Linear",
+   "imagePrompt": "Bild Prompt"
  },
  "gallery": {
    "generations": "Erzeugungen",
@@ -655,7 +656,8 @@
    "downloadBoard": "Ordner runterladen",
    "changeBoard": "Ordner wechseln",
    "loading": "Laden...",
-   "clearSearch": "Suche leeren"
+   "clearSearch": "Suche leeren",
+   "bottomMessage": "Durch das Löschen dieses Ordners und seiner Bilder werden alle Funktionen zurückgesetzt, die sie derzeit verwenden."
  },
  "controlnet": {
    "showAdvanced": "Zeige Erweitert",
@@ -704,7 +706,17 @@
    "depthZoeDescription": "Tiefenmap erstellen mit Zoe",
    "setControlImageDimensions": "Setze Control Bild Auflösung auf Breite/Höhe",
    "handAndFace": "Hand und Gesicht",
-   "enableIPAdapter": "Aktiviere IP Adapter"
+   "enableIPAdapter": "Aktiviere IP Adapter",
+   "resize": "Größe ändern",
+   "resetControlImage": "Zurücksetzen vom Referenz Bild",
+   "balanced": "Ausgewogen",
+   "prompt": "Prompt",
+   "resizeMode": "Größenänderungsmodus",
+   "processor": "Prozessor",
+   "saveControlImage": "Speichere Referenz Bild",
+   "safe": "Speichern",
+   "ipAdapterImageFallback": "Kein IP Adapter Bild ausgewählt",
+   "resetIPAdapterImage": "Zurücksetzen vom IP Adapter Bild"
  },
  "queue": {
    "status": "Status",
@@ -734,7 +746,19 @@
    "session": "Sitzung",
    "queueTotal": "{{total}} Gesamt",
    "resume": "Wieder aufnehmen",
-   "item": "Auftrag"
+   "item": "Auftrag",
+   "notReady": "Warteschlange noch nicht bereit",
+   "batchValues": "Stapel Werte",
+   "queueCountPrediction": "{{predicted}} zur Warteschlange hinzufügen",
+   "queuedCount": "{{pending}} wartenden Elemente",
+   "clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
+   "completedIn": "Fertig in",
+   "cancelBatchSucceeded": "Stapel abgebrochen",
+   "cancelBatch": "Stapel stoppen",
+   "enqueueing": "Stapel in der Warteschlange",
+   "queueMaxExceeded": "Maximum von {{max_queue_size}} Elementen erreicht, würde {{skip}} Elemente überspringen",
+   "cancelBatchFailed": "Problem beim Abbruch vom Stapel",
+   "clearQueueAlertDialog2": "bist du sicher die Warteschlange zu leeren?"
  },
  "metadata": {
    "negativePrompt": "Negativ Beschreibung",
@@ -788,5 +812,10 @@
    "hits": "Cache Treffer",
    "enable": "Aktivieren",
    "clear": "Leeren"
- }
+ },
+ "embedding": {
+   "noMatchingEmbedding": "Keine passenden Embeddings",
+   "addEmbedding": "Embedding hinzufügen",
+   "incompatibleModel": "Inkompatibles Basismodell:"
+ }
}

@@ -221,6 +221,19 @@
    "resetIPAdapterImage": "Reset IP Adapter Image",
    "ipAdapterImageFallback": "No IP Adapter Image Selected"
  },
+ "hrf": {
+   "hrf": "High Resolution Fix",
+   "enableHrf": "Enable High Resolution Fix",
+   "enableHrfTooltip": "Generate with a lower initial resolution, upscale to the base resolution, then run Image-to-Image.",
+   "upscaleMethod": "Upscale Method",
+   "hrfStrength": "High Resolution Fix Strength",
+   "strengthTooltip": "Lower values result in fewer details, which may reduce potential artifacts.",
+   "metadata": {
+     "enabled": "High Resolution Fix Enabled",
+     "strength": "High Resolution Fix Strength",
+     "method": "High Resolution Fix Method"
+   }
+ },
  "embedding": {
    "addEmbedding": "Add Embedding",
    "incompatibleModel": "Incompatible base model:",
@@ -570,6 +583,7 @@
    "strength": "Image to image strength",
    "Threshold": "Noise Threshold",
    "variations": "Seed-weight pairs",
+   "vae": "VAE",
    "width": "Width",
    "workflow": "Workflow"
  },
@@ -1258,15 +1272,11 @@
  },
  "compositingBlur": {
    "heading": "Blur",
-   "paragraphs": [
-     "The blur radius of the mask."
-   ]
+   "paragraphs": ["The blur radius of the mask."]
  },
  "compositingBlurMethod": {
    "heading": "Blur Method",
-   "paragraphs": [
-     "The method of blur applied to the masked area."
-   ]
+   "paragraphs": ["The method of blur applied to the masked area."]
  },
  "compositingCoherencePass": {
    "heading": "Coherence Pass",
@@ -1276,9 +1286,7 @@
  },
  "compositingCoherenceMode": {
    "heading": "Mode",
-   "paragraphs": [
-     "The mode of the Coherence Pass."
-   ]
+   "paragraphs": ["The mode of the Coherence Pass."]
  },
  "compositingCoherenceSteps": {
    "heading": "Steps",
@@ -1296,9 +1304,7 @@
  },
  "compositingMaskAdjustments": {
    "heading": "Mask Adjustments",
-   "paragraphs": [
-     "Adjust the mask."
-   ]
+   "paragraphs": ["Adjust the mask."]
  },
  "controlNetBeginEnd": {
    "heading": "Begin / End Step Percentage",
@@ -1356,9 +1362,7 @@
  },
  "infillMethod": {
    "heading": "Infill Method",
-   "paragraphs": [
-     "Method to infill the selected area."
-   ]
+   "paragraphs": ["Method to infill the selected area."]
  },
  "lora": {
    "heading": "LoRA Weight",

@@ -1025,7 +1025,8 @@
"imageFieldDescription": "Le immagini possono essere passate tra i nodi.",
|
||||
"unableToParseEdge": "Impossibile analizzare il bordo",
|
||||
"latentsCollectionDescription": "Le immagini latenti possono essere passate tra i nodi.",
|
||||
"imageCollection": "Raccolta Immagini"
|
||||
"imageCollection": "Raccolta Immagini",
|
||||
"loRAModelField": "LoRA"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@ -1192,7 +1193,9 @@
|
||||
"noLoRAsAvailable": "Nessun LoRA disponibile",
|
||||
"noModelsAvailable": "Nessun modello disponibile",
|
||||
"selectModel": "Seleziona un modello",
|
||||
"selectLoRA": "Seleziona un LoRA"
|
||||
"selectLoRA": "Seleziona un LoRA",
|
||||
"noRefinerModelsInstalled": "Nessun modello SDXL Refiner installato",
|
||||
"noLoRAsInstalled": "Nessun LoRA installato"
|
||||
},
|
||||
"invocationCache": {
|
||||
"disable": "Disabilita",
|
||||
@ -1484,5 +1487,18 @@
|
||||
"scheduler": "Campionatore",
|
||||
"recallParameters": "Richiama i parametri",
|
||||
"noRecallParameters": "Nessun parametro da richiamare trovato"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Abilita Correzione Alta Risoluzione",
|
||||
"upscaleMethod": "Metodo di ampliamento",
|
||||
"enableHrfTooltip": "Genera con una risoluzione iniziale inferiore, esegue l'ampliamento alla risoluzione di base, quindi esegue Immagine a Immagine.",
|
||||
"metadata": {
|
||||
"strength": "Forza della Correzione Alta Risoluzione",
|
||||
"enabled": "Correzione Alta Risoluzione Abilitata",
|
||||
"method": "Metodo della Correzione Alta Risoluzione"
|
||||
},
|
||||
"hrf": "Correzione Alta Risoluzione",
|
||||
"hrfStrength": "Forza della Correzione Alta Risoluzione",
|
||||
"strengthTooltip": "Valori più bassi comportano meno dettagli, il che può ridurre potenziali artefatti."
|
||||
}
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff.