Merge branch 'main' into sdxl-convert-safetensors

Brandon authored 2024-02-02 10:10:49 -05:00, committed by GitHub
35 changed files with 437 additions and 384 deletions

View File

@@ -2,6 +2,7 @@
from logging import Logger
from invokeai.app.services.item_storage.item_storage_memory import ItemStorageMemory
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
from invokeai.backend.model_manager.metadata import ModelMetadataStore
from invokeai.backend.util.logging import InvokeAILogger
@@ -22,7 +23,6 @@ from ..services.invocation_queue.invocation_queue_memory import MemoryInvocation
from ..services.invocation_services import InvocationServices
from ..services.invocation_stats.invocation_stats_default import InvocationStatsService
from ..services.invoker import Invoker
from ..services.item_storage.item_storage_sqlite import SqliteItemStorage
from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage
from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage
from ..services.model_install import ModelInstallService
@@ -80,7 +80,7 @@ class ApiDependencies:
board_records = SqliteBoardRecordStorage(db=db)
boards = BoardService()
events = FastAPIEventService(event_handler_id)
graph_execution_manager = SqliteItemStorage[GraphExecutionState](db=db, table_name="graph_executions")
graph_execution_manager = ItemStorageMemory[GraphExecutionState]()
image_records = SqliteImageRecordStorage(db=db)
images = ImageService()
invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)

View File

@@ -274,7 +274,7 @@ class InvokeAIAppConfig(InvokeAISettings):
attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation)
attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation)
force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation)
png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
png_compress_level : int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
# QUEUE
max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue)
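
Note on the `png_compress_level` default change above (6 to 1): PNG encoding is lossless at every level, so the setting only trades encode speed against file size. A minimal PIL sketch (file names and image contents are illustrative):

```python
from PIL import Image

img = Image.new("RGB", (512, 512), color="gray")

# Every compress_level is lossless; lower is faster but yields larger files.
img.save("fast.png", compress_level=1)   # the new default: fastest encode
img.save("small.png", compress_level=9)  # slowest encode, smallest file
```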

View File

@@ -1,10 +1,8 @@
from abc import ABC, abstractmethod
from typing import Callable, Generic, Optional, TypeVar
from typing import Callable, Generic, TypeVar
from pydantic import BaseModel
from invokeai.app.services.shared.pagination import PaginatedResults
T = TypeVar("T", bound=BaseModel)
@@ -25,23 +23,14 @@ class ItemStorageABC(ABC, Generic[T]):
"""Gets the item, parsing it into a Pydantic model"""
pass
@abstractmethod
def get_raw(self, item_id: str) -> Optional[str]:
"""Gets the raw item as a string, skipping Pydantic parsing"""
pass
@abstractmethod
def set(self, item: T) -> None:
"""Sets the item"""
pass
@abstractmethod
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
"""Gets a paginated list of items"""
pass
@abstractmethod
def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
def delete(self, item_id: str) -> None:
"""Deletes the item"""
pass
def on_changed(self, on_changed: Callable[[T], None]) -> None:

View File

@@ -0,0 +1,50 @@
from collections import OrderedDict
from contextlib import suppress
from typing import Generic, Optional, TypeVar
from pydantic import BaseModel
from invokeai.app.services.item_storage.item_storage_base import ItemStorageABC
T = TypeVar("T", bound=BaseModel)
class ItemStorageMemory(ItemStorageABC, Generic[T]):
"""
Provides a simple in-memory storage for items, with a maximum number of items to store.
The storage uses the LRU strategy to evict items from storage when the max has been reached.
"""
def __init__(self, id_field: str = "id", max_items: int = 10) -> None:
super().__init__()
if max_items < 1:
raise ValueError("max_items must be at least 1")
if not id_field:
raise ValueError("id_field must not be empty")
self._id_field = id_field
self._items: OrderedDict[str, T] = OrderedDict()
self._max_items = max_items
def get(self, item_id: str) -> Optional[T]:
# If the item exists, move it to the end of the OrderedDict.
item = self._items.pop(item_id, None)
if item is not None:
self._items[item_id] = item
return self._items.get(item_id)
def set(self, item: T) -> None:
item_id = getattr(item, self._id_field)
if item_id in self._items:
# If item already exists, remove it and add it to the end
self._items.pop(item_id)
elif len(self._items) >= self._max_items:
# If cache is full, evict the least recently used item
self._items.popitem(last=False)
self._items[item_id] = item
self._on_changed(item)
def delete(self, item_id: str) -> None:
# This is a no-op if the item doesn't exist.
with suppress(KeyError):
del self._items[item_id]
self._on_deleted(item_id)
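
A quick sketch of the LRU behavior documented in the class docstring above; the `Widget` model and IDs are hypothetical, and `ItemStorageMemory` is the class defined in this file:

```python
from pydantic import BaseModel


class Widget(BaseModel):
    id: str


storage = ItemStorageMemory[Widget](max_items=2)
storage.set(Widget(id="a"))
storage.set(Widget(id="b"))
storage.get("a")             # touching "a" leaves "b" as least recently used
storage.set(Widget(id="c"))  # at capacity: "b" is evicted
assert storage.get("b") is None
assert storage.get("a") is not None
```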

View File

@@ -1,147 +0,0 @@
import sqlite3
import threading
from typing import Generic, Optional, TypeVar, get_args
from pydantic import BaseModel, TypeAdapter
from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
from .item_storage_base import ItemStorageABC
T = TypeVar("T", bound=BaseModel)
class SqliteItemStorage(ItemStorageABC, Generic[T]):
_table_name: str
_conn: sqlite3.Connection
_cursor: sqlite3.Cursor
_id_field: str
_lock: threading.RLock
_validator: Optional[TypeAdapter[T]]
def __init__(self, db: SqliteDatabase, table_name: str, id_field: str = "id"):
super().__init__()
self._lock = db.lock
self._conn = db.conn
self._table_name = table_name
self._id_field = id_field # TODO: validate that T has this field
self._cursor = self._conn.cursor()
self._validator: Optional[TypeAdapter[T]] = None
self._create_table()
def _create_table(self):
try:
self._lock.acquire()
self._cursor.execute(
f"""CREATE TABLE IF NOT EXISTS {self._table_name} (
item TEXT,
id TEXT GENERATED ALWAYS AS (json_extract(item, '$.{self._id_field}')) VIRTUAL NOT NULL);"""
)
self._cursor.execute(
f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);"""
)
finally:
self._lock.release()
def _parse_item(self, item: str) -> T:
if self._validator is None:
"""
We don't get access to `__orig_class__` in `__init__()`, and we need this before start(), so
we can create it when it is first needed instead.
__orig_class__ is technically an implementation detail of the typing module, not a supported API
"""
self._validator = TypeAdapter(get_args(self.__orig_class__)[0]) # type: ignore [attr-defined]
return self._validator.validate_json(item)
def set(self, item: T):
try:
self._lock.acquire()
self._cursor.execute(
f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""",
(item.model_dump_json(warnings=False, exclude_none=True),),
)
self._conn.commit()
finally:
self._lock.release()
self._on_changed(item)
def get(self, id: str) -> Optional[T]:
try:
self._lock.acquire()
self._cursor.execute(f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),))
result = self._cursor.fetchone()
finally:
self._lock.release()
if not result:
return None
return self._parse_item(result[0])
def get_raw(self, id: str) -> Optional[str]:
try:
self._lock.acquire()
self._cursor.execute(f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),))
result = self._cursor.fetchone()
finally:
self._lock.release()
if not result:
return None
return result[0]
def delete(self, id: str):
try:
self._lock.acquire()
self._cursor.execute(f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),))
self._conn.commit()
finally:
self._lock.release()
self._on_deleted(id)
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""",
(per_page, page * per_page),
)
result = self._cursor.fetchall()
items = [self._parse_item(r[0]) for r in result]
self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
count = self._cursor.fetchone()[0]
finally:
self._lock.release()
pageCount = int(count / per_page) + 1
return PaginatedResults[T](items=items, page=page, pages=pageCount, per_page=per_page, total=count)
def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""",
(f"%{query}%", per_page, page * per_page),
)
result = self._cursor.fetchall()
items = [self._parse_item(r[0]) for r in result]
self._cursor.execute(
f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",
(f"%{query}%",),
)
count = self._cursor.fetchone()[0]
finally:
self._lock.release()
pageCount = int(count / per_page) + 1
return PaginatedResults[T](items=items, page=page, pages=pageCount, per_page=per_page, total=count)
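
The deleted class above built its `TypeAdapter` lazily because `__orig_class__` is only attached to an instance after `__init__` returns. A standalone sketch of that trick (the `Item` and `Storage` names are illustrative):

```python
from typing import Generic, TypeVar, get_args

from pydantic import BaseModel, TypeAdapter

T = TypeVar("T", bound=BaseModel)


class Item(BaseModel):
    id: str


class Storage(Generic[T]):
    def parse(self, raw: str) -> T:
        # __orig_class__ is unavailable inside __init__, so resolve the
        # concrete type argument on first use instead.
        validator = TypeAdapter(get_args(self.__orig_class__)[0])  # type: ignore[attr-defined]
        return validator.validate_json(raw)


storage = Storage[Item]()
print(storage.parse('{"id": "abc"}'))  # -> id='abc'
```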

View File

@@ -7,6 +7,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_1 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import build_migration_2
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_4 import build_migration_4
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_5 import build_migration_5
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
@@ -31,6 +32,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
migrator.register_migration(build_migration_3(app_config=config, logger=logger))
migrator.register_migration(build_migration_4())
migrator.register_migration(build_migration_5())
migrator.run_migrations()
return db

View File

@@ -0,0 +1,34 @@
import sqlite3
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
class Migration5Callback:
def __call__(self, cursor: sqlite3.Cursor) -> None:
self._drop_graph_executions(cursor)
def _drop_graph_executions(self, cursor: sqlite3.Cursor) -> None:
"""Drops the `graph_executions` table."""
cursor.execute(
"""--sql
DROP TABLE IF EXISTS graph_executions;
"""
)
def build_migration_5() -> Migration:
"""
Build the migration from database version 4 to 5.
Introduced in v3.6.3, this migration:
- Drops the `graph_executions` table. We are able to do this because we are moving the graph storage
to be purely in-memory.
"""
migration_5 = Migration(
from_version=4,
to_version=5,
callback=Migration5Callback(),
)
return migration_5
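
Since the callback only issues `DROP TABLE IF EXISTS`, the migration is idempotent and safe on databases that never had the table. A minimal sketch exercising the callback against an in-memory database (assumes `Migration5Callback` as defined above):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("CREATE TABLE graph_executions (item TEXT)")

Migration5Callback()(cursor)  # drops the table; re-running is a no-op

cursor.execute("SELECT name FROM sqlite_master WHERE name = 'graph_executions'")
assert cursor.fetchone() is None
```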

View File

@@ -12,7 +12,7 @@ import psutil
import torch
from compel.cross_attention_control import Arguments
from diffusers.models.attention_processor import Attention, AttentionProcessor, AttnProcessor, SlicedAttnProcessor
from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from torch import nn
import invokeai.backend.util.logging as logger
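
The import change above tracks the diffusers reorganization that moved the UNet modules into a `unets` subpackage. Code that must support both layouts could hedge with a fallback import (a sketch; the exact version boundary is approximate):

```python
try:
    # newer diffusers: UNet modules live under diffusers.models.unets
    from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
except ImportError:
    # older diffusers exposed them directly under diffusers.models
    from diffusers.models.unet_2d_condition import UNet2DConditionModel
```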

View File

@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
import diffusers
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import FromOriginalControlnetMixin
from diffusers.loaders import FromOriginalControlNetMixin
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
from diffusers.models.embeddings import (
@@ -14,8 +14,13 @@ from diffusers.models.embeddings import (
Timesteps,
)
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, DownBlock2D, UNetMidBlock2DCrossAttn, get_down_block
from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers.models.unets.unet_2d_blocks import (
CrossAttnDownBlock2D,
DownBlock2D,
UNetMidBlock2DCrossAttn,
get_down_block,
)
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from torch import nn
from invokeai.backend.util.logging import InvokeAILogger
@@ -27,7 +32,7 @@ from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger(__name__)
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin):
"""
A ControlNet model.

View File

@@ -5,14 +5,14 @@ pip install <path_to_git_source>.
import os
import platform
from distutils.version import LooseVersion
from importlib.metadata import PackageNotFoundError, distribution, distributions
import pkg_resources
import psutil
import requests
from rich import box, print
from rich.console import Console, group
from rich.panel import Panel
from rich.prompt import Prompt
from rich.prompt import Confirm, Prompt
from rich.style import Style
from invokeai.version import __version__
@@ -61,6 +61,65 @@ def get_pypi_versions():
return latest_version, latest_release_candidate, versions
def get_torch_extra_index_url() -> str | None:
"""
Determine torch wheel source URL and optional modules based on the user's OS.
"""
resolved_url = None
# In all other cases (like MacOS (MPS) or Linux+CUDA), there is no need to specify the extra index URL.
torch_package_urls = {
"windows_cuda": "https://download.pytorch.org/whl/cu121",
"linux_rocm": "https://download.pytorch.org/whl/rocm5.6",
"linux_cpu": "https://download.pytorch.org/whl/cpu",
}
nvidia_packages_present = (
len([d.metadata["Name"] for d in distributions() if d.metadata["Name"].startswith("nvidia")]) > 0
)
device = "cuda" if nvidia_packages_present else None
manual_gpu_selection_prompt = (
"[bold]We tried and failed to guess your GPU capabilities[/] :thinking_face:. Please select the GPU type:"
)
if OS == "Linux":
if not device:
# do we even need to offer a CPU-only install option?
print(manual_gpu_selection_prompt)
print("1: NVIDIA (CUDA)")
print("2: AMD (ROCm)")
print("3: No GPU - CPU only")
answer = Prompt.ask("Choice:", choices=["1", "2", "3"], default="1")
match answer:
case "1":
device = "cuda"
case "2":
device = "rocm"
case "3":
device = "cpu"
if device != "cuda":
resolved_url = torch_package_urls[f"linux_{device}"]
if OS == "Windows":
if not device:
print(manual_gpu_selection_prompt)
print("1: NVIDIA (CUDA)")
print("2: No GPU - CPU only")
answer = Prompt.ask("Your choice:", choices=["1", "2"], default="1")
match answer:
case "1":
device = "cuda"
case "2":
device = "cpu"
if device == "cuda":
resolved_url = torch_package_urls[f"windows_{device}"]
return resolved_url
def welcome(latest_release: str, latest_prerelease: str):
@group()
def text():
@@ -89,12 +148,11 @@ def welcome(latest_release: str, latest_prerelease: str):
def get_extras():
extras = ""
try:
_ = pkg_resources.get_distribution("xformers")
distribution("xformers")
extras = "[xformers]"
except pkg_resources.DistributionNotFound:
pass
except PackageNotFoundError:
extras = ""
return extras
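
The `get_extras()` change above swaps the deprecated `pkg_resources` API for `importlib.metadata`. The same pattern in isolation (the package name is just an example):

```python
from importlib.metadata import PackageNotFoundError, distribution, version


def is_installed(package: str) -> bool:
    # distribution() replaces pkg_resources.get_distribution();
    # PackageNotFoundError replaces pkg_resources.DistributionNotFound.
    try:
        distribution(package)
        return True
    except PackageNotFoundError:
        return False


if is_installed("xformers"):
    print("xformers", version("xformers"))
```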
@@ -125,8 +183,22 @@ def main():
extras = get_extras()
console.line()
force_reinstall = Confirm.ask(
"[bold]Force reinstallation of all dependencies?[/] This [i]may[/] help fix a broken upgrade, but is usually not necessary.",
default=False,
)
console.line()
flags = []
if (index_url := get_torch_extra_index_url()) is not None:
flags.append(f"--extra-index-url {index_url}")
if force_reinstall:
flags.append("--force-reinstall")
flags = " ".join(flags)
print(f":crossed_fingers: Upgrading to [yellow]{release}[/yellow]")
cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade'
cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade {flags}'
print("")
print("")

View File

@@ -1,9 +1,26 @@
module.exports = {
extends: ['@invoke-ai/eslint-config-react'],
plugins: ['path', 'i18next'],
rules: {
// TODO(psyche): Enable this rule. Requires no default exports in components - many changes.
'react-refresh/only-export-components': 'off',
// TODO(psyche): Enable this rule. Requires a lot of eslint-disable-next-line comments.
'@typescript-eslint/consistent-type-assertions': 'off',
// https://github.com/qdanik/eslint-plugin-path
'path/no-relative-imports': ['error', { maxDepth: 0 }],
// https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
'i18next/no-literal-string': 'error',
},
overrides: [
/**
* Overrides for stories
*/
{
files: ['*.stories.tsx'],
rules: {
// We may not have i18n available in stories.
'i18next/no-literal-string': 'off',
},
},
],
};

View File

@@ -111,7 +111,7 @@
},
"devDependencies": {
"@arthurgeron/eslint-plugin-react-usememo": "^2.2.3",
"@invoke-ai/eslint-config-react": "^0.0.12",
"@invoke-ai/eslint-config-react": "^0.0.13",
"@invoke-ai/prettier-config-react": "^0.0.6",
"@storybook/addon-docs": "^7.6.10",
"@storybook/addon-essentials": "^7.6.10",

View File

@@ -178,8 +178,8 @@ devDependencies:
specifier: ^2.2.3
version: 2.2.3
'@invoke-ai/eslint-config-react':
specifier: ^0.0.12
version: 0.0.12(@typescript-eslint/eslint-plugin@6.19.0)(@typescript-eslint/parser@6.19.0)(eslint-config-prettier@9.1.0)(eslint-plugin-i18next@6.0.3)(eslint-plugin-import@2.29.1)(eslint-plugin-react-hooks@4.6.0)(eslint-plugin-react-refresh@0.4.5)(eslint-plugin-react@7.33.2)(eslint-plugin-simple-import-sort@10.0.0)(eslint-plugin-storybook@0.6.15)(eslint-plugin-unused-imports@3.0.0)(eslint@8.56.0)
specifier: ^0.0.13
version: 0.0.13(@typescript-eslint/eslint-plugin@6.19.0)(@typescript-eslint/parser@6.19.0)(eslint-config-prettier@9.1.0)(eslint-plugin-import@2.29.1)(eslint-plugin-react-hooks@4.6.0)(eslint-plugin-react-refresh@0.4.5)(eslint-plugin-react@7.33.2)(eslint-plugin-simple-import-sort@10.0.0)(eslint-plugin-storybook@0.6.15)(eslint-plugin-unused-imports@3.0.0)(eslint@8.56.0)
'@invoke-ai/prettier-config-react':
specifier: ^0.0.6
version: 0.0.6(prettier@3.2.4)
@@ -3551,14 +3551,13 @@ packages:
'@swc/helpers': 0.5.3
dev: false
/@invoke-ai/eslint-config-react@0.0.12(@typescript-eslint/eslint-plugin@6.19.0)(@typescript-eslint/parser@6.19.0)(eslint-config-prettier@9.1.0)(eslint-plugin-i18next@6.0.3)(eslint-plugin-import@2.29.1)(eslint-plugin-react-hooks@4.6.0)(eslint-plugin-react-refresh@0.4.5)(eslint-plugin-react@7.33.2)(eslint-plugin-simple-import-sort@10.0.0)(eslint-plugin-storybook@0.6.15)(eslint-plugin-unused-imports@3.0.0)(eslint@8.56.0):
resolution: {integrity: sha512-6IXENcSa7vv+YPO/TYmC8qXXJFQt3JqDY+Yc1AMf4/d3b3o+CA7/mqepXIhydG9Gqo5jTRknXdDmjSaLxgCJ/g==}
/@invoke-ai/eslint-config-react@0.0.13(@typescript-eslint/eslint-plugin@6.19.0)(@typescript-eslint/parser@6.19.0)(eslint-config-prettier@9.1.0)(eslint-plugin-import@2.29.1)(eslint-plugin-react-hooks@4.6.0)(eslint-plugin-react-refresh@0.4.5)(eslint-plugin-react@7.33.2)(eslint-plugin-simple-import-sort@10.0.0)(eslint-plugin-storybook@0.6.15)(eslint-plugin-unused-imports@3.0.0)(eslint@8.56.0):
resolution: {integrity: sha512-dfo9k+wPHdvpy1z6ABoYXR/Ttzs1FAnbC46ttIxVhZuqDq8K5cLWznivrOfl7f0hJb8Cb8HiuQb4pHDxhHBDqA==}
peerDependencies:
'@typescript-eslint/eslint-plugin': ^6.19.0
'@typescript-eslint/parser': ^6.19.0
eslint: ^8.56.0
eslint-config-prettier: ^9.1.0
eslint-plugin-i18next: ^6.0.3
eslint-plugin-import: ^2.29.1
eslint-plugin-react: ^7.33.2
eslint-plugin-react-hooks: ^4.6.0
@@ -3571,7 +3570,6 @@ packages:
'@typescript-eslint/parser': 6.19.0(eslint@8.56.0)(typescript@5.3.3)
eslint: 8.56.0
eslint-config-prettier: 9.1.0(eslint@8.56.0)
eslint-plugin-i18next: 6.0.3
eslint-plugin-import: 2.29.1(@typescript-eslint/parser@6.19.0)(eslint@8.56.0)
eslint-plugin-react: 7.33.2(eslint@8.56.0)
eslint-plugin-react-hooks: 4.6.0(eslint@8.56.0)

View File

@@ -98,7 +98,7 @@
"outputs": "Ausgabe",
"data": "Daten",
"safetensors": "Safetensors",
"outpaint": "outpaint",
"outpaint": "Ausmalen",
"details": "Details",
"format": "Format",
"unknown": "Unbekannt",
@@ -131,7 +131,8 @@
"localSystem": "Lokales System",
"orderBy": "Ordnen nach",
"saveAs": "Speicher als",
"updated": "Aktualisiert"
"updated": "Aktualisiert",
"copy": "Kopieren"
},
"gallery": {
"generations": "Erzeugungen",
@@ -161,7 +162,13 @@
"currentlyInUse": "Dieses Bild wird derzeit in den folgenden Funktionen verwendet:",
"deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen",
"noImageSelected": "Kein Bild ausgewählt"
"noImageSelected": "Kein Bild ausgewählt",
"problemDeletingImagesDesc": "Eins oder mehr Bilder könnten nicht gelöscht werden",
"starImage": "Bild markieren",
"assets": "Ressourcen",
"unstarImage": "Markierung Entfernen",
"image": "Bild",
"deleteSelection": "Lösche markierte"
},
"hotkeys": {
"keyboardShortcuts": "Tastenkürzel",
@@ -365,7 +372,13 @@
"addNodes": {
"title": "Knotenpunkt hinzufügen",
"desc": "Öffnet das Menü zum Hinzufügen von Knoten"
}
},
"cancelAndClear": {
"title": "Abbruch und leeren"
},
"noHotkeysFound": "Kein Hotkey gefunden",
"searchHotkeys": "Hotkeys durchsuchen",
"clearSearch": "Suche leeren"
},
"modelManager": {
"modelAdded": "Model hinzugefügt",
@@ -832,7 +845,13 @@
"hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
"scribble": "Scribble",
"maxFaces": "Maximal Anzahl Gesichter",
"resizeSimple": "Größe ändern (einfach)"
"resizeSimple": "Größe ändern (einfach)",
"large": "Groß",
"modelSize": "Modell Größe",
"small": "Klein",
"base": "Basis",
"depthAnything": "Depth Anything",
"depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth Anything-Technik"
},
"queue": {
"status": "Status",
@@ -865,7 +884,7 @@
"item": "Auftrag",
"notReady": "Warteschlange noch nicht bereit",
"batchValues": "Stapel Werte",
"queueCountPrediction": "{{predicted}} zur Warteschlange hinzufügen",
"queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
"queuedCount": "{{pending}} wartenden Elemente",
"clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
"completedIn": "Fertig in",
@@ -887,7 +906,9 @@
"back": "Hinten",
"resumeSucceeded": "Prozessor wieder aufgenommen",
"resumeTooltip": "Prozessor wieder aufnehmen",
"time": "Zeit"
"time": "Zeit",
"batchQueuedDesc_one": "{{count}} Eintrage ans {{direction}} der Wartschlange hinzugefügt",
"batchQueuedDesc_other": "{{count}} Einträge ans {{direction}} der Wartschlange hinzugefügt"
},
"metadata": {
"negativePrompt": "Negativ Beschreibung",
@@ -956,7 +977,8 @@
"enable": "Aktivieren",
"clear": "Leeren",
"maxCacheSize": "Maximale Cache Größe",
"cacheSize": "Cache Größe"
"cacheSize": "Cache Größe",
"useCache": "Benutze Cache"
},
"embedding": {
"noMatchingEmbedding": "Keine passenden Embeddings",
@@ -1042,7 +1064,8 @@
},
"compositing": {
"coherenceTab": "Kohärenzpass",
"infillTab": "Füllung"
"infillTab": "Füllung",
"title": "Compositing"
}
}
}

View File

@@ -1376,6 +1376,7 @@
"problemCopyingCanvasDesc": "Unable to export base layer",
"problemCopyingImage": "Unable to Copy Image",
"problemCopyingImageLink": "Unable to Copy Image Link",
"problemDownloadingImage": "Unable to Download Image",
"problemDownloadingCanvas": "Problem Downloading Canvas",
"problemDownloadingCanvasDesc": "Unable to export base layer",
"problemImportingMask": "Problem Importing Mask",

View File

@@ -0,0 +1,43 @@
import { useAppToaster } from 'app/components/Toaster';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { useImageUrlToBlob } from './useImageUrlToBlob';
export const useDownloadImage = () => {
const toaster = useAppToaster();
const { t } = useTranslation();
const imageUrlToBlob = useImageUrlToBlob();
const downloadImage = useCallback(
async (image_url: string, image_name: string) => {
try {
const blob = await imageUrlToBlob(image_url);
if (!blob) {
throw new Error('Unable to create Blob');
}
const url = window.URL.createObjectURL(blob);
const a = document.createElement('a');
a.style.display = 'none';
a.href = url;
a.download = image_name;
document.body.appendChild(a);
a.click();
window.URL.revokeObjectURL(url);
} catch (err) {
toaster({
title: t('toast.problemDownloadingImage'),
description: String(err),
status: 'error',
duration: 2500,
isClosable: true,
});
}
},
[t, toaster, imageUrlToBlob]
);
return { downloadImage };
};

View File

@@ -4,6 +4,7 @@ import { useAppToaster } from 'app/components/Toaster';
import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useCopyImageToClipboard } from 'common/hooks/useCopyImageToClipboard';
import { useDownloadImage } from 'common/hooks/useDownloadImage';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import { imagesToChangeSelected, isModalOpenChanged } from 'features/changeBoardModal/store/slice';
import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice';
@@ -47,7 +48,7 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
const toaster = useAppToaster();
const isCanvasEnabled = useFeatureStatus('unifiedCanvas').isFeatureEnabled;
const customStarUi = useStore($customStarUI);
const { downloadImage } = useDownloadImage();
const { metadata, isLoading: isLoadingMetadata } = useDebouncedMetadata(imageDTO?.image_name);
const { getAndLoadEmbeddedWorkflow, getAndLoadEmbeddedWorkflowResult } = useGetAndLoadEmbeddedWorkflow({});
@@ -143,6 +144,10 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
}
}, [unstarImages, imageDTO]);
const handleDownloadImage = useCallback(() => {
downloadImage(imageDTO.image_url, imageDTO.image_name);
}, [downloadImage, imageDTO.image_name, imageDTO.image_url]);
return (
<>
<MenuItem as="a" href={imageDTO.image_url} target="_blank" icon={<PiShareFatBold />}>
@@ -153,14 +158,7 @@
{t('parameters.copyImage')}
</MenuItem>
)}
<MenuItem
as="a"
download={true}
href={imageDTO.image_url}
target="_blank"
icon={<PiDownloadSimpleBold />}
w="100%"
>
<MenuItem icon={<PiDownloadSimpleBold />} onClickCapture={handleDownloadImage}>
{t('parameters.downloadImage')}
</MenuItem>
<MenuDivider />

View File

@@ -1,13 +1,12 @@
import { ConfirmationAlertDialog, Flex, IconButton, Text, useDisclosure } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
import { addToast } from 'features/system/store/systemSlice';
import { makeToast } from 'features/system/util/makeToast';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleFill } from 'react-icons/pi';
import { addToast } from '../../../../../system/store/systemSlice';
import { makeToast } from '../../../../../system/util/makeToast';
import { nodeEditorReset } from '../../../../store/nodesSlice';
const ClearFlowButton = () => {
const dispatch = useAppDispatch();
const { t } = useTranslation();

View File

@@ -1,13 +1,12 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { $builtWorkflow } from 'features/nodes/hooks/useWorkflowWatcher';
import { useSaveWorkflowAsDialog } from 'features/workflowLibrary/components/SaveWorkflowAsDialog/useSaveWorkflowAsDialog';
import { isWorkflowWithID, useSaveLibraryWorkflow } from 'features/workflowLibrary/hooks/useSaveWorkflow';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFloppyDiskBold } from 'react-icons/pi';
import { isWorkflowWithID, useSaveLibraryWorkflow } from '../../../../../workflowLibrary/hooks/useSaveWorkflow';
import { $builtWorkflow } from '../../../../hooks/useWorkflowWatcher';
const SaveWorkflowButton = () => {
const { t } = useTranslation();
const isTouched = useAppSelector((s) => s.workflow.isTouched);

View File

@@ -26,7 +26,7 @@ import { ImageSizeLinear } from './ImageSizeLinear';
const selector = createMemoizedSelector(
[selectGenerationSlice, selectCanvasSlice, selectHrfSlice, activeTabNameSelector],
(generation, canvas, hrf, activeTabName) => {
const { shouldRandomizeSeed } = generation;
const { shouldRandomizeSeed, model } = generation;
const { hrfEnabled } = hrf;
const badges: string[] = [];
@@ -56,7 +56,7 @@ const selector = createMemoizedSelector(
if (hrfEnabled) {
badges.push('HiRes Fix');
}
return { badges, activeTabName };
return { badges, activeTabName, isSDXL: model?.base_model === 'sdxl' };
}
);
@@ -66,7 +66,7 @@
export const ImageSettingsAccordion = memo(() => {
const { t } = useTranslation();
const { badges, activeTabName } = useAppSelector(selector);
const { badges, activeTabName, isSDXL } = useAppSelector(selector);
const { isOpen: isOpenAccordion, onToggle: onToggleAccordion } = useStandaloneAccordionToggle({
id: 'image-settings',
defaultIsOpen: true,
@@ -94,7 +94,7 @@
</Flex>
{(activeTabName === 'img2img' || activeTabName === 'unifiedCanvas') && <ImageToImageStrength />}
{activeTabName === 'img2img' && <ImageToImageFit />}
{activeTabName === 'txt2img' && <HrfSettings />}
{activeTabName === 'txt2img' && !isSDXL && <HrfSettings />}
{activeTabName === 'unifiedCanvas' && (
<>
<ParamScaleBeforeProcessing />

View File

@@ -13,14 +13,13 @@ import {
Input,
} from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { $workflowCategories } from 'app/store/nanostores/workflowCategories';
import { useSaveWorkflowAsDialog } from 'features/workflowLibrary/components/SaveWorkflowAsDialog/useSaveWorkflowAsDialog';
import { useSaveWorkflowAs } from 'features/workflowLibrary/hooks/useSaveWorkflowAs';
import { t } from 'i18next';
import type { ChangeEvent } from 'react';
import { useCallback, useRef } from 'react';
import { $workflowCategories } from '../../../../app/store/nanostores/workflowCategories';
import { useSaveWorkflowAs } from '../../hooks/useSaveWorkflowAs';
export const SaveWorkflowAsDialog = () => {
const { isOpen, onClose, workflowName, setWorkflowName, shouldSaveToProject, setShouldSaveToProject } =
useSaveWorkflowAsDialog();

View File

@@ -1,13 +1,12 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { $builtWorkflow } from 'features/nodes/hooks/useWorkflowWatcher';
import { useSaveWorkflowAsDialog } from 'features/workflowLibrary/components/SaveWorkflowAsDialog/useSaveWorkflowAsDialog';
import { isWorkflowWithID, useSaveLibraryWorkflow } from 'features/workflowLibrary/hooks/useSaveWorkflow';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFloppyDiskBold } from 'react-icons/pi';
import { useAppSelector } from '../../../../app/store/storeHooks';
import { $builtWorkflow } from '../../../nodes/hooks/useWorkflowWatcher';
const SaveWorkflowMenuItem = () => {
const { t } = useTranslation();
const { saveWorkflow } = useSaveLibraryWorkflow();

View File

@@ -8,12 +8,11 @@ import {
workflowNameChanged,
workflowSaved,
} from 'features/nodes/store/workflowSlice';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import { useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { useCreateWorkflowMutation, workflowsApi } from 'services/api/endpoints/workflows';
import type { WorkflowCategory } from '../../nodes/types/workflow';
type SaveWorkflowAsArg = {
name: string;
category: WorkflowCategory;