diff --git a/Makefile b/Makefile
index 24722e2264..10d7a257c5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,20 @@
 # simple Makefile with scripts that are otherwise hard to remember
 # to use, run from the repo root `make <command>`
 
+default: help
+
+help:
+	@echo Developer commands:
+	@echo
+	@echo "ruff            Run ruff, fixing any safely-fixable errors and formatting"
+	@echo "ruff-unsafe     Run ruff, fixing all fixable errors and formatting"
+	@echo "mypy            Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors"
+	@echo "mypy-all        Run mypy ignoring the config in pyproject.toml but still ignoring missing imports"
+	@echo "frontend-build  Build the frontend in order to run on localhost:9090"
+	@echo "frontend-dev    Run the frontend in developer mode on localhost:5173"
+	@echo "installer-zip   Build the installer .zip file for the current version"
+	@echo "tag-release     Tag the GitHub repository with the current version (use at release time only!)"
+
 # Runs ruff, fixing any safely-fixable errors and formatting
 ruff:
 	ruff check . --fix
@@ -18,4 +32,21 @@ mypy:
 # Runs mypy, ignoring the config in pyproject.toml but still ignoring missing (untyped) imports
 # (many files are ignored by the config, so this is useful for checking all files)
 mypy-all:
-	mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports
\ No newline at end of file
+	mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports
+
+# Build the frontend
+frontend-build:
+	cd invokeai/frontend/web && pnpm build
+
+# Run the frontend in dev mode
+frontend-dev:
+	cd invokeai/frontend/web && pnpm dev
+
+# Installer zip file
+installer-zip:
+	cd installer && ./create_installer.sh
+
+# Tag the release
+tag-release:
+	cd installer && ./tag_release.sh
+
diff --git a/docs/features/CONFIGURATION.md b/docs/features/CONFIGURATION.md
index f83caf522d..f98037d968 100644
--- a/docs/features/CONFIGURATION.md
+++ b/docs/features/CONFIGURATION.md
@@ -154,14 +154,16 @@ groups in `invokeai.yaml`:
 
 ### Web Server
 
-| Setting | Default Value | Description |
-|----------|----------------|--------------|
-| `host` | `localhost` | Name or IP address of the network interface that the web server will listen on |
-| `port` | `9090` | Network port number that the web server will listen on |
-| `allow_origins` | `[]` | A list of host names or IP addresses that are allowed to connect to the InvokeAI API in the format `['host1','host2',...]` |
-| `allow_credentials` | `true` | Require credentials for a foreign host to access the InvokeAI API (don't change this) |
-| `allow_methods` | `*` | List of HTTP methods ("GET", "POST") that the web server is allowed to use when accessing the API |
-| `allow_headers` | `*` | List of HTTP headers that the web server will accept when accessing the API |
+| Setting             | Default Value | Description                                                                                                                 |
+|---------------------|---------------|-----------------------------------------------------------------------------------------------------------------------------|
+| `host`              | `localhost`   | Name or IP address of the network interface that the web server will listen on                                             |
+| `port`              | `9090`        | Network port number that the web server will listen on                                                                     |
+| `allow_origins`     | `[]`          | A list of host names or IP addresses that are allowed to connect to the InvokeAI API in the format `['host1','host2',...]` |
+| `allow_credentials` | `true`        | Require credentials for a foreign host to access the InvokeAI API (don't change this)                                      |
+| `allow_methods`     | `*`           | List of HTTP methods ("GET", "POST") that the web server is allowed to use when accessing the API                          |
+| `allow_headers`     | `*`           | List of HTTP headers that the web server will accept when accessing the API                                                |
+| `ssl_certfile`      | `null`        | Path to an SSL certificate file, used to enable HTTPS                                                                      |
+| `ssl_keyfile`       | `null`        | Path to an SSL keyfile, if the key is not included in the certificate file                                                 |
 
 The documentation for InvokeAI's API can be accessed by browsing to the following URL: [http://localhost:9090/docs].
 
diff --git a/installer/create_installer.sh b/installer/create_installer.sh
index ef489af751..ed8cbb0d0a 100755
--- a/installer/create_installer.sh
+++ b/installer/create_installer.sh
@@ -13,14 +13,6 @@ function is_bin_in_path {
     builtin type -P "$1" &>/dev/null
 }
 
-function does_tag_exist {
-    git rev-parse --quiet --verify "refs/tags/$1" >/dev/null
-}
-
-function git_show_ref {
-    git show-ref --dereference $1 --abbrev 7
-}
-
 function git_show {
     git show -s --format='%h %s' $1
 }
@@ -53,50 +45,11 @@ VERSION=$(
 )
 PATCH=""
 VERSION="v${VERSION}${PATCH}"
-LATEST_TAG="v3-latest"
-
-echo "Building installer for version $VERSION..."
-echo
-
-if does_tag_exist $VERSION; then
-    echo -e "${BCYAN}${VERSION}${RESET} already exists:"
-    git_show_ref tags/$VERSION
-    echo
-fi
-if does_tag_exist $LATEST_TAG; then
-    echo -e "${BCYAN}${LATEST_TAG}${RESET} already exists:"
-    git_show_ref tags/$LATEST_TAG
-    echo
-fi
 
 echo -e "${BGREEN}HEAD${RESET}:"
 git_show
 echo
 
-echo -e -n "Create tags ${BCYAN}${VERSION}${RESET} and ${BCYAN}${LATEST_TAG}${RESET} @ ${BGREEN}HEAD${RESET}, ${RED}deleting existing tags on remote${RESET}? "
-read -e -p 'y/n [n]: ' input
-RESPONSE=${input:='n'}
-if [ "$RESPONSE" == 'y' ]; then
-    echo
-    echo -e "Deleting ${BCYAN}${VERSION}${RESET} tag on remote..."
-    git push origin :refs/tags/$VERSION
-
-    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${VERSION}${RESET} locally..."
-    if ! git tag -fa $VERSION; then
-        echo "Existing/invalid tag"
-        exit -1
-    fi
-
-    echo -e "Deleting ${BCYAN}${LATEST_TAG}${RESET} tag on remote..."
-    git push origin :refs/tags/$LATEST_TAG
-
-    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${LATEST_TAG}${RESET} locally..."
-    git tag -fa $LATEST_TAG
-
-    echo
-    echo -e "${BYELLOW}Remember to 'git push origin --tags'!${RESET}"
-fi
-
 # ---------------------- FRONTEND ----------------------
 
 pushd ../invokeai/frontend/web >/dev/null
diff --git a/installer/tag_release.sh b/installer/tag_release.sh
new file mode 100755
index 0000000000..a914c1a505
--- /dev/null
+++ b/installer/tag_release.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+set -e
+
+BCYAN="\e[1;36m"
+BYELLOW="\e[1;33m"
+BGREEN="\e[1;32m"
+BRED="\e[1;31m"
+RED="\e[31m"
+RESET="\e[0m"
+
+function does_tag_exist {
+    git rev-parse --quiet --verify "refs/tags/$1" >/dev/null
+}
+
+function git_show_ref {
+    git show-ref --dereference $1 --abbrev=7
+}
+
+function git_show {
+    git show -s --format='%h %s' $1
+}
+
+VERSION=$(
+    cd ..
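+    # Read the version from invokeai.version itself so the tag always matches the code.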
+    python -c "from invokeai.version import __version__ as version; print(version)"
+)
+PATCH=""
+MAJOR_VERSION=$(echo $VERSION | sed 's/\..*$//')
+VERSION="v${VERSION}${PATCH}"
+LATEST_TAG="v${MAJOR_VERSION}-latest"
+
+if does_tag_exist $VERSION; then
+    echo -e "${BCYAN}${VERSION}${RESET} already exists:"
+    git_show_ref tags/$VERSION
+    echo
+fi
+if does_tag_exist $LATEST_TAG; then
+    echo -e "${BCYAN}${LATEST_TAG}${RESET} already exists:"
+    git_show_ref tags/$LATEST_TAG
+    echo
+fi
+
+echo -e "${BGREEN}HEAD${RESET}:"
+git_show
+echo
+
+echo -e -n "Create tags ${BCYAN}${VERSION}${RESET} and ${BCYAN}${LATEST_TAG}${RESET} @ ${BGREEN}HEAD${RESET}, ${RED}deleting existing tags on remote${RESET}? "
+read -e -p 'y/n [n]: ' input
+RESPONSE=${input:='n'}
+if [ "$RESPONSE" == 'y' ]; then
+    echo
+    echo -e "Deleting ${BCYAN}${VERSION}${RESET} tag on remote..."
+    git push --delete origin $VERSION
+
+    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${VERSION}${RESET} locally..."
+    if ! git tag -fa $VERSION; then
+        echo "Existing/invalid tag"
+        exit 1
+    fi
+
+    echo -e "Deleting ${BCYAN}${LATEST_TAG}${RESET} tag on remote..."
+    git push --delete origin $LATEST_TAG
+
+    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${LATEST_TAG}${RESET} locally..."
+    git tag -fa $LATEST_TAG
+
+    echo -e "Pushing updated tags to remote..."
+    git push origin --tags
+fi
+exit 0
diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py
index 13fd541139..ea28cdfe8e 100644
--- a/invokeai/app/api_app.py
+++ b/invokeai/app/api_app.py
@@ -272,6 +272,8 @@ def invoke_api() -> None:
         port=port,
         loop="asyncio",
         log_level=app_config.log_level,
+        ssl_certfile=app_config.ssl_certfile,
+        ssl_keyfile=app_config.ssl_keyfile,
     )
     server = uvicorn.Server(config)
diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py
index cd689a510b..d9e0c7ba0d 100644
--- a/invokeai/app/invocations/baseinvocation.py
+++ b/invokeai/app/invocations/baseinvocation.py
@@ -39,6 +39,19 @@ class InvalidFieldError(TypeError):
     pass
 
 
+class Classification(str, Enum, metaclass=MetaEnum):
+    """
+    The classification of an Invocation.
+    - `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation.
+    - `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term.
+    - `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.
+    """
+
+    Stable = "stable"
+    Beta = "beta"
+    Prototype = "prototype"
+
+
 class Input(str, Enum, metaclass=MetaEnum):
     """
     The type of input a field accepts.
@@ -439,6 +452,7 @@ class UIConfigBase(BaseModel):
         description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".',
"1.0.0" or "3.8.13".', ) node_pack: Optional[str] = Field(default=None, description="Whether or not this is a custom node") + classification: Classification = Field(default=Classification.Stable, description="The node's classification") model_config = ConfigDict( validate_assignment=True, @@ -607,6 +621,7 @@ class BaseInvocation(ABC, BaseModel): schema["category"] = uiconfig.category if uiconfig.node_pack is not None: schema["node_pack"] = uiconfig.node_pack + schema["classification"] = uiconfig.classification schema["version"] = uiconfig.version if "required" not in schema or not isinstance(schema["required"], list): schema["required"] = [] @@ -782,6 +797,7 @@ def invocation( category: Optional[str] = None, version: Optional[str] = None, use_cache: Optional[bool] = True, + classification: Classification = Classification.Stable, ) -> Callable[[Type[TBaseInvocation]], Type[TBaseInvocation]]: """ Registers an invocation. @@ -792,6 +808,7 @@ def invocation( :param Optional[str] category: Adds a category to the invocation. Used to group the invocations in the UI. Defaults to None. :param Optional[str] version: Adds a version to the invocation. Must be a valid semver string. Defaults to None. :param Optional[bool] use_cache: Whether or not to use the invocation cache. Defaults to True. The user may override this in the workflow editor. + :param Classification classification: The classification of the invocation. Defaults to FeatureClassification.Stable. Use Beta or Prototype if the invocation is unstable. """ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: @@ -812,6 +829,7 @@ def invocation( cls.UIConfig.title = title cls.UIConfig.tags = tags cls.UIConfig.category = category + cls.UIConfig.classification = classification # Grab the node pack's name from the module name, if it's a custom node is_custom_node = cls.__module__.rsplit(".", 1)[0] == "invokeai.app.invocations" diff --git a/invokeai/app/invocations/tiles.py b/invokeai/app/invocations/tiles.py index e59a0530ee..c0582986a8 100644 --- a/invokeai/app/invocations/tiles.py +++ b/invokeai/app/invocations/tiles.py @@ -1,3 +1,5 @@ +from typing import Literal + import numpy as np from PIL import Image from pydantic import BaseModel @@ -5,6 +7,8 @@ from pydantic import BaseModel from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, + Classification, + Input, InputField, InvocationContext, OutputField, @@ -14,7 +18,13 @@ from invokeai.app.invocations.baseinvocation import ( ) from invokeai.app.invocations.primitives import ImageField, ImageOutput from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin -from invokeai.backend.tiles.tiles import calc_tiles_with_overlap, merge_tiles_with_linear_blending +from invokeai.backend.tiles.tiles import ( + calc_tiles_even_split, + calc_tiles_min_overlap, + calc_tiles_with_overlap, + merge_tiles_with_linear_blending, + merge_tiles_with_seam_blending, +) from invokeai.backend.tiles.utils import Tile @@ -55,6 +65,79 @@ class CalculateImageTilesInvocation(BaseInvocation): return CalculateImageTilesOutput(tiles=tiles) +@invocation( + "calculate_image_tiles_even_split", + title="Calculate Image Tiles Even Split", + tags=["tiles"], + category="tiles", + version="1.0.0", + classification=Classification.Beta, +) +class CalculateImageTilesEvenSplitInvocation(BaseInvocation): + """Calculate the coordinates and overlaps of tiles that cover a target image shape.""" + + image_width: int = InputField(ge=1, default=1024, 
description="The image width, in pixels, to calculate tiles for.") + image_height: int = InputField( + ge=1, default=1024, description="The image height, in pixels, to calculate tiles for." + ) + num_tiles_x: int = InputField( + default=2, + ge=1, + description="Number of tiles to divide image into on the x axis", + ) + num_tiles_y: int = InputField( + default=2, + ge=1, + description="Number of tiles to divide image into on the y axis", + ) + overlap_fraction: float = InputField( + default=0.25, + ge=0, + lt=1, + description="Overlap between adjacent tiles as a fraction of the tile's dimensions (0-1)", + ) + + def invoke(self, context: InvocationContext) -> CalculateImageTilesOutput: + tiles = calc_tiles_even_split( + image_height=self.image_height, + image_width=self.image_width, + num_tiles_x=self.num_tiles_x, + num_tiles_y=self.num_tiles_y, + overlap_fraction=self.overlap_fraction, + ) + return CalculateImageTilesOutput(tiles=tiles) + + +@invocation( + "calculate_image_tiles_min_overlap", + title="Calculate Image Tiles Minimum Overlap", + tags=["tiles"], + category="tiles", + version="1.0.0", + classification=Classification.Beta, +) +class CalculateImageTilesMinimumOverlapInvocation(BaseInvocation): + """Calculate the coordinates and overlaps of tiles that cover a target image shape.""" + + image_width: int = InputField(ge=1, default=1024, description="The image width, in pixels, to calculate tiles for.") + image_height: int = InputField( + ge=1, default=1024, description="The image height, in pixels, to calculate tiles for." + ) + tile_width: int = InputField(ge=1, default=576, description="The tile width, in pixels.") + tile_height: int = InputField(ge=1, default=576, description="The tile height, in pixels.") + min_overlap: int = InputField(default=128, ge=0, description="Minimum overlap between adjacent tiles, in pixels.") + + def invoke(self, context: InvocationContext) -> CalculateImageTilesOutput: + tiles = calc_tiles_min_overlap( + image_height=self.image_height, + image_width=self.image_width, + tile_height=self.tile_height, + tile_width=self.tile_width, + min_overlap=self.min_overlap, + ) + return CalculateImageTilesOutput(tiles=tiles) + + @invocation_output("tile_to_properties_output") class TileToPropertiesOutput(BaseInvocationOutput): coords_left: int = OutputField(description="Left coordinate of the tile relative to its parent image.") @@ -76,7 +159,14 @@ class TileToPropertiesOutput(BaseInvocationOutput): overlap_right: int = OutputField(description="Overlap between this tile and its right neighbor.") -@invocation("tile_to_properties", title="Tile to Properties", tags=["tiles"], category="tiles", version="1.0.0") +@invocation( + "tile_to_properties", + title="Tile to Properties", + tags=["tiles"], + category="tiles", + version="1.0.0", + classification=Classification.Beta, +) class TileToPropertiesInvocation(BaseInvocation): """Split a Tile into its individual properties.""" @@ -102,7 +192,14 @@ class PairTileImageOutput(BaseInvocationOutput): tile_with_image: TileWithImage = OutputField(description="A tile description with its corresponding image.") -@invocation("pair_tile_image", title="Pair Tile with Image", tags=["tiles"], category="tiles", version="1.0.0") +@invocation( + "pair_tile_image", + title="Pair Tile with Image", + tags=["tiles"], + category="tiles", + version="1.0.0", + classification=Classification.Beta, +) class PairTileImageInvocation(BaseInvocation): """Pair an image with its tile properties.""" @@ -121,13 +218,29 @@ class 
     )
 
 
-@invocation("merge_tiles_to_image", title="Merge Tiles to Image", tags=["tiles"], category="tiles", version="1.1.0")
+BLEND_MODES = Literal["Linear", "Seam"]
+
+
+@invocation(
+    "merge_tiles_to_image",
+    title="Merge Tiles to Image",
+    tags=["tiles"],
+    category="tiles",
+    version="1.1.0",
+    classification=Classification.Beta,
+)
 class MergeTilesToImageInvocation(BaseInvocation, WithMetadata):
     """Merge multiple tile images into a single image."""
 
     # Inputs
     tiles_with_images: list[TileWithImage] = InputField(description="A list of tile images with tile properties.")
+    blend_mode: BLEND_MODES = InputField(
+        default="Seam",
+        description="The blend mode to use when merging tiles: Linear or Seam",
+        input=Input.Direct,
+    )
     blend_amount: int = InputField(
+        default=32,
         ge=0,
         description="The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles.",
     )
@@ -157,10 +270,18 @@ class MergeTilesToImageInvocation(BaseInvocation, WithMetadata):
         channels = tile_np_images[0].shape[-1]
         dtype = tile_np_images[0].dtype
         np_image = np.zeros(shape=(height, width, channels), dtype=dtype)
+        if self.blend_mode == "Linear":
+            merge_tiles_with_linear_blending(
+                dst_image=np_image, tiles=tiles, tile_images=tile_np_images, blend_amount=self.blend_amount
+            )
+        elif self.blend_mode == "Seam":
+            merge_tiles_with_seam_blending(
+                dst_image=np_image, tiles=tiles, tile_images=tile_np_images, blend_amount=self.blend_amount
+            )
+        else:
+            raise ValueError(f"Unsupported blend mode: '{self.blend_mode}'.")
 
-        merge_tiles_with_linear_blending(
-            dst_image=np_image, tiles=tiles, tile_images=tile_np_images, blend_amount=self.blend_amount
-        )
+        # Convert into a PIL image and save
         pil_image = Image.fromarray(np_image)
         image_dto = context.services.images.create(
diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py
index f712640d9c..a55bcd3a21 100644
--- a/invokeai/app/services/config/config_default.py
+++ b/invokeai/app/services/config/config_default.py
@@ -221,6 +221,9 @@ class InvokeAIAppConfig(InvokeAISettings):
     allow_credentials  : bool = Field(default=True, description="Allow CORS credentials", json_schema_extra=Categories.WebServer)
     allow_methods      : List[str] = Field(default=["*"], description="Methods allowed for CORS", json_schema_extra=Categories.WebServer)
     allow_headers      : List[str] = Field(default=["*"], description="Headers allowed for CORS", json_schema_extra=Categories.WebServer)
+    # SSL options correspond to https://www.uvicorn.org/settings/#https
+    ssl_certfile       : Optional[Path] = Field(default=None, description="SSL certificate file (for HTTPS)", json_schema_extra=Categories.WebServer)
+    ssl_keyfile        : Optional[Path] = Field(default=None, description="SSL key file", json_schema_extra=Categories.WebServer)
 
     # FEATURES
     esrgan             : bool = Field(default=True, description="Enable/disable upscaling code", json_schema_extra=Categories.Features)
diff --git a/invokeai/app/services/model_install/model_install_default.py b/invokeai/app/services/model_install/model_install_default.py
index 9022fc100c..70cc4d5018 100644
--- a/invokeai/app/services/model_install/model_install_default.py
+++ b/invokeai/app/services/model_install/model_install_default.py
@@ -85,7 +85,7 @@ class ModelInstallService(ModelInstallServiceBase):
     def event_bus(self) -> Optional[EventServiceBase]:  # noqa D102
         return self._event_bus
 
-    def stop(self) -> None:
+    def stop(self, *args, **kwargs) -> None:
         """Stop the install thread; after this the object can be deleted and garbage collected."""
         self._install_queue.put(STOP_JOB)
diff --git a/invokeai/app/services/model_records/model_records_sql.py b/invokeai/app/services/model_records/model_records_sql.py
index 83b4d5b627..08956a960f 100644
--- a/invokeai/app/services/model_records/model_records_sql.py
+++ b/invokeai/app/services/model_records/model_records_sql.py
@@ -95,21 +95,13 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
                 """--sql
                 INSERT INTO model_config (
                    id,
-                   base,
-                   type,
-                   name,
-                   path,
                    original_hash,
                    config
                 )
-                VALUES (?,?,?,?,?,?,?);
+                VALUES (?,?,?);
                 """,
                 (
                     key,
-                    record.base,
-                    record.type,
-                    record.name,
-                    record.path,
                     record.original_hash,
                     json_serialized,
                 ),
@@ -173,14 +165,11 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
             self._cursor.execute(
                 """--sql
                 UPDATE model_config
-                SET base=?,
-                    type=?,
-                    name=?,
-                    path=?,
+                SET config=?
                 WHERE id=?;
                 """,
-                (record.base, record.type, record.name, record.path, json_serialized, key),
+                (json_serialized, key),
             )
             if self._cursor.rowcount == 0:
                 raise UnknownModelException("model not found")
@@ -278,7 +267,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
             self._cursor.execute(
                 """--sql
                 SELECT config FROM model_config
-                WHERE model_path=?;
+                WHERE path=?;
                 """,
                 (str(path),),
             )
diff --git a/invokeai/app/services/shared/sqlite_migrator/migrations/migration_2.py b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_2.py
index 87cc3ddcea..73148769d5 100644
--- a/invokeai/app/services/shared/sqlite_migrator/migrations/migration_2.py
+++ b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_2.py
@@ -22,6 +22,7 @@ def migrate_callback(cursor: sqlite3.Cursor, **kwargs) -> None:
     _drop_old_workflow_tables(cursor)
     _add_workflow_library(cursor)
     _drop_model_manager_metadata(cursor)
+    _recreate_model_config(cursor)
     _migrate_embedded_workflows(cursor, logger, image_files)
 
 
@@ -101,6 +102,41 @@ def _drop_model_manager_metadata(cursor: sqlite3.Cursor) -> None:
     cursor.execute("DROP TABLE IF EXISTS model_manager_metadata;")
 
 
+def _recreate_model_config(cursor: sqlite3.Cursor) -> None:
+    """
+    Drops the `model_config` table, recreating it.
+
+    In 3.4.0, this table used explicit columns, but was changed to use json_extract in 3.5.0.
+
+    Because this table is not used in production, we are able to simply drop it and recreate it.
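+    The replacement schema stores the whole config as JSON and derives the base, type, name,
+    path and format columns from it via generated columns, so the two representations cannot drift apart.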
+    """
+
+    cursor.execute("DROP TABLE IF EXISTS model_config;")
+
+    cursor.execute(
+        """--sql
+        CREATE TABLE IF NOT EXISTS model_config (
+            id TEXT NOT NULL PRIMARY KEY,
+            -- The next 3 fields are enums in python, unrestricted string here
+            base TEXT GENERATED ALWAYS as (json_extract(config, '$.base')) VIRTUAL NOT NULL,
+            type TEXT GENERATED ALWAYS as (json_extract(config, '$.type')) VIRTUAL NOT NULL,
+            name TEXT GENERATED ALWAYS as (json_extract(config, '$.name')) VIRTUAL NOT NULL,
+            path TEXT GENERATED ALWAYS as (json_extract(config, '$.path')) VIRTUAL NOT NULL,
+            format TEXT GENERATED ALWAYS as (json_extract(config, '$.format')) VIRTUAL NOT NULL,
+            original_hash TEXT, -- could be null
+            -- Serialized JSON representation of the whole config object,
+            -- which will contain additional fields from subclasses
+            config TEXT NOT NULL,
+            created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
+            -- Updated via trigger
+            updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
+            -- unique constraint on combo of name, base and type
+            UNIQUE(name, base, type)
+        );
+        """
+    )
+
+
 def _migrate_embedded_workflows(
     cursor: sqlite3.Cursor,
     logger: Logger,
diff --git a/invokeai/backend/model_manager/migrate_to_db.py b/invokeai/backend/model_manager/migrate_to_db.py
index c744ddd7d3..e68a2eab36 100644
--- a/invokeai/backend/model_manager/migrate_to_db.py
+++ b/invokeai/backend/model_manager/migrate_to_db.py
@@ -2,6 +2,7 @@
 """Migrate from the InvokeAI v2 models.yaml format to the v3 sqlite format."""
 
 from hashlib import sha1
+from logging import Logger
 
 from omegaconf import DictConfig, OmegaConf
 from pydantic import TypeAdapter
@@ -10,6 +11,7 @@ from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.app.services.model_records import (
     DuplicateModelException,
     ModelRecordServiceSQL,
+    UnknownModelException,
 )
 from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 from invokeai.backend.model_manager.config import (
@@ -38,9 +40,9 @@ class MigrateModelYamlToDb:
     """
 
     config: InvokeAIAppConfig
-    logger: InvokeAILogger
+    logger: Logger
 
-    def __init__(self):
+    def __init__(self) -> None:
         self.config = InvokeAIAppConfig.get_config()
         self.config.parse_args()
         self.logger = InvokeAILogger.get_logger()
@@ -54,9 +56,11 @@ class MigrateModelYamlToDb:
     def get_yaml(self) -> DictConfig:
         """Fetch the models.yaml DictConfig for this installation."""
         yaml_path = self.config.model_conf_path
-        return OmegaConf.load(yaml_path)
+        omegaconf = OmegaConf.load(yaml_path)
+        assert isinstance(omegaconf, DictConfig)
+        return omegaconf
 
-    def migrate(self):
+    def migrate(self) -> None:
         """Do the migration from models.yaml to invokeai.db."""
         db = self.get_db()
         yaml = self.get_yaml()
@@ -70,6 +74,7 @@ class MigrateModelYamlToDb:
             base_type, model_type, model_name = str(model_key).split("/")
             hash = FastModelHash.hash(self.config.models_path / stanza.path)
 
+            assert isinstance(model_key, str)
             new_key = sha1(model_key.encode("utf-8")).hexdigest()
 
             stanza["base"] = BaseModelType(base_type)
@@ -78,12 +83,20 @@ class MigrateModelYamlToDb:
             stanza["original_hash"] = hash
             stanza["current_hash"] = hash
 
-            new_config = ModelsValidator.validate_python(stanza)
-            self.logger.info(f"Adding model {model_name} with key {model_key}")
+            new_config: AnyModelConfig = ModelsValidator.validate_python(stanza)  # type: ignore # see https://github.com/pydantic/pydantic/discussions/7094
+
             try:
-                db.add_model(new_key, new_config)
+                if original_record := db.search_by_path(stanza.path):
+                    key = original_record[0].key
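+                    # A record already exists for this path, so update it in place rather than inserting a duplicate.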
+                    self.logger.info(f"Updating model {model_name} with information from models.yaml using key {key}")
+                    db.update_model(key, new_config)
+                else:
+                    self.logger.info(f"Adding model {model_name} with key {model_key}")
+                    db.add_model(new_key, new_config)
             except DuplicateModelException:
                 self.logger.warning(f"Model {model_name} is already in the database")
+            except UnknownModelException:
+                self.logger.warning(f"Model at {stanza.path} could not be found in database")
 
 
 def main():
diff --git a/invokeai/backend/tiles/tiles.py b/invokeai/backend/tiles/tiles.py
index 3a678d825e..1948f6624e 100644
--- a/invokeai/backend/tiles/tiles.py
+++ b/invokeai/backend/tiles/tiles.py
@@ -3,7 +3,42 @@ from typing import Union
 
+import math
+
 import numpy as np
 
-from invokeai.backend.tiles.utils import TBLR, Tile, paste
+from invokeai.app.invocations.latent import LATENT_SCALE_FACTOR
+from invokeai.backend.tiles.utils import TBLR, Tile, paste, seam_blend
+
+
+def calc_overlap(tiles: list[Tile], num_tiles_x: int, num_tiles_y: int) -> list[Tile]:
+    """Calculate and update the overlap of a list of tiles.
+
+    Args:
+        tiles (list[Tile]): The list of tiles describing the locations of the respective `tile_images`.
+        num_tiles_x: the number of tiles on the x axis.
+        num_tiles_y: the number of tiles on the y axis.
+    """
+
+    def get_tile_or_none(idx_y: int, idx_x: int) -> Union[Tile, None]:
+        if idx_y < 0 or idx_y >= num_tiles_y or idx_x < 0 or idx_x >= num_tiles_x:
+            return None
+        return tiles[idx_y * num_tiles_x + idx_x]
+
+    for tile_idx_y in range(num_tiles_y):
+        for tile_idx_x in range(num_tiles_x):
+            cur_tile = get_tile_or_none(tile_idx_y, tile_idx_x)
+            top_neighbor_tile = get_tile_or_none(tile_idx_y - 1, tile_idx_x)
+            left_neighbor_tile = get_tile_or_none(tile_idx_y, tile_idx_x - 1)
+
+            assert cur_tile is not None
+
+            # Update cur_tile top-overlap and corresponding top-neighbor bottom-overlap.
+            if top_neighbor_tile is not None:
+                cur_tile.overlap.top = max(0, top_neighbor_tile.coords.bottom - cur_tile.coords.top)
+                top_neighbor_tile.overlap.bottom = cur_tile.overlap.top
+
+            # Update cur_tile left-overlap and corresponding left-neighbor right-overlap.
+            if left_neighbor_tile is not None:
+                cur_tile.overlap.left = max(0, left_neighbor_tile.coords.right - cur_tile.coords.left)
+                left_neighbor_tile.overlap.right = cur_tile.overlap.left
+    return tiles
 
 
 def calc_tiles_with_overlap(
@@ -63,31 +98,125 @@ def calc_tiles_with_overlap(
 
         tiles.append(tile)
 
-    def get_tile_or_none(idx_y: int, idx_x: int) -> Union[Tile, None]:
-        if idx_y < 0 or idx_y > num_tiles_y or idx_x < 0 or idx_x > num_tiles_x:
-            return None
-        return tiles[idx_y * num_tiles_x + idx_x]
-
-    # Iterate over tiles again and calculate overlaps.
-    for tile_idx_y in range(num_tiles_y):
-        for tile_idx_x in range(num_tiles_x):
-            cur_tile = get_tile_or_none(tile_idx_y, tile_idx_x)
-            top_neighbor_tile = get_tile_or_none(tile_idx_y - 1, tile_idx_x)
-            left_neighbor_tile = get_tile_or_none(tile_idx_y, tile_idx_x - 1)
-
-            assert cur_tile is not None
-
-            # Update cur_tile top-overlap and corresponding top-neighbor bottom-overlap.
-            if top_neighbor_tile is not None:
-                cur_tile.overlap.top = max(0, top_neighbor_tile.coords.bottom - cur_tile.coords.top)
-                top_neighbor_tile.overlap.bottom = cur_tile.overlap.top
-
-            # Update cur_tile left-overlap and corresponding left-neighbor right-overlap.
-            if left_neighbor_tile is not None:
-                cur_tile.overlap.left = max(0, left_neighbor_tile.coords.right - cur_tile.coords.left)
-                left_neighbor_tile.overlap.right = cur_tile.overlap.left
-
-    return tiles
+    return calc_overlap(tiles, num_tiles_x, num_tiles_y)
+
+
+def calc_tiles_even_split(
+    image_height: int, image_width: int, num_tiles_x: int, num_tiles_y: int, overlap_fraction: float = 0
+) -> list[Tile]:
+    """Calculate the tile coordinates for a given image shape with the number of tiles requested.
+
+    Args:
+        image_height (int): The image height in px.
+        image_width (int): The image width in px.
+        num_tiles_x (int): The number of tiles to split the image into on the X-axis.
+        num_tiles_y (int): The number of tiles to split the image into on the Y-axis.
+        overlap_fraction (float, optional): The target overlap as a fraction of the tile size. Defaults to 0.
+
+    Returns:
+        list[Tile]: A list of tiles that cover the image shape. Ordered from left-to-right, top-to-bottom.
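+            All tile edges are multiples of LATENT_SCALE_FACTOR, keeping the tiles aligned to the latent grid.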
+    """
+
+    # Ensure the image size is divisible by LATENT_SCALE_FACTOR (8)
+    if image_width % LATENT_SCALE_FACTOR != 0 or image_height % LATENT_SCALE_FACTOR != 0:
+        raise ValueError(f"image size ({image_width}, {image_height}) must be divisible by {LATENT_SCALE_FACTOR}")
+
+    # Calculate the overlap size based on the fraction and adjust it to be divisible by 8 (rounding up)
+    overlap_x = LATENT_SCALE_FACTOR * math.ceil(
+        int((image_width / num_tiles_x) * overlap_fraction) / LATENT_SCALE_FACTOR
+    )
+    overlap_y = LATENT_SCALE_FACTOR * math.ceil(
+        int((image_height / num_tiles_y) * overlap_fraction) / LATENT_SCALE_FACTOR
+    )
+
+    # Calculate the tile size based on the number of tiles and overlap, and ensure it's divisible by 8 (rounding down)
+    tile_size_x = LATENT_SCALE_FACTOR * math.floor(
+        ((image_width + overlap_x * (num_tiles_x - 1)) // num_tiles_x) / LATENT_SCALE_FACTOR
+    )
+    tile_size_y = LATENT_SCALE_FACTOR * math.floor(
+        ((image_height + overlap_y * (num_tiles_y - 1)) // num_tiles_y) / LATENT_SCALE_FACTOR
+    )
+
+    # tiles[y * num_tiles_x + x] is the tile for the y'th row, x'th column.
+    tiles: list[Tile] = []
+
+    # Calculate tile coordinates. (Ignore overlap values for now.)
+    for tile_idx_y in range(num_tiles_y):
+        # Calculate the top and bottom of the row
+        top = tile_idx_y * (tile_size_y - overlap_y)
+        bottom = min(top + tile_size_y, image_height)
+        # For the last row adjust bottom to be the height of the image
+        if tile_idx_y == num_tiles_y - 1:
+            bottom = image_height
+
+        for tile_idx_x in range(num_tiles_x):
+            # Calculate the left & right coordinate of each tile
+            left = tile_idx_x * (tile_size_x - overlap_x)
+            right = min(left + tile_size_x, image_width)
+            # For the last tile in the row adjust right to be the width of the image
+            if tile_idx_x == num_tiles_x - 1:
+                right = image_width
+
+            tile = Tile(
+                coords=TBLR(top=top, bottom=bottom, left=left, right=right),
+                overlap=TBLR(top=0, bottom=0, left=0, right=0),
+            )
+
+            tiles.append(tile)
+
+    return calc_overlap(tiles, num_tiles_x, num_tiles_y)
+
+
+def calc_tiles_min_overlap(
+    image_height: int,
+    image_width: int,
+    tile_height: int,
+    tile_width: int,
+    min_overlap: int = 0,
+) -> list[Tile]:
+    """Calculate the tile coordinates for a given image shape under a simple tiling scheme with overlaps.
+
+    Args:
+        image_height (int): The image height in px.
+        image_width (int): The image width in px.
+        tile_height (int): The tile height in px. All tiles will have this height.
+        tile_width (int): The tile width in px. All tiles will have this width.
+        min_overlap (int): The target minimum overlap between adjacent tiles. If the tiles do not evenly cover the image
+            shape, then the overlap will be spread between the tiles.
+
+    Returns:
+        list[Tile]: A list of tiles that cover the image shape. Ordered from left-to-right, top-to-bottom.
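+            The tile size is fixed; any overlap beyond the minimum is spread evenly across the seams.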
+    """
+
+    assert min_overlap < tile_height
+    assert min_overlap < tile_width
+
+    # If the tile size is larger than the image size, just clamp the number of tiles to 1.
+    num_tiles_x = math.ceil((image_width - min_overlap) / (tile_width - min_overlap)) if tile_width < image_width else 1
+    num_tiles_y = (
+        math.ceil((image_height - min_overlap) / (tile_height - min_overlap)) if tile_height < image_height else 1
+    )
+
+    # tiles[y * num_tiles_x + x] is the tile for the y'th row, x'th column.
+    tiles: list[Tile] = []
+
+    # Calculate tile coordinates. (Ignore overlap values for now.)
+    for tile_idx_y in range(num_tiles_y):
+        top = (tile_idx_y * (image_height - tile_height)) // (num_tiles_y - 1) if num_tiles_y > 1 else 0
+        bottom = top + tile_height
+
+        for tile_idx_x in range(num_tiles_x):
+            left = (tile_idx_x * (image_width - tile_width)) // (num_tiles_x - 1) if num_tiles_x > 1 else 0
+            right = left + tile_width
+
+            tile = Tile(
+                coords=TBLR(top=top, bottom=bottom, left=left, right=right),
+                overlap=TBLR(top=0, bottom=0, left=0, right=0),
+            )
+
+            tiles.append(tile)
+
+    return calc_overlap(tiles, num_tiles_x, num_tiles_y)
 
 
 def merge_tiles_with_linear_blending(
@@ -199,3 +328,91 @@ def merge_tiles_with_linear_blending(
             ),
             mask=mask,
         )
+
+
+def merge_tiles_with_seam_blending(
+    dst_image: np.ndarray, tiles: list[Tile], tile_images: list[np.ndarray], blend_amount: int
+):
+    """Merge a set of image tiles into `dst_image` with seam blending between the tiles.
+
+    We expect every tile edge to either:
+    1) have an overlap of 0, because it is aligned with the image edge, or
+    2) have an overlap >= blend_amount.
+    If neither of these conditions is satisfied, we raise an exception.
+
+    The seam blending is centered on a seam of least energy of the overlap between adjacent tiles.
+
+    Args:
+        dst_image (np.ndarray): The destination image. Shape: (H, W, C).
+        tiles (list[Tile]): The list of tiles describing the locations of the respective `tile_images`.
+        tile_images (list[np.ndarray]): The tile images to merge into `dst_image`.
+        blend_amount (int): The amount of blending (in px) between adjacent overlapping tiles.
+    """
+    # Sort tiles and images first by left x coordinate, then by top y coordinate. During tile processing, we want to
+    # iterate over tiles left-to-right, top-to-bottom.
+    tiles_and_images = list(zip(tiles, tile_images, strict=True))
+    tiles_and_images = sorted(tiles_and_images, key=lambda x: x[0].coords.left)
+    tiles_and_images = sorted(tiles_and_images, key=lambda x: x[0].coords.top)
+
+    # Organize tiles into rows.
+    tile_and_image_rows: list[list[tuple[Tile, np.ndarray]]] = []
+    cur_tile_and_image_row: list[tuple[Tile, np.ndarray]] = []
+    first_tile_in_cur_row, _ = tiles_and_images[0]
+    for tile_and_image in tiles_and_images:
+        tile, _ = tile_and_image
+        if not (
+            tile.coords.top == first_tile_in_cur_row.coords.top
+            and tile.coords.bottom == first_tile_in_cur_row.coords.bottom
+        ):
+            # Store the previous row, and start a new one.
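+            # Rows are blended horizontally first; each finished row is then blended vertically into dst_image.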
+            tile_and_image_rows.append(cur_tile_and_image_row)
+            cur_tile_and_image_row = []
+            first_tile_in_cur_row, _ = tile_and_image
+
+        cur_tile_and_image_row.append(tile_and_image)
+    tile_and_image_rows.append(cur_tile_and_image_row)
+
+    for tile_and_image_row in tile_and_image_rows:
+        first_tile_in_row, _ = tile_and_image_row[0]
+        row_height = first_tile_in_row.coords.bottom - first_tile_in_row.coords.top
+        row_image = np.zeros((row_height, dst_image.shape[1], dst_image.shape[2]), dtype=dst_image.dtype)
+
+        # Blend the tiles in the row horizontally.
+        for tile, tile_image in tile_and_image_row:
+            # We expect the tiles to be ordered left-to-right.
+            # For each tile:
+            #   - extract the overlap regions and pass to seam_blend()
+            #   - apply blended region to the row_image
+            #   - apply the un-blended region to the row_image
+            tile_height, tile_width, _ = tile_image.shape
+            overlap_size = tile.overlap.left
+            # Left blending:
+            if overlap_size > 0:
+                assert overlap_size >= blend_amount
+
+                overlap_coord_right = tile.coords.left + overlap_size
+                src_overlap = row_image[:, tile.coords.left : overlap_coord_right]
+                dst_overlap = tile_image[:, :overlap_size]
+                blended_overlap = seam_blend(src_overlap, dst_overlap, blend_amount, x_seam=False)
+                row_image[:, tile.coords.left : overlap_coord_right] = blended_overlap
+                row_image[:, overlap_coord_right : tile.coords.right] = tile_image[:, overlap_size:]
+            else:
+                # no overlap just paste the tile
+                row_image[:, tile.coords.left : tile.coords.right] = tile_image
+
+        # Blend the row into the dst_image
+        # We assume that the entire row has the same vertical overlaps as the first_tile_in_row.
+        # Rows are processed in the same way as tiles (extract overlap, blend, apply)
+        row_overlap_size = first_tile_in_row.overlap.top
+        if row_overlap_size > 0:
+            assert row_overlap_size >= blend_amount
+
+            overlap_coords_bottom = first_tile_in_row.coords.top + row_overlap_size
+            src_overlap = dst_image[first_tile_in_row.coords.top : overlap_coords_bottom, :]
+            dst_overlap = row_image[:row_overlap_size, :]
+            blended_overlap = seam_blend(src_overlap, dst_overlap, blend_amount, x_seam=True)
+            dst_image[first_tile_in_row.coords.top : overlap_coords_bottom, :] = blended_overlap
+            dst_image[overlap_coords_bottom : first_tile_in_row.coords.bottom, :] = row_image[row_overlap_size:, :]
+        else:
+            # no overlap just paste the row
+            dst_image[first_tile_in_row.coords.top : first_tile_in_row.coords.bottom, :] = row_image
diff --git a/invokeai/backend/tiles/utils.py b/invokeai/backend/tiles/utils.py
index 4ad40ffa35..dc6d914170 100644
--- a/invokeai/backend/tiles/utils.py
+++ b/invokeai/backend/tiles/utils.py
@@ -1,5 +1,7 @@
+import math
 from typing import Optional
 
+import cv2
 import numpy as np
 from pydantic import BaseModel, Field
 
@@ -31,10 +33,10 @@ def paste(dst_image: np.ndarray, src_image: np.ndarray, box: TBLR, mask: Optional[np.ndarray] = None):
     """Paste a source image into a destination image.
 
     Args:
-        dst_image (torch.Tensor): The destination image to paste into. Shape: (H, W, C).
-        src_image (torch.Tensor): The source image to paste. Shape: (H, W, C). H and W must be compatible with 'box'.
+        dst_image (np.array): The destination image to paste into. Shape: (H, W, C).
+        src_image (np.array): The source image to paste. Shape: (H, W, C). H and W must be compatible with 'box'.
         box (TBLR): Box defining the region in the 'dst_image' where 'src_image' will be pasted.
-        mask (Optional[torch.Tensor]): A mask that defines the blending between 'src_image' and 'dst_image'.
+        mask (Optional[np.array]): A mask that defines the blending between 'src_image' and 'dst_image'.
             Range: [0.0, 1.0], Shape: (H, W). The output is calculated per-pixel according to
             `src * mask + dst * (1 - mask)`.
     """
@@ -45,3 +47,106 @@ def paste(dst_image: np.ndarray, src_image: np.ndarray, box: TBLR, mask: Optional[np.ndarray] = None):
         mask = np.expand_dims(mask, -1)
     dst_image_box = dst_image[box.top : box.bottom, box.left : box.right]
     dst_image[box.top : box.bottom, box.left : box.right] = src_image * mask + dst_image_box * (1.0 - mask)
+
+
+def seam_blend(ia1: np.ndarray, ia2: np.ndarray, blend_amount: int, x_seam: bool) -> np.ndarray:
+    """Blend two overlapping tile sections along a seam of least energy.
+
+    It is assumed that input images will be RGB np arrays and are the same size.
+
+    Args:
+        ia1 (np.array): Image array 1. Shape: (H, W, C).
+        ia2 (np.array): Image array 2. Shape: (H, W, C).
+        blend_amount (int): The size of the blur to use on the seam. Half of this value is used as a gutter to keep the seam away from the edges of the image.
+        x_seam (bool): If True, blend the images along the x axis; otherwise blend along the y axis.
+    """
+    assert ia1.shape == ia2.shape
+    assert ia1.size == ia2.size
+
+    def shift(arr, num, fill_value=255.0):
+        result = np.full_like(arr, fill_value)
+        if num > 0:
+            result[num:] = arr[:-num]
+        elif num < 0:
+            result[:num] = arr[-num:]
+        else:
+            result[:] = arr
+        return result
+
+    # Assume RGB and convert to grey
+    # Could offer other options for the luminance conversion
+    # BT.709 [0.2126, 0.7152, 0.0722], BT.2020 [0.2627, 0.6780, 0.0593]
+    # it might not have a huge impact due to the blur that is applied over the seam
+    iag1 = np.dot(ia1, [0.2989, 0.5870, 0.1140])  # BT.601 perceived brightness
+    iag2 = np.dot(ia2, [0.2989, 0.5870, 0.1140])
+
+    # Calc the difference between the images
+    ia = iag2 - iag1
+
+    # If the seam is on the X-axis rotate the array so we can treat it like a vertical seam
+    if x_seam:
+        ia = np.rot90(ia, 1)
+
+    # Calc max and min X & Y limits
+    # gutter is used to avoid the blur hitting the edge of the image
+    gutter = math.ceil(blend_amount / 2) if blend_amount > 0 else 0
+    max_y, max_x = ia.shape
+    max_x -= gutter
+    min_x = gutter
+
+    # Calc the energy in the difference
+    # Could offer different energy calculations e.g. Sobel or Scharr
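+    # The seam search below follows the path of least cumulative energy through this difference map.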
+    energy = np.abs(np.gradient(ia, axis=0)) + np.abs(np.gradient(ia, axis=1))
+
+    # Find the starting position of the seam
+    res = np.copy(energy)
+    for y in range(1, max_y):
+        row = res[y, :]
+        rowl = shift(row, -1)
+        rowr = shift(row, 1)
+        res[y, :] = res[y - 1, :] + np.min([row, rowl, rowr], axis=0)
+
+    # create an array max_y long
+    lowest_energy_line = np.empty([max_y], dtype="uint16")
+    lowest_energy_line[max_y - 1] = np.argmin(res[max_y - 1, min_x : max_x - 1]) + min_x
+
+    # Calc the path of the seam
+    # could offer options for larger search than just 1 pixel by adjusting lpos and rpos
+    for ypos in range(max_y - 2, -1, -1):
+        lowest_pos = lowest_energy_line[ypos + 1]
+        lpos = lowest_pos - 1
+        rpos = lowest_pos + 1
+        lpos = np.clip(lpos, min_x, max_x - 1)
+        rpos = np.clip(rpos, min_x, max_x - 1)
+        lowest_energy_line[ypos] = np.argmin(energy[ypos, lpos : rpos + 1]) + lpos
+
+    # Draw the mask
+    mask = np.zeros_like(ia)
+    for ypos in range(0, max_y):
+        to_fill = lowest_energy_line[ypos]
+        mask[ypos, :to_fill] = 1
+
+    # If the seam is on the X-axis rotate the array back
+    if x_seam:
+        mask = np.rot90(mask, 3)
+
+    # blur the seam mask if required
+    if blend_amount > 0:
+        mask = cv2.blur(mask, (blend_amount, blend_amount))
+
+    # for visual debugging
+    # from PIL import Image
+    # m_image = Image.fromarray((mask * 255.0).astype("uint8"))
+
+    # copy ia2 over ia1 while applying the seam mask
+    mask = np.expand_dims(mask, -1)
+    blended_image = ia1 * mask + ia2 * (1.0 - mask)
+
+    # for visual debugging
+    # i1 = Image.fromarray(ia1.astype("uint8"))
+    # i2 = Image.fromarray(ia2.astype("uint8"))
+    # b_image = Image.fromarray(blended_image.astype("uint8"))
+    # print(f"{ia1.shape}, {ia2.shape}, {mask.shape}, {blended_image.shape}")
+    # print(f"{i1.size}, {i2.size}, {m_image.size}, {b_image.size}")
+
+    return blended_image
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index c948311c29..ec257ee044 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -1032,7 +1032,9 @@
     "workflowValidation": "Workflow Validation Error",
     "workflowVersion": "Version",
     "zoomInNodes": "Zoom In",
-    "zoomOutNodes": "Zoom Out"
+    "zoomOutNodes": "Zoom Out",
+    "betaDesc": "This invocation is in beta. Until it is stable, it may have breaking changes during app updates. We plan to support this invocation long-term.",
+    "prototypeDesc": "This invocation is a prototype. It may have breaking changes during app updates and may be removed at any time."
   },
   "parameters": {
     "aspectRatio": "Aspect Ratio",
diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json
index ac03603923..b1b6852f25 100644
--- a/invokeai/frontend/web/public/locales/zh_CN.json
+++ b/invokeai/frontend/web/public/locales/zh_CN.json
@@ -109,7 +109,8 @@
     "somethingWentWrong": "出了点问题",
     "copyError": "$t(gallery.copy) 错误",
     "input": "输入",
-    "notInstalled": "非 $t(common.installed)"
+    "notInstalled": "非 $t(common.installed)",
+    "delete": "删除"
   },
   "gallery": {
     "generations": "生成的图像",
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
index b5b0828ff7..b2719c621f 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
@@ -3,6 +3,7 @@ import { useStore } from '@nanostores/react';
 import { $customStarUI } from 'app/store/nanostores/customStarUI';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
 import IAIDndImage from 'common/components/IAIDndImage';
+import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
 import IAIFillSkeleton from 'common/components/IAIFillSkeleton';
 import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice';
 import {
@@ -10,7 +11,9 @@ import {
   ImageDraggableData,
   TypesafeDraggableData,
 } from 'features/dnd/types';
+import { VirtuosoGalleryContext } from 'features/gallery/components/ImageGrid/types';
 import { useMultiselect } from 'features/gallery/hooks/useMultiselect';
+import { useScrollToVisible } from 'features/gallery/hooks/useScrollToVisible';
 import { MouseEvent, memo, useCallback, useMemo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { FaTrash } from 'react-icons/fa';
@@ -20,15 +23,16 @@ import {
   useStarImagesMutation,
   useUnstarImagesMutation,
 } from 'services/api/endpoints/images';
-import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
 
 interface HoverableImageProps {
   imageName: string;
+  index: number;
+  virtuosoContext: VirtuosoGalleryContext;
 }
 
 const GalleryImage = (props: HoverableImageProps) => {
   const dispatch = useAppDispatch();
-  const { imageName } = props;
+  const { imageName, virtuosoContext } = props;
   const { currentData: imageDTO } = useGetImageDTOQuery(imageName);
   const shift = useAppSelector((state) => state.hotkeys.shift);
   const { t } = useTranslation();
@@ -38,6 +42,13 @@ const GalleryImage = (props: HoverableImageProps) => {
 
   const customStarUi = useStore($customStarUI);
 
+  const imageContainerRef = useScrollToVisible(
+    isSelected,
+    props.index,
+    selectionCount,
+    virtuosoContext
+  );
+
   const handleDelete = useCallback(
     (e: MouseEvent) => {
       e.stopPropagation();
@@ -122,6 +133,7 @@ const GalleryImage = (props: HoverableImageProps) => {
       data-testid={`image-${imageDTO.image_name}`}
     >
      <Flex
+        ref={imageContainerRef}
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx
 const GalleryImageGrid = () => {
   const { currentViewTotal } = useBoardTotal(selectedBoardId);
   const queryArgs = useAppSelector(selectListImagesBaseQueryArgs);
 
+  const virtuosoRangeRef = useRef<ListRange | null>(null);
+
+  const virtuosoRef = useRef<VirtuosoGridHandle | null>(null);
+
   const { currentData, isFetching, isSuccess, isError } =
     useListImagesQuery(queryArgs);
 
@@ -72,12 +83,26 @@ const GalleryImageGrid = () => {
     });
   }, [areMoreAvailable, listImages, queryArgs, currentData?.ids.length]);
 
-  const itemContentFunc = useCallback(
-    (index: number, imageName: EntityId) => (
-      <GalleryImage key={imageName} imageName={imageName as string} />
-    ),
-    []
-  );
+  const virtuosoContext = useMemo(() => {
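+    // The context carries only refs, which are stable, so one memoized object lasts the grid's lifetime.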
+    return {
+      virtuosoRef,
+      rootRef,
+      virtuosoRangeRef,
+    };
+  }, []);
+
+  const itemContentFunc: ItemContent<EntityId, VirtuosoGalleryContext> =
+    useCallback(
+      (index, imageName, virtuosoContext) => (
+        <GalleryImage
+          key={imageName}
+          index={index}
+          imageName={imageName as string}
+          virtuosoContext={virtuosoContext}
+        />
+      ),
+      []
+    );
 
   useEffect(() => {
     // Initialize the gallery's custom scrollbar
@@ -93,6 +118,15 @@ const GalleryImageGrid = () => {
     return () => osInstance()?.destroy();
   }, [scroller, initialize, osInstance]);
 
+  const onRangeChanged = useCallback((range: ListRange) => {
+    virtuosoRangeRef.current = range;
+  }, []);
+
+  useEffect(() => {
+    $useNextPrevImageState.setKey('virtuosoRef', virtuosoRef);
+    $useNextPrevImageState.setKey('virtuosoRangeRef', virtuosoRangeRef);
+  }, []);
+
   if (!currentData) {
     return (
         }}
         scrollerRef={setScroller}
         itemContent={itemContentFunc}
+        ref={virtuosoRef}
+        rangeChanged={onRangeChanged}
+        context={virtuosoContext}
+        overscan={10}
       />
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/types.ts b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/types.ts
new file mode 100644
--- /dev/null
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/types.ts
+import { RefObject } from 'react';
+import { ListRange, VirtuosoGridHandle } from 'react-virtuoso';
+
+export type VirtuosoGalleryContext = {
+  virtuosoRef: RefObject<VirtuosoGridHandle>;
+  rootRef: RefObject<HTMLDivElement>;
+  virtuosoRangeRef: RefObject<ListRange>;
+};
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataWorkflowTabContent.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataWorkflowTabContent.tsx
index f719d22478..ddc3572083 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataWorkflowTabContent.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataWorkflowTabContent.tsx
@@ -1,7 +1,7 @@
 import { IAINoContentFallback } from 'common/components/IAIImageFallback';
 import { memo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { useGetImageWorkflowQuery } from 'services/api/endpoints/images';
+import { useDebouncedImageWorkflow } from 'services/api/hooks/useDebouncedImageWorkflow';
 import { ImageDTO } from 'services/api/types';
 
 import DataViewer from './DataViewer';
@@ -11,7 +11,7 @@ type Props = {
 
 const ImageMetadataWorkflowTabContent = ({ image }: Props) => {
   const { t } = useTranslation();
-  const { currentData: workflow } = useGetImageWorkflowQuery(image.image_name);
+  const { workflow } = useDebouncedImageWorkflow(image);
 
   if (!workflow) {
     return <IAINoContentFallback />;
diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useNextPrevImage.ts b/invokeai/frontend/web/src/features/gallery/hooks/useNextPrevImage.ts
index 26f4aaca6d..bbf6c26a4f 100644
--- a/invokeai/frontend/web/src/features/gallery/hooks/useNextPrevImage.ts
+++ b/invokeai/frontend/web/src/features/gallery/hooks/useNextPrevImage.ts
@@ -4,8 +4,11 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
 import { selectListImagesBaseQueryArgs } from 'features/gallery/store/gallerySelectors';
 import { imageSelected } from 'features/gallery/store/gallerySlice';
 import { IMAGE_LIMIT } from 'features/gallery/store/types';
+import { getScrollToIndexAlign } from 'features/gallery/util/getScrollToIndexAlign';
 import { clamp } from 'lodash-es';
-import { useCallback } from 'react';
+import { map } from 'nanostores';
+import { RefObject, useCallback } from 'react';
+import { ListRange, VirtuosoGridHandle } from 'react-virtuoso';
 import { boardsApi } from 'services/api/endpoints/boards';
 import {
   imagesApi,
@@ -14,6 +17,16 @@ import {
 import { ListImagesArgs } from 'services/api/types';
 import { imagesAdapter } from 'services/api/util';
 
+export type UseNextPrevImageState = {
+  virtuosoRef: RefObject<VirtuosoGridHandle> | undefined;
+  virtuosoRangeRef: RefObject<ListRange> | undefined;
+};
+
+export const $useNextPrevImageState = map<UseNextPrevImageState>({
+  virtuosoRef: undefined,
+  virtuosoRangeRef: undefined,
+});
+
 export const nextPrevImageButtonsSelector = createMemoizedSelector(
   [stateSelector, selectListImagesBaseQueryArgs],
   (state, baseQueryArgs) => {
@@ -78,6 +91,8 @@ export const nextPrevImageButtonsSelector = createMemoizedSelector(
       isFetching: status === 'pending',
       nextImage,
       prevImage,
+      nextImageIndex,
+      prevImageIndex,
       queryArgs,
     };
   }
@@ -88,7 +103,9 @@ export const useNextPrevImage = () => {
 
   const {
     nextImage,
+    nextImageIndex,
     prevImage,
+    prevImageIndex,
     areMoreImagesAvailable,
     isFetching,
     queryArgs,
@@ -98,11 +115,43 @@ export const useNextPrevImage = () => {
 
   const handlePrevImage = useCallback(() => {
     prevImage && dispatch(imageSelected(prevImage));
-  }, [dispatch, prevImage]);
+    const range = $useNextPrevImageState.get().virtuosoRangeRef?.current;
+    const virtuoso = $useNextPrevImageState.get().virtuosoRef?.current;
+    if (!range || !virtuoso) {
+      return;
+    }
+
+    if (
+      prevImageIndex !== undefined &&
+      (prevImageIndex < range.startIndex || prevImageIndex > range.endIndex)
+    ) {
+      virtuoso.scrollToIndex({
+        index: prevImageIndex,
+        behavior: 'smooth',
+        align: getScrollToIndexAlign(prevImageIndex, range),
+      });
+    }
+  }, [dispatch, prevImage, prevImageIndex]);
 
   const handleNextImage = useCallback(() => {
     nextImage && dispatch(imageSelected(nextImage));
-  }, [dispatch, nextImage]);
+    const range = $useNextPrevImageState.get().virtuosoRangeRef?.current;
+    const virtuoso = $useNextPrevImageState.get().virtuosoRef?.current;
+    if (!range || !virtuoso) {
+      return;
+    }
+
+    if (
+      nextImageIndex !== undefined &&
+      (nextImageIndex < range.startIndex || nextImageIndex > range.endIndex)
+    ) {
+      virtuoso.scrollToIndex({
+        index: nextImageIndex,
+        behavior: 'smooth',
+        align: getScrollToIndexAlign(nextImageIndex, range),
+      });
+    }
+  }, [dispatch, nextImage, nextImageIndex]);
 
   const [listImages] = useLazyListImagesQuery();
diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useScrollToVisible.ts b/invokeai/frontend/web/src/features/gallery/hooks/useScrollToVisible.ts
new file mode 100644
index 0000000000..b74b7cbbdb
--- /dev/null
+++ b/invokeai/frontend/web/src/features/gallery/hooks/useScrollToVisible.ts
@@ -0,0 +1,46 @@
+import { VirtuosoGalleryContext } from 'features/gallery/components/ImageGrid/types';
+import { getScrollToIndexAlign } from 'features/gallery/util/getScrollToIndexAlign';
+import { useEffect, useRef } from 'react';
+
+export const useScrollToVisible = (
+  isSelected: boolean,
+  index: number,
+  selectionCount: number,
+  virtuosoContext: VirtuosoGalleryContext
+) => {
+  const imageContainerRef = useRef<HTMLDivElement>(null);
+
+  useEffect(() => {
+    if (
+      !isSelected ||
+      selectionCount !== 1 ||
+      !virtuosoContext.rootRef.current ||
+      !virtuosoContext.virtuosoRef.current ||
+      !virtuosoContext.virtuosoRangeRef.current ||
+      !imageContainerRef.current
+    ) {
+      return;
+    }
+
+    const itemRect = imageContainerRef.current.getBoundingClientRect();
+    const rootRect = virtuosoContext.rootRef.current.getBoundingClientRect();
+    const itemIsVisible =
+      itemRect.top >= rootRect.top &&
+      itemRect.bottom <= rootRect.bottom &&
+      itemRect.left >= rootRect.left &&
+      itemRect.right <= rootRect.right;
+
+    if (!itemIsVisible) {
+      virtuosoContext.virtuosoRef.current.scrollToIndex({
+        index,
+        behavior: 'smooth',
+        align: getScrollToIndexAlign(
+          index,
+          virtuosoContext.virtuosoRangeRef.current
+        ),
+      });
+    }
+  }, [isSelected, index, selectionCount, virtuosoContext]);
+
+  return imageContainerRef;
+};
diff --git a/invokeai/frontend/web/src/features/gallery/util/getScrollToIndexAlign.ts b/invokeai/frontend/web/src/features/gallery/util/getScrollToIndexAlign.ts
new file mode 100644
index 0000000000..357c3365d2
--- /dev/null
+++ b/invokeai/frontend/web/src/features/gallery/util/getScrollToIndexAlign.ts
@@ -0,0 +1,17 @@
+import { ListRange } from 'react-virtuoso';
+
+/**
+ * Gets the alignment for react-virtuoso's scrollToIndex function.
+ * @param index The index of the item.
+ * @param range The range of items currently visible.
+ * @returns
+ */
+export const getScrollToIndexAlign = (
+  index: number,
+  range: ListRange
+): 'start' | 'end' => {
+  if (index > (range.endIndex - range.startIndex) / 2 + range.startIndex) {
+    return 'end';
+  }
+  return 'start';
+};
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeClassificationIcon.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeClassificationIcon.tsx
new file mode 100644
index 0000000000..049d7d2072
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeClassificationIcon.tsx
@@ -0,0 +1,68 @@
+import { Icon, Tooltip } from '@chakra-ui/react';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { FaFlask } from 'react-icons/fa';
+import { useNodeClassification } from 'features/nodes/hooks/useNodeClassification';
+import { Classification } from 'features/nodes/types/common';
+import { FaHammer } from 'react-icons/fa6';
+
+interface Props {
+  nodeId: string;
+}
+
+const InvocationNodeClassificationIcon = ({ nodeId }: Props) => {
+  const classification = useNodeClassification(nodeId);
+
+  if (!classification || classification === 'stable') {
+    return null;
+  }
+
+  return (
+    <Tooltip
+      label={<ClassificationTooltipContent classification={classification} />}
+      placement="top"
+      shouldWrapChildren
+    >
+      <Icon as={getIcon(classification)} />
+    </Tooltip>
+  );
+};
+
+export default memo(InvocationNodeClassificationIcon);
+
+const ClassificationTooltipContent = memo(
+  ({ classification }: { classification: Classification }) => {
+    const { t } = useTranslation();
+
+    if (classification === 'beta') {
+      return t('nodes.betaDesc');
+    }
+
+    if (classification === 'prototype') {
+      return t('nodes.prototypeDesc');
+    }
+
+    return null;
+  }
+);
+
+ClassificationTooltipContent.displayName = 'ClassificationTooltipContent';
+
+const getIcon = (classification: Classification) => {
+  if (classification === 'beta') {
+    return FaHammer;
+  }
+
+  if (classification === 'prototype') {
+    return FaFlask;
+  }
+
+  return undefined;
+};
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx
index bd8c44770c..77496abb3a 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx
@@ -5,6 +5,7 @@ import NodeTitle from 'features/nodes/components/flow/nodes/common/NodeTitle';
 import InvocationNodeCollapsedHandles from './InvocationNodeCollapsedHandles';
 import InvocationNodeInfoIcon from './InvocationNodeInfoIcon';
 import InvocationNodeStatusIndicator from './InvocationNodeStatusIndicator';
+import InvocationNodeClassificationIcon from 'features/nodes/components/flow/nodes/Invocation/InvocationNodeClassificationIcon';
 
 type Props = {
   nodeId: string;
@@ -31,6 +32,7 @@ const InvocationNodeHeader = ({ nodeId, isOpen }: Props) => {
       }}
     >
+      <InvocationNodeClassificationIcon nodeId={nodeId} />
diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useNodeClassification.ts b/invokeai/frontend/web/src/features/nodes/hooks/useNodeClassification.ts
a/invokeai/frontend/web/src/features/nodes/hooks/useNodeClassification.ts b/invokeai/frontend/web/src/features/nodes/hooks/useNodeClassification.ts
new file mode 100644
index 0000000000..773f6de249
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/hooks/useNodeClassification.ts
@@ -0,0 +1,23 @@
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
+import { stateSelector } from 'app/store/store';
+import { useAppSelector } from 'app/store/storeHooks';
+import { isInvocationNode } from 'features/nodes/types/invocation';
+import { useMemo } from 'react';
+
+export const useNodeClassification = (nodeId: string) => {
+  const selector = useMemo(
+    () =>
+      createMemoizedSelector(stateSelector, ({ nodes }) => {
+        const node = nodes.nodes.find((node) => node.id === nodeId);
+        if (!isInvocationNode(node)) {
+          return false;
+        }
+        const nodeTemplate = nodes.nodeTemplates[node?.data.type ?? ''];
+        return nodeTemplate?.classification;
+      }),
+    [nodeId]
+  );
+
+  const classification = useAppSelector(selector);
+  return classification;
+};
diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts
index 460b301685..4dcebf0fe4 100644
--- a/invokeai/frontend/web/src/features/nodes/types/common.ts
+++ b/invokeai/frontend/web/src/features/nodes/types/common.ts
@@ -19,6 +19,9 @@ export const zColorField = z.object({
 });
 export type ColorField = z.infer<typeof zColorField>;
 
+export const zClassification = z.enum(['stable', 'beta', 'prototype']);
+export type Classification = z.infer<typeof zClassification>;
+
 export const zSchedulerField = z.enum([
   'euler',
   'deis',
diff --git a/invokeai/frontend/web/src/features/nodes/types/invocation.ts b/invokeai/frontend/web/src/features/nodes/types/invocation.ts
index 9e9fdeb955..927245aef4 100644
--- a/invokeai/frontend/web/src/features/nodes/types/invocation.ts
+++ b/invokeai/frontend/web/src/features/nodes/types/invocation.ts
@@ -1,6 +1,6 @@
 import { Edge, Node } from 'reactflow';
 import { z } from 'zod';
-import { zProgressImage } from './common';
+import { zClassification, zProgressImage } from './common';
 import {
   zFieldInputInstance,
   zFieldInputTemplate,
@@ -21,6 +21,7 @@ export const zInvocationTemplate = z.object({
   version: zSemVer,
   useCache: z.boolean(),
   nodePack: z.string().min(1).nullish(),
+  classification: zClassification,
 });
 export type InvocationTemplate = z.infer<typeof zInvocationTemplate>;
 // #endregion
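Because the zClassification added to common.ts above is a zod enum, it serves as both the runtime validator and the compile-time source of the Classification union. A minimal sketch of that pattern, illustrative only and separate from this diff:

import { z } from 'zod';

const zClassification = z.enum(['stable', 'beta', 'prototype']);
type Classification = z.infer<typeof zClassification>; // 'stable' | 'beta' | 'prototype'

zClassification.parse('beta'); // ok: returns 'beta'
zClassification.safeParse('alpha').success; // false — not a known classification

// Untrusted template data can be narrowed before use, falling back to 'stable':
const raw: unknown = 'prototype';
const parsed = zClassification.safeParse(raw);
const classification: Classification = parsed.success ? parsed.data : 'stable';

diff --git a/invokeai/frontend/web/src/features/nodes/util/schema/parseSchema.ts b/invokeai/frontend/web/src/features/nodes/util/schema/parseSchema.ts
index 9ad391d7c3..c2930c2187 100644
--- a/invokeai/frontend/web/src/features/nodes/util/schema/parseSchema.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/schema/parseSchema.ts
@@ -83,6 +83,7 @@ export const parseSchema = (
   const description = schema.description ??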
''; const version = schema.version; const nodePack = schema.node_pack; + const classification = schema.classification; const inputs = reduce( schema.properties, @@ -245,6 +246,7 @@ export const parseSchema = ( outputs, useCache, nodePack, + classification, }; Object.assign(invocationsAccumulator, { [type]: invocation }); diff --git a/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts b/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts new file mode 100644 index 0000000000..e025041336 --- /dev/null +++ b/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts @@ -0,0 +1,22 @@ +import { skipToken } from '@reduxjs/toolkit/query'; +import { useAppSelector } from 'app/store/storeHooks'; +import { useGetImageWorkflowQuery } from 'services/api/endpoints/images'; +import { ImageDTO } from 'services/api/types'; +import { useDebounce } from 'use-debounce'; + +export const useDebouncedImageWorkflow = (imageDTO?: ImageDTO | null) => { + const workflowFetchDebounce = useAppSelector( + (state) => state.config.workflowFetchDebounce ?? 300 + ); + + const [debouncedImageName] = useDebounce( + imageDTO?.has_workflow ? imageDTO.image_name : null, + workflowFetchDebounce + ); + + const { data: workflow, isLoading } = useGetImageWorkflowQuery( + debouncedImageName ?? skipToken + ); + + return { workflow, isLoading }; +}; diff --git a/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts b/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts index e9727ef6ae..1ed3b27475 100644 --- a/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts +++ b/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts @@ -1,17 +1,14 @@ import { skipToken } from '@reduxjs/toolkit/query'; -import { useDebounce } from 'use-debounce'; -import { useGetImageMetadataQuery } from 'services/api/endpoints/images'; import { useAppSelector } from 'app/store/storeHooks'; +import { useGetImageMetadataQuery } from 'services/api/endpoints/images'; +import { useDebounce } from 'use-debounce'; export const useDebouncedMetadata = (imageName?: string | null) => { const metadataFetchDebounce = useAppSelector( - (state) => state.config.metadataFetchDebounce + (state) => state.config.metadataFetchDebounce ?? 300 ); - const [debouncedImageName] = useDebounce( - imageName, - metadataFetchDebounce ?? 0 - ); + const [debouncedImageName] = useDebounce(imageName, metadataFetchDebounce); const { data: metadata, isLoading } = useGetImageMetadataQuery( debouncedImageName ?? skipToken diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 5a99f39e4e..aaa37d6370 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -100,7 +100,10 @@ export type paths = { get: operations["get_model_record"]; /** * Del Model Record - * @description Delete Model + * @description Delete model record from database. + * + * The configuration record will be removed. The corresponding weights files will be + * deleted as well if they reside within the InvokeAI "models" directory. */ delete: operations["del_model_record"]; /** @@ -116,6 +119,86 @@ export type paths = { */ post: operations["add_model_record"]; }; + "/api/v1/model/record/import": { + /** + * List Model Install Jobs + * @description Return list of model install jobs. 
+ *
+ * If the optional 'source' argument is provided, then the list will be filtered
+ * for partial string matches against the install source.
+ */
+ get: operations["list_model_install_jobs"];
+ /**
+ * Import Model
+ * @description Add a model using its local path, repo_id, or remote URL.
+ *
+ * Models will be downloaded, probed, configured and installed in a
+ * series of background threads. The return object has a `status` attribute
+ * that can be used to monitor progress.
+ *
+ * The source object is a discriminated union of LocalModelSource,
+ * HFModelSource and URLModelSource. Set the "type" field to the
+ * appropriate value:
+ *
+ * * To install a local path using LocalModelSource, pass a source of form:
+ * `{
+ * "type": "local",
+ * "path": "/path/to/model",
+ * "inplace": false
+ * }`
+ * The "inplace" flag, if true, will register the model in place in its
+ * current filesystem location. Otherwise, the model will be copied
+ * into the InvokeAI models directory.
+ *
+ * * To install a HuggingFace repo_id using HFModelSource, pass a source of form:
+ * `{
+ * "type": "hf",
+ * "repo_id": "stabilityai/stable-diffusion-2.0",
+ * "variant": "fp16",
+ * "subfolder": "vae",
+ * "access_token": "f5820a918aaf01"
+ * }`
+ * The `variant`, `subfolder` and `access_token` fields are optional.
+ *
+ * * To install a remote model using an arbitrary URL, pass:
+ * `{
+ * "type": "url",
+ * "url": "http://www.civitai.com/models/123456",
+ * "access_token": "f5820a918aaf01"
+ * }`
+ * The `access_token` field is optional.
+ *
+ * The model's configuration record will be probed and filled in
+ * automatically. To override the default guesses, pass "metadata"
+ * with a Dict containing the attributes you wish to override.
+ *
+ * Installation occurs in the background. Either use list_model_install_jobs()
+ * to poll for completion, or listen on the event bus for the following events:
+ *
+ * "model_install_started"
+ * "model_install_completed"
+ * "model_install_error"
+ *
+ * On successful completion, the event's payload will contain the field "key"
+ * containing the installed ID of the model. On an error, the event's payload
+ * will contain the fields "error_type" and "error" describing the nature of the
+ * error and its traceback, respectively.
+ */
+ post: operations["import_model_record"];
+ /**
+ * Prune Model Install Jobs
+ * @description Prune all completed and errored jobs from the install job list.
+ */
+ patch: operations["prune_model_install_jobs"];
+ };
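Taken together, the import route supports a simple install-and-poll loop from a client. A rough sketch in TypeScript, not part of this diff: the base URL assumes the server's documented default of localhost:9090, and the payload mirrors the HFModelSource example above.

const INSTALL_URL = 'http://localhost:9090/api/v1/model/record/import'; // assumed default host/port

const installAndWait = async () => {
  // POST kicks off a background install job; the route answers 201 on success.
  const res = await fetch(INSTALL_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      source: { type: 'hf', repo_id: 'stabilityai/stable-diffusion-2.0', variant: 'fp16' },
    }),
  });
  if (res.status !== 201) {
    throw new Error(`import failed with HTTP ${res.status}`);
  }

  // GET on the same route returns the list of ModelInstallJob records; poll until settled.
  for (;;) {
    const jobs: { status: string }[] = await (await fetch(INSTALL_URL)).json();
    if (jobs.every((j) => j.status === 'completed' || j.status === 'error')) {
      return jobs;
    }
    await new Promise((resolve) => setTimeout(resolve, 1000)); // wait 1s between polls
  }
};

Polling is only one of the two options described above; the same progress information is pushed over the event bus as "model_install_started", "model_install_completed" and "model_install_error".

+ "/api/v1/model/record/sync": {
+ /**
+ * Sync Models To Config
+ * @description Traverse the models and autoimport directories. Model files without a corresponding
+ * record in the database are added. Orphan records without a models file are deleted.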
+ */ + patch: operations["sync_models_to_config"]; + }; "/api/v1/images/upload": { /** * Upload Image @@ -946,6 +1029,16 @@ export type components = { */ prediction_type?: ("v_prediction" | "epsilon" | "sample") | null; }; + /** Body_import_model_record */ + Body_import_model_record: { + /** Source */ + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + /** + * Config + * @description Dict of fields that override auto-probed values in the model config record, such as name, description and prediction_type + */ + config?: Record | null; + }; /** Body_merge_models */ Body_merge_models: { /** @description Model configuration */ @@ -1247,6 +1340,65 @@ export type components = { */ type: "infill_cv2"; }; + /** + * Calculate Image Tiles Even Split + * @description Calculate the coordinates and overlaps of tiles that cover a target image shape. + */ + CalculateImageTilesEvenSplitInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Image Width + * @description The image width, in pixels, to calculate tiles for. + * @default 1024 + */ + image_width?: number; + /** + * Image Height + * @description The image height, in pixels, to calculate tiles for. + * @default 1024 + */ + image_height?: number; + /** + * Num Tiles X + * @description Number of tiles to divide image into on the x axis + * @default 2 + */ + num_tiles_x?: number; + /** + * Num Tiles Y + * @description Number of tiles to divide image into on the y axis + * @default 2 + */ + num_tiles_y?: number; + /** + * Overlap Fraction + * @description Overlap between adjacent tiles as a fraction of the tile's dimensions (0-1) + * @default 0.25 + */ + overlap_fraction?: number; + /** + * type + * @default calculate_image_tiles_even_split + * @constant + */ + type: "calculate_image_tiles_even_split"; + }; /** * Calculate Image Tiles * @description Calculate the coordinates and overlaps of tiles that cover a target image shape. @@ -1306,6 +1458,65 @@ export type components = { */ type: "calculate_image_tiles"; }; + /** + * Calculate Image Tiles Minimum Overlap + * @description Calculate the coordinates and overlaps of tiles that cover a target image shape. + */ + CalculateImageTilesMinimumOverlapInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Image Width + * @description The image width, in pixels, to calculate tiles for. + * @default 1024 + */ + image_width?: number; + /** + * Image Height + * @description The image height, in pixels, to calculate tiles for. + * @default 1024 + */ + image_height?: number; + /** + * Tile Width + * @description The tile width, in pixels. + * @default 576 + */ + tile_width?: number; + /** + * Tile Height + * @description The tile height, in pixels. 
+ * @default 576 + */ + tile_height?: number; + /** + * Min Overlap + * @description Minimum overlap between adjacent tiles, in pixels. + * @default 128 + */ + min_overlap?: number; + /** + * type + * @default calculate_image_tiles_min_overlap + * @constant + */ + type: "calculate_image_tiles_min_overlap"; + }; /** CalculateImageTilesOutput */ CalculateImageTilesOutput: { /** @@ -3509,7 +3720,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["SchedulerInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | 
components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["LinearUIOutputInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["MaskCombineInvocation"]; + [key: string]: components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | 
components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ImageLerpInvocation"] | 
components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LinearUIOutputInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"]; }; /** * Edges @@ -3546,7 +3757,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["ModelLoaderOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["String2Output"] | components["schemas"]["ColorOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ControlOutput"] | 
components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["ConditioningCollectionOutput"]; + [key: string]: components["schemas"]["ImageOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["MetadataOutput"] 
| components["schemas"]["SeamlessModeOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["String2Output"]; }; /** * Errors @@ -3610,6 +3821,26 @@ export type components = { */ type: "graph_output"; }; + /** + * HFModelSource + * @description A HuggingFace repo_id, with optional variant and sub-folder. + */ + HFModelSource: { + /** Repo Id */ + repo_id: string; + /** Variant */ + variant?: string | null; + /** Subfolder */ + subfolder?: string | null; + /** Access Token */ + access_token?: string | null; + /** + * Type + * @default hf + * @constant + */ + type?: "hf"; + }; /** HTTPValidationError */ HTTPValidationError: { /** Detail */ @@ -4992,6 +5223,12 @@ export type components = { */ type: "infill_tile"; }; + /** + * InstallStatus + * @description State of an install job running in the background. + * @enum {string} + */ + InstallStatus: "waiting" | "running" | "completed" | "error"; /** * Integer Collection Primitive * @description A collection of integer primitive values @@ -5731,6 +5968,25 @@ export type components = { * @enum {string} */ LoRAModelFormat: "lycoris" | "diffusers"; + /** + * LocalModelSource + * @description A local file or directory path. + */ + LocalModelSource: { + /** Path */ + path: string; + /** + * Inplace + * @default false + */ + inplace?: boolean | null; + /** + * Type + * @default local + * @constant + */ + type?: "local"; + }; /** * LogLevel * @enum {integer} @@ -6267,9 +6523,17 @@ export type components = { * @description A list of tile images with tile properties. */ tiles_with_images?: components["schemas"]["TileWithImage"][]; + /** + * Blend Mode + * @description blending type Linear or Seam + * @default Seam + * @enum {string} + */ + blend_mode?: "Linear" | "Seam"; /** * Blend Amount * @description The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles. + * @default 32 */ blend_amount?: number; /** @@ -6517,6 +6781,54 @@ export type components = { /** @description Info to load submodel */ submodel?: components["schemas"]["SubModelType"] | null; }; + /** + * ModelInstallJob + * @description Object that tracks the current status of an install request. + */ + ModelInstallJob: { + /** + * @description Current status of install process + * @default waiting + */ + status?: components["schemas"]["InstallStatus"]; + /** + * Config In + * @description Configuration information (e.g. 'description') to apply to model. + */ + config_in?: Record; + /** + * Config Out + * @description After successful installation, this will hold the configuration object. 
+ */
+ config_out?: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"]) | (components["schemas"]["ONNXSD1Config"] | components["schemas"]["ONNXSD2Config"]) | (components["schemas"]["VaeDiffusersConfig"] | components["schemas"]["VaeCheckpointConfig"]) | (components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"]) | components["schemas"]["LoRAConfig"] | components["schemas"]["TextualInversionConfig"] | components["schemas"]["IPAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["T2IConfig"] | null;
+ /**
+ * Inplace
+ * @description Leave model in its current location; otherwise install under models directory
+ * @default false
+ */
+ inplace?: boolean;
+ /**
+ * Source
+ * @description Source (URL, repo_id, or local path) of model
+ */
+ source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ /**
+ * Local Path
+ * Format: path
+ * @description Path to locally-downloaded model; may be the same as the source
+ */
+ local_path: string;
+ /**
+ * Error Type
+ * @description Class name of the exception that led to status==ERROR
+ */
+ error_type?: string | null;
+ /**
+ * Error
+ * @description Error traceback
+ */
+ error?: string | null;
+ };
 /**
 * ModelLoaderOutput
 * @description Model loader output
@@ -9687,6 +9999,25 @@
 */
 type: "unet_output";
 };
+ /**
+ * URLModelSource
+ * @description A generic URL pointing to a checkpoint file.
+ */
+ URLModelSource: {
+ /**
+ * Url
+ * Format: uri
+ */
+ url: string;
+ /** Access Token */
+ access_token?: string | null;
+ /**
+ * Type
+ * @default generic_url
+ * @constant
+ */
+ type?: "generic_url";
+ };
 /** Upscaler */
 Upscaler: {
 /**
@@ -10200,6 +10531,15 @@
 * @enum {string}
 */
 invokeai__backend__model_manager__config__SchedulerPredictionType: "epsilon" | "v_prediction" | "sample";
+ /**
+ * Classification
+ * @description The feature classification of an Invocation.
+ * - `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation.
+ * - `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term.
+ * - `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.
+ * @enum {string}
+ */
+ Classification: "stable" | "beta" | "prototype";
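With InstallStatus and the error fields above, a client can fold a ModelInstallJob into a user-facing message with a small exhaustive switch. A sketch, illustrative only; InstallJobLike is a hypothetical subset of the ModelInstallJob shape:

type InstallStatus = 'waiting' | 'running' | 'completed' | 'error'; // mirrors the enum above

interface InstallJobLike {
  status?: InstallStatus;
  error_type?: string | null; // exception class name when status is 'error'
  error?: string | null; // traceback text when status is 'error'
}

const describeJob = (job: InstallJobLike): string => {
  switch (job.status ?? 'waiting') {
    case 'waiting':
    case 'running':
      return 'install in progress';
    case 'completed':
      return 'install finished; config_out holds the new model config';
    case 'error':
      return `install failed: ${job.error_type ?? 'unknown error'}`;
  }
};

 /**
 * FieldKind
 * @description The kind of field.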
@@ -10323,6 +10663,11 @@ export type components = { * @default null */ node_pack: string | null; + /** + * @description The node's classification + * @default stable + */ + classification: components["schemas"]["Classification"]; }; /** * UIType @@ -10352,54 +10697,54 @@ export type components = { * @enum {string} */ UIType: "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VAEModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_MainModel" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_IsIntermediate" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; - /** - * T2IAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - T2IAdapterModelFormat: "diffusers"; /** * StableDiffusion2ModelFormat * @description An enumeration. * @enum {string} */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * CLIPVisionModelFormat - * @description An enumeration. - * @enum {string} - */ - CLIPVisionModelFormat: "diffusers"; - /** - * IPAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - IPAdapterModelFormat: "invokeai"; /** * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; + /** + * IPAdapterModelFormat + * @description An enumeration. + * @enum {string} + */ + IPAdapterModelFormat: "invokeai"; + /** + * CLIPVisionModelFormat + * @description An enumeration. + * @enum {string} + */ + CLIPVisionModelFormat: "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * ControlNetModelFormat * @description An enumeration. * @enum {string} */ ControlNetModelFormat: "checkpoint" | "diffusers"; + /** + * T2IAdapterModelFormat + * @description An enumeration. 
+ * @enum {string}
+ */
+ T2IAdapterModelFormat: "diffusers";
 };
 responses: never;
 parameters: never;
@@ -10802,6 +11147,8 @@
 base_models?: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"][] | null;
 /** @description The type of model to get */
 model_type?: components["schemas"]["invokeai__backend__model_manager__config__ModelType"] | null;
+ /** @description Exact match on the name of the model */
+ model_name?: string | null;
 };
 };
 responses: {
@@ -10855,7 +11202,10 @@
 };
 /**
 * Del Model Record
- * @description Delete Model
+ * @description Delete model record from database.
+ *
+ * The configuration record will be removed. The corresponding weights files will be
+ * deleted as well if they reside within the InvokeAI "models" directory.
 */
 del_model_record: {
 parameters: {
@@ -10957,6 +11307,157 @@
 };
 };
 };
+ /**
+ * List Model Install Jobs
+ * @description Return list of model install jobs.
+ *
+ * If the optional 'source' argument is provided, then the list will be filtered
+ * for partial string matches against the install source.
+ */
+ list_model_install_jobs: {
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ content: {
+ "application/json": components["schemas"]["ModelInstallJob"][];
+ };
+ };
+ };
+ };
+ /**
+ * Import Model
+ * @description Add a model using its local path, repo_id, or remote URL.
+ *
+ * Models will be downloaded, probed, configured and installed in a
+ * series of background threads. The return object has a `status` attribute
+ * that can be used to monitor progress.
+ *
+ * The source object is a discriminated union of LocalModelSource,
+ * HFModelSource and URLModelSource. Set the "type" field to the
+ * appropriate value:
+ *
+ * * To install a local path using LocalModelSource, pass a source of form:
+ * `{
+ * "type": "local",
+ * "path": "/path/to/model",
+ * "inplace": false
+ * }`
+ * The "inplace" flag, if true, will register the model in place in its
+ * current filesystem location. Otherwise, the model will be copied
+ * into the InvokeAI models directory.
+ *
+ * * To install a HuggingFace repo_id using HFModelSource, pass a source of form:
+ * `{
+ * "type": "hf",
+ * "repo_id": "stabilityai/stable-diffusion-2.0",
+ * "variant": "fp16",
+ * "subfolder": "vae",
+ * "access_token": "f5820a918aaf01"
+ * }`
+ * The `variant`, `subfolder` and `access_token` fields are optional.
+ *
+ * * To install a remote model using an arbitrary URL, pass:
+ * `{
+ * "type": "url",
+ * "url": "http://www.civitai.com/models/123456",
+ * "access_token": "f5820a918aaf01"
+ * }`
+ * The `access_token` field is optional.
+ *
+ * The model's configuration record will be probed and filled in
+ * automatically. To override the default guesses, pass "metadata"
+ * with a Dict containing the attributes you wish to override.
+ *
+ * Installation occurs in the background. Either use list_model_install_jobs()
+ * to poll for completion, or listen on the event bus for the following events:
+ *
+ * "model_install_started"
+ * "model_install_completed"
+ * "model_install_error"
+ *
+ * On successful completion, the event's payload will contain the field "key"
+ * containing the installed ID of the model. On an error, the event's payload
+ * will contain the fields "error_type" and "error" describing the nature of the
+ * error and its traceback, respectively.
+ */ + import_model_record: { + requestBody: { + content: { + "application/json": components["schemas"]["Body_import_model_record"]; + }; + }; + responses: { + /** @description The model imported successfully */ + 201: { + content: { + "application/json": components["schemas"]["ModelInstallJob"]; + }; + }; + /** @description There is already a model corresponding to this path or repo_id */ + 409: { + content: never; + }; + /** @description Unrecognized file/folder format */ + 415: { + content: never; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + /** @description The model appeared to import successfully, but could not be found in the model manager */ + 424: { + content: never; + }; + }; + }; + /** + * Prune Model Install Jobs + * @description Prune all completed and errored jobs from the install job list. + */ + prune_model_install_jobs: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + /** @description All completed and errored jobs have been pruned */ + 204: { + content: never; + }; + /** @description Bad request */ + 400: { + content: never; + }; + }; + }; + /** + * Sync Models To Config + * @description Traverse the models and autoimport directories. Model files without a corresponding + * record in the database are added. Orphan records without a models file are deleted. + */ + sync_models_to_config: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + /** @description Model config record database resynced with files on disk */ + 204: { + content: never; + }; + /** @description Bad request */ + 400: { + content: never; + }; + }; + }; /** * Upload Image * @description Uploads an image diff --git a/tests/backend/tiles/test_tiles.py b/tests/backend/tiles/test_tiles.py index 353e65d336..0b18f9ed54 100644 --- a/tests/backend/tiles/test_tiles.py +++ b/tests/backend/tiles/test_tiles.py @@ -1,7 +1,12 @@ import numpy as np import pytest -from invokeai.backend.tiles.tiles import calc_tiles_with_overlap, merge_tiles_with_linear_blending +from invokeai.backend.tiles.tiles import ( + calc_tiles_even_split, + calc_tiles_min_overlap, + calc_tiles_with_overlap, + merge_tiles_with_linear_blending, +) from invokeai.backend.tiles.utils import TBLR, Tile #################################### @@ -14,7 +19,10 @@ def test_calc_tiles_with_overlap_single_tile(): tiles = calc_tiles_with_overlap(image_height=512, image_width=1024, tile_height=512, tile_width=1024, overlap=64) expected_tiles = [ - Tile(coords=TBLR(top=0, bottom=512, left=0, right=1024), overlap=TBLR(top=0, bottom=0, left=0, right=0)) + Tile( + coords=TBLR(top=0, bottom=512, left=0, right=1024), + overlap=TBLR(top=0, bottom=0, left=0, right=0), + ) ] assert tiles == expected_tiles @@ -27,13 +35,31 @@ def test_calc_tiles_with_overlap_evenly_divisible(): expected_tiles = [ # Row 0 - Tile(coords=TBLR(top=0, bottom=320, left=0, right=576), overlap=TBLR(top=0, bottom=64, left=0, right=64)), - Tile(coords=TBLR(top=0, bottom=320, left=512, right=1088), overlap=TBLR(top=0, bottom=64, left=64, right=64)), - Tile(coords=TBLR(top=0, bottom=320, left=1024, right=1600), overlap=TBLR(top=0, bottom=64, left=64, right=0)), + Tile( + coords=TBLR(top=0, bottom=320, left=0, right=576), + overlap=TBLR(top=0, bottom=64, left=0, right=64), + ), + Tile( + coords=TBLR(top=0, bottom=320, left=512, right=1088), + overlap=TBLR(top=0, 
bottom=64, left=64, right=64), + ), + Tile( + coords=TBLR(top=0, bottom=320, left=1024, right=1600), + overlap=TBLR(top=0, bottom=64, left=64, right=0), + ), # Row 1 - Tile(coords=TBLR(top=256, bottom=576, left=0, right=576), overlap=TBLR(top=64, bottom=0, left=0, right=64)), - Tile(coords=TBLR(top=256, bottom=576, left=512, right=1088), overlap=TBLR(top=64, bottom=0, left=64, right=64)), - Tile(coords=TBLR(top=256, bottom=576, left=1024, right=1600), overlap=TBLR(top=64, bottom=0, left=64, right=0)), + Tile( + coords=TBLR(top=256, bottom=576, left=0, right=576), + overlap=TBLR(top=64, bottom=0, left=0, right=64), + ), + Tile( + coords=TBLR(top=256, bottom=576, left=512, right=1088), + overlap=TBLR(top=64, bottom=0, left=64, right=64), + ), + Tile( + coords=TBLR(top=256, bottom=576, left=1024, right=1600), + overlap=TBLR(top=64, bottom=0, left=64, right=0), + ), ] assert tiles == expected_tiles @@ -46,16 +72,30 @@ def test_calc_tiles_with_overlap_not_evenly_divisible(): expected_tiles = [ # Row 0 - Tile(coords=TBLR(top=0, bottom=256, left=0, right=512), overlap=TBLR(top=0, bottom=112, left=0, right=64)), - Tile(coords=TBLR(top=0, bottom=256, left=448, right=960), overlap=TBLR(top=0, bottom=112, left=64, right=272)), - Tile(coords=TBLR(top=0, bottom=256, left=688, right=1200), overlap=TBLR(top=0, bottom=112, left=272, right=0)), - # Row 1 - Tile(coords=TBLR(top=144, bottom=400, left=0, right=512), overlap=TBLR(top=112, bottom=0, left=0, right=64)), Tile( - coords=TBLR(top=144, bottom=400, left=448, right=960), overlap=TBLR(top=112, bottom=0, left=64, right=272) + coords=TBLR(top=0, bottom=256, left=0, right=512), + overlap=TBLR(top=0, bottom=112, left=0, right=64), ), Tile( - coords=TBLR(top=144, bottom=400, left=688, right=1200), overlap=TBLR(top=112, bottom=0, left=272, right=0) + coords=TBLR(top=0, bottom=256, left=448, right=960), + overlap=TBLR(top=0, bottom=112, left=64, right=272), + ), + Tile( + coords=TBLR(top=0, bottom=256, left=688, right=1200), + overlap=TBLR(top=0, bottom=112, left=272, right=0), + ), + # Row 1 + Tile( + coords=TBLR(top=144, bottom=400, left=0, right=512), + overlap=TBLR(top=112, bottom=0, left=0, right=64), + ), + Tile( + coords=TBLR(top=144, bottom=400, left=448, right=960), + overlap=TBLR(top=112, bottom=0, left=64, right=272), + ), + Tile( + coords=TBLR(top=144, bottom=400, left=688, right=1200), + overlap=TBLR(top=112, bottom=0, left=272, right=0), ), ] @@ -75,7 +115,12 @@ def test_calc_tiles_with_overlap_not_evenly_divisible(): ], ) def test_calc_tiles_with_overlap_input_validation( - image_height: int, image_width: int, tile_height: int, tile_width: int, overlap: int, raises: bool + image_height: int, + image_width: int, + tile_height: int, + tile_width: int, + overlap: int, + raises: bool, ): """Test that calc_tiles_with_overlap() raises an exception if the inputs are invalid.""" if raises: @@ -85,6 +130,306 @@ def test_calc_tiles_with_overlap_input_validation( calc_tiles_with_overlap(image_height, image_width, tile_height, tile_width, overlap) +#################################### +# Test calc_tiles_min_overlap(...) 
+#################################### + + +def test_calc_tiles_min_overlap_single_tile(): + """Test calc_tiles_min_overlap() behavior when a single tile covers the image.""" + tiles = calc_tiles_min_overlap( + image_height=512, + image_width=1024, + tile_height=512, + tile_width=1024, + min_overlap=64, + ) + + expected_tiles = [ + Tile( + coords=TBLR(top=0, bottom=512, left=0, right=1024), + overlap=TBLR(top=0, bottom=0, left=0, right=0), + ) + ] + + assert tiles == expected_tiles + + +def test_calc_tiles_min_overlap_evenly_divisible(): + """Test calc_tiles_min_overlap() behavior when the image is evenly covered by multiple tiles.""" + # Parameters mimic roughly the same output as the original tile generations of the same test name + tiles = calc_tiles_min_overlap( + image_height=576, + image_width=1600, + tile_height=320, + tile_width=576, + min_overlap=64, + ) + + expected_tiles = [ + # Row 0 + Tile( + coords=TBLR(top=0, bottom=320, left=0, right=576), + overlap=TBLR(top=0, bottom=64, left=0, right=64), + ), + Tile( + coords=TBLR(top=0, bottom=320, left=512, right=1088), + overlap=TBLR(top=0, bottom=64, left=64, right=64), + ), + Tile( + coords=TBLR(top=0, bottom=320, left=1024, right=1600), + overlap=TBLR(top=0, bottom=64, left=64, right=0), + ), + # Row 1 + Tile( + coords=TBLR(top=256, bottom=576, left=0, right=576), + overlap=TBLR(top=64, bottom=0, left=0, right=64), + ), + Tile( + coords=TBLR(top=256, bottom=576, left=512, right=1088), + overlap=TBLR(top=64, bottom=0, left=64, right=64), + ), + Tile( + coords=TBLR(top=256, bottom=576, left=1024, right=1600), + overlap=TBLR(top=64, bottom=0, left=64, right=0), + ), + ] + + assert tiles == expected_tiles + + +def test_calc_tiles_min_overlap_not_evenly_divisible(): + """Test calc_tiles_min_overlap() behavior when the image requires 'uneven' overlaps to achieve proper coverage.""" + # Parameters mimic roughly the same output as the original tile generations of the same test name + tiles = calc_tiles_min_overlap( + image_height=400, + image_width=1200, + tile_height=256, + tile_width=512, + min_overlap=64, + ) + + expected_tiles = [ + # Row 0 + Tile( + coords=TBLR(top=0, bottom=256, left=0, right=512), + overlap=TBLR(top=0, bottom=112, left=0, right=168), + ), + Tile( + coords=TBLR(top=0, bottom=256, left=344, right=856), + overlap=TBLR(top=0, bottom=112, left=168, right=168), + ), + Tile( + coords=TBLR(top=0, bottom=256, left=688, right=1200), + overlap=TBLR(top=0, bottom=112, left=168, right=0), + ), + # Row 1 + Tile( + coords=TBLR(top=144, bottom=400, left=0, right=512), + overlap=TBLR(top=112, bottom=0, left=0, right=168), + ), + Tile( + coords=TBLR(top=144, bottom=400, left=344, right=856), + overlap=TBLR(top=112, bottom=0, left=168, right=168), + ), + Tile( + coords=TBLR(top=144, bottom=400, left=688, right=1200), + overlap=TBLR(top=112, bottom=0, left=168, right=0), + ), + ] + + assert tiles == expected_tiles + + +@pytest.mark.parametrize( + [ + "image_height", + "image_width", + "tile_height", + "tile_width", + "min_overlap", + "raises", + ], + [ + (128, 128, 128, 128, 127, False), # OK + (128, 128, 128, 128, 0, False), # OK + (128, 128, 64, 64, 0, False), # OK + (128, 128, 129, 128, 0, False), # tile_height exceeds image_height defaults to 1 tile. + (128, 128, 128, 129, 0, False), # tile_width exceeds image_width defaults to 1 tile. + (128, 128, 64, 128, 64, True), # overlap equals tile_height. + (128, 128, 128, 64, 64, True), # overlap equals tile_width. 
+ ], +) +def test_calc_tiles_min_overlap_input_validation( + image_height: int, + image_width: int, + tile_height: int, + tile_width: int, + min_overlap: int, + raises: bool, +): + """Test that calc_tiles_min_overlap() raises an exception if the inputs are invalid.""" + if raises: + with pytest.raises(AssertionError): + calc_tiles_min_overlap(image_height, image_width, tile_height, tile_width, min_overlap) + else: + calc_tiles_min_overlap(image_height, image_width, tile_height, tile_width, min_overlap) + + +#################################### +# Test calc_tiles_even_split(...) +#################################### + + +def test_calc_tiles_even_split_single_tile(): + """Test calc_tiles_even_split() behavior when a single tile covers the image.""" + tiles = calc_tiles_even_split( + image_height=512, image_width=1024, num_tiles_x=1, num_tiles_y=1, overlap_fraction=0.25 + ) + + expected_tiles = [ + Tile( + coords=TBLR(top=0, bottom=512, left=0, right=1024), + overlap=TBLR(top=0, bottom=0, left=0, right=0), + ) + ] + + assert tiles == expected_tiles + + +def test_calc_tiles_even_split_evenly_divisible(): + """Test calc_tiles_even_split() behavior when the image is evenly covered by multiple tiles.""" + # Parameters mimic roughly the same output as the original tile generations of the same test name + tiles = calc_tiles_even_split( + image_height=576, image_width=1600, num_tiles_x=3, num_tiles_y=2, overlap_fraction=0.25 + ) + + expected_tiles = [ + # Row 0 + Tile( + coords=TBLR(top=0, bottom=320, left=0, right=624), + overlap=TBLR(top=0, bottom=72, left=0, right=136), + ), + Tile( + coords=TBLR(top=0, bottom=320, left=488, right=1112), + overlap=TBLR(top=0, bottom=72, left=136, right=136), + ), + Tile( + coords=TBLR(top=0, bottom=320, left=976, right=1600), + overlap=TBLR(top=0, bottom=72, left=136, right=0), + ), + # Row 1 + Tile( + coords=TBLR(top=248, bottom=576, left=0, right=624), + overlap=TBLR(top=72, bottom=0, left=0, right=136), + ), + Tile( + coords=TBLR(top=248, bottom=576, left=488, right=1112), + overlap=TBLR(top=72, bottom=0, left=136, right=136), + ), + Tile( + coords=TBLR(top=248, bottom=576, left=976, right=1600), + overlap=TBLR(top=72, bottom=0, left=136, right=0), + ), + ] + assert tiles == expected_tiles + + +def test_calc_tiles_even_split_not_evenly_divisible(): + """Test calc_tiles_even_split() behavior when the image requires 'uneven' overlaps to achieve proper coverage.""" + # Parameters mimic roughly the same output as the original tile generations of the same test name + tiles = calc_tiles_even_split( + image_height=400, image_width=1200, num_tiles_x=3, num_tiles_y=2, overlap_fraction=0.25 + ) + + expected_tiles = [ + # Row 0 + Tile( + coords=TBLR(top=0, bottom=224, left=0, right=464), + overlap=TBLR(top=0, bottom=56, left=0, right=104), + ), + Tile( + coords=TBLR(top=0, bottom=224, left=360, right=824), + overlap=TBLR(top=0, bottom=56, left=104, right=104), + ), + Tile( + coords=TBLR(top=0, bottom=224, left=720, right=1200), + overlap=TBLR(top=0, bottom=56, left=104, right=0), + ), + # Row 1 + Tile( + coords=TBLR(top=168, bottom=400, left=0, right=464), + overlap=TBLR(top=56, bottom=0, left=0, right=104), + ), + Tile( + coords=TBLR(top=168, bottom=400, left=360, right=824), + overlap=TBLR(top=56, bottom=0, left=104, right=104), + ), + Tile( + coords=TBLR(top=168, bottom=400, left=720, right=1200), + overlap=TBLR(top=56, bottom=0, left=104, right=0), + ), + ] + + assert tiles == expected_tiles + + +def test_calc_tiles_even_split_difficult_size(): + """Test 
calc_tiles_even_split() behavior when the image is a difficult size to split evenly and keep div8."""
+    # Parameters are a difficult size for other tile gen routines to calculate
+    tiles = calc_tiles_even_split(
+        image_height=1000, image_width=1000, num_tiles_x=2, num_tiles_y=2, overlap_fraction=0.25
+    )
+
+    expected_tiles = [
+        # Row 0
+        Tile(
+            coords=TBLR(top=0, bottom=560, left=0, right=560),
+            overlap=TBLR(top=0, bottom=128, left=0, right=128),
+        ),
+        Tile(
+            coords=TBLR(top=0, bottom=560, left=432, right=1000),
+            overlap=TBLR(top=0, bottom=128, left=128, right=0),
+        ),
+        # Row 1
+        Tile(
+            coords=TBLR(top=432, bottom=1000, left=0, right=560),
+            overlap=TBLR(top=128, bottom=0, left=0, right=128),
+        ),
+        Tile(
+            coords=TBLR(top=432, bottom=1000, left=432, right=1000),
+            overlap=TBLR(top=128, bottom=0, left=128, right=0),
+        ),
+    ]
+
+    assert tiles == expected_tiles
+
+
+@pytest.mark.parametrize(
+    ["image_height", "image_width", "num_tiles_x", "num_tiles_y", "overlap_fraction", "raises"],
+    [
+        (128, 128, 1, 1, 0.25, False),  # OK
+        (128, 128, 1, 1, 0, False),  # OK
+        (128, 128, 2, 1, 0, False),  # OK
+        (127, 127, 1, 1, 0, True),  # image size must be divisible by 8
+    ],
+)
+def test_calc_tiles_even_split_input_validation(
+    image_height: int,
+    image_width: int,
+    num_tiles_x: int,
+    num_tiles_y: int,
+    overlap_fraction: float,
+    raises: bool,
+):
+    """Test that calc_tiles_even_split() raises an exception if the inputs are invalid."""
+    if raises:
+        with pytest.raises(ValueError):
+            calc_tiles_even_split(image_height, image_width, num_tiles_x, num_tiles_y, overlap_fraction)
+    else:
+        calc_tiles_even_split(image_height, image_width, num_tiles_x, num_tiles_y, overlap_fraction)
+
+
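The "difficult size" case is worth unpacking: 1000 px does not split into two 0.25-overlap tiles on clean boundaries, so the routine appears to round the overlap up to a multiple of 8 (here 128 px) and let the last tile extend to the image edge. A quick invariant check over those expected values, sketched in TypeScript for illustration and not part of the test suite:

type TBLR = { top: number; bottom: number; left: number; right: number };

// Row 0 of the expected tiles for the 1000x1000, 2x2, 0.25 case above.
const row0: TBLR[] = [
  { top: 0, bottom: 560, left: 0, right: 560 },
  { top: 0, bottom: 560, left: 432, right: 1000 },
];

// Neighbouring tiles overlap by 560 - 432 = 128 px, a multiple of 8 ("div8").
const overlap = row0[0].right - row0[1].left;
console.assert(overlap === 128 && overlap % 8 === 0);

// The row spans the full 1000 px width with no gap between tiles.
console.assert(row0[0].left === 0 && row0[1].right === 1000 && row0[1].left <= row0[0].right);

#############################################
# Test merge_tiles_with_linear_blending(...)
#############################################
@@ -95,8 +440,14 @@ def test_merge_tiles_with_linear_blending_horizontal(blend_amount: int):
     """Test merge_tiles_with_linear_blending(...) behavior when merging horizontally."""
     # Initialize 2 tiles side-by-side.
     tiles = [
-        Tile(coords=TBLR(top=0, bottom=512, left=0, right=512), overlap=TBLR(top=0, bottom=0, left=0, right=64)),
-        Tile(coords=TBLR(top=0, bottom=512, left=448, right=960), overlap=TBLR(top=0, bottom=0, left=64, right=0)),
+        Tile(
+            coords=TBLR(top=0, bottom=512, left=0, right=512),
+            overlap=TBLR(top=0, bottom=0, left=0, right=64),
+        ),
+        Tile(
+            coords=TBLR(top=0, bottom=512, left=448, right=960),
+            overlap=TBLR(top=0, bottom=0, left=64, right=0),
+        ),
     ]
 
     dst_image = np.zeros((512, 960, 3), dtype=np.uint8)
@@ -116,7 +467,10 @@
     expected_output[:, 480 + (blend_amount // 2) :, :] = 128
 
     merge_tiles_with_linear_blending(
-        dst_image=dst_image, tiles=tiles, tile_images=tile_images, blend_amount=blend_amount
+        dst_image=dst_image,
+        tiles=tiles,
+        tile_images=tile_images,
+        blend_amount=blend_amount,
     )
 
     np.testing.assert_array_equal(dst_image, expected_output, strict=True)
@@ -127,8 +481,14 @@ def test_merge_tiles_with_linear_blending_vertical(blend_amount: int):
     """Test merge_tiles_with_linear_blending(...) behavior when merging vertically."""
     # Initialize 2 tiles stacked vertically.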
tiles = [ - Tile(coords=TBLR(top=0, bottom=512, left=0, right=512), overlap=TBLR(top=0, bottom=64, left=0, right=0)), - Tile(coords=TBLR(top=448, bottom=960, left=0, right=512), overlap=TBLR(top=64, bottom=0, left=0, right=0)), + Tile( + coords=TBLR(top=0, bottom=512, left=0, right=512), + overlap=TBLR(top=0, bottom=64, left=0, right=0), + ), + Tile( + coords=TBLR(top=448, bottom=960, left=0, right=512), + overlap=TBLR(top=64, bottom=0, left=0, right=0), + ), ] dst_image = np.zeros((960, 512, 3), dtype=np.uint8) @@ -148,7 +508,10 @@ def test_merge_tiles_with_linear_blending_vertical(blend_amount: int): expected_output[480 + (blend_amount // 2) :, :, :] = 128 merge_tiles_with_linear_blending( - dst_image=dst_image, tiles=tiles, tile_images=tile_images, blend_amount=blend_amount + dst_image=dst_image, + tiles=tiles, + tile_images=tile_images, + blend_amount=blend_amount, ) np.testing.assert_array_equal(dst_image, expected_output, strict=True) @@ -160,8 +523,14 @@ def test_merge_tiles_with_linear_blending_blend_amount_exceeds_vertical_overlap( """ # Initialize 2 tiles stacked vertically. tiles = [ - Tile(coords=TBLR(top=0, bottom=512, left=0, right=512), overlap=TBLR(top=0, bottom=64, left=0, right=0)), - Tile(coords=TBLR(top=448, bottom=960, left=0, right=512), overlap=TBLR(top=64, bottom=0, left=0, right=0)), + Tile( + coords=TBLR(top=0, bottom=512, left=0, right=512), + overlap=TBLR(top=0, bottom=64, left=0, right=0), + ), + Tile( + coords=TBLR(top=448, bottom=960, left=0, right=512), + overlap=TBLR(top=64, bottom=0, left=0, right=0), + ), ] dst_image = np.zeros((960, 512, 3), dtype=np.uint8) @@ -180,8 +549,14 @@ def test_merge_tiles_with_linear_blending_blend_amount_exceeds_horizontal_overla """ # Initialize 2 tiles side-by-side. tiles = [ - Tile(coords=TBLR(top=0, bottom=512, left=0, right=512), overlap=TBLR(top=0, bottom=0, left=0, right=64)), - Tile(coords=TBLR(top=0, bottom=512, left=448, right=960), overlap=TBLR(top=0, bottom=0, left=64, right=0)), + Tile( + coords=TBLR(top=0, bottom=512, left=0, right=512), + overlap=TBLR(top=0, bottom=0, left=0, right=64), + ), + Tile( + coords=TBLR(top=0, bottom=512, left=448, right=960), + overlap=TBLR(top=0, bottom=0, left=64, right=0), + ), ] dst_image = np.zeros((512, 960, 3), dtype=np.uint8) @@ -198,7 +573,12 @@ def test_merge_tiles_with_linear_blending_tiles_overflow_dst_image(): """Test that merge_tiles_with_linear_blending(...) raises an exception if any of the tiles overflows the dst_image. """ - tiles = [Tile(coords=TBLR(top=0, bottom=512, left=0, right=512), overlap=TBLR(top=0, bottom=0, left=0, right=0))] + tiles = [ + Tile( + coords=TBLR(top=0, bottom=512, left=0, right=512), + overlap=TBLR(top=0, bottom=0, left=0, right=0), + ) + ] dst_image = np.zeros((256, 512, 3), dtype=np.uint8) @@ -213,7 +593,12 @@ def test_merge_tiles_with_linear_blending_mismatched_list_lengths(): """Test that merge_tiles_with_linear_blending(...) raises an exception if the lengths of 'tiles' and 'tile_images' do not match. """ - tiles = [Tile(coords=TBLR(top=0, bottom=512, left=0, right=512), overlap=TBLR(top=0, bottom=0, left=0, right=0))] + tiles = [ + Tile( + coords=TBLR(top=0, bottom=512, left=0, right=512), + overlap=TBLR(top=0, bottom=0, left=0, right=0), + ) + ] dst_image = np.zeros((256, 512, 3), dtype=np.uint8)