Merge branch 'main' into refactor/model-manager-3
This commit is contained in: commit d7f7fbc8c2

.github/workflows/lint-frontend.yml (vendored): 28 changed lines
@@ -21,13 +21,23 @@ jobs:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-22.04
     steps:
-      - name: Setup Node 18
-        uses: actions/setup-node@v3
+      - name: Setup Node 20
+        uses: actions/setup-node@v4
         with:
-          node-version: '18'
-      - uses: actions/checkout@v3
-      - run: 'yarn install --frozen-lockfile'
-      - run: 'yarn run lint:tsc'
-      - run: 'yarn run lint:madge'
-      - run: 'yarn run lint:eslint'
-      - run: 'yarn run lint:prettier'
+          node-version: '20'
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v2
+        with:
+          version: 8
+      - name: Install dependencies
+        run: 'pnpm install --prefer-frozen-lockfile'
+      - name: Typescript
+        run: 'pnpm run lint:tsc'
+      - name: Madge
+        run: 'pnpm run lint:madge'
+      - name: ESLint
+        run: 'pnpm run lint:eslint'
+      - name: Prettier
+        run: 'pnpm run lint:prettier'
@@ -125,8 +125,8 @@ and go to http://localhost:9090.

 You must have Python 3.10 through 3.11 installed on your machine. Earlier or
 later versions are not supported.
-Node.js also needs to be installed along with yarn (can be installed with
-the command `npm install -g yarn` if needed)
+Node.js also needs to be installed along with `pnpm` (can be installed with
+the command `npm install -g pnpm` if needed)

 1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
 2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
@@ -14,6 +14,10 @@ To use a community workflow, download the the `.json` node graph file and load i

 - Community Nodes
     + [Average Images](#average-images)
+    + [Clean Image Artifacts After Cut](#clean-image-artifacts-after-cut)
+    + [Close Color Mask](#close-color-mask)
+    + [Clothing Mask](#clothing-mask)
+    + [Contrast Limited Adaptive Histogram Equalization](#contrast-limited-adaptive-histogram-equalization)
     + [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
     + [Film Grain](#film-grain)
     + [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
@@ -22,16 +26,22 @@ To use a community workflow, download the the `.json` node graph file and load i
     + [Halftone](#halftone)
     + [Ideal Size](#ideal-size)
     + [Image and Mask Composition Pack](#image-and-mask-composition-pack)
+    + [Image Dominant Color](#image-dominant-color)
     + [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
     + [Image Picker](#image-picker)
+    + [Image Resize Plus](#image-resize-plus)
     + [Load Video Frame](#load-video-frame)
     + [Make 3D](#make-3d)
+    + [Mask Operations](#mask-operations)
     + [Match Histogram](#match-histogram)
+    + [Negative Image](#negative-image)
     + [Oobabooga](#oobabooga)
     + [Prompt Tools](#prompt-tools)
     + [Remote Image](#remote-image)
+    + [Remove Background](#remove-background)
     + [Retroize](#retroize)
     + [Size Stepper Nodes](#size-stepper-nodes)
+    + [Simple Skin Detection](#simple-skin-detection)
     + [Text font to Image](#text-font-to-image)
     + [Thresholding](#thresholding)
     + [Unsharp Mask](#unsharp-mask)
@@ -48,6 +58,46 @@ To use a community workflow, download the the `.json` node graph file and load i

 **Node Link:** https://github.com/JPPhoto/average-images-node

+--------------------------------
+### Clean Image Artifacts After Cut
+
+Description: Removes residual artifacts after an image is separated from its background.
+
+Node Link: https://github.com/VeyDlin/clean-artifact-after-cut-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/clean-artifact-after-cut-node/master/.readme/node.png" width="500" />
+
+--------------------------------
+### Close Color Mask
+
+Description: Generates a mask for images based on a closely matching color, useful for color-based selections.
+
+Node Link: https://github.com/VeyDlin/close-color-mask-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/close-color-mask-node/master/.readme/node.png" width="500" />
+
+--------------------------------
+### Clothing Mask
+
+Description: Employs a U2NET neural network trained for the segmentation of clothing items in images.
+
+Node Link: https://github.com/VeyDlin/clothing-mask-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/clothing-mask-node/master/.readme/node.png" width="500" />
+
+--------------------------------
+### Contrast Limited Adaptive Histogram Equalization
+
+Description: Enhances local image contrast using adaptive histogram equalization with contrast limiting.
+
+Node Link: https://github.com/VeyDlin/clahe-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/clahe-node/master/.readme/node.png" width="500" />
+
 --------------------------------
 ### Depth Map from Wavefront OBJ

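For readers unfamiliar with CLAHE, here is a minimal OpenCV sketch of the technique the new entry above wraps (illustrative only, not the linked node's code; the file names and parameters are placeholders):

```python
import cv2

# Equalize only the lightness channel so colours are not shifted.
img = cv2.imread("input.png")
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l_chan, a_chan, b_chan = cv2.split(lab)

# clipLimit is the "contrast limiting"; tileGridSize gives the local tiles.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
merged = cv2.merge((clahe.apply(l_chan), a_chan, b_chan))

cv2.imwrite("clahe.png", cv2.cvtColor(merged, cv2.COLOR_LAB2BGR))
```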
@@ -164,6 +214,16 @@ This includes 15 Nodes:

 </br><img src="https://raw.githubusercontent.com/dwringer/composition-nodes/main/composition_pack_overview.jpg" width="500" />

+--------------------------------
+### Image Dominant Color
+
+Description: Identifies and extracts the dominant color from an image using k-means clustering.
+
+Node Link: https://github.com/VeyDlin/image-dominant-color-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/image-dominant-color-node/master/.readme/node.png" width="500" />
+
 --------------------------------
 ### Image to Character Art Image Nodes

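The dominant-colour entry above mentions k-means clustering. A rough, self-contained sketch of that idea (plain NumPy Lloyd iterations, not the node's implementation; downscale large images first for speed):

```python
import numpy as np
from PIL import Image

def dominant_color(path: str, k: int = 5, iters: int = 10) -> tuple[int, int, int]:
    # Flatten the image into an (N, 3) array of RGB samples.
    pixels = np.asarray(Image.open(path).convert("RGB")).reshape(-1, 3).astype(float)
    rng = np.random.default_rng(0)
    centers = pixels[rng.choice(len(pixels), size=k, replace=False)]
    for _ in range(iters):  # plain Lloyd iterations
        dists = ((pixels[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)
        labels = dists.argmin(axis=1)
        for i in range(k):
            if (labels == i).any():
                centers[i] = pixels[labels == i].mean(axis=0)
    # The dominant colour is the centroid of the largest cluster.
    counts = np.bincount(labels, minlength=k)
    return tuple(int(c) for c in centers[counts.argmax()])
```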
@@ -185,6 +245,17 @@ This includes 15 Nodes:

 **Node Link:** https://github.com/JPPhoto/image-picker-node

+--------------------------------
+### Image Resize Plus
+
+Description: Provides various image resizing options such as fill, stretch, fit, center, and crop.
+
+Node Link: https://github.com/VeyDlin/image-resize-plus-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/image-resize-plus-node/master/.readme/node.png" width="500" />
+
+
 --------------------------------
 ### Load Video Frame

@@ -209,6 +280,16 @@ This includes 15 Nodes:
 <img src="https://gitlab.com/srcrr/shift3d/-/raw/main/example-1.png" width="300" />
 <img src="https://gitlab.com/srcrr/shift3d/-/raw/main/example-2.png" width="300" />

+--------------------------------
+### Mask Operations
+
+Description: Offers logical operations (OR, SUB, AND) for combining and manipulating image masks.
+
+Node Link: https://github.com/VeyDlin/mask-operations-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/mask-operations-node/master/.readme/node.png" width="500" />
+
 --------------------------------
 ### Match Histogram

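The Mask Operations entry above combines masks with OR, SUB, and AND. A minimal NumPy/Pillow sketch of what those operations mean (illustrative, not the linked node's code; assumes two same-size single-channel masks where white means selected):

```python
import numpy as np
from PIL import Image

def combine_masks(mask_a: Image.Image, mask_b: Image.Image, op: str) -> Image.Image:
    a = np.asarray(mask_a.convert("L")) > 127  # boolean selection for mask A
    b = np.asarray(mask_b.convert("L")) > 127  # boolean selection for mask B
    if op == "OR":
        out = a | b      # union of the two selections
    elif op == "AND":
        out = a & b      # intersection
    elif op == "SUB":
        out = a & ~b     # A with B carved out
    else:
        raise ValueError(f"unknown op: {op}")
    return Image.fromarray((out * 255).astype(np.uint8), mode="L")
```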
@@ -226,6 +307,16 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai

 <img src="https://github.com/skunkworxdark/match_histogram/assets/21961335/ed12f329-a0ef-444a-9bae-129ed60d6097" width="300" />

+--------------------------------
+### Negative Image
+
+Description: Creates a negative version of an image, effective for visual effects and mask inversion.
+
+Node Link: https://github.com/VeyDlin/negative-image-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/negative-image-node/master/.readme/node.png" width="500" />
+
 --------------------------------
 ### Oobabooga

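Conceptually, the negation described above is a one-liner in Pillow (sketch only; file names are placeholders, and the linked node likely adds mask-friendly handling on top):

```python
from PIL import Image, ImageOps

# Invert every channel; for a mask this swaps selected and unselected regions.
ImageOps.invert(Image.open("input.png").convert("RGB")).save("negative.png")
```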
@@ -289,6 +380,15 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai

 **Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image

+--------------------------------
+### Remove Background
+
+Description: An integration of the rembg package to remove backgrounds from images using multiple U2NET models.
+
+Node Link: https://github.com/VeyDlin/remove-background-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/remove-background-node/master/.readme/node.png" width="500" />

 --------------------------------
 ### Retroize
@@ -301,6 +401,17 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai

 <img src="https://github.com/Ar7ific1al/InvokeAI_nodes_retroize/assets/2306586/de8b4fa6-324c-4c2d-b36c-297600c73974" width="500" />

+--------------------------------
+### Simple Skin Detection
+
+Description: Detects skin in images based on predefined color thresholds.
+
+Node Link: https://github.com/VeyDlin/simple-skin-detection-node
+
+View:
+</br><img src="https://raw.githubusercontent.com/VeyDlin/simple-skin-detection-node/master/.readme/node.png" width="500" />
+
+
 --------------------------------
 ### Size Stepper Nodes

@@ -386,6 +497,7 @@ See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/edit/main/READ

 <img src="https://github.com/skunkworxdark/XYGrid_nodes/blob/main/images/collage.png" width="300" />

+
 --------------------------------
 ### Example Node Template

@@ -2,7 +2,6 @@

 from logging import Logger

-from invokeai.app.services.workflow_image_records.workflow_image_records_sqlite import SqliteWorkflowImageRecordsStorage
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.version.invokeai_version import __version__

@@ -31,7 +30,7 @@ from ..services.session_processor.session_processor_default import DefaultSessio
 from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue
 from ..services.shared.default_graphs import create_system_graphs
 from ..services.shared.graph import GraphExecutionState, LibraryGraph
-from ..services.shared.sqlite import SqliteDatabase
+from ..services.shared.sqlite.sqlite_database import SqliteDatabase
 from ..services.urls.urls_default import LocalUrlService
 from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
 from .events import FastAPIEventService
@@ -98,7 +97,6 @@ class ApiDependencies:
         session_processor = DefaultSessionProcessor()
         session_queue = SqliteSessionQueue(db=db)
         urls = LocalUrlService()
-        workflow_image_records = SqliteWorkflowImageRecordsStorage(db=db)
         workflow_records = SqliteWorkflowRecordsStorage(db=db)

         services = InvocationServices(
@@ -126,14 +124,12 @@ class ApiDependencies:
             session_processor=session_processor,
             session_queue=session_queue,
             urls=urls,
-            workflow_image_records=workflow_image_records,
             workflow_records=workflow_records,
         )

         create_system_graphs(services.graph_library)

         ApiDependencies.invoker = Invoker(services)

         db.clean()

     @staticmethod
@@ -8,10 +8,11 @@ from fastapi.routing import APIRouter
 from PIL import Image
 from pydantic import BaseModel, Field, ValidationError

-from invokeai.app.invocations.baseinvocation import MetadataField, MetadataFieldValidator, WorkflowFieldValidator
+from invokeai.app.invocations.baseinvocation import MetadataField, MetadataFieldValidator
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin
 from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID, WorkflowWithoutIDValidator

 from ..dependencies import ApiDependencies

@@ -73,7 +74,7 @@ async def upload_image(
         workflow_raw = pil_image.info.get("invokeai_workflow", None)
         if workflow_raw is not None:
             try:
-                workflow = WorkflowFieldValidator.validate_json(workflow_raw)
+                workflow = WorkflowWithoutIDValidator.validate_json(workflow_raw)
             except ValidationError:
                 ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image")
                 pass
@@ -184,6 +185,18 @@ async def get_image_metadata(
         raise HTTPException(status_code=404)


+@images_router.get(
+    "/i/{image_name}/workflow", operation_id="get_image_workflow", response_model=Optional[WorkflowWithoutID]
+)
+async def get_image_workflow(
+    image_name: str = Path(description="The name of image whose workflow to get"),
+) -> Optional[WorkflowWithoutID]:
+    try:
+        return ApiDependencies.invoker.services.images.get_workflow(image_name)
+    except Exception:
+        raise HTTPException(status_code=404)
+
+
 @images_router.api_route(
     "/i/{image_name}/full",
     methods=["GET", "HEAD"],
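The upload handler above looks the workflow up under the `invokeai_workflow` key of `Image.info`. A stand-alone sketch of how such a payload is written into and read back from PNG metadata (the payload contents here are made up):

```python
import json

from PIL import Image
from PIL.PngImagePlugin import PngInfo

workflow_json = json.dumps({"name": "demo workflow", "nodes": [], "edges": []})

pnginfo = PngInfo()
pnginfo.add_text("invokeai_workflow", workflow_json)  # same key the router checks
Image.new("RGB", (64, 64)).save("with_workflow.png", pnginfo=pnginfo)

reread = Image.open("with_workflow.png")
workflow_raw = reread.info.get("invokeai_workflow", None)  # same lookup as upload_image
print(json.loads(workflow_raw)["name"])  # -> demo workflow
```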
@@ -1,7 +1,19 @@
-from fastapi import APIRouter, Path
+from typing import Optional
+
+from fastapi import APIRouter, Body, HTTPException, Path, Query

 from invokeai.app.api.dependencies import ApiDependencies
-from invokeai.app.invocations.baseinvocation import WorkflowField
+from invokeai.app.services.shared.pagination import PaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
+from invokeai.app.services.workflow_records.workflow_records_common import (
+    Workflow,
+    WorkflowCategory,
+    WorkflowNotFoundError,
+    WorkflowRecordDTO,
+    WorkflowRecordListItemDTO,
+    WorkflowRecordOrderBy,
+    WorkflowWithoutID,
+)

 workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"])

@@ -10,11 +22,76 @@ workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"])
     "/i/{workflow_id}",
     operation_id="get_workflow",
     responses={
-        200: {"model": WorkflowField},
+        200: {"model": WorkflowRecordDTO},
     },
 )
 async def get_workflow(
     workflow_id: str = Path(description="The workflow to get"),
-) -> WorkflowField:
+) -> WorkflowRecordDTO:
     """Gets a workflow"""
-    return ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+    try:
+        return ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+    except WorkflowNotFoundError:
+        raise HTTPException(status_code=404, detail="Workflow not found")
+
+
+@workflows_router.patch(
+    "/i/{workflow_id}",
+    operation_id="update_workflow",
+    responses={
+        200: {"model": WorkflowRecordDTO},
+    },
+)
+async def update_workflow(
+    workflow: Workflow = Body(description="The updated workflow", embed=True),
+) -> WorkflowRecordDTO:
+    """Updates a workflow"""
+    return ApiDependencies.invoker.services.workflow_records.update(workflow=workflow)
+
+
+@workflows_router.delete(
+    "/i/{workflow_id}",
+    operation_id="delete_workflow",
+)
+async def delete_workflow(
+    workflow_id: str = Path(description="The workflow to delete"),
+) -> None:
+    """Deletes a workflow"""
+    ApiDependencies.invoker.services.workflow_records.delete(workflow_id)
+
+
+@workflows_router.post(
+    "/",
+    operation_id="create_workflow",
+    responses={
+        200: {"model": WorkflowRecordDTO},
+    },
+)
+async def create_workflow(
+    workflow: WorkflowWithoutID = Body(description="The workflow to create", embed=True),
+) -> WorkflowRecordDTO:
+    """Creates a workflow"""
+    return ApiDependencies.invoker.services.workflow_records.create(workflow=workflow)
+
+
+@workflows_router.get(
+    "/",
+    operation_id="list_workflows",
+    responses={
+        200: {"model": PaginatedResults[WorkflowRecordListItemDTO]},
+    },
+)
+async def list_workflows(
+    page: int = Query(default=0, description="The page to get"),
+    per_page: int = Query(default=10, description="The number of workflows per page"),
+    order_by: WorkflowRecordOrderBy = Query(
+        default=WorkflowRecordOrderBy.Name, description="The attribute to order by"
+    ),
+    direction: SQLiteDirection = Query(default=SQLiteDirection.Ascending, description="The direction to order by"),
+    category: WorkflowCategory = Query(default=WorkflowCategory.User, description="The category of workflow to get"),
+    query: Optional[str] = Query(default=None, description="The text to query by (matches name and description)"),
+) -> PaginatedResults[WorkflowRecordListItemDTO]:
+    """Gets a page of workflows"""
+    return ApiDependencies.invoker.services.workflow_records.get_many(
+        page=page, per_page=per_page, order_by=order_by, direction=direction, query=query, category=category
+    )
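Several of the new endpoints above declare their body parameter with `Body(..., embed=True)`, which nests the JSON payload under the parameter name. A minimal, self-contained FastAPI sketch of that behaviour (hypothetical model, not InvokeAI's):

```python
from fastapi import Body, FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel

app = FastAPI()

class Workflow(BaseModel):
    name: str

@app.post("/v1/workflows/")
async def create_workflow(workflow: Workflow = Body(..., embed=True)) -> Workflow:
    return workflow

client = TestClient(app)
# Because embed=True, the body must nest the model under the parameter name "workflow".
resp = client.post("/v1/workflows/", json={"workflow": {"name": "demo"}})
assert resp.json() == {"name": "demo"}
```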
@@ -4,6 +4,7 @@ from __future__ import annotations

 import inspect
 import re
+import warnings
 from abc import ABC, abstractmethod
 from enum import Enum
 from inspect import signature
@@ -16,6 +17,7 @@ from pydantic.fields import FieldInfo, _Unset
 from pydantic_core import PydanticUndefined

 from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
 from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.app.util.metaenum import MetaEnum
 from invokeai.app.util.misc import uuid_string
@@ -452,6 +454,7 @@ class InvocationContext:
     queue_id: str
     queue_item_id: int
     queue_batch_id: str
+    workflow: Optional[WorkflowWithoutID]

     def __init__(
         self,
@@ -460,12 +463,14 @@ class InvocationContext:
         queue_item_id: int,
         queue_batch_id: str,
         graph_execution_state_id: str,
+        workflow: Optional[WorkflowWithoutID],
     ):
         self.services = services
         self.graph_execution_state_id = graph_execution_state_id
         self.queue_id = queue_id
         self.queue_item_id = queue_item_id
         self.queue_batch_id = queue_batch_id
+        self.workflow = workflow


 class BaseInvocationOutput(BaseModel):
@@ -705,8 +710,10 @@ class _Model(BaseModel):
     pass


-# Get all pydantic model attrs, methods, etc
-RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}
+with warnings.catch_warnings():
+    warnings.simplefilter("ignore", category=DeprecationWarning)
+    # Get all pydantic model attrs, methods, etc
+    RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}


 def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:
@@ -807,9 +814,9 @@ def invocation(
         cls.UIConfig.category = category

         # Grab the node pack's name from the module name, if it's a custom node
-        module_name = cls.__module__.split(".")[0]
-        if module_name.endswith(CUSTOM_NODE_PACK_SUFFIX):
-            cls.UIConfig.node_pack = module_name.split(CUSTOM_NODE_PACK_SUFFIX)[0]
+        is_custom_node = cls.__module__.rsplit(".", 1)[0] == "invokeai.app.invocations"
+        if is_custom_node:
+            cls.UIConfig.node_pack = cls.__module__.split(".")[0]
         else:
             cls.UIConfig.node_pack = None

@@ -903,24 +910,6 @@ def invocation_output(
     return wrapper


-class WorkflowField(RootModel):
-    """
-    Pydantic model for workflows with custom root of type dict[str, Any].
-    Workflows are stored without a strict schema.
-    """
-
-    root: dict[str, Any] = Field(description="The workflow")
-
-
-WorkflowFieldValidator = TypeAdapter(WorkflowField)
-
-
-class WithWorkflow(BaseModel):
-    workflow: Optional[WorkflowField] = Field(
-        default=None, description=FieldDescriptions.workflow, json_schema_extra={"field_kind": FieldKind.NodeAttribute}
-    )
-
-
 class MetadataField(RootModel):
     """
     Pydantic model for metadata with custom root of type dict[str, Any].
@@ -943,3 +932,13 @@ class WithMetadata(BaseModel):
             orig_required=False,
         ).model_dump(exclude_none=True),
     )
+
+
+class WithWorkflow:
+    workflow = None
+
+    def __init_subclass__(cls) -> None:
+        logger.warn(
+            f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow."
+        )
+        super().__init_subclass__()
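The `warnings.catch_warnings()` block introduced above keeps pydantic's DeprecationWarnings quiet while `inspect.getmembers()` touches every attribute of the probe model. A stdlib-only sketch of the same pattern (the demo class and its warning are made up):

```python
import inspect
import warnings

class _Demo:
    @property
    def old_attr(self):
        # Stand-in for a deprecated pydantic member that warns when accessed.
        warnings.warn("old_attr is deprecated", DeprecationWarning)
        return 42

with warnings.catch_warnings():
    warnings.simplefilter("ignore", category=DeprecationWarning)
    # getmembers() reads every attribute, so deprecated ones would normally warn here.
    members = {name for name, _ in inspect.getmembers(_Demo())}

print("old_attr" in members)  # True, and no DeprecationWarning escaped the block
```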
@@ -39,7 +39,6 @@ from .baseinvocation import (
     InvocationContext,
     OutputField,
     WithMetadata,
-    WithWorkflow,
     invocation,
     invocation_output,
 )
@@ -129,7 +128,7 @@ class ControlNetInvocation(BaseInvocation):


 # This invocation exists for other invocations to subclass it - do not register with @invocation!
-class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithWorkflow):
+class ImageProcessorInvocation(BaseInvocation, WithMetadata):
     """Base class for invocations that preprocess images for ControlNet"""

     image: ImageField = InputField(description="The image to process")
@@ -153,7 +152,7 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithWorkflow):
             node_id=self.id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         """Builds an ImageOutput and its ImageField"""
@@ -173,7 +172,7 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithWorkflow):
     title="Canny Processor",
     tags=["controlnet", "canny"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class CannyImageProcessorInvocation(ImageProcessorInvocation):
     """Canny edge detection for ControlNet"""
@@ -196,7 +195,7 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
     title="HED (softedge) Processor",
     tags=["controlnet", "hed", "softedge"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class HedImageProcessorInvocation(ImageProcessorInvocation):
     """Applies HED edge detection to image"""
@@ -225,7 +224,7 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
     title="Lineart Processor",
     tags=["controlnet", "lineart"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class LineartImageProcessorInvocation(ImageProcessorInvocation):
     """Applies line art processing to image"""
@@ -247,7 +246,7 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
     title="Lineart Anime Processor",
     tags=["controlnet", "lineart", "anime"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
     """Applies line art anime processing to image"""
@@ -270,7 +269,7 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
     title="Openpose Processor",
     tags=["controlnet", "openpose", "pose"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
     """Applies Openpose processing to image"""
@@ -295,7 +294,7 @@ class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
     title="Midas Depth Processor",
     tags=["controlnet", "midas"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
     """Applies Midas depth processing to image"""
@@ -322,7 +321,7 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
     title="Normal BAE Processor",
     tags=["controlnet"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
     """Applies NormalBae processing to image"""
@@ -339,7 +338,7 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):


 @invocation(
-    "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.1.0"
+    "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.0"
 )
 class MlsdImageProcessorInvocation(ImageProcessorInvocation):
     """Applies MLSD processing to image"""
@@ -362,7 +361,7 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):


 @invocation(
-    "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.1.0"
+    "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.0"
 )
 class PidiImageProcessorInvocation(ImageProcessorInvocation):
     """Applies PIDI processing to image"""
@@ -389,7 +388,7 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
     title="Content Shuffle Processor",
     tags=["controlnet", "contentshuffle"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
     """Applies content shuffle processing to image"""
@@ -419,7 +418,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
     title="Zoe (Depth) Processor",
     tags=["controlnet", "zoe", "depth"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
     """Applies Zoe depth processing to image"""
@@ -435,7 +434,7 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
     title="Mediapipe Face Processor",
     tags=["controlnet", "mediapipe", "face"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
     """Applies mediapipe face processing to image"""
@@ -458,7 +457,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
     title="Leres (Depth) Processor",
     tags=["controlnet", "leres", "depth"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class LeresImageProcessorInvocation(ImageProcessorInvocation):
     """Applies leres processing to image"""
@@ -487,7 +486,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
     title="Tile Resample Processor",
     tags=["controlnet", "tile"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class TileResamplerProcessorInvocation(ImageProcessorInvocation):
     """Tile resampler processor"""
@@ -527,7 +526,7 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
     title="Segment Anything Processor",
     tags=["controlnet", "segmentanything"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
     """Applies segment anything processing to image"""
@@ -569,7 +568,7 @@ class SamDetectorReproducibleColors(SamDetector):
     title="Color Map Processor",
     tags=["controlnet"],
     category="controlnet",
-    version="1.1.0",
+    version="1.2.0",
 )
 class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
     """Generates a color map from the provided image"""
@@ -6,7 +6,6 @@ import sys
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path

-from invokeai.app.invocations.baseinvocation import CUSTOM_NODE_PACK_SUFFIX
 from invokeai.backend.util.logging import InvokeAILogger

 logger = InvokeAILogger.get_logger()
@@ -34,7 +33,7 @@ for d in Path(__file__).parent.iterdir():
         continue

     # load the module, appending adding a suffix to identify it as a custom node pack
-    spec = spec_from_file_location(f"{module_name}{CUSTOM_NODE_PACK_SUFFIX}", init.absolute())
+    spec = spec_from_file_location(module_name, init.absolute())

     if spec is None or spec.loader is None:
         logger.warn(f"Could not load {init}")
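Custom node packs are now loaded under their plain directory name instead of with a suffix appended. A stand-alone sketch of the `spec_from_file_location` / `module_from_spec` pattern used above (it writes a throwaway pack so the example runs end to end):

```python
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

# Create a throwaway "node pack" so the example is runnable.
pack = Path("my_node_pack")
pack.mkdir(exist_ok=True)
(pack / "__init__.py").write_text("GREETING = 'hello from a custom node pack'\n")

init = pack / "__init__.py"
module_name = pack.name  # loaded under its own name, no suffix appended any more

spec = spec_from_file_location(module_name, init.absolute())
assert spec is not None and spec.loader is not None

module = module_from_spec(spec)
sys.modules[spec.name] = module      # register before executing the module body
spec.loader.exec_module(module)      # runs __init__.py (which would register invocations)
print(module.GREETING)
```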
@@ -8,11 +8,11 @@ from PIL import Image, ImageOps
 from invokeai.app.invocations.primitives import ImageField, ImageOutput
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin

-from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
+from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, invocation


-@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.1.0")
-class CvInpaintInvocation(BaseInvocation, WithMetadata, WithWorkflow):
+@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.2.0")
+class CvInpaintInvocation(BaseInvocation, WithMetadata):
     """Simple inpaint using opencv."""

     image: ImageField = InputField(description="The image to inpaint")
@@ -41,7 +41,7 @@ class CvInpaintInvocation(BaseInvocation, WithMetadata, WithWorkflow):
             node_id=self.id,
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
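For context, the OpenCV inpainting this invocation wraps boils down to a single call; a minimal sketch (illustrative only, not the invocation's full code; file names and the mask region are placeholders):

```python
import cv2
import numpy as np

img = cv2.imread("photo.png")
mask = np.zeros(img.shape[:2], dtype=np.uint8)
mask[100:150, 100:200] = 255  # white marks the region to repair

restored = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
cv2.imwrite("restored.png", restored)
```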
@@ -17,7 +17,6 @@ from invokeai.app.invocations.baseinvocation import (
     InvocationContext,
     OutputField,
     WithMetadata,
-    WithWorkflow,
     invocation,
     invocation_output,
 )
@@ -438,8 +437,8 @@ def get_faces_list(
     return all_faces


-@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.1.0")
-class FaceOffInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.2.0")
+class FaceOffInvocation(BaseInvocation, WithMetadata):
     """Bound, extract, and mask a face from an image using MediaPipe detection"""

     image: ImageField = InputField(description="Image for face detection")
@@ -508,7 +507,7 @@ class FaceOffInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             node_id=self.id,
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         mask_dto = context.services.images.create(
@@ -532,8 +531,8 @@ class FaceOffInvocation(BaseInvocation, WithWorkflow, WithMetadata):
         return output


-@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.1.0")
-class FaceMaskInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.2.0")
+class FaceMaskInvocation(BaseInvocation, WithMetadata):
     """Face mask creation using mediapipe face detection"""

     image: ImageField = InputField(description="Image to face detect")
@@ -627,7 +626,7 @@ class FaceMaskInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             node_id=self.id,
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         mask_dto = context.services.images.create(
@@ -650,9 +649,9 @@ class FaceMaskInvocation(BaseInvocation, WithWorkflow, WithMetadata):


 @invocation(
-    "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.1.0"
+    "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.2.0"
 )
-class FaceIdentifierInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+class FaceIdentifierInvocation(BaseInvocation, WithMetadata):
     """Outputs an image with detected face IDs printed on each face. For use with other FaceTools."""

     image: ImageField = InputField(description="Image to face detect")
@@ -716,7 +715,7 @@ class FaceIdentifierInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             node_id=self.id,
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
@@ -13,7 +13,7 @@ from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
 from invokeai.backend.image_util.safety_checker import SafetyChecker

-from .baseinvocation import BaseInvocation, Input, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
+from .baseinvocation import BaseInvocation, Input, InputField, InvocationContext, WithMetadata, invocation


 @invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0")
@@ -36,8 +36,14 @@ class ShowImageInvocation(BaseInvocation):
     )


-@invocation("blank_image", title="Blank Image", tags=["image"], category="image", version="1.1.0")
-class BlankImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
+@invocation(
+    "blank_image",
+    title="Blank Image",
+    tags=["image"],
+    category="image",
+    version="1.2.0",
+)
+class BlankImageInvocation(BaseInvocation, WithMetadata):
     """Creates a blank image and forwards it to the pipeline"""

     width: int = InputField(default=512, description="The width of the image")
@@ -56,7 +62,7 @@ class BlankImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
@@ -66,8 +72,14 @@ class BlankImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
     )


-@invocation("img_crop", title="Crop Image", tags=["image", "crop"], category="image", version="1.1.0")
-class ImageCropInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation(
+    "img_crop",
+    title="Crop Image",
+    tags=["image", "crop"],
+    category="image",
+    version="1.2.0",
+)
+class ImageCropInvocation(BaseInvocation, WithMetadata):
     """Crops an image to a specified box. The box can be outside of the image."""

     image: ImageField = InputField(description="The image to crop")
@@ -90,7 +102,7 @@ class ImageCropInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
@@ -155,8 +167,14 @@ class CenterPadCropInvocation(BaseInvocation):
     )


-@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.1.0")
-class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation(
+    "img_paste",
+    title="Paste Image",
+    tags=["image", "paste"],
+    category="image",
+    version="1.2.0",
+)
+class ImagePasteInvocation(BaseInvocation, WithMetadata):
     """Pastes an image into another image."""

     base_image: ImageField = InputField(description="The base image")
@@ -199,7 +217,7 @@ class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
@@ -209,8 +227,14 @@ class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata):
     )


-@invocation("tomask", title="Mask from Alpha", tags=["image", "mask"], category="image", version="1.1.0")
-class MaskFromAlphaInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation(
+    "tomask",
+    title="Mask from Alpha",
+    tags=["image", "mask"],
+    category="image",
+    version="1.2.0",
+)
+class MaskFromAlphaInvocation(BaseInvocation, WithMetadata):
     """Extracts the alpha channel of an image as a mask."""

     image: ImageField = InputField(description="The image to create the mask from")
@@ -231,7 +255,7 @@ class MaskFromAlphaInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
@@ -241,8 +265,14 @@ class MaskFromAlphaInvocation(BaseInvocation, WithWorkflow, WithMetadata):
     )


-@invocation("img_mul", title="Multiply Images", tags=["image", "multiply"], category="image", version="1.1.0")
-class ImageMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation(
+    "img_mul",
+    title="Multiply Images",
+    tags=["image", "multiply"],
+    category="image",
+    version="1.2.0",
+)
+class ImageMultiplyInvocation(BaseInvocation, WithMetadata):
     """Multiplies two images together using `PIL.ImageChops.multiply()`."""

     image1: ImageField = InputField(description="The first image to multiply")
@@ -262,7 +292,7 @@ class ImageMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
@@ -275,8 +305,14 @@ class ImageMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
 IMAGE_CHANNELS = Literal["A", "R", "G", "B"]


-@invocation("img_chan", title="Extract Image Channel", tags=["image", "channel"], category="image", version="1.1.0")
-class ImageChannelInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation(
+    "img_chan",
+    title="Extract Image Channel",
+    tags=["image", "channel"],
+    category="image",
+    version="1.2.0",
+)
+class ImageChannelInvocation(BaseInvocation, WithMetadata):
     """Gets a channel from an image."""

     image: ImageField = InputField(description="The image to get the channel from")
@@ -295,7 +331,7 @@ class ImageChannelInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
@@ -308,8 +344,14 @@ class ImageChannelInvocation(BaseInvocation, WithWorkflow, WithMetadata):
 IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]


-@invocation("img_conv", title="Convert Image Mode", tags=["image", "convert"], category="image", version="1.1.0")
-class ImageConvertInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation(
+    "img_conv",
+    title="Convert Image Mode",
+    tags=["image", "convert"],
+    category="image",
+    version="1.2.0",
+)
+class ImageConvertInvocation(BaseInvocation, WithMetadata):
     """Converts an image to a different mode."""

     image: ImageField = InputField(description="The image to convert")
@@ -328,7 +370,7 @@ class ImageConvertInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
@@ -338,8 +380,14 @@ class ImageConvertInvocation(BaseInvocation, WithWorkflow, WithMetadata):
     )


-@invocation("img_blur", title="Blur Image", tags=["image", "blur"], category="image", version="1.1.0")
-class ImageBlurInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation(
+    "img_blur",
+    title="Blur Image",
+    tags=["image", "blur"],
+    category="image",
+    version="1.2.0",
+)
+class ImageBlurInvocation(BaseInvocation, WithMetadata):
     """Blurs an image"""

     image: ImageField = InputField(description="The image to blur")
@@ -363,7 +411,7 @@ class ImageBlurInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )

         return ImageOutput(
@@ -393,8 +441,14 @@ PIL_RESAMPLING_MAP = {
 }


-@invocation("img_resize", title="Resize Image", tags=["image", "resize"], category="image", version="1.1.0")
-class ImageResizeInvocation(BaseInvocation, WithMetadata, WithWorkflow):
+@invocation(
+    "img_resize",
+    title="Resize Image",
+    tags=["image", "resize"],
+    category="image",
+    version="1.2.0",
+)
+class ImageResizeInvocation(BaseInvocation, WithMetadata):
     """Resizes an image to specific dimensions"""
|
"""Resizes an image to specific dimensions"""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to resize")
|
image: ImageField = InputField(description="The image to resize")
|
||||||
@ -420,7 +474,7 @@ class ImageResizeInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -430,8 +484,14 @@ class ImageResizeInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@invocation("img_scale", title="Scale Image", tags=["image", "scale"], category="image", version="1.1.0")
|
@invocation(
|
||||||
class ImageScaleInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
"img_scale",
|
||||||
|
title="Scale Image",
|
||||||
|
tags=["image", "scale"],
|
||||||
|
category="image",
|
||||||
|
version="1.2.0",
|
||||||
|
)
|
||||||
|
class ImageScaleInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Scales an image by a factor"""
|
"""Scales an image by a factor"""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to scale")
|
image: ImageField = InputField(description="The image to scale")
|
||||||
@ -462,7 +522,7 @@ class ImageScaleInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -472,8 +532,14 @@ class ImageScaleInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@invocation("img_lerp", title="Lerp Image", tags=["image", "lerp"], category="image", version="1.1.0")
|
@invocation(
|
||||||
class ImageLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
"img_lerp",
|
||||||
|
title="Lerp Image",
|
||||||
|
tags=["image", "lerp"],
|
||||||
|
category="image",
|
||||||
|
version="1.2.0",
|
||||||
|
)
|
||||||
|
class ImageLerpInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Linear interpolation of all pixels of an image"""
|
"""Linear interpolation of all pixels of an image"""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to lerp")
|
image: ImageField = InputField(description="The image to lerp")
|
||||||
@ -496,7 +562,7 @@ class ImageLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -506,8 +572,14 @@ class ImageLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@invocation("img_ilerp", title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", version="1.1.0")
|
@invocation(
|
||||||
class ImageInverseLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
"img_ilerp",
|
||||||
|
title="Inverse Lerp Image",
|
||||||
|
tags=["image", "ilerp"],
|
||||||
|
category="image",
|
||||||
|
version="1.2.0",
|
||||||
|
)
|
||||||
|
class ImageInverseLerpInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Inverse linear interpolation of all pixels of an image"""
|
"""Inverse linear interpolation of all pixels of an image"""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to lerp")
|
image: ImageField = InputField(description="The image to lerp")
|
||||||
@ -530,7 +602,7 @@ class ImageInverseLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -540,8 +612,14 @@ class ImageInverseLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@invocation("img_nsfw", title="Blur NSFW Image", tags=["image", "nsfw"], category="image", version="1.1.0")
|
@invocation(
|
||||||
class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
"img_nsfw",
|
||||||
|
title="Blur NSFW Image",
|
||||||
|
tags=["image", "nsfw"],
|
||||||
|
category="image",
|
||||||
|
version="1.2.0",
|
||||||
|
)
|
||||||
|
class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Add blur to NSFW-flagged images"""
|
"""Add blur to NSFW-flagged images"""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to check")
|
image: ImageField = InputField(description="The image to check")
|
||||||
@ -566,7 +644,7 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -587,9 +665,9 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
|||||||
title="Add Invisible Watermark",
|
title="Add Invisible Watermark",
|
||||||
tags=["image", "watermark"],
|
tags=["image", "watermark"],
|
||||||
category="image",
|
category="image",
|
||||||
version="1.1.0",
|
version="1.2.0",
|
||||||
)
|
)
|
||||||
class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
class ImageWatermarkInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Add an invisible watermark to an image"""
|
"""Add an invisible watermark to an image"""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to check")
|
image: ImageField = InputField(description="The image to check")
|
||||||
@ -606,7 +684,7 @@ class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -616,8 +694,14 @@ class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithWorkflow):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@invocation("mask_edge", title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", version="1.1.0")
|
@invocation(
|
||||||
class MaskEdgeInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
"mask_edge",
|
||||||
|
title="Mask Edge",
|
||||||
|
tags=["image", "mask", "inpaint"],
|
||||||
|
category="image",
|
||||||
|
version="1.2.0",
|
||||||
|
)
|
||||||
|
class MaskEdgeInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Applies an edge mask to an image"""
|
"""Applies an edge mask to an image"""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to apply the mask to")
|
image: ImageField = InputField(description="The image to apply the mask to")
|
||||||
@ -652,7 +736,7 @@ class MaskEdgeInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -667,9 +751,9 @@ class MaskEdgeInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
title="Combine Masks",
|
title="Combine Masks",
|
||||||
tags=["image", "mask", "multiply"],
|
tags=["image", "mask", "multiply"],
|
||||||
category="image",
|
category="image",
|
||||||
version="1.1.0",
|
version="1.2.0",
|
||||||
)
|
)
|
||||||
class MaskCombineInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
class MaskCombineInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`."""
|
"""Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`."""
|
||||||
|
|
||||||
mask1: ImageField = InputField(description="The first mask to combine")
|
mask1: ImageField = InputField(description="The first mask to combine")
|
||||||
@ -689,7 +773,7 @@ class MaskCombineInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -699,8 +783,14 @@ class MaskCombineInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@invocation("color_correct", title="Color Correct", tags=["image", "color"], category="image", version="1.1.0")
|
@invocation(
|
||||||
class ColorCorrectInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
"color_correct",
|
||||||
|
title="Color Correct",
|
||||||
|
tags=["image", "color"],
|
||||||
|
category="image",
|
||||||
|
version="1.2.0",
|
||||||
|
)
|
||||||
|
class ColorCorrectInvocation(BaseInvocation, WithMetadata):
|
||||||
"""
|
"""
|
||||||
Shifts the colors of a target image to match the reference image, optionally
|
Shifts the colors of a target image to match the reference image, optionally
|
||||||
using a mask to only color-correct certain regions of the target image.
|
using a mask to only color-correct certain regions of the target image.
|
||||||
@ -800,7 +890,7 @@ class ColorCorrectInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -810,8 +900,14 @@ class ColorCorrectInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@invocation("img_hue_adjust", title="Adjust Image Hue", tags=["image", "hue"], category="image", version="1.1.0")
|
@invocation(
|
||||||
class ImageHueAdjustmentInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
"img_hue_adjust",
|
||||||
|
title="Adjust Image Hue",
|
||||||
|
tags=["image", "hue"],
|
||||||
|
category="image",
|
||||||
|
version="1.2.0",
|
||||||
|
)
|
||||||
|
class ImageHueAdjustmentInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Adjusts the Hue of an image."""
|
"""Adjusts the Hue of an image."""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to adjust")
|
image: ImageField = InputField(description="The image to adjust")
|
||||||
@ -840,7 +936,7 @@ class ImageHueAdjustmentInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -913,9 +1009,9 @@ CHANNEL_FORMATS = {
|
|||||||
"value",
|
"value",
|
||||||
],
|
],
|
||||||
category="image",
|
category="image",
|
||||||
version="1.1.0",
|
version="1.2.0",
|
||||||
)
|
)
|
||||||
class ImageChannelOffsetInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Add or subtract a value from a specific color channel of an image."""
|
"""Add or subtract a value from a specific color channel of an image."""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to adjust")
|
image: ImageField = InputField(description="The image to adjust")
|
||||||
@ -950,7 +1046,7 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -984,9 +1080,9 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
"value",
|
"value",
|
||||||
],
|
],
|
||||||
category="image",
|
category="image",
|
||||||
version="1.1.0",
|
version="1.2.0",
|
||||||
)
|
)
|
||||||
class ImageChannelMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Scale a specific color channel of an image."""
|
"""Scale a specific color channel of an image."""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The image to adjust")
|
image: ImageField = InputField(description="The image to adjust")
|
||||||
@ -1025,7 +1121,7 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata)
|
|||||||
node_id=self.id,
|
node_id=self.id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -1043,10 +1139,10 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata)
|
|||||||
title="Save Image",
|
title="Save Image",
|
||||||
tags=["primitives", "image"],
|
tags=["primitives", "image"],
|
||||||
category="primitives",
|
category="primitives",
|
||||||
version="1.1.0",
|
version="1.2.0",
|
||||||
use_cache=False,
|
use_cache=False,
|
||||||
)
|
)
|
||||||
class SaveImageInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
class SaveImageInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Saves an image. Unlike an image primitive, this invocation stores a copy of the image."""
|
"""Saves an image. Unlike an image primitive, this invocation stores a copy of the image."""
|
||||||
|
|
||||||
image: ImageField = InputField(description=FieldDescriptions.image)
|
image: ImageField = InputField(description=FieldDescriptions.image)
|
||||||
@ -1064,7 +1160,7 @@ class SaveImageInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
session_id=context.graph_execution_state_id,
|
session_id=context.graph_execution_state_id,
|
||||||
is_intermediate=self.is_intermediate,
|
is_intermediate=self.is_intermediate,
|
||||||
metadata=self.metadata,
|
metadata=self.metadata,
|
||||||
workflow=self.workflow,
|
workflow=context.workflow,
|
||||||
)
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
@ -1082,7 +1178,7 @@ class SaveImageInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
|||||||
version="1.0.1",
|
version="1.0.1",
|
||||||
use_cache=False,
|
use_cache=False,
|
||||||
)
|
)
|
||||||
class LinearUIOutputInvocation(BaseInvocation, WithWorkflow, WithMetadata):
|
class LinearUIOutputInvocation(BaseInvocation, WithMetadata):
|
||||||
"""Handles Linear UI Image Outputting tasks."""
|
"""Handles Linear UI Image Outputting tasks."""
|
||||||
|
|
||||||
image: ImageField = InputField(description=FieldDescriptions.image)
|
image: ImageField = InputField(description=FieldDescriptions.image)
|
||||||
|
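The hunks above apply one repeated pattern to the image invocations: the `WithWorkflow` mixin is dropped, the `@invocation` decorator gets a minor version bump, and the saved image takes its workflow from the execution context (`workflow=context.workflow`) instead of from a field on the node. Below is a minimal sketch of a custom node written to that pattern. It is illustrative only: the node id, class name, and use of `get_pil_image`, `ResourceOrigin.INTERNAL`, and `ImageCategory.GENERAL` are assumptions not shown in these hunks, and the snippet only runs inside an InvokeAI checkout at this revision.

    from invokeai.app.invocations.baseinvocation import (
        BaseInvocation,
        InputField,
        InvocationContext,
        WithMetadata,
        invocation,
    )
    from invokeai.app.invocations.primitives import ImageField, ImageOutput
    from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin


    @invocation(
        "img_identity_example",  # hypothetical node id, not part of this diff
        title="Identity Image (example)",
        tags=["image"],
        category="image",
        version="1.0.0",
    )
    class IdentityImageInvocation(BaseInvocation, WithMetadata):
        """Example node: saves its input image unchanged, following the post-refactor pattern."""

        image: ImageField = InputField(description="The image to pass through")

        def invoke(self, context: InvocationContext) -> ImageOutput:
            image = context.services.images.get_pil_image(self.image.image_name)
            image_dto = context.services.images.create(
                image=image,
                image_origin=ResourceOrigin.INTERNAL,
                image_category=ImageCategory.GENERAL,
                node_id=self.id,
                session_id=context.graph_execution_state_id,
                is_intermediate=self.is_intermediate,
                metadata=self.metadata,
                # the workflow now comes from the execution context, not a WithWorkflow mixin
                workflow=context.workflow,
            )
            return ImageOutput(
                image=ImageField(image_name=image_dto.image_name),
                width=image_dto.width,
                height=image_dto.height,
            )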
@@ -13,7 +13,7 @@ from invokeai.backend.image_util.cv2_inpaint import cv2_inpaint
 from invokeai.backend.image_util.lama import LaMA
 from invokeai.backend.image_util.patchmatch import PatchMatch
 
-from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
+from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, invocation
 from .image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES
 
 
@@ -118,8 +118,8 @@ def tile_fill_missing(im: Image.Image, tile_size: int = 16, seed: Optional[int]
     return si
 
 
-@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0")
-class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.0")
+class InfillColorInvocation(BaseInvocation, WithMetadata):
     """Infills transparent areas of an image with a solid color"""
 
     image: ImageField = InputField(description="The image to infill")
@@ -144,7 +144,7 @@ class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )
 
         return ImageOutput(
@@ -154,8 +154,8 @@ class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata):
         )
 
 
-@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.1")
-class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1")
+class InfillTileInvocation(BaseInvocation, WithMetadata):
     """Infills transparent areas of an image with tiles of the image"""
 
     image: ImageField = InputField(description="The image to infill")
@@ -181,7 +181,7 @@ class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )
 
         return ImageOutput(
@@ -192,9 +192,9 @@ class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata):
 
 
 @invocation(
-    "infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0"
+    "infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.0"
 )
-class InfillPatchMatchInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+class InfillPatchMatchInvocation(BaseInvocation, WithMetadata):
     """Infills transparent areas of an image using the PatchMatch algorithm"""
 
     image: ImageField = InputField(description="The image to infill")
@@ -235,7 +235,7 @@ class InfillPatchMatchInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )
 
         return ImageOutput(
@@ -245,8 +245,8 @@ class InfillPatchMatchInvocation(BaseInvocation, WithWorkflow, WithMetadata):
         )
 
 
-@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0")
-class LaMaInfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.0")
+class LaMaInfillInvocation(BaseInvocation, WithMetadata):
     """Infills transparent areas of an image using the LaMa model"""
 
     image: ImageField = InputField(description="The image to infill")
@@ -264,7 +264,7 @@ class LaMaInfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )
 
         return ImageOutput(
@@ -274,8 +274,8 @@ class LaMaInfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
         )
 
 
-@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0")
-class CV2InfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.0")
+class CV2InfillInvocation(BaseInvocation, WithMetadata):
     """Infills transparent areas of an image using OpenCV Inpainting"""
 
     image: ImageField = InputField(description="The image to infill")
@@ -293,7 +293,7 @@ class CV2InfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )
 
         return ImageOutput(
@@ -64,7 +64,6 @@ from .baseinvocation import (
     OutputField,
     UIType,
     WithMetadata,
-    WithWorkflow,
     invocation,
     invocation_output,
 )
@@ -802,9 +801,9 @@ class DenoiseLatentsInvocation(BaseInvocation):
     title="Latents to Image",
     tags=["latents", "image", "vae", "l2i"],
     category="latents",
-    version="1.1.0",
+    version="1.2.0",
 )
-class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
+class LatentsToImageInvocation(BaseInvocation, WithMetadata):
     """Generates an image from latents."""
 
     latents: LatentsField = InputField(
@@ -886,7 +885,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )
 
         return ImageOutput(
@@ -31,7 +31,6 @@ from .baseinvocation import (
     UIComponent,
     UIType,
     WithMetadata,
-    WithWorkflow,
     invocation,
     invocation_output,
 )
@@ -326,9 +325,9 @@ class ONNXTextToLatentsInvocation(BaseInvocation):
     title="ONNX Latents to Image",
     tags=["latents", "image", "vae", "onnx"],
     category="image",
-    version="1.1.0",
+    version="1.2.0",
 )
-class ONNXLatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
+class ONNXLatentsToImageInvocation(BaseInvocation, WithMetadata):
    """Generates an image from latents."""
 
     latents: LatentsField = InputField(
@@ -378,7 +377,7 @@ class ONNXLatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )
 
         return ImageOutput(
@@ -9,7 +9,6 @@ from invokeai.app.invocations.baseinvocation import (
     InvocationContext,
     OutputField,
     WithMetadata,
-    WithWorkflow,
     invocation,
     invocation_output,
 )
@@ -122,8 +121,8 @@ class PairTileImageInvocation(BaseInvocation):
         )
 
 
-@invocation("merge_tiles_to_image", title="Merge Tiles to Image", tags=["tiles"], category="tiles", version="1.0.0")
-class MergeTilesToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
+@invocation("merge_tiles_to_image", title="Merge Tiles to Image", tags=["tiles"], category="tiles", version="1.1.0")
+class MergeTilesToImageInvocation(BaseInvocation, WithMetadata):
     """Merge multiple tile images into a single image."""
 
     # Inputs
@@ -172,7 +171,7 @@ class MergeTilesToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )
         return ImageOutput(
             image=ImageField(image_name=image_dto.image_name),
@@ -14,7 +14,7 @@ from invokeai.app.services.image_records.image_records_common import ImageCatego
 from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
 from invokeai.backend.util.devices import choose_torch_device
 
-from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
+from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, invocation
 
 # TODO: Populate this from disk?
 # TODO: Use model manager to load?
@@ -29,8 +29,8 @@ if choose_torch_device() == torch.device("mps"):
     from torch import mps
 
 
-@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.2.0")
-class ESRGANInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.3.0")
+class ESRGANInvocation(BaseInvocation, WithMetadata):
     """Upscales an image using RealESRGAN."""
 
     image: ImageField = InputField(description="The input image")
@@ -118,7 +118,7 @@ class ESRGANInvocation(BaseInvocation, WithWorkflow, WithMetadata):
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
             metadata=self.metadata,
-            workflow=self.workflow,
+            workflow=context.workflow,
         )
 
         return ImageOutput(
@@ -4,7 +4,7 @@ from typing import Optional, cast
 
 from invokeai.app.services.image_records.image_records_common import ImageRecord, deserialize_image_record
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.shared.sqlite import SqliteDatabase
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 
 from .board_image_records_base import BoardImageRecordStorageBase
 
@@ -3,7 +3,7 @@ import threading
 from typing import Union, cast
 
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.shared.sqlite import SqliteDatabase
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 from invokeai.app.util.misc import uuid_string
 
 from .board_records_base import BoardRecordStorageBase
@@ -4,7 +4,8 @@ from typing import Optional
 
 from PIL.Image import Image as PILImageType
 
-from invokeai.app.invocations.baseinvocation import MetadataField, WorkflowField
+from invokeai.app.invocations.baseinvocation import MetadataField
+from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
 
 
 class ImageFileStorageBase(ABC):
@@ -33,7 +34,7 @@ class ImageFileStorageBase(ABC):
         image: PILImageType,
         image_name: str,
         metadata: Optional[MetadataField] = None,
-        workflow: Optional[WorkflowField] = None,
+        workflow: Optional[WorkflowWithoutID] = None,
         thumbnail_size: int = 256,
     ) -> None:
         """Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image name, thumbnail name, and created timestamp."""
@@ -43,3 +44,8 @@ class ImageFileStorageBase(ABC):
     def delete(self, image_name: str) -> None:
         """Deletes an image and its thumbnail (if one exists)."""
         pass
+
+    @abstractmethod
+    def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
+        """Gets the workflow of an image."""
+        pass
@@ -7,8 +7,9 @@ from PIL import Image, PngImagePlugin
 from PIL.Image import Image as PILImageType
 from send2trash import send2trash
 
-from invokeai.app.invocations.baseinvocation import MetadataField, WorkflowField
+from invokeai.app.invocations.baseinvocation import MetadataField
 from invokeai.app.services.invoker import Invoker
+from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
 from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
 
 from .image_files_base import ImageFileStorageBase
@@ -56,7 +57,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
         image: PILImageType,
         image_name: str,
         metadata: Optional[MetadataField] = None,
-        workflow: Optional[WorkflowField] = None,
+        workflow: Optional[WorkflowWithoutID] = None,
         thumbnail_size: int = 256,
     ) -> None:
         try:
@@ -64,12 +65,19 @@ class DiskImageFileStorage(ImageFileStorageBase):
             image_path = self.get_path(image_name)
 
             pnginfo = PngImagePlugin.PngInfo()
+            info_dict = {}
 
             if metadata is not None:
-                pnginfo.add_text("invokeai_metadata", metadata.model_dump_json())
+                metadata_json = metadata.model_dump_json()
+                info_dict["invokeai_metadata"] = metadata_json
+                pnginfo.add_text("invokeai_metadata", metadata_json)
             if workflow is not None:
-                pnginfo.add_text("invokeai_workflow", workflow.model_dump_json())
+                workflow_json = workflow.model_dump_json()
+                info_dict["invokeai_workflow"] = workflow_json
+                pnginfo.add_text("invokeai_workflow", workflow_json)
 
+            # When saving the image, the image object's info field is not populated. We need to set it
+            image.info = info_dict
             image.save(
                 image_path,
                 "PNG",
@@ -121,6 +129,13 @@ class DiskImageFileStorage(ImageFileStorageBase):
         path = path if isinstance(path, Path) else Path(path)
         return path.exists()
 
+    def get_workflow(self, image_name: str) -> WorkflowWithoutID | None:
+        image = self.get(image_name)
+        workflow = image.info.get("invokeai_workflow", None)
+        if workflow is not None:
+            return WorkflowWithoutID.model_validate_json(workflow)
+        return None
+
     def __validate_storage_folders(self) -> None:
         """Checks if the required output folders exist and create them if they don't"""
         folders: list[Path] = [self.__output_folder, self.__thumbnails_folder]
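The `DiskImageFileStorage` changes above move workflow persistence into the PNG file itself: the workflow JSON is written as an `invokeai_workflow` tEXt chunk on save and read back from `image.info` in `get_workflow`. Below is a self-contained Pillow sketch of that round trip, using a dummy JSON payload rather than a real InvokeAI workflow.

    import json

    from PIL import Image, PngImagePlugin

    # Write: embed a JSON payload as a tEXt chunk, mirroring the save() hunk above.
    workflow_json = json.dumps({"name": "example workflow"})  # dummy payload for illustration
    pnginfo = PngImagePlugin.PngInfo()
    pnginfo.add_text("invokeai_workflow", workflow_json)

    image = Image.new("RGB", (64, 64), "white")
    image.save("example.png", "PNG", pnginfo=pnginfo)

    # Read: Pillow exposes tEXt chunks through the info dict, which is what get_workflow() relies on.
    reopened = Image.open("example.png")
    embedded = reopened.info.get("invokeai_workflow")
    print(json.loads(embedded))  # -> {'name': 'example workflow'}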
@@ -75,6 +75,7 @@ class ImageRecordStorageBase(ABC):
         image_category: ImageCategory,
         width: int,
         height: int,
+        has_workflow: bool,
         is_intermediate: Optional[bool] = False,
         starred: Optional[bool] = False,
         session_id: Optional[str] = None,
@@ -100,6 +100,7 @@ IMAGE_DTO_COLS = ", ".join(
         "height",
         "session_id",
         "node_id",
+        "has_workflow",
         "is_intermediate",
         "created_at",
         "updated_at",
@@ -145,6 +146,7 @@ class ImageRecord(BaseModelExcludeNull):
     """The node ID that generated this image, if it is a generated image."""
     starred: bool = Field(description="Whether this image is starred.")
     """Whether this image is starred."""
+    has_workflow: bool = Field(description="Whether this image has a workflow.")
 
 
 class ImageRecordChanges(BaseModelExcludeNull, extra="allow"):
@@ -188,6 +190,7 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord:
     deleted_at = image_dict.get("deleted_at", get_iso_timestamp())
     is_intermediate = image_dict.get("is_intermediate", False)
     starred = image_dict.get("starred", False)
+    has_workflow = image_dict.get("has_workflow", False)
 
     return ImageRecord(
         image_name=image_name,
@@ -202,4 +205,5 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord:
         deleted_at=deleted_at,
         is_intermediate=is_intermediate,
         starred=starred,
+        has_workflow=has_workflow,
     )
@@ -5,7 +5,7 @@ from typing import Optional, Union, cast
 
 from invokeai.app.invocations.baseinvocation import MetadataField, MetadataFieldValidator
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.shared.sqlite import SqliteDatabase
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 
 from .image_records_base import ImageRecordStorageBase
 from .image_records_common import (
@@ -117,6 +117,16 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
             """
         )
 
+        self._cursor.execute("PRAGMA table_info(images)")
+        columns = [column[1] for column in self._cursor.fetchall()]
+        if "has_workflow" not in columns:
+            self._cursor.execute(
+                """--sql
+                ALTER TABLE images
+                ADD COLUMN has_workflow BOOLEAN DEFAULT FALSE;
+                """
+            )
+
     def get(self, image_name: str) -> ImageRecord:
         try:
             self._lock.acquire()
@@ -408,6 +418,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
         image_category: ImageCategory,
         width: int,
         height: int,
+        has_workflow: bool,
         is_intermediate: Optional[bool] = False,
         starred: Optional[bool] = False,
         session_id: Optional[str] = None,
@@ -429,9 +440,10 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
                     session_id,
                     metadata,
                     is_intermediate,
-                    starred
+                    starred,
+                    has_workflow
                 )
-                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
+                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
                 """,
                 (
                     image_name,
@@ -444,6 +456,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
                     metadata_json,
                     is_intermediate,
                     starred,
+                    has_workflow,
                 ),
             )
             self._conn.commit()
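The `SqliteImageRecordStorage` hunks add a `has_workflow` column with an in-place migration: inspect `PRAGMA table_info(images)` and issue `ALTER TABLE ... ADD COLUMN` only when the column is missing, so the check is safe to run on every startup. A standalone `sqlite3` sketch of that idempotent pattern, using a throwaway table rather than the real schema:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    cursor = conn.cursor()
    cursor.execute("CREATE TABLE images (image_name TEXT PRIMARY KEY)")

    # Idempotent column add, mirroring the hunk above.
    cursor.execute("PRAGMA table_info(images)")
    columns = [row[1] for row in cursor.fetchall()]  # row[1] is the column name
    if "has_workflow" not in columns:
        cursor.execute("ALTER TABLE images ADD COLUMN has_workflow BOOLEAN DEFAULT FALSE")
    conn.commit()

    cursor.execute("PRAGMA table_info(images)")
    print([row[1] for row in cursor.fetchall()])  # -> ['image_name', 'has_workflow']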
@@ -3,7 +3,7 @@ from typing import Callable, Optional
 
 from PIL.Image import Image as PILImageType
 
-from invokeai.app.invocations.baseinvocation import MetadataField, WorkflowField
+from invokeai.app.invocations.baseinvocation import MetadataField
 from invokeai.app.services.image_records.image_records_common import (
     ImageCategory,
     ImageRecord,
@@ -12,6 +12,7 @@ from invokeai.app.services.image_records.image_records_common import (
 )
 from invokeai.app.services.images.images_common import ImageDTO
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
 
 
 class ImageServiceABC(ABC):
@@ -51,7 +52,7 @@ class ImageServiceABC(ABC):
         board_id: Optional[str] = None,
         is_intermediate: Optional[bool] = False,
         metadata: Optional[MetadataField] = None,
-        workflow: Optional[WorkflowField] = None,
+        workflow: Optional[WorkflowWithoutID] = None,
     ) -> ImageDTO:
         """Creates an image, storing the file and its metadata."""
         pass
@@ -85,6 +86,11 @@ class ImageServiceABC(ABC):
         """Gets an image's metadata."""
         pass
 
+    @abstractmethod
+    def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
+        """Gets an image's workflow."""
+        pass
+
     @abstractmethod
     def get_path(self, image_name: str, thumbnail: bool = False) -> str:
         """Gets an image's path."""
@@ -24,11 +24,6 @@ class ImageDTO(ImageRecord, ImageUrlsDTO):
         default=None, description="The id of the board the image belongs to, if one exists."
     )
     """The id of the board the image belongs to, if one exists."""
-    workflow_id: Optional[str] = Field(
-        default=None,
-        description="The workflow that generated this image.",
-    )
-    """The workflow that generated this image."""
 
 
 def image_record_to_dto(
@@ -36,7 +31,6 @@ def image_record_to_dto(
     image_url: str,
     thumbnail_url: str,
     board_id: Optional[str],
-    workflow_id: Optional[str],
 ) -> ImageDTO:
     """Converts an image record to an image DTO."""
     return ImageDTO(
@@ -44,5 +38,4 @@ def image_record_to_dto(
         image_url=image_url,
         thumbnail_url=thumbnail_url,
         board_id=board_id,
-        workflow_id=workflow_id,
     )
@@ -2,9 +2,10 @@ from typing import Optional
 
 from PIL.Image import Image as PILImageType
 
-from invokeai.app.invocations.baseinvocation import MetadataField, WorkflowField
+from invokeai.app.invocations.baseinvocation import MetadataField
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
 
 from ..image_files.image_files_common import (
     ImageFileDeleteException,
@@ -42,7 +43,7 @@ class ImageService(ImageServiceABC):
         board_id: Optional[str] = None,
         is_intermediate: Optional[bool] = False,
         metadata: Optional[MetadataField] = None,
-        workflow: Optional[WorkflowField] = None,
+        workflow: Optional[WorkflowWithoutID] = None,
     ) -> ImageDTO:
         if image_origin not in ResourceOrigin:
             raise InvalidOriginException
@@ -55,12 +56,6 @@ class ImageService(ImageServiceABC):
         (width, height) = image.size
 
         try:
-            if workflow is not None:
-                created_workflow = self.__invoker.services.workflow_records.create(workflow)
-                workflow_id = created_workflow.model_dump()["id"]
-            else:
-                workflow_id = None
-
             # TODO: Consider using a transaction here to ensure consistency between storage and database
             self.__invoker.services.image_records.save(
                 # Non-nullable fields
@@ -69,6 +64,7 @@ class ImageService(ImageServiceABC):
                 image_category=image_category,
                 width=width,
                 height=height,
+                has_workflow=workflow is not None,
                 # Meta fields
                 is_intermediate=is_intermediate,
                 # Nullable fields
@@ -78,8 +74,6 @@ class ImageService(ImageServiceABC):
            )
            if board_id is not None:
                 self.__invoker.services.board_image_records.add_image_to_board(board_id=board_id, image_name=image_name)
-            if workflow_id is not None:
-                self.__invoker.services.workflow_image_records.create(workflow_id=workflow_id, image_name=image_name)
             self.__invoker.services.image_files.save(
                 image_name=image_name, image=image, metadata=metadata, workflow=workflow
             )
@@ -143,7 +137,6 @@ class ImageService(ImageServiceABC):
                 image_url=self.__invoker.services.urls.get_image_url(image_name),
                 thumbnail_url=self.__invoker.services.urls.get_image_url(image_name, True),
                 board_id=self.__invoker.services.board_image_records.get_board_for_image(image_name),
-                workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(image_name),
             )
 
             return image_dto
@@ -164,18 +157,15 @@ class ImageService(ImageServiceABC):
             self.__invoker.services.logger.error("Problem getting image DTO")
             raise e
 
-    def get_workflow(self, image_name: str) -> Optional[WorkflowField]:
+    def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
         try:
-            workflow_id = self.__invoker.services.workflow_image_records.get_workflow_for_image(image_name)
-            if workflow_id is None:
-                return None
-            return self.__invoker.services.workflow_records.get(workflow_id)
-        except ImageRecordNotFoundException:
-            self.__invoker.services.logger.error("Image record not found")
+            return self.__invoker.services.image_files.get_workflow(image_name)
+        except ImageFileNotFoundException:
+            self.__invoker.services.logger.error("Image file not found")
+            raise
+        except Exception:
+            self.__invoker.services.logger.error("Problem getting image workflow")
             raise
-        except Exception as e:
-            self.__invoker.services.logger.error("Problem getting image DTO")
-            raise e
 
     def get_path(self, image_name: str, thumbnail: bool = False) -> str:
         try:
@@ -223,7 +213,6 @@ class ImageService(ImageServiceABC):
                 image_url=self.__invoker.services.urls.get_image_url(r.image_name),
                 thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True),
                 board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name),
-                workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name),
             )
             for r in results.items
         ]
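With the `workflow_image_records` lookups removed, `ImageService` no longer attaches a `workflow_id` to the DTO; callers get a `has_workflow` boolean and fetch the workflow from the image file on demand. A toy, dependency-free sketch of that lookup flow follows; the names and the dict-backed store are invented for illustration and are not part of the InvokeAI API.

    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class ImageDTOSketch:
        image_name: str
        has_workflow: bool  # replaces the removed workflow_id field


    # Stand-in for the PNG-backed file store: the workflow JSON lives with the image file.
    FAKE_FILE_STORE = {"cat.png": '{"name": "example workflow"}'}


    def get_workflow(image_name: str) -> Optional[str]:
        """Mimics the new ImageService.get_workflow: read the workflow from the file store, if any."""
        return FAKE_FILE_STORE.get(image_name)


    def describe(dto: ImageDTOSketch) -> str:
        # The boolean tells the client whether a fetch is worth making at all.
        if not dto.has_workflow:
            return f"{dto.image_name}: no workflow embedded"
        return f"{dto.image_name}: {get_workflow(dto.image_name)}"


    print(describe(ImageDTOSketch("cat.png", has_workflow=True)))
    print(describe(ImageDTOSketch("dog.png", has_workflow=False)))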
@@ -108,6 +108,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                     queue_item_id=queue_item.session_queue_item_id,
                     queue_id=queue_item.session_queue_id,
                     queue_batch_id=queue_item.session_queue_batch_id,
+                    workflow=queue_item.workflow,
                 )
             )
 
@@ -178,6 +179,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                         session_queue_item_id=queue_item.session_queue_item_id,
                        session_queue_id=queue_item.session_queue_id,
                        graph_execution_state=graph_execution_state,
+                        workflow=queue_item.workflow,
                         invoke_all=True,
                     )
                 except Exception as e:
@@ -1,9 +1,12 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

 import time
+from typing import Optional

 from pydantic import BaseModel, Field

+from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
+

 class InvocationQueueItem(BaseModel):
 graph_execution_state_id: str = Field(description="The ID of the graph execution state")
@@ -15,5 +18,6 @@ class InvocationQueueItem(BaseModel):
 session_queue_batch_id: str = Field(
 description="The ID of the session batch from which this invocation queue item came"
 )
+workflow: Optional[WorkflowWithoutID] = Field(description="The workflow associated with this queue item")
 invoke_all: bool = Field(default=False)
 timestamp: float = Field(default_factory=time.time)
@@ -29,7 +29,6 @@ if TYPE_CHECKING:
 from .session_queue.session_queue_base import SessionQueueBase
 from .shared.graph import GraphExecutionState, LibraryGraph
 from .urls.urls_base import UrlServiceBase
-from .workflow_image_records.workflow_image_records_base import WorkflowImageRecordsStorageBase
 from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase


@@ -61,7 +60,6 @@ class InvocationServices:
 invocation_cache: "InvocationCacheBase"
 names: "NameServiceBase"
 urls: "UrlServiceBase"
-workflow_image_records: "WorkflowImageRecordsStorageBase"
 workflow_records: "WorkflowRecordsStorageBase"

 def __init__(
@@ -90,7 +88,6 @@ class InvocationServices:
 invocation_cache: "InvocationCacheBase",
 names: "NameServiceBase",
 urls: "UrlServiceBase",
-workflow_image_records: "WorkflowImageRecordsStorageBase",
 workflow_records: "WorkflowRecordsStorageBase",
 ):
 self.board_images = board_images
@@ -117,5 +114,4 @@ class InvocationServices:
 self.invocation_cache = invocation_cache
 self.names = names
 self.urls = urls
-self.workflow_image_records = workflow_image_records
 self.workflow_records = workflow_records
@@ -2,6 +2,8 @@

 from typing import Optional

+from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
+
 from .invocation_queue.invocation_queue_common import InvocationQueueItem
 from .invocation_services import InvocationServices
 from .shared.graph import Graph, GraphExecutionState
@@ -22,6 +24,7 @@ class Invoker:
 session_queue_item_id: int,
 session_queue_batch_id: str,
 graph_execution_state: GraphExecutionState,
+workflow: Optional[WorkflowWithoutID] = None,
 invoke_all: bool = False,
 ) -> Optional[str]:
 """Determines the next node to invoke and enqueues it, preparing if needed.
@@ -43,6 +46,7 @@ class Invoker:
 session_queue_batch_id=session_queue_batch_id,
 graph_execution_state_id=graph_execution_state.id,
 invocation_id=invocation.id,
+workflow=workflow,
 invoke_all=invoke_all,
 )
 )
@@ -5,7 +5,7 @@ from typing import Generic, Optional, TypeVar, get_args
 from pydantic import BaseModel, TypeAdapter

 from invokeai.app.services.shared.pagination import PaginatedResults
-from invokeai.app.services.shared.sqlite import SqliteDatabase
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase

 from .item_storage_base import ItemStorageABC

@@ -52,7 +52,7 @@ from invokeai.backend.model_manager.config import (
 ModelType,
 )

-from ..shared.sqlite import SqliteDatabase
+from ..shared.sqlite.sqlite_database import SqliteDatabase
 from .model_records_base import (
 CONFIG_FILE_VERSION,
 DuplicateModelException,
@@ -114,6 +114,7 @@ class DefaultSessionProcessor(SessionProcessorBase):
 session_queue_id=queue_item.queue_id,
 session_queue_item_id=queue_item.item_id,
 graph_execution_state=queue_item.session,
+workflow=queue_item.workflow,
 invoke_all=True,
 )
 queue_item = None
@@ -8,6 +8,10 @@ from pydantic_core import to_jsonable_python

 from invokeai.app.invocations.baseinvocation import BaseInvocation
 from invokeai.app.services.shared.graph import Graph, GraphExecutionState, NodeNotFoundError
+from invokeai.app.services.workflow_records.workflow_records_common import (
+WorkflowWithoutID,
+WorkflowWithoutIDValidator,
+)
 from invokeai.app.util.misc import uuid_string

 # region Errors
@@ -66,6 +70,9 @@ class Batch(BaseModel):
 batch_id: str = Field(default_factory=uuid_string, description="The ID of the batch")
 data: Optional[BatchDataCollection] = Field(default=None, description="The batch data collection.")
 graph: Graph = Field(description="The graph to initialize the session with")
+workflow: Optional[WorkflowWithoutID] = Field(
+default=None, description="The workflow to initialize the session with"
+)
 runs: int = Field(
 default=1, ge=1, description="Int stating how many times to iterate through all possible batch indices"
 )
@@ -164,6 +171,14 @@ def get_session(queue_item_dict: dict) -> GraphExecutionState:
 return session


+def get_workflow(queue_item_dict: dict) -> Optional[WorkflowWithoutID]:
+workflow_raw = queue_item_dict.get("workflow", None)
+if workflow_raw is not None:
+workflow = WorkflowWithoutIDValidator.validate_json(workflow_raw, strict=False)
+return workflow
+return None
+
+
 class SessionQueueItemWithoutGraph(BaseModel):
 """Session queue item without the full graph. Used for serialization."""

@@ -213,12 +228,16 @@ class SessionQueueItemDTO(SessionQueueItemWithoutGraph):

 class SessionQueueItem(SessionQueueItemWithoutGraph):
 session: GraphExecutionState = Field(description="The fully-populated session to be executed")
+workflow: Optional[WorkflowWithoutID] = Field(
+default=None, description="The workflow associated with this queue item"
+)

 @classmethod
 def queue_item_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItem":
 # must parse these manually
 queue_item_dict["field_values"] = get_field_values(queue_item_dict)
 queue_item_dict["session"] = get_session(queue_item_dict)
+queue_item_dict["workflow"] = get_workflow(queue_item_dict)
 return SessionQueueItem(**queue_item_dict)

 model_config = ConfigDict(
@@ -334,7 +353,7 @@ def populate_graph(graph: Graph, node_field_values: Iterable[NodeFieldValue]) ->

 def create_session_nfv_tuples(
 batch: Batch, maximum: int
-) -> Generator[tuple[GraphExecutionState, list[NodeFieldValue]], None, None]:
+) -> Generator[tuple[GraphExecutionState, list[NodeFieldValue], Optional[WorkflowWithoutID]], None, None]:
 """
 Create all graph permutations from the given batch data and graph. Yields tuples
 of the form (graph, batch_data_items) where batch_data_items is the list of BatchDataItems
@@ -365,7 +384,7 @@ def create_session_nfv_tuples(
 return
 flat_node_field_values = list(chain.from_iterable(d))
 graph = populate_graph(batch.graph, flat_node_field_values)
-yield (GraphExecutionState(graph=graph), flat_node_field_values)
+yield (GraphExecutionState(graph=graph), flat_node_field_values, batch.workflow)
 count += 1


@@ -391,12 +410,14 @@ def calc_session_count(batch: Batch) -> int:
 class SessionQueueValueToInsert(NamedTuple):
 """A tuple of values to insert into the session_queue table"""

+# Careful with the ordering of this - it must match the insert statement
 queue_id: str # queue_id
 session: str # session json
 session_id: str # session_id
 batch_id: str # batch_id
 field_values: Optional[str] # field_values json
 priority: int # priority
+workflow: Optional[str] # workflow json


 ValuesToInsert: TypeAlias = list[SessionQueueValueToInsert]
@@ -404,7 +425,7 @@ ValuesToInsert: TypeAlias = list[SessionQueueValueToInsert]

 def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new_queue_items: int) -> ValuesToInsert:
 values_to_insert: ValuesToInsert = []
-for session, field_values in create_session_nfv_tuples(batch, max_new_queue_items):
+for session, field_values, workflow in create_session_nfv_tuples(batch, max_new_queue_items):
 # sessions must have unique id
 session.id = uuid_string()
 values_to_insert.append(
@@ -416,6 +437,7 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new
 # must use pydantic_encoder bc field_values is a list of models
 json.dumps(field_values, default=to_jsonable_python) if field_values else None, # field_values (json)
 priority, # priority
+json.dumps(workflow, default=to_jsonable_python) if workflow else None, # workflow (json)
 )
 )
 return values_to_insert
@@ -28,7 +28,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
 prepare_values_to_insert,
 )
 from invokeai.app.services.shared.pagination import CursorPaginatedResults
-from invokeai.app.services.shared.sqlite import SqliteDatabase
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase


 class SqliteSessionQueue(SessionQueueBase):
@@ -199,6 +199,15 @@ class SqliteSessionQueue(SessionQueueBase):
 """
 )

+self.__cursor.execute("PRAGMA table_info(session_queue)")
+columns = [column[1] for column in self.__cursor.fetchall()]
+if "workflow" not in columns:
+self.__cursor.execute(
+"""--sql
+ALTER TABLE session_queue ADD COLUMN workflow TEXT;
+"""
+)
+
 self.__conn.commit()
 except Exception:
 self.__conn.rollback()
@@ -281,8 +290,8 @@ class SqliteSessionQueue(SessionQueueBase):

 self.__cursor.executemany(
 """--sql
-INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority)
-VALUES (?, ?, ?, ?, ?, ?)
+INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow)
+VALUES (?, ?, ?, ?, ?, ?, ?)
 """,
 values_to_insert,
 )
10
invokeai/app/services/shared/sqlite/sqlite_common.py
Normal file
@@ -0,0 +1,10 @@
+from enum import Enum
+
+from invokeai.app.util.metaenum import MetaEnum
+
+sqlite_memory = ":memory:"
+
+
+class SQLiteDirection(str, Enum, metaclass=MetaEnum):
+Ascending = "ASC"
+Descending = "DESC"
|
@ -4,8 +4,7 @@ from logging import Logger
|
|||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
from invokeai.app.services.config import InvokeAIAppConfig
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.app.services.shared.sqlite.sqlite_common import sqlite_memory
|
||||||
sqlite_memory = ":memory:"
|
|
||||||
|
|
||||||
|
|
||||||
class SqliteDatabase:
|
class SqliteDatabase:
|
||||||
@ -32,19 +31,17 @@ class SqliteDatabase:
|
|||||||
self.conn.execute("PRAGMA foreign_keys = ON;")
|
self.conn.execute("PRAGMA foreign_keys = ON;")
|
||||||
|
|
||||||
def clean(self) -> None:
|
def clean(self) -> None:
|
||||||
try:
|
with self.lock:
|
||||||
if self.db_path == sqlite_memory:
|
try:
|
||||||
return
|
if self.db_path == sqlite_memory:
|
||||||
initial_db_size = Path(self.db_path).stat().st_size
|
return
|
||||||
self.lock.acquire()
|
initial_db_size = Path(self.db_path).stat().st_size
|
||||||
self.conn.execute("VACUUM;")
|
self.conn.execute("VACUUM;")
|
||||||
self.conn.commit()
|
self.conn.commit()
|
||||||
final_db_size = Path(self.db_path).stat().st_size
|
final_db_size = Path(self.db_path).stat().st_size
|
||||||
freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
|
freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
|
||||||
if freed_space_in_mb > 0:
|
if freed_space_in_mb > 0:
|
||||||
self._logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
|
self._logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self._logger.error(f"Error cleaning database: {e}")
|
self._logger.error(f"Error cleaning database: {e}")
|
||||||
raise e
|
raise
|
||||||
finally:
|
|
||||||
self.lock.release()
|
|
@@ -1,23 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import Optional
-
-
-class WorkflowImageRecordsStorageBase(ABC):
-"""Abstract base class for the one-to-many workflow-image relationship record storage."""
-
-@abstractmethod
-def create(
-self,
-workflow_id: str,
-image_name: str,
-) -> None:
-"""Creates a workflow-image record."""
-pass
-
-@abstractmethod
-def get_workflow_for_image(
-self,
-image_name: str,
-) -> Optional[str]:
-"""Gets an image's workflow id, if it has one."""
-pass
@@ -1,122 +0,0 @@
-import sqlite3
-import threading
-from typing import Optional, cast
-
-from invokeai.app.services.shared.sqlite import SqliteDatabase
-from invokeai.app.services.workflow_image_records.workflow_image_records_base import WorkflowImageRecordsStorageBase
-
-
-class SqliteWorkflowImageRecordsStorage(WorkflowImageRecordsStorageBase):
-"""SQLite implementation of WorkflowImageRecordsStorageBase."""
-
-_conn: sqlite3.Connection
-_cursor: sqlite3.Cursor
-_lock: threading.RLock
-
-def __init__(self, db: SqliteDatabase) -> None:
-super().__init__()
-self._lock = db.lock
-self._conn = db.conn
-self._cursor = self._conn.cursor()
-
-try:
-self._lock.acquire()
-self._create_tables()
-self._conn.commit()
-finally:
-self._lock.release()
-
-def _create_tables(self) -> None:
-# Create the `workflow_images` junction table.
-self._cursor.execute(
-"""--sql
-CREATE TABLE IF NOT EXISTS workflow_images (
-workflow_id TEXT NOT NULL,
-image_name TEXT NOT NULL,
-created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
--- updated via trigger
-updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
--- Soft delete, currently unused
-deleted_at DATETIME,
--- enforce one-to-many relationship between workflows and images using PK
--- (we can extend this to many-to-many later)
-PRIMARY KEY (image_name),
-FOREIGN KEY (workflow_id) REFERENCES workflows (workflow_id) ON DELETE CASCADE,
-FOREIGN KEY (image_name) REFERENCES images (image_name) ON DELETE CASCADE
-);
-"""
-)
-
-# Add index for workflow id
-self._cursor.execute(
-"""--sql
-CREATE INDEX IF NOT EXISTS idx_workflow_images_workflow_id ON workflow_images (workflow_id);
-"""
-)
-
-# Add index for workflow id, sorted by created_at
-self._cursor.execute(
-"""--sql
-CREATE INDEX IF NOT EXISTS idx_workflow_images_workflow_id_created_at ON workflow_images (workflow_id, created_at);
-"""
-)
-
-# Add trigger for `updated_at`.
-self._cursor.execute(
-"""--sql
-CREATE TRIGGER IF NOT EXISTS tg_workflow_images_updated_at
-AFTER UPDATE
-ON workflow_images FOR EACH ROW
-BEGIN
-UPDATE workflow_images SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
-WHERE workflow_id = old.workflow_id AND image_name = old.image_name;
-END;
-"""
-)
-
-def create(
-self,
-workflow_id: str,
-image_name: str,
-) -> None:
-"""Creates a workflow-image record."""
-try:
-self._lock.acquire()
-self._cursor.execute(
-"""--sql
-INSERT INTO workflow_images (workflow_id, image_name)
-VALUES (?, ?);
-""",
-(workflow_id, image_name),
-)
-self._conn.commit()
-except sqlite3.Error as e:
-self._conn.rollback()
-raise e
-finally:
-self._lock.release()
-
-def get_workflow_for_image(
-self,
-image_name: str,
-) -> Optional[str]:
-"""Gets an image's workflow id, if it has one."""
-try:
-self._lock.acquire()
-self._cursor.execute(
-"""--sql
-SELECT workflow_id
-FROM workflow_images
-WHERE image_name = ?;
-""",
-(image_name,),
-)
-result = self._cursor.fetchone()
-if result is None:
-return None
-return cast(str, result[0])
-except sqlite3.Error as e:
-self._conn.rollback()
-raise e
-finally:
-self._lock.release()
@@ -0,0 +1,17 @@
+# Default Workflows
+
+Workflows placed in this directory will be synced to the `workflow_library` as
+_default workflows_ on app startup.
+
+- Default workflows are not editable by users. If they are loaded and saved,
+they will save as a copy of the default workflow.
+- Default workflows must have the `meta.category` property set to `"default"`.
+An exception will be raised during sync if this is not set correctly.
+- Default workflows appear on the "Default Workflows" tab of the Workflow
+Library.
+
+After adding or updating default workflows, you **must** start the app up and
+load them to ensure:
+
+- The workflow loads without warning or errors
+- The workflow runs successfully
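To make the `meta.category` requirement above concrete, here is a minimal, hypothetical pre-flight check built on the `WorkflowWithoutIDValidator` and `WorkflowCategory` models added later in this commit. The helper name `check_default_workflow` and the example path are illustrative only and are not part of the change.

```python
# Hypothetical sketch: validate a candidate default workflow before adding it.
# It relies only on models introduced in this commit; the function name and the
# example path below are made up for illustration.
from invokeai.app.services.workflow_records.workflow_records_common import (
    WorkflowCategory,
    WorkflowWithoutIDValidator,
)


def check_default_workflow(path: str) -> None:
    with open(path, "rb") as f:
        # Parse and validate against the WorkflowWithoutID schema (extra keys are rejected).
        workflow = WorkflowWithoutIDValidator.validate_json(f.read())
    # The startup sync expects meta.category to be "default" for files in this directory.
    if workflow.meta.category is not WorkflowCategory.Default:
        raise ValueError(f"{path}: meta.category must be 'default', got {workflow.meta.category.value!r}")


check_default_workflow("path/to/my_default_workflow.json")
```

Even if such a check passes, starting the app and loading the workflow, as described above, remains the required verification.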
@@ -0,0 +1,798 @@
[New 798-line default workflow JSON: "Text to Image - SD1.5" by InvokeAI, described as "Sample text to image workflow for Stable Diffusion 1.5/2", workflow version 1.1.0, meta.category "default", meta.version 2.0.0. It exposes the model, positive prompt, negative prompt, width, and height fields and wires main_model_loader, two compel prompt nodes, noise, a rand_int seed node, denoise_latents, and l2i together via the listed edges; the full JSON is not reproduced here.]
File diff suppressed because it is too large
@@ -1,17 +1,50 @@
 from abc import ABC, abstractmethod
+from typing import Optional

-from invokeai.app.invocations.baseinvocation import WorkflowField
+from invokeai.app.services.shared.pagination import PaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
+from invokeai.app.services.workflow_records.workflow_records_common import (
+Workflow,
+WorkflowCategory,
+WorkflowRecordDTO,
+WorkflowRecordListItemDTO,
+WorkflowRecordOrderBy,
+WorkflowWithoutID,
+)


 class WorkflowRecordsStorageBase(ABC):
 """Base class for workflow storage services."""

 @abstractmethod
-def get(self, workflow_id: str) -> WorkflowField:
+def get(self, workflow_id: str) -> WorkflowRecordDTO:
 """Get workflow by id."""
 pass

 @abstractmethod
-def create(self, workflow: WorkflowField) -> WorkflowField:
+def create(self, workflow: WorkflowWithoutID) -> WorkflowRecordDTO:
 """Creates a workflow."""
 pass
+
+@abstractmethod
+def update(self, workflow: Workflow) -> WorkflowRecordDTO:
+"""Updates a workflow."""
+pass
+
+@abstractmethod
+def delete(self, workflow_id: str) -> None:
+"""Deletes a workflow."""
+pass
+
+@abstractmethod
+def get_many(
+self,
+page: int,
+per_page: int,
+order_by: WorkflowRecordOrderBy,
+direction: SQLiteDirection,
+category: WorkflowCategory,
+query: Optional[str],
+) -> PaginatedResults[WorkflowRecordListItemDTO]:
+"""Gets many workflows."""
+pass
@@ -1,2 +1,106 @@
+import datetime
+from enum import Enum
+from typing import Any, Union
+
+import semver
+from pydantic import BaseModel, ConfigDict, Field, JsonValue, TypeAdapter, field_validator
+
+from invokeai.app.util.metaenum import MetaEnum
+
+__workflow_meta_version__ = semver.Version.parse("1.0.0")
+
+
+class ExposedField(BaseModel):
+nodeId: str
+fieldName: str
+
+
 class WorkflowNotFoundError(Exception):
 """Raised when a workflow is not found"""
+
+
+class WorkflowRecordOrderBy(str, Enum, metaclass=MetaEnum):
+"""The order by options for workflow records"""
+
+CreatedAt = "created_at"
+UpdatedAt = "updated_at"
+OpenedAt = "opened_at"
+Name = "name"
+
+
+class WorkflowCategory(str, Enum, metaclass=MetaEnum):
+User = "user"
+Default = "default"
+
+
+class WorkflowMeta(BaseModel):
+version: str = Field(description="The version of the workflow schema.")
+category: WorkflowCategory = Field(
+default=WorkflowCategory.User, description="The category of the workflow (user or default)."
+)
+
+@field_validator("version")
+def validate_version(cls, version: str):
+try:
+semver.Version.parse(version)
+return version
+except Exception:
+raise ValueError(f"Invalid workflow meta version: {version}")
+
+def to_semver(self) -> semver.Version:
+return semver.Version.parse(self.version)
+
+
+class WorkflowWithoutID(BaseModel):
+name: str = Field(description="The name of the workflow.")
+author: str = Field(description="The author of the workflow.")
+description: str = Field(description="The description of the workflow.")
+version: str = Field(description="The version of the workflow.")
+contact: str = Field(description="The contact of the workflow.")
+tags: str = Field(description="The tags of the workflow.")
+notes: str = Field(description="The notes of the workflow.")
+exposedFields: list[ExposedField] = Field(description="The exposed fields of the workflow.")
+meta: WorkflowMeta = Field(description="The meta of the workflow.")
+# TODO: nodes and edges are very loosely typed
+nodes: list[dict[str, JsonValue]] = Field(description="The nodes of the workflow.")
+edges: list[dict[str, JsonValue]] = Field(description="The edges of the workflow.")
+
+model_config = ConfigDict(extra="forbid")
+
+
+WorkflowWithoutIDValidator = TypeAdapter(WorkflowWithoutID)
+
+
+class Workflow(WorkflowWithoutID):
+id: str = Field(description="The id of the workflow.")
+
+
+WorkflowValidator = TypeAdapter(Workflow)
+
+
+class WorkflowRecordDTOBase(BaseModel):
+workflow_id: str = Field(description="The id of the workflow.")
+name: str = Field(description="The name of the workflow.")
+created_at: Union[datetime.datetime, str] = Field(description="The created timestamp of the workflow.")
+updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the workflow.")
+opened_at: Union[datetime.datetime, str] = Field(description="The opened timestamp of the workflow.")
+
+
+class WorkflowRecordDTO(WorkflowRecordDTOBase):
+workflow: Workflow = Field(description="The workflow.")
+
+@classmethod
+def from_dict(cls, data: dict[str, Any]) -> "WorkflowRecordDTO":
+data["workflow"] = WorkflowValidator.validate_json(data.get("workflow", ""))
+return WorkflowRecordDTOValidator.validate_python(data)
+
+
+WorkflowRecordDTOValidator = TypeAdapter(WorkflowRecordDTO)
+
+
+class WorkflowRecordListItemDTO(WorkflowRecordDTOBase):
+description: str = Field(description="The description of the workflow.")
+category: WorkflowCategory = Field(description="The description of the workflow.")
+
+
+WorkflowRecordListItemDTOValidator = TypeAdapter(WorkflowRecordListItemDTO)
@@ -1,20 +1,26 @@
-import sqlite3
-import threading
+from pathlib import Path
+from typing import Optional

-from invokeai.app.invocations.baseinvocation import WorkflowField, WorkflowFieldValidator
 from invokeai.app.services.invoker import Invoker
-from invokeai.app.services.shared.sqlite import SqliteDatabase
+from invokeai.app.services.shared.pagination import PaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 from invokeai.app.services.workflow_records.workflow_records_base import WorkflowRecordsStorageBase
-from invokeai.app.services.workflow_records.workflow_records_common import WorkflowNotFoundError
+from invokeai.app.services.workflow_records.workflow_records_common import (
+    Workflow,
+    WorkflowCategory,
+    WorkflowNotFoundError,
+    WorkflowRecordDTO,
+    WorkflowRecordListItemDTO,
+    WorkflowRecordListItemDTOValidator,
+    WorkflowRecordOrderBy,
+    WorkflowWithoutID,
+    WorkflowWithoutIDValidator,
+)
 from invokeai.app.util.misc import uuid_string


 class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
-    _invoker: Invoker
-    _conn: sqlite3.Connection
-    _cursor: sqlite3.Cursor
-    _lock: threading.RLock
-
     def __init__(self, db: SqliteDatabase) -> None:
         super().__init__()
         self._lock = db.lock
@@ -24,14 +30,25 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):

     def start(self, invoker: Invoker) -> None:
         self._invoker = invoker
+        self._sync_default_workflows()

-    def get(self, workflow_id: str) -> WorkflowField:
+    def get(self, workflow_id: str) -> WorkflowRecordDTO:
+        """Gets a workflow by ID. Updates the opened_at column."""
         try:
             self._lock.acquire()
             self._cursor.execute(
                 """--sql
-                SELECT workflow
-                FROM workflows
+                UPDATE workflow_library
+                SET opened_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
+                WHERE workflow_id = ?;
+                """,
+                (workflow_id,),
+            )
+            self._conn.commit()
+            self._cursor.execute(
+                """--sql
+                SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
+                FROM workflow_library
                 WHERE workflow_id = ?;
                 """,
                 (workflow_id,),
@@ -39,25 +56,28 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
             row = self._cursor.fetchone()
             if row is None:
                 raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
-            return WorkflowFieldValidator.validate_json(row[0])
+            return WorkflowRecordDTO.from_dict(dict(row))
         except Exception:
             self._conn.rollback()
             raise
         finally:
             self._lock.release()

-    def create(self, workflow: WorkflowField) -> WorkflowField:
+    def create(self, workflow: WorkflowWithoutID) -> WorkflowRecordDTO:
         try:
-            # workflows do not have ids until they are saved
-            workflow_id = uuid_string()
-            workflow.root["id"] = workflow_id
+            # Only user workflows may be created by this method
+            assert workflow.meta.category is WorkflowCategory.User
+            workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
             self._lock.acquire()
             self._cursor.execute(
                 """--sql
-                INSERT INTO workflows(workflow)
-                VALUES (?);
+                INSERT OR IGNORE INTO workflow_library (
+                    workflow_id,
+                    workflow
+                )
+                VALUES (?, ?);
                 """,
-                (workflow.model_dump_json(),),
+                (workflow_with_id.id, workflow_with_id.model_dump_json()),
             )
             self._conn.commit()
         except Exception:
@@ -65,35 +85,232 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
             raise
         finally:
             self._lock.release()
-        return self.get(workflow_id)
+        return self.get(workflow_with_id.id)
+
+    def update(self, workflow: Workflow) -> WorkflowRecordDTO:
+        try:
+            self._lock.acquire()
+            self._cursor.execute(
+                """--sql
+                UPDATE workflow_library
+                SET workflow = ?
+                WHERE workflow_id = ? AND category = 'user';
+                """,
+                (workflow.model_dump_json(), workflow.id),
+            )
+            self._conn.commit()
+        except Exception:
+            self._conn.rollback()
+            raise
+        finally:
+            self._lock.release()
+        return self.get(workflow.id)
+
+    def delete(self, workflow_id: str) -> None:
+        try:
+            self._lock.acquire()
+            self._cursor.execute(
+                """--sql
+                DELETE from workflow_library
+                WHERE workflow_id = ? AND category = 'user';
+                """,
+                (workflow_id,),
+            )
+            self._conn.commit()
+        except Exception:
+            self._conn.rollback()
+            raise
+        finally:
+            self._lock.release()
+        return None
+
+    def get_many(
+        self,
+        page: int,
+        per_page: int,
+        order_by: WorkflowRecordOrderBy,
+        direction: SQLiteDirection,
+        category: WorkflowCategory,
+        query: Optional[str] = None,
+    ) -> PaginatedResults[WorkflowRecordListItemDTO]:
+        try:
+            self._lock.acquire()
+            # sanitize!
+            assert order_by in WorkflowRecordOrderBy
+            assert direction in SQLiteDirection
+            assert category in WorkflowCategory
+            count_query = "SELECT COUNT(*) FROM workflow_library WHERE category = ?"
+            main_query = """
+                SELECT
+                    workflow_id,
+                    category,
+                    name,
+                    description,
+                    created_at,
+                    updated_at,
+                    opened_at
+                FROM workflow_library
+                WHERE category = ?
+                """
+            main_params: list[int | str] = [category.value]
+            count_params: list[int | str] = [category.value]
+            stripped_query = query.strip() if query else None
+            if stripped_query:
+                wildcard_query = "%" + stripped_query + "%"
+                main_query += " AND name LIKE ? OR description LIKE ? "
+                count_query += " AND name LIKE ? OR description LIKE ?;"
+                main_params.extend([wildcard_query, wildcard_query])
+                count_params.extend([wildcard_query, wildcard_query])
+
+            main_query += f" ORDER BY {order_by.value} {direction.value} LIMIT ? OFFSET ?;"
+            main_params.extend([per_page, page * per_page])
+            self._cursor.execute(main_query, main_params)
+            rows = self._cursor.fetchall()
+            workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]
+
+            self._cursor.execute(count_query, count_params)
+            total = self._cursor.fetchone()[0]
+            pages = int(total / per_page) + 1
+
+            return PaginatedResults(
+                items=workflows,
+                page=page,
+                per_page=per_page,
+                pages=pages,
+                total=total,
+            )
+        except Exception:
+            self._conn.rollback()
+            raise
+        finally:
+            self._lock.release()
+
+    def _sync_default_workflows(self) -> None:
+        """Syncs default workflows to the database. Internal use only."""
+
+        """
+        An enhancement might be to only update workflows that have changed. This would require stable
+        default workflow IDs, and properly incrementing the workflow version.
+
+        It's much simpler to just replace them all with whichever workflows are in the directory.
+
+        The downside is that the `updated_at` and `opened_at` timestamps for default workflows are
+        meaningless, as they are overwritten every time the server starts.
+        """
+
+        try:
+            self._lock.acquire()
+            workflows: list[Workflow] = []
+            workflows_dir = Path(__file__).parent / Path("default_workflows")
+            workflow_paths = workflows_dir.glob("*.json")
+            for path in workflow_paths:
+                bytes_ = path.read_bytes()
+                workflow_without_id = WorkflowWithoutIDValidator.validate_json(bytes_)
+                workflow = Workflow(**workflow_without_id.model_dump(), id=uuid_string())
+                workflows.append(workflow)
+            # Only default workflows may be managed by this method
+            assert all(w.meta.category is WorkflowCategory.Default for w in workflows)
+            self._cursor.execute(
+                """--sql
+                DELETE FROM workflow_library
+                WHERE category = 'default';
+                """
+            )
+            for w in workflows:
+                self._cursor.execute(
+                    """--sql
+                    INSERT OR REPLACE INTO workflow_library (
+                        workflow_id,
+                        workflow
+                    )
+                    VALUES (?, ?);
+                    """,
+                    (w.id, w.model_dump_json()),
+                )
+            self._conn.commit()
+        except Exception:
+            self._conn.rollback()
+            raise
+        finally:
+            self._lock.release()

     def _create_tables(self) -> None:
         try:
             self._lock.acquire()
             self._cursor.execute(
                 """--sql
-                CREATE TABLE IF NOT EXISTS workflows (
+                CREATE TABLE IF NOT EXISTS workflow_library (
+                    workflow_id TEXT NOT NULL PRIMARY KEY,
                     workflow TEXT NOT NULL,
-                    workflow_id TEXT GENERATED ALWAYS AS (json_extract(workflow, '$.id')) VIRTUAL NOT NULL UNIQUE, -- gets implicit index
                     created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
-                    updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) -- updated via trigger
+                    -- updated via trigger
+                    updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
+                    -- updated manually when retrieving workflow
+                    opened_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
+                    -- Generated columns, needed for indexing and searching
+                    category TEXT GENERATED ALWAYS as (json_extract(workflow, '$.meta.category')) VIRTUAL NOT NULL,
+                    name TEXT GENERATED ALWAYS as (json_extract(workflow, '$.name')) VIRTUAL NOT NULL,
+                    description TEXT GENERATED ALWAYS as (json_extract(workflow, '$.description')) VIRTUAL NOT NULL
                 );
                 """
             )

             self._cursor.execute(
                 """--sql
-                CREATE TRIGGER IF NOT EXISTS tg_workflows_updated_at
+                CREATE TRIGGER IF NOT EXISTS tg_workflow_library_updated_at
                 AFTER UPDATE
-                ON workflows FOR EACH ROW
+                ON workflow_library FOR EACH ROW
                 BEGIN
-                    UPDATE workflows
+                    UPDATE workflow_library
                     SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
                     WHERE workflow_id = old.workflow_id;
                 END;
                 """
             )

+            self._cursor.execute(
+                """--sql
+                CREATE INDEX IF NOT EXISTS idx_workflow_library_created_at ON workflow_library(created_at);
+                """
+            )
+            self._cursor.execute(
+                """--sql
+                CREATE INDEX IF NOT EXISTS idx_workflow_library_updated_at ON workflow_library(updated_at);
+                """
+            )
+            self._cursor.execute(
+                """--sql
+                CREATE INDEX IF NOT EXISTS idx_workflow_library_opened_at ON workflow_library(opened_at);
+                """
+            )
+            self._cursor.execute(
+                """--sql
+                CREATE INDEX IF NOT EXISTS idx_workflow_library_category ON workflow_library(category);
+                """
+            )
+            self._cursor.execute(
+                """--sql
+                CREATE INDEX IF NOT EXISTS idx_workflow_library_name ON workflow_library(name);
+                """
+            )
+            self._cursor.execute(
+                """--sql
+                CREATE INDEX IF NOT EXISTS idx_workflow_library_description ON workflow_library(description);
+                """
+            )
+
+            # We do not need the original `workflows` table or `workflow_images` junction table.
+            self._cursor.execute(
+                """--sql
+                DROP TABLE IF EXISTS workflow_images;
+                """
+            )
+            self._cursor.execute(
+                """--sql
+                DROP TABLE IF EXISTS workflows;
+                """
+            )
+
             self._conn.commit()
         except Exception:
             self._conn.rollback()
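The `workflow_library` table created above stores each workflow as a JSON blob and exposes `category`, `name`, and `description` as virtual generated columns so they can be indexed and searched with `LIKE`, which is what `get_many()` relies on. The following standalone sketch of that storage pattern is not taken from the commit; it uses only the Python standard library, assumes SQLite 3.31+ (generated-column support), and the sample data is invented:

```python
# Hypothetical sketch of the JSON-blob + generated-column pattern used by workflow_library.
# Assumes SQLite 3.31+; table/column names mirror the diff, the data is made up.
import json
import sqlite3

conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row
cur = conn.cursor()

cur.execute(
    """
    CREATE TABLE workflow_library (
        workflow_id TEXT NOT NULL PRIMARY KEY,
        workflow TEXT NOT NULL,
        -- generated columns pull fields out of the JSON for indexing and searching
        category TEXT GENERATED ALWAYS AS (json_extract(workflow, '$.meta.category')) VIRTUAL NOT NULL,
        name TEXT GENERATED ALWAYS AS (json_extract(workflow, '$.name')) VIRTUAL NOT NULL
    );
    """
)
cur.execute("CREATE INDEX idx_workflow_library_name ON workflow_library(name);")

workflow = {"name": "Upscale Pipeline", "meta": {"category": "user"}}
cur.execute(
    "INSERT INTO workflow_library (workflow_id, workflow) VALUES (?, ?);",
    ("wf-1", json.dumps(workflow)),
)

# Search by name with a wildcard, the same LIKE pattern get_many() builds.
cur.execute(
    "SELECT workflow_id, name, category FROM workflow_library WHERE name LIKE ?;",
    ("%upscale%",),
)
print([dict(row) for row in cur.fetchall()])
```

Since the generated columns are `VIRTUAL`, they add no row storage; the indexes hold the extracted values, so searches on `name` or `description` do not re-parse the JSON for every row.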
@@ -11,7 +11,7 @@ from invokeai.app.services.model_records import (
     DuplicateModelException,
     ModelRecordServiceSQL,
 )
-from invokeai.app.services.shared.sqlite import SqliteDatabase
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
@@ -32,9 +32,9 @@ sd-1/main/Analog-Diffusion:
   description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
   repo_id: wavymulder/Analog-Diffusion
   recommended: False
-sd-1/main/Deliberate:
+sd-1/main/Deliberate_v5:
   description: Versatile model that produces detailed images up to 768px (4.27 GB)
-  repo_id: XpucT/Deliberate
+  path: https://huggingface.co/XpucT/Deliberate/resolve/main/Deliberate_v5.safetensors
   recommended: False
 sd-1/main/Dungeons-and-Diffusion:
   description: Dungeons & Dragons characters (2.13 GB)
@@ -11,6 +11,7 @@ module.exports = {
     'plugin:react-hooks/recommended',
     'plugin:react/jsx-runtime',
     'prettier',
+    'plugin:storybook/recommended',
   ],
   parser: '@typescript-eslint/parser',
   parserOptions: {
@@ -26,6 +27,7 @@ module.exports = {
     'eslint-plugin-react-hooks',
     'i18next',
     'path',
+    'unused-imports',
   ],
   root: true,
   rules: {
@@ -44,9 +46,16 @@ module.exports = {
     radix: 'error',
     'space-before-blocks': 'error',
     'import/prefer-default-export': 'off',
-    '@typescript-eslint/no-unused-vars': [
+    '@typescript-eslint/no-unused-vars': 'off',
+    'unused-imports/no-unused-imports': 'error',
+    'unused-imports/no-unused-vars': [
       'warn',
-      { varsIgnorePattern: '^_', argsIgnorePattern: '^_' },
+      {
+        vars: 'all',
+        varsIgnorePattern: '^_',
+        args: 'after-used',
+        argsIgnorePattern: '^_',
+      },
     ],
     '@typescript-eslint/ban-ts-comment': 'warn',
     '@typescript-eslint/no-explicit-any': 'warn',
@@ -1,5 +1,6 @@
 dist/
 public/locales/*.json
+!public/locales/en.json
 .husky/
 node_modules/
 patches/
@@ -11,3 +12,4 @@ index.html
 src/services/api/schema.d.ts
 static/
 src/theme/css/overlayscrollbars.css
+pnpm-lock.yaml

invokeai/frontend/web/.storybook/main.ts (new file, 21 lines)
@@ -0,0 +1,21 @@
+import type { StorybookConfig } from '@storybook/react-vite';
+
+const config: StorybookConfig = {
+  stories: ['../src/**/*.mdx', '../src/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
+  addons: [
+    '@storybook/addon-links',
+    '@storybook/addon-essentials',
+    '@storybook/addon-interactions',
+  ],
+  framework: {
+    name: '@storybook/react-vite',
+    options: {},
+  },
+  docs: {
+    autodocs: 'tag',
+  },
+  core: {
+    disableTelemetry: true,
+  },
+};
+export default config;

invokeai/frontend/web/.storybook/manager.ts (new file, 6 lines)
@@ -0,0 +1,6 @@
+import { addons } from '@storybook/manager-api';
+import { themes } from '@storybook/theming';
+
+addons.setConfig({
+  theme: themes.dark,
+});

invokeai/frontend/web/.storybook/preview.tsx (new file, 47 lines)
@@ -0,0 +1,47 @@
+import { Preview } from '@storybook/react';
+import { themes } from '@storybook/theming';
+import i18n from 'i18next';
+import React from 'react';
+import { initReactI18next } from 'react-i18next';
+import { Provider } from 'react-redux';
+import GlobalHotkeys from '../src/app/components/GlobalHotkeys';
+import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
+import { createStore } from '../src/app/store/store';
+// TODO: Disabled for IDE performance issues with our translation JSON
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment
+// @ts-ignore
+import translationEN from '../public/locales/en.json';
+
+i18n.use(initReactI18next).init({
+  lng: 'en',
+  resources: {
+    en: { translation: translationEN },
+  },
+  debug: true,
+  interpolation: {
+    escapeValue: false,
+  },
+  returnNull: false,
+});
+
+const store = createStore(undefined, false);
+
+const preview: Preview = {
+  decorators: [
+    (Story) => (
+      <Provider store={store}>
+        <ThemeLocaleProvider>
+          <GlobalHotkeys />
+          <Story />
+        </ThemeLocaleProvider>
+      </Provider>
+    ),
+  ],
+  parameters: {
+    docs: {
+      theme: themes.dark,
+    },
+  },
+};
+
+export default preview;

invokeai/frontend/web/.yarn/releases/yarn-1.22.19.cjs (vendored, 193957 lines)
File diff suppressed because one or more lines are too long
@@ -1,5 +0,0 @@
-# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
-# yarn lockfile v1
-
-
-yarn-path ".yarn/releases/yarn-1.22.19.cjs"
@@ -1 +0,0 @@
-yarnPath: .yarn/releases/yarn-1.22.19.cjs
@@ -85,14 +85,14 @@ The server must be started and available at <http://127.0.0.1:9090>.
 # from the repo root, start the server
 python scripts/invokeai-web.py
 # from invokeai/frontend/web/, run the script
-yarn typegen
+pnpm typegen
 ```

 ## Package Scripts

 See `package.json` for all scripts.

-Run with `yarn <script name>`.
+Run with `pnpm <script name>`.

 - `dev`: run the frontend in dev mode, enabling hot reloading
 - `build`: run all checks (madge, eslint, prettier, tsc) and then build the frontend
@@ -112,13 +112,13 @@ We encourage you to ping @psychedelicious and @blessedcoolant on [discord] if yo

 ### Dev Environment

-Install [node] and [yarn classic].
+Install [node] and [pnpm].

-From `invokeai/frontend/web/` run `yarn install` to get everything set up.
+From `invokeai/frontend/web/` run `pnpm i` to get everything set up.

 Start everything in dev mode:

-1. Start the dev server: `yarn dev`
+1. Start the dev server: `pnpm dev`
 2. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
 3. Point your browser to the dev server address e.g. <http://localhost:5173/>

@@ -134,10 +134,10 @@ For a number of technical and logistical reasons, we need to commit UI build art

 If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.

-To build for production, run `yarn build`.
+To build for production, run `pnpm build`.

 [node]: https://nodejs.org/en/download/
-[yarn classic]: https://classic.yarnpkg.com/lang/en/
+[pnpm]: https://github.com/pnpm/pnpm
 [discord]: https://discord.gg/ZmtBAhwWhy
 [Redux Toolkit]: https://github.com/reduxjs/redux-toolkit
 [redux-remember]: https://github.com/zewish/redux-remember
@@ -19,21 +19,23 @@
     "dist"
   ],
   "scripts": {
-    "dev": "concurrently \"vite dev\" \"yarn run theme:watch\"",
-    "dev:host": "concurrently \"vite dev --host\" \"yarn run theme:watch\"",
-    "build": "yarn run lint && vite build",
+    "dev": "concurrently \"vite dev\" \"pnpm run theme:watch\"",
+    "dev:host": "concurrently \"vite dev --host\" \"pnpm run theme:watch\"",
+    "build": "pnpm run lint && vite build",
     "typegen": "node scripts/typegen.js",
     "preview": "vite preview",
     "lint:madge": "madge --circular src/main.tsx",
     "lint:eslint": "eslint --max-warnings=0 .",
     "lint:prettier": "prettier --check .",
     "lint:tsc": "tsc --noEmit",
-    "lint": "concurrently -g -n eslint,prettier,tsc,madge -c cyan,green,magenta,yellow \"yarn run lint:eslint\" \"yarn run lint:prettier\" \"yarn run lint:tsc\" \"yarn run lint:madge\"",
+    "lint": "concurrently -g -n eslint,prettier,tsc,madge -c cyan,green,magenta,yellow \"pnpm run lint:eslint\" \"pnpm run lint:prettier\" \"pnpm run lint:tsc\" \"pnpm run lint:madge\"",
     "fix": "eslint --fix . && prettier --log-level warn --write .",
-    "postinstall": "patch-package && yarn run theme",
+    "preinstall": "npx only-allow pnpm",
+    "postinstall": "patch-package && pnpm run theme",
     "theme": "chakra-cli tokens src/theme/theme.ts",
     "theme:watch": "chakra-cli tokens src/theme/theme.ts --watch",
-    "up": "yarn upgrade-interactive --latest"
+    "storybook": "storybook dev -p 6006",
+    "build-storybook": "storybook build"
   },
   "madge": {
     "detectiveOptions": {
@@ -48,6 +50,8 @@
   "dependencies": {
     "@chakra-ui/anatomy": "^2.2.2",
     "@chakra-ui/icons": "^2.1.1",
+    "@chakra-ui/layout": "^2.3.1",
+    "@chakra-ui/portal": "^2.1.0",
     "@chakra-ui/react": "^2.8.2",
     "@chakra-ui/styled-system": "^2.9.2",
     "@chakra-ui/theme-tools": "^2.1.2",
@@ -56,24 +60,26 @@
     "@dnd-kit/utilities": "^3.2.2",
     "@emotion/react": "^11.11.1",
     "@emotion/styled": "^11.11.0",
-    "@fontsource-variable/inter": "^5.0.15",
+    "@fontsource-variable/inter": "^5.0.16",
     "@mantine/core": "^6.0.19",
     "@mantine/form": "^6.0.19",
     "@mantine/hooks": "^6.0.19",
     "@nanostores/react": "^0.7.1",
-    "@reduxjs/toolkit": "^1.9.7",
+    "@reduxjs/toolkit": "^2.0.1",
     "@roarr/browser-log-writer": "^1.3.0",
+    "@storybook/manager-api": "^7.6.4",
+    "@storybook/theming": "^7.6.4",
     "compare-versions": "^6.1.0",
     "dateformat": "^5.0.3",
-    "framer-motion": "^10.16.4",
-    "i18next": "^23.6.0",
-    "i18next-http-backend": "^2.3.1",
+    "framer-motion": "^10.16.15",
+    "i18next": "^23.7.8",
+    "i18next-http-backend": "^2.4.2",
     "idb-keyval": "^6.2.1",
     "konva": "^9.2.3",
     "lodash-es": "^4.17.21",
-    "nanostores": "^0.9.4",
+    "nanostores": "^0.9.5",
     "new-github-issue-url": "^1.0.0",
-    "overlayscrollbars": "^2.4.4",
+    "overlayscrollbars": "^2.4.5",
     "overlayscrollbars-react": "^0.5.3",
     "patch-package": "^8.0.0",
     "query-string": "^8.1.0",
@@ -83,20 +89,20 @@
     "react-dropzone": "^14.2.3",
     "react-error-boundary": "^4.0.11",
     "react-hotkeys-hook": "4.4.1",
-    "react-i18next": "^13.3.1",
-    "react-icons": "^4.11.0",
+    "react-i18next": "^13.5.0",
+    "react-icons": "^4.12.0",
     "react-konva": "^18.2.10",
-    "react-redux": "^8.1.3",
+    "react-redux": "^9.0.2",
     "react-resizable-panels": "^0.0.55",
-    "react-use": "^17.4.0",
+    "react-use": "^17.4.2",
     "react-virtuoso": "^4.6.2",
-    "reactflow": "^11.9.4",
+    "reactflow": "^11.10.1",
     "redux-dynamic-middlewares": "^2.2.0",
-    "redux-remember": "^4.0.4",
-    "roarr": "^7.18.3",
-    "serialize-error": "^11.0.2",
+    "redux-remember": "^5.0.0",
+    "roarr": "^7.21.0",
+    "serialize-error": "^11.0.3",
     "socket.io-client": "^4.7.2",
-    "type-fest": "^4.7.1",
+    "type-fest": "^4.8.3",
     "use-debounce": "^10.0.0",
     "use-image": "^1.1.1",
     "uuid": "^9.0.1",
@@ -104,43 +110,51 @@
     "zod-validation-error": "^2.1.0"
   },
   "peerDependencies": {
-    "@chakra-ui/cli": "^2.4.0",
-    "@chakra-ui/react": "^2.8.0",
+    "@chakra-ui/cli": "^2.4.1",
+    "@chakra-ui/react": "^2.8.2",
     "react": "^18.2.0",
     "react-dom": "^18.2.0",
     "ts-toolbelt": "^9.6.0"
   },
   "devDependencies": {
     "@chakra-ui/cli": "^2.4.1",
+    "@storybook/addon-essentials": "^7.6.4",
+    "@storybook/addon-interactions": "^7.6.4",
+    "@storybook/addon-links": "^7.6.4",
+    "@storybook/blocks": "^7.6.4",
+    "@storybook/react": "^7.6.4",
+    "@storybook/react-vite": "^7.6.4",
+    "@storybook/test": "^7.6.4",
     "@types/dateformat": "^5.0.2",
-    "@types/lodash-es": "^4.17.11",
+    "@types/lodash-es": "^4.17.12",
     "@types/node": "^20.9.0",
     "@types/react": "^18.2.37",
-    "@types/react-dom": "^18.2.15",
-    "@types/react-redux": "^7.1.30",
+    "@types/react-dom": "^18.2.17",
     "@types/uuid": "^9.0.7",
-    "@typescript-eslint/eslint-plugin": "^6.10.0",
-    "@typescript-eslint/parser": "^6.10.0",
-    "@vitejs/plugin-react-swc": "^3.4.1",
+    "@typescript-eslint/eslint-plugin": "^6.13.2",
+    "@typescript-eslint/parser": "^6.13.2",
+    "@vitejs/plugin-react-swc": "^3.5.0",
     "concurrently": "^8.2.2",
-    "eslint": "^8.53.0",
-    "eslint-config-prettier": "^9.0.0",
+    "eslint": "^8.55.0",
+    "eslint-config-prettier": "^9.1.0",
     "eslint-plugin-i18next": "^6.0.3",
    "eslint-plugin-path": "^1.2.2",
     "eslint-plugin-react": "^7.33.2",
     "eslint-plugin-react-hooks": "^4.6.0",
+    "eslint-plugin-storybook": "^0.6.15",
+    "eslint-plugin-unused-imports": "^3.0.0",
     "madge": "^6.1.0",
     "openapi-types": "^12.1.3",
-    "openapi-typescript": "^6.7.0",
-    "prettier": "^3.0.3",
-    "rollup-plugin-visualizer": "^5.9.2",
+    "openapi-typescript": "^6.7.2",
+    "prettier": "^3.1.0",
+    "rollup-plugin-visualizer": "^5.10.0",
+    "storybook": "^7.6.4",
     "ts-toolbelt": "^9.6.0",
-    "typescript": "^5.2.2",
+    "typescript": "^5.3.3",
     "vite": "^4.5.1",
     "vite-plugin-css-injected-by-js": "^3.3.0",
-    "vite-plugin-dts": "^3.6.3",
+    "vite-plugin-dts": "^3.6.4",
     "vite-plugin-eslint": "^1.8.1",
-    "vite-tsconfig-paths": "^4.2.1",
-    "yarn": "^1.22.19"
+    "vite-tsconfig-paths": "^4.2.2"
   }
 }

invokeai/frontend/web/pnpm-lock.yaml (new file, 13148 lines)
File diff suppressed because it is too large
@@ -67,7 +67,9 @@
     "controlNet": "ControlNet",
     "controlAdapter": "Control Adapter",
     "data": "Data",
+    "delete": "Delete",
     "details": "Details",
+    "direction": "Direction",
     "ipAdapter": "IP Adapter",
     "t2iAdapter": "T2I Adapter",
     "darkMode": "Dark Mode",
@@ -115,6 +117,7 @@
     "nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
     "notInstalled": "Not $t(common.installed)",
     "openInNewTab": "Open in New Tab",
+    "orderBy": "Order By",
     "outpaint": "outpaint",
     "outputs": "Outputs",
     "postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
@@ -125,6 +128,8 @@
     "random": "Random",
     "reportBugLabel": "Report Bug",
     "safetensors": "Safetensors",
+    "save": "Save",
+    "saveAs": "Save As",
     "settingsLabel": "Settings",
     "simple": "Simple",
     "somethingWentWrong": "Something went wrong",
@@ -161,7 +166,13 @@
     "txt2img": "Text To Image",
     "unifiedCanvas": "Unified Canvas",
     "unknown": "Unknown",
-    "upload": "Upload"
+    "upload": "Upload",
+    "updated": "Updated",
+    "created": "Created",
+    "prevPage": "Previous Page",
+    "nextPage": "Next Page",
+    "unknownError": "Unknown Error",
+    "unsaved": "Unsaved"
   },
   "controlnet": {
     "controlAdapter_one": "Control Adapter",
@@ -384,7 +395,9 @@
     "deleteSelection": "Delete Selection",
     "downloadSelection": "Download Selection",
     "preparingDownload": "Preparing Download",
-    "preparingDownloadFailed": "Problem Preparing Download"
+    "preparingDownloadFailed": "Problem Preparing Download",
+    "problemDeletingImages": "Problem Deleting Images",
+    "problemDeletingImagesDesc": "One or more images could not be deleted"
   },
   "hotkeys": {
     "acceptStagingImage": {
@@ -937,9 +950,9 @@
     "problemSettingTitle": "Problem Setting Title",
     "reloadNodeTemplates": "Reload Node Templates",
     "removeLinearView": "Remove from Linear View",
-    "resetWorkflow": "Reset Workflow",
-    "resetWorkflowDesc": "Are you sure you want to reset this workflow?",
-    "resetWorkflowDesc2": "Resetting the workflow will clear all nodes, edges and workflow details.",
+    "resetWorkflow": "Reset Workflow Editor",
+    "resetWorkflowDesc": "Are you sure you want to reset the Workflow Editor?",
+    "resetWorkflowDesc2": "Resetting the Workflow Editor will clear all nodes, edges and workflow details. Saved workflows will not be affected.",
     "scheduler": "Scheduler",
     "schedulerDescription": "TODO",
     "sDXLMainModelField": "SDXL Model",
@@ -1266,7 +1279,6 @@
     "modelAddedSimple": "Model Added",
     "modelAddFailed": "Model Add Failed",
     "nodesBrokenConnections": "Cannot load. Some connections are broken.",
-    "nodesCleared": "Nodes Cleared",
     "nodesCorruptedGraph": "Cannot load. Graph seems to be corrupted.",
     "nodesLoaded": "Nodes Loaded",
     "nodesLoadedFailed": "Failed To Load Nodes",
@@ -1315,7 +1327,10 @@
     "uploadFailedInvalidUploadDesc": "Must be single PNG or JPEG image",
     "uploadFailedUnableToLoadDesc": "Unable to load file",
     "upscalingFailed": "Upscaling Failed",
-    "workflowLoaded": "Workflow Loaded"
+    "workflowLoaded": "Workflow Loaded",
+    "problemRetrievingWorkflow": "Problem Retrieving Workflow",
+    "workflowDeleted": "Workflow Deleted",
+    "problemDeletingWorkflow": "Problem Deleting Workflow"
   },
   "tooltip": {
     "feature": {
@@ -1610,5 +1625,36 @@
     "showIntermediates": "Show Intermediates",
     "snapToGrid": "Snap to Grid",
     "undo": "Undo"
+  },
+  "workflows": {
+    "workflows": "Workflows",
+    "workflowLibrary": "Library",
+    "userWorkflows": "My Workflows",
+    "defaultWorkflows": "Default Workflows",
+    "openWorkflow": "Open Workflow",
+    "uploadWorkflow": "Upload Workflow",
+    "deleteWorkflow": "Delete Workflow",
+    "unnamedWorkflow": "Unnamed Workflow",
+    "downloadWorkflow": "Download Workflow",
+    "saveWorkflow": "Save Workflow",
+    "saveWorkflowAs": "Save Workflow As",
+    "savingWorkflow": "Saving Workflow...",
+    "problemSavingWorkflow": "Problem Saving Workflow",
+    "workflowSaved": "Workflow Saved",
+    "noRecentWorkflows": "No Recent Workflows",
+    "noUserWorkflows": "No User Workflows",
+    "noSystemWorkflows": "No System Workflows",
+    "problemLoading": "Problem Loading Workflows",
+    "loading": "Loading Workflows",
+    "noDescription": "No description",
+    "searchWorkflows": "Search Workflows",
+    "clearWorkflowSearchFilter": "Clear Workflow Search Filter",
+    "workflowName": "Workflow Name",
+    "workflowEditorReset": "Workflow Editor Reset",
+    "workflowEditorMenu": "Workflow Editor Menu",
+    "workflowIsOpen": "Workflow is Open"
+  },
+  "app": {
+    "storeNotInitialized": "Store is not initialized"
   }
 }
@ -605,7 +605,6 @@
|
|||||||
"nodesSaved": "Nodos guardados",
|
"nodesSaved": "Nodos guardados",
|
||||||
"nodesLoadedFailed": "Error al cargar los nodos",
|
"nodesLoadedFailed": "Error al cargar los nodos",
|
||||||
"nodesLoaded": "Nodos cargados",
|
"nodesLoaded": "Nodos cargados",
|
||||||
"nodesCleared": "Nodos borrados",
|
|
||||||
"problemCopyingImage": "No se puede copiar la imagen",
|
"problemCopyingImage": "No se puede copiar la imagen",
|
||||||
"nodesNotValidJSON": "JSON no válido",
|
"nodesNotValidJSON": "JSON no válido",
|
||||||
"nodesCorruptedGraph": "No se puede cargar. El gráfico parece estar dañado.",
|
"nodesCorruptedGraph": "No se puede cargar. El gráfico parece estar dañado.",
|
||||||
|
@ -103,7 +103,8 @@
|
|||||||
"somethingWentWrong": "Qualcosa è andato storto",
|
"somethingWentWrong": "Qualcosa è andato storto",
|
||||||
"copyError": "$t(gallery.copy) Errore",
|
"copyError": "$t(gallery.copy) Errore",
|
||||||
"input": "Ingresso",
|
"input": "Ingresso",
|
||||||
"notInstalled": "Non $t(common.installed)"
|
"notInstalled": "Non $t(common.installed)",
|
||||||
|
"unknownError": "Errore sconosciuto"
|
||||||
},
|
},
|
||||||
"gallery": {
|
"gallery": {
|
||||||
"generations": "Generazioni",
|
"generations": "Generazioni",
|
||||||
@ -141,7 +142,9 @@
|
|||||||
"unstarImage": "Rimuovi preferenza immagine",
|
"unstarImage": "Rimuovi preferenza immagine",
|
||||||
"dropOrUpload": "$t(gallery.drop) o carica",
|
"dropOrUpload": "$t(gallery.drop) o carica",
|
||||||
"starImage": "Immagine preferita",
|
"starImage": "Immagine preferita",
|
||||||
"dropToUpload": "$t(gallery.drop) per aggiornare"
|
"dropToUpload": "$t(gallery.drop) per aggiornare",
|
||||||
|
"problemDeletingImagesDesc": "Impossibile eliminare una o più immagini",
|
||||||
|
"problemDeletingImages": "Problema durante l'eliminazione delle immagini"
|
||||||
},
|
},
|
||||||
"hotkeys": {
|
"hotkeys": {
|
||||||
"keyboardShortcuts": "Tasti rapidi",
|
"keyboardShortcuts": "Tasti rapidi",
|
||||||
@ -626,7 +629,10 @@
|
|||||||
"imageActions": "Azioni Immagine",
|
"imageActions": "Azioni Immagine",
|
||||||
"aspectRatioFree": "Libere",
|
"aspectRatioFree": "Libere",
|
||||||
"maskEdge": "Maschera i bordi",
|
"maskEdge": "Maschera i bordi",
|
||||||
"unmasked": "No maschera"
|
"unmasked": "No maschera",
|
||||||
|
"cfgRescaleMultiplier": "Moltiplicatore riscala CFG",
|
||||||
|
"cfgRescale": "Riscala CFG",
|
||||||
|
"useSize": "Usa Dimensioni"
|
||||||
},
|
},
|
||||||
"settings": {
|
"settings": {
|
||||||
"models": "Modelli",
|
"models": "Modelli",
|
||||||
@ -670,7 +676,8 @@
|
|||||||
"clearIntermediatesDisabled": "La coda deve essere vuota per cancellare le immagini intermedie",
|
"clearIntermediatesDisabled": "La coda deve essere vuota per cancellare le immagini intermedie",
|
||||||
"enableNSFWChecker": "Abilita controllo NSFW",
|
"enableNSFWChecker": "Abilita controllo NSFW",
|
||||||
"enableInvisibleWatermark": "Abilita filigrana invisibile",
|
"enableInvisibleWatermark": "Abilita filigrana invisibile",
|
||||||
"enableInformationalPopovers": "Abilita testo informativo a comparsa"
|
"enableInformationalPopovers": "Abilita testo informativo a comparsa",
|
||||||
|
"reloadingIn": "Ricaricando in"
|
||||||
},
|
},
|
||||||
"toast": {
|
"toast": {
|
||||||
"tempFoldersEmptied": "Cartella temporanea svuotata",
|
"tempFoldersEmptied": "Cartella temporanea svuotata",
|
||||||
@ -713,7 +720,6 @@
|
|||||||
"nodesLoadedFailed": "Impossibile caricare i nodi",
|
"nodesLoadedFailed": "Impossibile caricare i nodi",
|
||||||
"nodesSaved": "Nodi salvati",
|
"nodesSaved": "Nodi salvati",
|
||||||
"nodesLoaded": "Nodi caricati",
|
"nodesLoaded": "Nodi caricati",
|
||||||
"nodesCleared": "Nodi cancellati",
|
|
||||||
"problemCopyingImage": "Impossibile copiare l'immagine",
|
"problemCopyingImage": "Impossibile copiare l'immagine",
|
||||||
"nodesNotValidGraph": "Grafico del nodo InvokeAI non valido",
|
"nodesNotValidGraph": "Grafico del nodo InvokeAI non valido",
|
||||||
"nodesCorruptedGraph": "Impossibile caricare. Il grafico sembra essere danneggiato.",
|
"nodesCorruptedGraph": "Impossibile caricare. Il grafico sembra essere danneggiato.",
|
||||||
@ -752,11 +758,12 @@
|
|||||||
"setNodeField": "Imposta come campo nodo",
|
"setNodeField": "Imposta come campo nodo",
|
||||||
"problemSavingMask": "Problema nel salvataggio della maschera",
|
"problemSavingMask": "Problema nel salvataggio della maschera",
|
||||||
"problemSavingCanvasDesc": "Impossibile salvare la tela",
|
"problemSavingCanvasDesc": "Impossibile salvare la tela",
|
||||||
"setCanvasInitialImage": "Imposta come immagine iniziale della tela",
|
"setCanvasInitialImage": "Imposta l'immagine iniziale della tela",
|
||||||
"workflowLoaded": "Flusso di lavoro caricato",
|
"workflowLoaded": "Flusso di lavoro caricato",
|
||||||
"setIPAdapterImage": "Imposta come immagine per l'Adattatore IP",
|
"setIPAdapterImage": "Imposta come immagine per l'Adattatore IP",
|
||||||
"problemSavingMaskDesc": "Impossibile salvare la maschera",
|
"problemSavingMaskDesc": "Impossibile salvare la maschera",
|
||||||
"setAsCanvasInitialImage": "Imposta come immagine iniziale della tela"
|
"setAsCanvasInitialImage": "Imposta come immagine iniziale della tela",
|
||||||
|
"invalidUpload": "Caricamento non valido"
|
||||||
},
|
},
|
||||||
"tooltip": {
|
"tooltip": {
|
||||||
"feature": {
|
"feature": {
|
||||||
@ -780,7 +787,7 @@
|
|||||||
"maskingOptions": "Opzioni di mascheramento",
|
"maskingOptions": "Opzioni di mascheramento",
|
||||||
"enableMask": "Abilita maschera",
|
"enableMask": "Abilita maschera",
|
||||||
"preserveMaskedArea": "Mantieni area mascherata",
|
"preserveMaskedArea": "Mantieni area mascherata",
|
||||||
"clearMask": "Elimina la maschera",
|
"clearMask": "Cancella maschera (Shift+C)",
|
||||||
"brush": "Pennello",
|
"brush": "Pennello",
|
||||||
"eraser": "Cancellino",
|
"eraser": "Cancellino",
|
||||||
"fillBoundingBox": "Riempi rettangolo di selezione",
|
"fillBoundingBox": "Riempi rettangolo di selezione",
|
||||||
@ -833,7 +840,8 @@
|
|||||||
"betaPreserveMasked": "Conserva quanto mascherato",
|
"betaPreserveMasked": "Conserva quanto mascherato",
|
||||||
"antialiasing": "Anti aliasing",
|
"antialiasing": "Anti aliasing",
|
||||||
"showResultsOn": "Mostra i risultati (attivato)",
|
"showResultsOn": "Mostra i risultati (attivato)",
|
||||||
"showResultsOff": "Mostra i risultati (disattivato)"
|
"showResultsOff": "Mostra i risultati (disattivato)",
|
||||||
|
"saveMask": "Salva $t(unifiedCanvas.mask)"
|
||||||
},
|
},
|
||||||
"accessibility": {
|
"accessibility": {
|
||||||
"modelSelect": "Seleziona modello",
|
"modelSelect": "Seleziona modello",
|
||||||
@ -859,7 +867,8 @@
|
|||||||
"showGalleryPanel": "Mostra il pannello Galleria",
|
"showGalleryPanel": "Mostra il pannello Galleria",
|
||||||
"loadMore": "Carica altro",
|
"loadMore": "Carica altro",
|
||||||
"mode": "Modalità",
|
"mode": "Modalità",
|
||||||
"resetUI": "$t(accessibility.reset) l'Interfaccia Utente"
|
"resetUI": "$t(accessibility.reset) l'Interfaccia Utente",
|
||||||
|
"createIssue": "Segnala un problema"
|
||||||
},
|
},
|
||||||
"ui": {
|
"ui": {
|
||||||
"hideProgressImages": "Nascondi avanzamento immagini",
|
"hideProgressImages": "Nascondi avanzamento immagini",
|
||||||
@ -940,7 +949,7 @@
|
|||||||
"unknownNode": "Nodo sconosciuto",
|
"unknownNode": "Nodo sconosciuto",
|
||||||
"vaeFieldDescription": "Sotto modello VAE.",
|
"vaeFieldDescription": "Sotto modello VAE.",
|
||||||
"booleanPolymorphicDescription": "Una raccolta di booleani.",
|
"booleanPolymorphicDescription": "Una raccolta di booleani.",
|
||||||
"missingTemplate": "Modello mancante",
|
"missingTemplate": "Nodo non valido: nodo {{node}} di tipo {{type}} modello mancante (non installato?)",
|
||||||
"outputSchemaNotFound": "Schema di output non trovato",
|
"outputSchemaNotFound": "Schema di output non trovato",
|
||||||
"colorFieldDescription": "Un colore RGBA.",
|
"colorFieldDescription": "Un colore RGBA.",
|
||||||
"maybeIncompatible": "Potrebbe essere incompatibile con quello installato",
|
"maybeIncompatible": "Potrebbe essere incompatibile con quello installato",
|
||||||
@ -979,7 +988,7 @@
|
|||||||
"cannotConnectOutputToOutput": "Impossibile collegare Output ad Output",
|
"cannotConnectOutputToOutput": "Impossibile collegare Output ad Output",
|
||||||
"booleanCollection": "Raccolta booleana",
|
"booleanCollection": "Raccolta booleana",
|
||||||
"cannotConnectToSelf": "Impossibile connettersi a se stesso",
|
"cannotConnectToSelf": "Impossibile connettersi a se stesso",
|
||||||
"mismatchedVersion": "Ha una versione non corrispondente",
|
"mismatchedVersion": "Nodo non valido: il nodo {{node}} di tipo {{type}} ha una versione non corrispondente (provare ad aggiornare?)",
|
||||||
"outputNode": "Nodo di Output",
|
"outputNode": "Nodo di Output",
|
||||||
"loadingNodes": "Caricamento nodi...",
|
"loadingNodes": "Caricamento nodi...",
|
||||||
"oNNXModelFieldDescription": "Campo del modello ONNX.",
|
"oNNXModelFieldDescription": "Campo del modello ONNX.",
|
||||||
@ -1058,7 +1067,7 @@
|
|||||||
"latentsCollectionDescription": "Le immagini latenti possono essere passate tra i nodi.",
|
"latentsCollectionDescription": "Le immagini latenti possono essere passate tra i nodi.",
|
||||||
"imageCollection": "Raccolta Immagini",
|
"imageCollection": "Raccolta Immagini",
|
||||||
"loRAModelField": "LoRA",
|
"loRAModelField": "LoRA",
|
||||||
"updateAllNodes": "Aggiorna tutti i nodi",
|
"updateAllNodes": "Aggiorna i nodi",
|
||||||
"unableToUpdateNodes_one": "Impossibile aggiornare {{count}} nodo",
|
"unableToUpdateNodes_one": "Impossibile aggiornare {{count}} nodo",
|
||||||
"unableToUpdateNodes_many": "Impossibile aggiornare {{count}} nodi",
|
"unableToUpdateNodes_many": "Impossibile aggiornare {{count}} nodi",
|
||||||
"unableToUpdateNodes_other": "Impossibile aggiornare {{count}} nodi",
|
"unableToUpdateNodes_other": "Impossibile aggiornare {{count}} nodi",
|
||||||
@ -1069,7 +1078,27 @@
|
|||||||
"unknownErrorValidatingWorkflow": "Errore sconosciuto durante la convalida del flusso di lavoro",
|
"unknownErrorValidatingWorkflow": "Errore sconosciuto durante la convalida del flusso di lavoro",
|
||||||
"collectionFieldType": "{{name}} Raccolta",
|
"collectionFieldType": "{{name}} Raccolta",
|
||||||
"collectionOrScalarFieldType": "{{name}} Raccolta|Scalare",
|
"collectionOrScalarFieldType": "{{name}} Raccolta|Scalare",
|
||||||
"nodeVersion": "Versione Nodo"
|
"nodeVersion": "Versione Nodo",
|
||||||
|
"inputFieldTypeParseError": "Impossibile analizzare il tipo di campo di input {{node}}.{{field}} ({{message}})",
|
||||||
|
"unsupportedArrayItemType": "tipo di elemento dell'array non supportato \"{{type}}\"",
|
||||||
|
"targetNodeFieldDoesNotExist": "Connessione non valida: il campo di destinazione/input {{node}}.{{field}} non esiste",
|
||||||
|
"unsupportedMismatchedUnion": "tipo CollectionOrScalar non corrispondente con tipi di base {{firstType}} e {{secondType}}",
|
||||||
|
"allNodesUpdated": "Tutti i nodi sono aggiornati",
|
||||||
|
"sourceNodeDoesNotExist": "Connessione non valida: il nodo di origine/output {{node}} non esiste",
|
||||||
|
"unableToExtractEnumOptions": "impossibile estrarre le opzioni enum",
|
||||||
|
"unableToParseFieldType": "impossibile analizzare il tipo di campo",
|
||||||
|
"unrecognizedWorkflowVersion": "Versione dello schema del flusso di lavoro non riconosciuta {{version}}",
|
||||||
|
"outputFieldTypeParseError": "Impossibile analizzare il tipo di campo di output {{node}}.{{field}} ({{message}})",
|
||||||
|
"sourceNodeFieldDoesNotExist": "Connessione non valida: il campo di origine/output {{node}}.{{field}} non esiste",
|
||||||
|
"unableToGetWorkflowVersion": "Impossibile ottenere la versione dello schema del flusso di lavoro",
|
||||||
|
"nodePack": "Pacchetto di nodi",
|
||||||
|
"unableToExtractSchemaNameFromRef": "impossibile estrarre il nome dello schema dal riferimento",
|
||||||
|
"unknownOutput": "Output sconosciuto: {{name}}",
|
||||||
|
"unknownNodeType": "Tipo di nodo sconosciuto",
|
||||||
|
"targetNodeDoesNotExist": "Connessione non valida: il nodo di destinazione/input {{node}} non esiste",
|
||||||
|
"unknownFieldType": "$t(nodes.unknownField) tipo: {{type}}",
|
||||||
|
"deletedInvalidEdge": "Eliminata connessione non valida {{source}} -> {{target}}",
|
||||||
|
"unknownInput": "Input sconosciuto: {{name}}"
|
||||||
},
|
},
|
||||||
"boards": {
|
"boards": {
|
||||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||||
@ -1088,9 +1117,9 @@
|
|||||||
"selectBoard": "Seleziona una Bacheca",
|
"selectBoard": "Seleziona una Bacheca",
|
||||||
"uncategorized": "Non categorizzato",
|
"uncategorized": "Non categorizzato",
|
||||||
"downloadBoard": "Scarica la bacheca",
|
"downloadBoard": "Scarica la bacheca",
|
||||||
"deleteBoardOnly": "Elimina solo la Bacheca",
|
"deleteBoardOnly": "solo la Bacheca",
|
||||||
"deleteBoard": "Elimina Bacheca",
|
"deleteBoard": "Elimina Bacheca",
|
||||||
"deleteBoardAndImages": "Elimina Bacheca e Immagini",
|
"deleteBoardAndImages": "Bacheca e Immagini",
|
||||||
"deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate",
|
"deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate",
|
||||||
"movingImagesToBoard_one": "Spostare {{count}} immagine nella bacheca:",
|
"movingImagesToBoard_one": "Spostare {{count}} immagine nella bacheca:",
|
||||||
"movingImagesToBoard_many": "Spostare {{count}} immagini nella bacheca:",
|
"movingImagesToBoard_many": "Spostare {{count}} immagini nella bacheca:",
|
||||||
@ -1499,6 +1528,12 @@
|
|||||||
"ControlNet fornisce una guida al processo di generazione, aiutando a creare immagini con composizione, struttura o stile controllati, a seconda del modello selezionato."
|
"ControlNet fornisce una guida al processo di generazione, aiutando a creare immagini con composizione, struttura o stile controllati, a seconda del modello selezionato."
|
||||||
],
|
],
|
||||||
"heading": "ControlNet"
|
"heading": "ControlNet"
|
||||||
|
},
|
||||||
|
"paramCFGRescaleMultiplier": {
|
||||||
|
"heading": "Moltiplicatore di riscala CFG",
|
||||||
|
"paragraphs": [
|
||||||
|
"Moltiplicatore di riscala per la guida CFG, utilizzato per modelli addestrati utilizzando SNR a terminale zero (ztsnr). Valore suggerito 0.7."
|
||||||
|
]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"sdxl": {
|
"sdxl": {
|
||||||
|
@ -682,7 +682,6 @@
|
|||||||
"parameterSet": "Instellen parameters",
|
"parameterSet": "Instellen parameters",
|
||||||
"nodesSaved": "Knooppunten bewaard",
|
"nodesSaved": "Knooppunten bewaard",
|
||||||
"nodesLoaded": "Knooppunten geladen",
|
"nodesLoaded": "Knooppunten geladen",
|
||||||
"nodesCleared": "Knooppunten weggehaald",
|
|
||||||
"nodesLoadedFailed": "Laden knooppunten mislukt",
|
"nodesLoadedFailed": "Laden knooppunten mislukt",
|
||||||
"problemCopyingImage": "Kan Afbeelding Niet Kopiëren",
|
"problemCopyingImage": "Kan Afbeelding Niet Kopiëren",
|
||||||
"nodesNotValidJSON": "Ongeldige JSON",
|
"nodesNotValidJSON": "Ongeldige JSON",
|
||||||
|
@@ -606,7 +606,6 @@
 "nodesLoaded": "Узлы загружены",
 "problemCopyingImage": "Не удается скопировать изображение",
 "nodesLoadedFailed": "Не удалось загрузить Узлы",
-"nodesCleared": "Узлы очищены",
 "nodesBrokenConnections": "Не удается загрузить. Некоторые соединения повреждены.",
 "nodesUnrecognizedTypes": "Не удается загрузить. Граф имеет нераспознанные типы",
 "nodesNotValidJSON": "Недопустимый JSON",
@@ -723,7 +723,6 @@
 "nodesUnrecognizedTypes": "无法加载。节点图有无法识别的节点类型",
 "nodesNotValidJSON": "无效的 JSON",
 "nodesNotValidGraph": "无效的 InvokeAi 节点图",
-"nodesCleared": "节点已清空",
 "nodesLoadedFailed": "节点图加载失败",
 "modelAddedSimple": "已添加模型",
 "modelAdded": "已添加模型: {{modelName}}",
@@ -1,4 +1,4 @@
-import { createSelector } from '@reduxjs/toolkit';
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
 import { stateSelector } from 'app/store/store';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
 import { useQueueBack } from 'features/queue/hooks/useQueueBack';
@@ -9,20 +9,14 @@ import {
 shiftKeyPressed,
 } from 'features/ui/store/hotkeysSlice';
 import { setActiveTab } from 'features/ui/store/uiSlice';
-import { isEqual } from 'lodash-es';
 import React, { memo } from 'react';
 import { isHotkeyPressed, useHotkeys } from 'react-hotkeys-hook';

-const globalHotkeysSelector = createSelector(
+const globalHotkeysSelector = createMemoizedSelector(
 [stateSelector],
 ({ hotkeys }) => {
 const { shift, ctrl, meta } = hotkeys;
 return { shift, ctrl, meta };
-},
-{
-memoizeOptions: {
-resultEqualityCheck: isEqual,
-},
 }
 );

@@ -1,8 +1,7 @@
-import { createSelector } from '@reduxjs/toolkit';
 import { createLogWriter } from '@roarr/browser-log-writer';
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
 import { stateSelector } from 'app/store/store';
 import { useAppSelector } from 'app/store/storeHooks';
-import { isEqual } from 'lodash-es';
 import { useEffect, useMemo } from 'react';
 import { ROARR, Roarr } from 'roarr';
 import {
@@ -13,22 +12,14 @@ import {
 logger,
 } from './logger';

-const selector = createSelector(
-stateSelector,
-({ system }) => {
-const { consoleLogLevel, shouldLogToConsole } = system;
+const selector = createMemoizedSelector(stateSelector, ({ system }) => {
+const { consoleLogLevel, shouldLogToConsole } = system;

 return {
 consoleLogLevel,
 shouldLogToConsole,
 };
-},
-{
-memoizeOptions: {
-resultEqualityCheck: isEqual,
-},
-}
-);
+});

 export const useLogger = (namespace: LoggerNamespace) => {
 const { consoleLogLevel, shouldLogToConsole } = useAppSelector(selector);
@@ -0,0 +1,12 @@
+import { createSelectorCreator, lruMemoize } from '@reduxjs/toolkit';
+import { isEqual } from 'lodash-es';
+
+/**
+ * A memoized selector creator that uses LRU cache and lodash's isEqual for equality check.
+ */
+export const createMemoizedSelector = createSelectorCreator({
+memoize: lruMemoize,
+memoizeOptions: {
+resultEqualityCheck: isEqual,
+},
+});
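Not part of the diff: a minimal usage sketch of the helper added above, written against the stateSelector/useAppSelector imports that appear elsewhere in this commit; the selector name is illustrative, while the canvas fields are ones this commit's own selectors read.

import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { stateSelector } from 'app/store/store';
import { useAppSelector } from 'app/store/storeHooks';

// Returning a fresh object literal is fine here: lruMemoize plus the isEqual
// resultEqualityCheck hands back the previous reference when the contents are
// deeply equal, so components subscribed via useAppSelector do not re-render.
const selectStage = createMemoizedSelector(stateSelector, ({ canvas }) => ({
  stageScale: canvas.stageScale,
  stageCoordinates: canvas.stageCoordinates,
}));

export const useStage = () => useAppSelector(selectStage);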
@@ -1,10 +1,10 @@
-import { AnyAction } from '@reduxjs/toolkit';
+import { UnknownAction } from '@reduxjs/toolkit';
 import { isAnyGraphBuilt } from 'features/nodes/store/actions';
 import { nodeTemplatesBuilt } from 'features/nodes/store/nodesSlice';
 import { receivedOpenAPISchema } from 'services/api/thunks/schema';
 import { Graph } from 'services/api/types';

-export const actionSanitizer = <A extends AnyAction>(action: A): A => {
+export const actionSanitizer = <A extends UnknownAction>(action: A): A => {
 if (isAnyGraphBuilt(action)) {
 if (action.payload.nodes) {
 const sanitizedNodes: Graph['nodes'] = {};
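Not part of the diff: the swap from AnyAction to UnknownAction (Redux Toolkit 2.0) is what forces guards like isAnyGraphBuilt above, since an UnknownAction's payload stays untyped until the action is narrowed. A minimal sketch with a hypothetical action creator:

import { createAction, type UnknownAction } from '@reduxjs/toolkit';

// Hypothetical action, used only for illustration.
const exampleAction = createAction<{ id: string }>('example/somethingHappened');

const describe = (action: UnknownAction): string => {
  // Without narrowing, `action.payload` is not accessible on UnknownAction.
  if (exampleAction.match(action)) {
    // Inside the guard the payload is typed as { id: string }.
    return `example action for ${action.payload.id}`;
  }
  return `unhandled action of type ${action.type}`;
};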
@@ -1,11 +1,10 @@
 import type { TypedAddListener, TypedStartListening } from '@reduxjs/toolkit';
 import {
-AnyAction,
+UnknownAction,
 ListenerEffect,
 addListener,
 createListenerMiddleware,
 } from '@reduxjs/toolkit';
-
 import type { AppDispatch, RootState } from 'app/store/store';
 import { addCommitStagingAreaImageListener } from './listeners/addCommitStagingAreaImageListener';
 import { addFirstListImagesListener } from './listeners/addFirstListImagesListener.ts';
@@ -87,7 +86,7 @@ export const addAppListener = addListener as TypedAddListener<
 >;

 export type AppListenerEffect = ListenerEffect<
-AnyAction,
+UnknownAction,
 RootState,
 AppDispatch
 >;
@@ -3,7 +3,7 @@ import { logger } from 'app/logging/logger';
 import { parseify } from 'common/util/serialize';
 import { zPydanticValidationError } from 'features/system/store/zodSchemas';
 import { t } from 'i18next';
-import { get, truncate, upperFirst } from 'lodash-es';
+import { truncate, upperFirst } from 'lodash-es';
 import { queueApi } from 'services/api/endpoints/queue';
 import { TOAST_OPTIONS, theme } from 'theme/theme';
 import { startAppListening } from '..';
@@ -74,22 +74,11 @@ export const addBatchEnqueuedListener = () => {
 ),
 });
 });
-} else {
-let detail = 'Unknown Error';
-let duration = undefined;
-if (response.status === 403 && 'body' in response) {
-detail = get(response, 'body.detail', 'Unknown Error');
-} else if (response.status === 403 && 'error' in response) {
-detail = get(response, 'error.detail', 'Unknown Error');
-} else if (response.status === 403 && 'data' in response) {
-detail = get(response, 'data.detail', 'Unknown Error');
-duration = 15000;
-}
+} else if (response.status !== 403) {
 toast({
 title: t('queue.batchFailedToQueue'),
+description: t('common.unknownError'),
 status: 'error',
-description: detail,
-...(duration ? { duration } : {}),
 });
 }
 logger('queue').error(
@@ -49,11 +49,14 @@ export const addBoardIdSelectedListener = () => {

 if (isSuccess) {
 // the board was just changed - we can select the first image
-const { data: boardImagesData } = imagesApi.endpoints.listImages.select(
-queryArgs
-)(getState());
+const { data: boardImagesData } =
+imagesApi.endpoints.listImages.select(queryArgs)(getState());

-if (boardImagesData) {
+if (
+boardImagesData &&
+boardIdSelected.match(action) &&
+action.payload.selectedImageName
+) {
 const firstImage = imagesSelectors.selectAll(boardImagesData)[0];
 const selectedImage = imagesSelectors.selectById(
 boardImagesData,
@@ -109,20 +109,9 @@ export const addControlNetImageProcessedListener = () => {
 t('queue.graphFailedToQueue')
 );

-// handle usage-related errors
 if (error instanceof Object) {
 if ('data' in error && 'status' in error) {
 if (error.status === 403) {
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-const detail = (error.data as any)?.detail || 'Unknown Error';
-dispatch(
-addToast({
-title: t('queue.graphFailedToQueue'),
-status: 'error',
-description: detail,
-duration: 15000,
-})
-);
 dispatch(pendingControlImagesCleared());
 dispatch(controlAdapterImageChanged({ id, controlImage: null }));
 return;
@@ -1,5 +1,6 @@
 import { enqueueRequested } from 'app/store/actions';
 import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
+import { buildWorkflow } from 'features/nodes/util/workflow/buildWorkflow';
 import { queueApi } from 'services/api/endpoints/queue';
 import { BatchConfig } from 'services/api/types';
 import { startAppListening } from '..';
@@ -10,10 +11,22 @@ export const addEnqueueRequestedNodes = () => {
 enqueueRequested.match(action) && action.payload.tabName === 'nodes',
 effect: async (action, { getState, dispatch }) => {
 const state = getState();
+const { nodes, edges } = state.nodes;
+const workflow = state.workflow;
 const graph = buildNodesGraph(state.nodes);
+const builtWorkflow = buildWorkflow({
+nodes,
+edges,
+workflow,
+});
+
+// embedded workflows don't have an id
+delete builtWorkflow.id;
+
 const batchConfig: BatchConfig = {
 batch: {
 graph,
+workflow: builtWorkflow,
 runs: state.generation.iterations,
 },
 prepend: action.payload.prepend,
@@ -11,13 +11,11 @@ import {
 TypesafeDroppableData,
 } from 'features/dnd/types';
 import { imageSelected } from 'features/gallery/store/gallerySlice';
-import {
-fieldImageValueChanged,
-workflowExposedFieldAdded,
-} from 'features/nodes/store/nodesSlice';
+import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
 import { initialImageChanged } from 'features/parameters/store/generationSlice';
 import { imagesApi } from 'services/api/endpoints/images';
 import { startAppListening } from '../';
+import { workflowExposedFieldAdded } from 'features/nodes/store/workflowSlice';

 export const dndDropped = createAction<{
 overData: TypesafeDroppableData;
@@ -24,7 +24,7 @@ export const addSocketQueueItemStatusChangedEventListener = () => {
 dispatch(
 queueApi.util.updateQueryData('listQueueItems', undefined, (draft) => {
 queueItemsAdapter.updateOne(draft, {
-id: queue_item.item_id,
+id: String(queue_item.item_id),
 changes: queue_item,
 });
 })
@@ -75,31 +75,20 @@ export const addUpscaleRequestedListener = () => {
 t('queue.graphFailedToQueue')
 );

-// handle usage-related errors
-if (error instanceof Object) {
-if ('data' in error && 'status' in error) {
-if (error.status === 403) {
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-const detail = (error.data as any)?.detail || 'Unknown Error';
-dispatch(
-addToast({
-title: t('queue.graphFailedToQueue'),
-status: 'error',
-description: detail,
-duration: 15000,
-})
-);
-return;
-}
-}
+if (
+error instanceof Object &&
+'status' in error &&
+error.status === 403
+) {
+return;
+} else {
+dispatch(
+addToast({
+title: t('queue.graphFailedToQueue'),
+status: 'error',
+})
+);
 }
-
-dispatch(
-addToast({
-title: t('queue.graphFailedToQueue'),
-status: 'error',
-})
-);
 }
 },
 });
@@ -1,7 +1,7 @@
 import { logger } from 'app/logging/logger';
 import { parseify } from 'common/util/serialize';
 import { workflowLoadRequested } from 'features/nodes/store/actions';
-import { workflowLoaded } from 'features/nodes/store/nodesSlice';
+import { workflowLoaded } from 'features/nodes/store/actions';
 import { $flow } from 'features/nodes/store/reactFlowInstance';
 import {
 WorkflowMigrationError,
@@ -21,7 +21,7 @@ export const addWorkflowLoadRequestedListener = () => {
 actionCreator: workflowLoadRequested,
 effect: (action, { dispatch, getState }) => {
 const log = logger('nodes');
-const workflow = action.payload;
+const { workflow, asCopy } = action.payload;
 const nodeTemplates = getState().nodes.nodeTemplates;

 try {
@@ -29,6 +29,12 @@ export const addWorkflowLoadRequestedListener = () => {
 workflow,
 nodeTemplates
 );
+
+if (asCopy) {
+// If we're loading a copy, we need to remove the ID so that the backend will create a new workflow
+delete validatedWorkflow.id;
+}
+
 dispatch(workflowLoaded(validatedWorkflow));
 if (!warnings.length) {
 dispatch(
@@ -99,7 +105,6 @@ export const addWorkflowLoadRequestedListener = () => {
 );
 } else {
 // Some other error occurred
-console.log(e);
 log.error(
 { error: parseify(e) },
 t('nodes.unknownErrorValidatingWorkflow')
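Not part of the diff: an illustrative dispatch of the action handled above. The { workflow, asCopy } payload shape comes from the listener itself; someWorkflow is a placeholder for a workflow object obtained elsewhere (e.g. from the workflow library).

import { useAppDispatch } from 'app/store/storeHooks';
import { workflowLoadRequested } from 'features/nodes/store/actions';

// Inside some component or hook:
const dispatch = useAppDispatch();

// asCopy: true -> the validated workflow loses its id, so saving it later
// creates a new workflow on the backend instead of overwriting the original.
dispatch(workflowLoadRequested({ workflow: someWorkflow, asCopy: true }));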
@@ -1,5 +1,6 @@
-import { Store } from '@reduxjs/toolkit';
+import { createStore } from 'app/store/store';
 import { atom } from 'nanostores';

-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-export const $store = atom<Store<any> | undefined>();
+export const $store = atom<
+Readonly<ReturnType<typeof createStore>> | undefined
+>();
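Not part of the diff: a sketch of what the tighter atom type buys. It assumes some bootstrap code has already called $store.set(store); the generation.iterations field is one the diff itself reads elsewhere.

// `$store` is the atom declared in the hunk above; import it from that module.
const store = $store.get();
if (store) {
  // getState() is now typed as the real RootState instead of any,
  // so this property access is checked by the compiler.
  const iterations = store.getState().generation.iterations;
  console.log(`queued runs per batch: ${iterations}`);
}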
@@ -1,6 +1,6 @@
 import {
-AnyAction,
 ThunkDispatch,
+UnknownAction,
 autoBatchEnhancer,
 combineReducers,
 configureStore,
@@ -14,6 +14,7 @@ import galleryReducer from 'features/gallery/store/gallerySlice';
 import loraReducer from 'features/lora/store/loraSlice';
 import modelmanagerReducer from 'features/modelManager/store/modelManagerSlice';
 import nodesReducer from 'features/nodes/store/nodesSlice';
+import workflowReducer from 'features/nodes/store/workflowSlice';
 import generationReducer from 'features/parameters/store/generationSlice';
 import postprocessingReducer from 'features/parameters/store/postprocessingSlice';
 import queueReducer from 'features/queue/store/queueSlice';
@@ -22,6 +23,7 @@ import configReducer from 'features/system/store/configSlice';
 import systemReducer from 'features/system/store/systemSlice';
 import hotkeysReducer from 'features/ui/store/hotkeysSlice';
 import uiReducer from 'features/ui/store/uiSlice';
+import { createStore as createIDBKeyValStore, get, set } from 'idb-keyval';
 import dynamicMiddlewares from 'redux-dynamic-middlewares';
 import { Driver, rememberEnhancer, rememberReducer } from 'redux-remember';
 import { api } from 'services/api';
@@ -32,7 +34,6 @@ import { actionSanitizer } from './middleware/devtools/actionSanitizer';
 import { actionsDenylist } from './middleware/devtools/actionsDenylist';
 import { stateSanitizer } from './middleware/devtools/stateSanitizer';
 import { listenerMiddleware } from './middleware/listenerMiddleware';
-import { createStore as createIDBKeyValStore, get, set } from 'idb-keyval';

 const allReducers = {
 canvas: canvasReducer,
@@ -52,6 +53,7 @@ const allReducers = {
 modelmanager: modelmanagerReducer,
 sdxl: sdxlReducer,
 queue: queueReducer,
+workflow: workflowReducer,
 [api.reducerPath]: api.reducer,
 };

@@ -65,6 +67,7 @@ const rememberedKeys: (keyof typeof allReducers)[] = [
 'generation',
 'sdxl',
 'nodes',
+'workflow',
 'postprocessing',
 'system',
 'ui',
@@ -83,23 +86,9 @@ const idbKeyValDriver: Driver = {
 setItem: (key, value) => set(key, value, idbKeyValStore),
 };

-export const createStore = (uniqueStoreKey?: string) =>
+export const createStore = (uniqueStoreKey?: string, persist = true) =>
 configureStore({
 reducer: rememberedRootReducer,
-enhancers: (existingEnhancers) => {
-return existingEnhancers
-.concat(
-rememberEnhancer(idbKeyValDriver, rememberedKeys, {
-persistDebounce: 300,
-serialize,
-unserialize,
-prefix: uniqueStoreKey
-? `${STORAGE_PREFIX}${uniqueStoreKey}-`
-: STORAGE_PREFIX,
-})
-)
-.concat(autoBatchEnhancer());
-},
 middleware: (getDefaultMiddleware) =>
 getDefaultMiddleware({
 serializableCheck: false,
@@ -108,6 +97,22 @@ export const createStore = (uniqueStoreKey?: string) =>
 .concat(api.middleware)
 .concat(dynamicMiddlewares)
 .prepend(listenerMiddleware.middleware),
+enhancers: (getDefaultEnhancers) => {
+const _enhancers = getDefaultEnhancers().concat(autoBatchEnhancer());
+if (persist) {
+_enhancers.push(
+rememberEnhancer(idbKeyValDriver, rememberedKeys, {
+persistDebounce: 300,
+serialize,
+unserialize,
+prefix: uniqueStoreKey
+? `${STORAGE_PREFIX}${uniqueStoreKey}-`
+: STORAGE_PREFIX,
+})
+);
+}
+return _enhancers;
+},
 devTools: {
 actionSanitizer,
 stateSanitizer,
@@ -138,6 +143,6 @@ export type AppGetState = ReturnType<
 >;
 export type RootState = ReturnType<ReturnType<typeof createStore>['getState']>;
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
-export type AppThunkDispatch = ThunkDispatch<RootState, any, AnyAction>;
+export type AppThunkDispatch = ThunkDispatch<RootState, any, UnknownAction>;
 export type AppDispatch = ReturnType<typeof createStore>['dispatch'];
 export const stateSelector = (state: RootState) => state;
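Not part of the diff: a minimal sketch of the new persist switch. Only the createStore(uniqueStoreKey?, persist = true) signature and the persistence details (redux-remember over idb-keyval, 300 ms debounce) come from the hunks above; the calling context is hypothetical.

import { createStore } from 'app/store/store';

// Default behaviour: the slices listed in rememberedKeys are persisted to
// IndexedDB via redux-remember, debounced at 300 ms, under a per-key prefix.
const appStore = createStore('my-project-key');

// persist = false skips the rememberEnhancer entirely, which is convenient for
// tests or throwaway stores that should never touch IndexedDB.
const ephemeralStore = createStore(undefined, false);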
@@ -1,7 +0,0 @@
-import { isEqual } from 'lodash-es';
-
-export const defaultSelectorOptions = {
-memoizeOptions: {
-resultEqualityCheck: isEqual,
-},
-};
@@ -23,7 +23,8 @@ export type AppFeature =
 | 'resumeQueue'
 | 'prependQueue'
 | 'invocationCache'
-| 'bulkDownload';
+| 'bulkDownload'
+| 'workflowLibrary';

 /**
 * A disable-able Stable Diffusion feature
@@ -12,7 +12,6 @@
 * - increment it in `onPaneClick`
 * - `useEffect()` to close the menu when `globalContextMenuCloseTrigger` changes
 */
-
 import {
 Menu,
 MenuButton,
@@ -22,7 +21,7 @@ import {
 PortalProps,
 useEventListener,
 } from '@chakra-ui/react';
-import { useAppSelector } from 'app/store/storeHooks';
+import { useGlobalMenuCloseTrigger } from 'common/hooks/useGlobalMenuCloseTrigger';
 import * as React from 'react';
 import {
 MutableRefObject,
@@ -49,10 +48,6 @@ export function IAIContextMenu<T extends HTMLElement = HTMLElement>(
 const [position, setPosition] = useState<[number, number]>([0, 0]);
 const targetRef = useRef<T>(null);

-const globalContextMenuCloseTrigger = useAppSelector(
-(state) => state.ui.globalContextMenuCloseTrigger
-);
-
 useEffect(() => {
 if (isOpen) {
 setTimeout(() => {
@@ -70,11 +65,12 @@ export function IAIContextMenu<T extends HTMLElement = HTMLElement>(
 }
 }, [isOpen]);

-useEffect(() => {
+const onClose = useCallback(() => {
 setIsOpen(false);
 setIsDeferredOpen(false);
 setIsRendered(false);
-}, [globalContextMenuCloseTrigger]);
+}, []);
+useGlobalMenuCloseTrigger(onClose);

 useEventListener('contextmenu', (e) => {
 if (
@@ -1,4 +1,10 @@
-import { FormControl, FormLabel, Tooltip, forwardRef } from '@chakra-ui/react';
+import {
+FormControl,
+FormControlProps,
+FormLabel,
+Tooltip,
+forwardRef,
+} from '@chakra-ui/react';
 import { Select, SelectProps } from '@mantine/core';
 import { useMantineSelectStyles } from 'mantine-theme/hooks/useMantineSelectStyles';
 import { RefObject, memo } from 'react';
@@ -13,10 +19,19 @@ export type IAISelectProps = Omit<SelectProps, 'label'> & {
 tooltip?: string | null;
 inputRef?: RefObject<HTMLInputElement>;
 label?: string;
+formControlProps?: FormControlProps;
 };

 const IAIMantineSelect = forwardRef((props: IAISelectProps, ref) => {
-const { tooltip, inputRef, label, disabled, required, ...rest } = props;
+const {
+tooltip,
+formControlProps,
+inputRef,
+label,
+disabled,
+required,
+...rest
+} = props;

 const styles = useMantineSelectStyles();

@@ -28,6 +43,7 @@ const IAIMantineSelect = forwardRef((props: IAISelectProps, ref) => {
 isDisabled={disabled}
 position="static"
 data-testid={`select-${label || props.placeholder}`}
+{...formControlProps}
 >
 <FormLabel>{label}</FormLabel>
 <Select disabled={disabled} ref={inputRef} styles={styles} {...rest} />
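Not part of the diff: an illustrative use of the new formControlProps prop, which is spread onto the wrapping Chakra FormControl so each call site can adjust layout without touching the component. The import path and the select's data and label are assumptions.

import IAIMantineSelect from 'common/components/IAIMantineSelect'; // path assumed

export const SchedulerSelect = () => (
  <IAIMantineSelect
    label="Scheduler"
    data={['euler', 'ddim', 'dpmpp_2m']}
    // Layout concerns live at the call site via the wrapping FormControl.
    formControlProps={{ w: 'full', maxW: 60 }}
  />
);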
@@ -19,7 +19,6 @@ import { useAppDispatch } from 'app/store/storeHooks';
 import { stopPastePropagation } from 'common/util/stopPastePropagation';
 import { shiftKeyPressed } from 'features/ui/store/hotkeysSlice';
-import { clamp } from 'lodash-es';

 import {
 FocusEvent,
 KeyboardEvent,
|
@ -1,9 +1,8 @@
|
|||||||
import { Box } from '@chakra-ui/react';
|
import { Box } from '@chakra-ui/react';
|
||||||
import { createSelector } from '@reduxjs/toolkit';
|
|
||||||
import { useAppToaster } from 'app/components/Toaster';
|
import { useAppToaster } from 'app/components/Toaster';
|
||||||
|
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||||
import { stateSelector } from 'app/store/store';
|
import { stateSelector } from 'app/store/store';
|
||||||
import { useAppSelector } from 'app/store/storeHooks';
|
import { useAppSelector } from 'app/store/storeHooks';
|
||||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
|
||||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||||
import { AnimatePresence, motion } from 'framer-motion';
|
import { AnimatePresence, motion } from 'framer-motion';
|
||||||
import {
|
import {
|
||||||
@ -20,7 +19,7 @@ import { useUploadImageMutation } from 'services/api/endpoints/images';
|
|||||||
import { PostUploadAction } from 'services/api/types';
|
import { PostUploadAction } from 'services/api/types';
|
||||||
import ImageUploadOverlay from './ImageUploadOverlay';
|
import ImageUploadOverlay from './ImageUploadOverlay';
|
||||||
|
|
||||||
const selector = createSelector(
|
const selector = createMemoizedSelector(
|
||||||
[stateSelector, activeTabNameSelector],
|
[stateSelector, activeTabNameSelector],
|
||||||
({ gallery }, activeTabName) => {
|
({ gallery }, activeTabName) => {
|
||||||
let postUploadAction: PostUploadAction = { type: 'TOAST' };
|
let postUploadAction: PostUploadAction = { type: 'TOAST' };
|
||||||
@ -39,8 +38,7 @@ const selector = createSelector(
|
|||||||
autoAddBoardId,
|
autoAddBoardId,
|
||||||
postUploadAction,
|
postUploadAction,
|
||||||
};
|
};
|
||||||
},
|
}
|
||||||
defaultSelectorOptions
|
|
||||||
);
|
);
|
||||||
|
|
||||||
type ImageUploaderProps = {
|
type ImageUploaderProps = {
|
||||||
|
invokeai/frontend/web/src/common/components/Nbsp.tsx (new file, 1 line)
@@ -0,0 +1 @@
+export const Nbsp = () => <>{'\u00A0'}</>;
@@ -25,16 +25,16 @@ const SelectionOverlay = ({ isSelected, isHovered }: Props) => {
 ? 'hoverSelected.light'
 : 'selected.light'
 : isHovered
 ? 'hoverUnselected.light'
 : undefined,
 _dark: {
 shadow: isSelected
 ? isHovered
 ? 'hoverSelected.dark'
 : 'selected.dark'
 : isHovered
 ? 'hoverUnselected.dark'
 : undefined,
 },
 }}
 />
@@ -0,0 +1,25 @@
+import { useAppSelector } from 'app/store/storeHooks';
+import { useEffect } from 'react';
+
+/**
+ * The reactflow background element somehow prevents the chakra `useOutsideClick()` hook from working.
+ * With a menu open, clicking on the reactflow background element doesn't close the menu.
+ *
+ * Reactflow does provide an `onPaneClick` to handle clicks on the background element, but it is not
+ * straightforward to programatically close the menu.
+ *
+ * As a (hopefully temporary) workaround, we will use a dirty hack:
+ * - create `globalMenuCloseTrigger: number` in `ui` slice
+ * - increment it in `onPaneClick`
+ * - `useEffect()` to close the menu when `globalMenuCloseTrigger` changes
+ */
+
+export const useGlobalMenuCloseTrigger = (onClose: () => void) => {
+const globalMenuCloseTrigger = useAppSelector(
+(state) => state.ui.globalMenuCloseTrigger
+);
+
+useEffect(() => {
+onClose();
+}, [globalMenuCloseTrigger, onClose]);
+};
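Not part of the diff: a minimal usage sketch of the hook added above. The component and its disclosure state are illustrative; the hook import path matches the one used by IAIContextMenu earlier in this commit.

import { useDisclosure } from '@chakra-ui/react';
import { useGlobalMenuCloseTrigger } from 'common/hooks/useGlobalMenuCloseTrigger';

// Any menu-like component registers its close handler; when reactflow's
// onPaneClick increments ui.globalMenuCloseTrigger, the menu closes itself.
export const ExampleMenu = () => {
  const { isOpen, onOpen, onClose } = useDisclosure();
  useGlobalMenuCloseTrigger(onClose);

  return <button onClick={onOpen}>{isOpen ? 'menu open' : 'open menu'}</button>;
};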
@@ -1,7 +1,6 @@
-import { createSelector } from '@reduxjs/toolkit';
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
 import { stateSelector } from 'app/store/store';
 import { useAppSelector } from 'app/store/storeHooks';
-import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
 import { selectControlAdapterAll } from 'features/controlAdapters/store/controlAdaptersSlice';
 import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
 import { isInvocationNode } from 'features/nodes/types/invocation';
@@ -10,7 +9,7 @@ import i18n from 'i18next';
 import { forEach } from 'lodash-es';
 import { getConnectedEdges } from 'reactflow';

-const selector = createSelector(
+const selector = createMemoizedSelector(
 [stateSelector, activeTabNameSelector],
 (
 { controlAdapters, generation, system, nodes, dynamicPrompts },
@@ -125,8 +124,7 @@ const selector = createSelector(
 }

 return { isReady: !reasons.length, reasons };
-},
-defaultSelectorOptions
+}
 );

 export const useIsReadyToEnqueue = () => {
@@ -1,5 +1,4 @@
 // https://stackoverflow.com/a/73731908
-
 import { useEffect, useState } from 'react';

 export function useSingleAndDoubleClick(
@@ -1,16 +1,7 @@
 import { Box, chakra, Flex } from '@chakra-ui/react';
-import { createSelector } from '@reduxjs/toolkit';
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
+import { stateSelector } from 'app/store/store';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
-import {
-canvasSelector,
-isStagingSelector,
-} from 'features/canvas/store/canvasSelectors';
-import Konva from 'konva';
-import { KonvaEventObject } from 'konva/lib/Node';
-import { Vector2d } from 'konva/lib/types';
-import { memo, useCallback, useEffect, useRef } from 'react';
-import { Layer, Stage } from 'react-konva';
 import useCanvasDragMove from 'features/canvas/hooks/useCanvasDragMove';
 import useCanvasHotkeys from 'features/canvas/hooks/useCanvasHotkeys';
 import useCanvasMouseDown from 'features/canvas/hooks/useCanvasMouseDown';
@@ -18,11 +9,17 @@ import useCanvasMouseMove from 'features/canvas/hooks/useCanvasMouseMove';
 import useCanvasMouseOut from 'features/canvas/hooks/useCanvasMouseOut';
 import useCanvasMouseUp from 'features/canvas/hooks/useCanvasMouseUp';
 import useCanvasWheel from 'features/canvas/hooks/useCanvasZoom';
+import { isStagingSelector } from 'features/canvas/store/canvasSelectors';
 import { canvasResized } from 'features/canvas/store/canvasSlice';
 import {
 setCanvasBaseLayer,
 setCanvasStage,
 } from 'features/canvas/util/konvaInstanceProvider';
+import Konva from 'konva';
+import { KonvaEventObject } from 'konva/lib/Node';
+import { Vector2d } from 'konva/lib/types';
+import { memo, useCallback, useEffect, useRef } from 'react';
+import { Layer, Stage } from 'react-konva';
 import IAICanvasBoundingBoxOverlay from './IAICanvasBoundingBoxOverlay';
 import IAICanvasGrid from './IAICanvasGrid';
 import IAICanvasIntermediateImage from './IAICanvasIntermediateImage';
@@ -35,9 +32,9 @@ import IAICanvasStatusText from './IAICanvasStatusText';
 import IAICanvasBoundingBox from './IAICanvasToolbar/IAICanvasBoundingBox';
 import IAICanvasToolPreview from './IAICanvasToolPreview';

-const selector = createSelector(
-[canvasSelector, isStagingSelector],
-(canvas, isStaging) => {
+const selector = createMemoizedSelector(
+[stateSelector, isStagingSelector],
+({ canvas }, isStaging) => {
 const {
 isMaskEnabled,
 stageScale,
@@ -83,8 +80,7 @@ const selector = createSelector(
 shouldShowIntermediates,
 shouldAntialias,
 };
-},
-defaultSelectorOptions
+}
 );

 const ChakraStage = chakra(Stage, {
@@ -1,38 +1,28 @@
-import { createSelector } from '@reduxjs/toolkit';
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
+import { stateSelector } from 'app/store/store';
 import { useAppSelector } from 'app/store/storeHooks';
-import { isEqual } from 'lodash-es';
-
-import { Group, Rect } from 'react-konva';
-import { canvasSelector } from 'features/canvas/store/canvasSelectors';
 import { memo } from 'react';
+import { Group, Rect } from 'react-konva';

-const selector = createSelector(
-canvasSelector,
-(canvas) => {
-const {
-boundingBoxCoordinates,
-boundingBoxDimensions,
-stageDimensions,
-stageScale,
-shouldDarkenOutsideBoundingBox,
-stageCoordinates,
-} = canvas;
+const selector = createMemoizedSelector(stateSelector, ({ canvas }) => {
+const {
+boundingBoxCoordinates,
+boundingBoxDimensions,
+stageDimensions,
+stageScale,
+shouldDarkenOutsideBoundingBox,
+stageCoordinates,
+} = canvas;

 return {
 boundingBoxCoordinates,
 boundingBoxDimensions,
 shouldDarkenOutsideBoundingBox,
 stageCoordinates,
 stageDimensions,
 stageScale,
 };
-},
-{
-memoizeOptions: {
-resultEqualityCheck: isEqual,
-},
-}
-);
+});
 const IAICanvasBoundingBoxOverlay = () => {
 const {
 boundingBoxCoordinates,
@@ -1,26 +1,16 @@
 // Grid drawing adapted from https://longviewcoder.com/2021/12/08/konva-a-better-grid/

 import { useColorMode, useToken } from '@chakra-ui/react';
-import { createSelector } from '@reduxjs/toolkit';
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
+import { stateSelector } from 'app/store/store';
 import { useAppSelector } from 'app/store/storeHooks';
-import { canvasSelector } from 'features/canvas/store/canvasSelectors';
-import { isEqual, range } from 'lodash-es';
-
+import { range } from 'lodash-es';
 import { ReactNode, memo, useCallback, useLayoutEffect, useState } from 'react';
 import { Group, Line as KonvaLine } from 'react-konva';

-const selector = createSelector(
-[canvasSelector],
-(canvas) => {
-const { stageScale, stageCoordinates, stageDimensions } = canvas;
-return { stageScale, stageCoordinates, stageDimensions };
-},
-{
-memoizeOptions: {
-resultEqualityCheck: isEqual,
-},
-}
-);
-
+const selector = createMemoizedSelector([stateSelector], ({ canvas }) => {
+const { stageScale, stageCoordinates, stageDimensions } = canvas;
+return { stageScale, stageCoordinates, stageDimensions };
+});
 const IAICanvasGrid = () => {
 const { stageScale, stageCoordinates, stageDimensions } =
@@ -1,4 +1,4 @@
-import { skipToken } from '@reduxjs/toolkit/dist/query';
+import { skipToken } from '@reduxjs/toolkit/query';
 import { $authToken } from 'app/store/nanostores/authToken';
 import { memo } from 'react';
 import { Image } from 'react-konva';
@@ -2,31 +2,22 @@ import { createSelector } from '@reduxjs/toolkit';
 import { stateSelector } from 'app/store/store';
 import { useAppSelector } from 'app/store/storeHooks';
 import { ImageConfig } from 'konva/lib/shapes/Image';
-import { isEqual } from 'lodash-es';
 import { memo, useEffect, useState } from 'react';
 import { Image as KonvaImage } from 'react-konva';

-const selector = createSelector(
-[stateSelector],
-({ system, canvas }) => {
-const { denoiseProgress } = system;
-const { boundingBox } = canvas.layerState.stagingArea;
-const { batchIds } = canvas;
+const selector = createSelector([stateSelector], ({ system, canvas }) => {
+const { denoiseProgress } = system;
+const { boundingBox } = canvas.layerState.stagingArea;
+const { batchIds } = canvas;

 return {
 boundingBox,
 progressImage:
 denoiseProgress && batchIds.includes(denoiseProgress.batch_id)
 ? denoiseProgress.progress_image
 : undefined,
 };
-},
-{
-memoizeOptions: {
-resultEqualityCheck: isEqual,
-},
-}
-);
+});

 type Props = Omit<ImageConfig, 'image'>;

@@ -1,17 +1,16 @@
-import { createSelector } from '@reduxjs/toolkit';
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
+import { stateSelector } from 'app/store/store';
 import { useAppSelector } from 'app/store/storeHooks';
-import { canvasSelector } from 'features/canvas/store/canvasSelectors';
-import { RectConfig } from 'konva/lib/shapes/Rect';
-import { Rect } from 'react-konva';
-
 import { rgbaColorToString } from 'features/canvas/util/colorToString';
 import Konva from 'konva';
+import { RectConfig } from 'konva/lib/shapes/Rect';
 import { isNumber } from 'lodash-es';
 import { memo, useCallback, useEffect, useRef, useState } from 'react';
+import { Rect } from 'react-konva';

-export const canvasMaskCompositerSelector = createSelector(
-canvasSelector,
-(canvas) => {
+export const canvasMaskCompositerSelector = createMemoizedSelector(
+stateSelector,
+({ canvas }) => {
 const { maskColor, stageCoordinates, stageDimensions, stageScale } = canvas;

 return {
@@ -1,22 +1,15 @@
-import { createSelector } from '@reduxjs/toolkit';
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
+import { stateSelector } from 'app/store/store';
 import { useAppSelector } from 'app/store/storeHooks';
-import { canvasSelector } from 'features/canvas/store/canvasSelectors';
-import { GroupConfig } from 'konva/lib/Group';
-import { isEqual } from 'lodash-es';
-
-import { Group, Line } from 'react-konva';
 import { isCanvasMaskLine } from 'features/canvas/store/canvasTypes';
+import { GroupConfig } from 'konva/lib/Group';
 import { memo } from 'react';
+import { Group, Line } from 'react-konva';

-export const canvasLinesSelector = createSelector(
-[canvasSelector],
-(canvas) => {
+export const canvasLinesSelector = createMemoizedSelector(
+[stateSelector],
+({ canvas }) => {
 return { objects: canvas.layerState.objects };
-},
-{
-memoizeOptions: {
-resultEqualityCheck: isEqual,
-},
 }
 );

@@ -1,35 +1,25 @@
-import { createSelector } from '@reduxjs/toolkit';
+import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
+import { stateSelector } from 'app/store/store';
 import { useAppSelector } from 'app/store/storeHooks';
-import { canvasSelector } from 'features/canvas/store/canvasSelectors';
-import { rgbaColorToString } from 'features/canvas/util/colorToString';
-import { isEqual } from 'lodash-es';
-
-import { Group, Line, Rect } from 'react-konva';
 import {
 isCanvasBaseImage,
 isCanvasBaseLine,
 isCanvasEraseRect,
 isCanvasFillRect,
 } from 'features/canvas/store/canvasTypes';
-import IAICanvasImage from './IAICanvasImage';
+import { rgbaColorToString } from 'features/canvas/util/colorToString';
 import { memo } from 'react';
+import { Group, Line, Rect } from 'react-konva';
+import IAICanvasImage from './IAICanvasImage';

-const selector = createSelector(
-[canvasSelector],
-(canvas) => {
-const {
-layerState: { objects },
-} = canvas;
-return {
-objects,
-};
-},
-{
-memoizeOptions: {
-resultEqualityCheck: isEqual,
-},
-}
-);
+const selector = createMemoizedSelector([stateSelector], ({ canvas }) => {
+const {
+layerState: { objects },
+} = canvas;
+return {
+objects,
+};
+});

 const IAICanvasObjectRenderer = () => {
 const { objects } = useAppSelector(selector);
Some files were not shown because too many files have changed in this diff.