mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
a514c9e28b
Update workflows handling for Workflow Library. **Updated Workflow Storage** "Embedded Workflows" are workflows associated with images, and are now only stored in the image files. "Library Workflows" are not associated with images, and are stored only in DB. This works out nicely. We have always saved workflows to files, but recently began saving them to the DB in addition to in image files. When that happened, we stopped reading workflows from files, so all the workflows that only existed in images were inaccessible. With this change, access to those workflows is restored, and no workflows are lost. **Updated Workflow Handling in Nodes** Prior to this change, workflows were embedded in images by passing the whole workflow JSON to a special workflow field on a node. In the node's `invoke()` function, the node was able to access this workflow and save it with the image. This (inaccurately) models workflows as a property of an image and is rather awkward technically. A workflow is now a property of a batch/session queue item. It is available in the InvocationContext and therefore available to all nodes during `invoke()`. **Database Migrations** Added a `SQLiteMigrator` class to handle database migrations. Migrations were needed to accommodate the DB-related changes in this PR. See the code for details. The `images`, `workflows` and `session_queue` tables required migrations for this PR, and are using the new migrator. Other tables/services are still creating tables themselves. A followup PR will adapt them to use the migrator. **Other/Support Changes** - Add a `has_workflow` column to `images` table to indicate that the image has an embedded workflow. - Add handling for retrieving the workflow from an image in Python. The image file must be fetched, the workflow extracted, and then sent to client, avoiding needing the browser to parse the image file. 
With the `has_workflow` column, the UI knows if there is a workflow to be fetched, and only fetches when the user requests to load the workflow. - Add route to get the workflow from an image - Add CRUD service/routes for the library workflows - `workflow_images` table and services removed (no longer needed now that embedded workflows are not in the DB)
90 lines
3.2 KiB
Python
90 lines
3.2 KiB
Python
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
|
|
|
from typing import Optional
|
|
|
|
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
|
|
|
|
from .invocation_queue.invocation_queue_common import InvocationQueueItem
|
|
from .invocation_services import InvocationServices
|
|
from .shared.graph import Graph, GraphExecutionState
|
|
|
|
|
|
class Invoker:
    """The invoker, used to execute invocations.

    On construction it starts every service that exposes a ``start()`` hook;
    ``stop()`` shuts them down again and signals the queue processor to exit.
    """

    # The bag of application services this invoker drives. Services are
    # duck-typed: lifecycle hooks (start/stop) are optional.
    services: InvocationServices

    def __init__(self, services: InvocationServices):
        self.services = services
        self._start()

    def invoke(
        self,
        session_queue_id: str,
        session_queue_item_id: int,
        session_queue_batch_id: str,
        graph_execution_state: GraphExecutionState,
        workflow: Optional[WorkflowWithoutID] = None,
        invoke_all: bool = False,
    ) -> Optional[str]:
        """Determines the next node to invoke and enqueues it, preparing if needed.

        Returns the id of the queued node, or `None` if there are no nodes left to enqueue.
        """
        # Ask the execution state for the next ready invocation; `next()` may
        # also prepare nodes internally.
        invocation = graph_execution_state.next()
        if not invocation:
            # Graph is exhausted — nothing to enqueue.
            return None

        # Persist the (possibly mutated) execution state before queueing, so
        # the processor sees the state that produced this invocation.
        self.services.graph_execution_manager.set(graph_execution_state)

        # Queue the invocation for the processor to pick up.
        self.services.queue.put(
            InvocationQueueItem(
                session_queue_id=session_queue_id,
                session_queue_item_id=session_queue_item_id,
                session_queue_batch_id=session_queue_batch_id,
                graph_execution_state_id=graph_execution_state.id,
                invocation_id=invocation.id,
                workflow=workflow,
                invoke_all=invoke_all,
            )
        )

        return invocation.id

    def create_execution_state(self, graph: Optional[Graph] = None) -> GraphExecutionState:
        """Creates a new execution state for the given graph.

        If `graph` is None, an empty graph is used. The new state is persisted
        via the graph execution manager before being returned.
        """
        new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
        self.services.graph_execution_manager.set(new_state)
        return new_state

    def cancel(self, graph_execution_state_id: str) -> None:
        """Cancels the given execution state"""
        self.services.queue.cancel(graph_execution_state_id)

    def __call_service_op(self, service, op_name: str) -> None:
        # Invoke the named lifecycle hook ("start" or "stop") on a service,
        # if the service defines one. Hooks receive this invoker as their
        # single argument. Shared by _start() and stop() to avoid duplicating
        # the getattr/callable dance.
        op = getattr(service, op_name, None)
        if callable(op):
            op(self)

    def _start(self) -> None:
        """Starts the invoker. This is called automatically when the invoker is created."""
        # vars() yields the attribute names of the services container; each
        # attribute is a service instance.
        for service in vars(self.services):
            self.__call_service_op(getattr(self.services, service), "start")

    def stop(self) -> None:
        """Stops the invoker. A new invoker will have to be created to execute further."""
        # First stop all services
        for service in vars(self.services):
            self.__call_service_op(getattr(self.services, service), "stop")

        # A None item on the invocation queue is the sentinel that tells the
        # queue processor to shut down.
        self.services.queue.put(None)
|