mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
4b334be7d0
When a queue item is popped for processing, we need to retrieve its session from the DB, and pydantic parses and validates the graph at that point. It's possible for a graph to have been made invalid during the graph preparation stage (e.g. an ancestor node executes, and its output is not valid for its successor node's input field). When this occurs, the session in the DB will fail validation, but we don't find out until it is retrieved and parsed by pydantic. This logic was previously not wrapped in any exception handling.

Just after retrieving a session, we retrieve the specific invocation to execute from the session. It's possible that this could also fail, though it should be impossible for it to be a pydantic validation error (that would have been caught during session validation). There was no exception handling here either.

When either of these steps fails, the processor gets soft-locked because the processor's cleanup logic is never run. (I didn't dig deeper into exactly what cleanup is not happening, because the fix is to just handle the exceptions.)

This PR adds exception handling around both the session retrieval and the invocation retrieval, along with an event for each: `session_retrieval_error` and `invocation_retrieval_error`. These events are caught and displayed in the UI as toasts, along with the type of the Python exception (e.g. `ValidationError`). The events are also logged to the browser console.
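To illustrate the flow, here is a rough sketch of how a processor loop might wrap the two retrieval steps and emit the new events defined in the event service file below. This is not the actual processor code; `process_queue_item`, `get_session`, `get_invocation`, and `queue_item` are placeholder names for illustration only.

import traceback


def process_queue_item(queue_item, events, get_session, get_invocation):
    """Hypothetical processor step: retrieve the session, then the invocation,
    emitting the new error events instead of letting exceptions escape and
    soft-lock the processor. `get_session` and `get_invocation` are placeholder
    callables, not InvokeAI APIs."""
    try:
        # Pydantic parses and validates the stored graph here; an invalid
        # graph raises a validation error.
        session = get_session(queue_item.graph_execution_state_id)
    except Exception as e:
        events.emit_session_retrieval_error(
            graph_execution_state_id=queue_item.graph_execution_state_id,
            error_type=e.__class__.__name__,
            error=traceback.format_exc(),
        )
        return  # skip this item; the processor loop stays alive

    try:
        invocation = get_invocation(session, queue_item.invocation_id)
    except Exception as e:
        events.emit_invocation_retrieval_error(
            graph_execution_state_id=queue_item.graph_execution_state_id,
            node_id=queue_item.invocation_id,
            error_type=e.__class__.__name__,
            error=traceback.format_exc(),
        )
        return

    # ... normal invocation execution would follow here ...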
192 lines
6.0 KiB
Python
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Any, Optional

from invokeai.app.models.image import ProgressImage
from invokeai.app.util.misc import get_timestamp
from invokeai.app.services.model_manager_service import (
    BaseModelType,
    ModelType,
    SubModelType,
    ModelInfo,
)


class EventServiceBase:
    """Basic event bus, to have an empty stand-in when not needed"""

    session_event: str = "session_event"

    def dispatch(self, event_name: str, payload: Any) -> None:
        pass

    def __emit_session_event(self, event_name: str, payload: dict) -> None:
        payload["timestamp"] = get_timestamp()
        self.dispatch(
            event_name=EventServiceBase.session_event,
            payload=dict(event=event_name, data=payload),
        )

    # Define events here for every event in the system.
    # This will make them easier to integrate until we find a schema generator.
    def emit_generator_progress(
        self,
        graph_execution_state_id: str,
        node: dict,
        source_node_id: str,
        progress_image: Optional[ProgressImage],
        step: int,
        total_steps: int,
    ) -> None:
        """Emitted when there is generation progress"""
        self.__emit_session_event(
            event_name="generator_progress",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
                progress_image=progress_image.dict()
                if progress_image is not None
                else None,
                step=step,
                total_steps=total_steps,
            ),
        )

    def emit_invocation_complete(
        self,
        graph_execution_state_id: str,
        result: dict,
        node: dict,
        source_node_id: str,
    ) -> None:
        """Emitted when an invocation has completed"""
        self.__emit_session_event(
            event_name="invocation_complete",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
                result=result,
            ),
        )

    def emit_invocation_error(
        self,
        graph_execution_state_id: str,
        node: dict,
        source_node_id: str,
        error_type: str,
        error: str,
    ) -> None:
        """Emitted when an invocation has encountered an error"""
        self.__emit_session_event(
            event_name="invocation_error",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
                error_type=error_type,
                error=error,
            ),
        )

    def emit_invocation_started(
        self, graph_execution_state_id: str, node: dict, source_node_id: str
    ) -> None:
        """Emitted when an invocation has started"""
        self.__emit_session_event(
            event_name="invocation_started",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
            ),
        )

    def emit_graph_execution_complete(self, graph_execution_state_id: str) -> None:
        """Emitted when a session has completed all invocations"""
        self.__emit_session_event(
            event_name="graph_execution_state_complete",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
            ),
        )

    def emit_model_load_started(
        self,
        graph_execution_state_id: str,
        model_name: str,
        base_model: BaseModelType,
        model_type: ModelType,
        submodel: SubModelType,
    ) -> None:
        """Emitted when a model is requested"""
        self.__emit_session_event(
            event_name="model_load_started",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                model_name=model_name,
                base_model=base_model,
                model_type=model_type,
                submodel=submodel,
            ),
        )

    def emit_model_load_completed(
        self,
        graph_execution_state_id: str,
        model_name: str,
        base_model: BaseModelType,
        model_type: ModelType,
        submodel: SubModelType,
        model_info: ModelInfo,
    ) -> None:
        """Emitted when a model is correctly loaded (returns model info)"""
        self.__emit_session_event(
            event_name="model_load_completed",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                model_name=model_name,
                base_model=base_model,
                model_type=model_type,
                submodel=submodel,
                hash=model_info.hash,
                location=str(model_info.location),
                precision=str(model_info.precision),
            ),
        )

    def emit_session_retrieval_error(
        self,
        graph_execution_state_id: str,
        error_type: str,
        error: str,
    ) -> None:
        """Emitted when session retrieval fails"""
        self.__emit_session_event(
            event_name="session_retrieval_error",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                error_type=error_type,
                error=error,
            ),
        )

    def emit_invocation_retrieval_error(
        self,
        graph_execution_state_id: str,
        node_id: str,
        error_type: str,
        error: str,
    ) -> None:
        """Emitted when invocation retrieval fails"""
        self.__emit_session_event(
            event_name="invocation_retrieval_error",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node_id=node_id,
                error_type=error_type,
                error=error,
            ),
        )
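`EventServiceBase.dispatch` is a no-op by default; a concrete event service overrides it to forward events to the UI, which is presumably how the toasts described above receive the new error events. A minimal, hypothetical subclass for illustration, assuming the definitions from the file above are in scope:

# Minimal, hypothetical subclass for illustration: it just prints every
# dispatched event instead of forwarding it to a UI.
class LoggingEventService(EventServiceBase):
    def dispatch(self, event_name: str, payload: Any) -> None:
        print(f"{event_name}: {payload}")


events = LoggingEventService()
events.emit_session_retrieval_error(
    graph_execution_state_id="some-session-id",
    error_type="ValidationError",
    error="...traceback text...",
)
# Prints a "session_event" whose payload wraps the event name and its data:
# session_event: {'event': 'session_retrieval_error',
#                 'data': {'graph_execution_state_id': 'some-session-id',
#                          'error_type': 'ValidationError',
#                          'error': '...traceback text...',
#                          'timestamp': <timestamp from get_timestamp()>}}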