mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
Merge branch 'main' into patch-1
This commit is contained in:
commit
4208766e19
32
.github/workflows/test-invoke-pip-skip.yml
vendored
32
.github/workflows/test-invoke-pip-skip.yml
vendored
@ -1,10 +1,16 @@
|
|||||||
name: Test invoke.py pip
|
name: Test invoke.py pip
|
||||||
|
|
||||||
|
# This is a dummy stand-in for the actual tests
|
||||||
|
# we don't need to run python tests on non-Python changes
|
||||||
|
# But PRs require passing tests to be mergeable
|
||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- '**'
|
- '**'
|
||||||
- '!pyproject.toml'
|
- '!pyproject.toml'
|
||||||
- '!invokeai/**'
|
- '!invokeai/**'
|
||||||
|
- '!tests/**'
|
||||||
- 'invokeai/frontend/web/**'
|
- 'invokeai/frontend/web/**'
|
||||||
merge_group:
|
merge_group:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
@ -19,48 +25,26 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
python-version:
|
python-version:
|
||||||
# - '3.9'
|
|
||||||
- '3.10'
|
- '3.10'
|
||||||
pytorch:
|
pytorch:
|
||||||
# - linux-cuda-11_6
|
|
||||||
- linux-cuda-11_7
|
- linux-cuda-11_7
|
||||||
- linux-rocm-5_2
|
- linux-rocm-5_2
|
||||||
- linux-cpu
|
- linux-cpu
|
||||||
- macos-default
|
- macos-default
|
||||||
- windows-cpu
|
- windows-cpu
|
||||||
# - windows-cuda-11_6
|
|
||||||
# - windows-cuda-11_7
|
|
||||||
include:
|
include:
|
||||||
# - pytorch: linux-cuda-11_6
|
|
||||||
# os: ubuntu-22.04
|
|
||||||
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
|
|
||||||
# github-env: $GITHUB_ENV
|
|
||||||
- pytorch: linux-cuda-11_7
|
- pytorch: linux-cuda-11_7
|
||||||
os: ubuntu-22.04
|
os: ubuntu-22.04
|
||||||
github-env: $GITHUB_ENV
|
|
||||||
- pytorch: linux-rocm-5_2
|
- pytorch: linux-rocm-5_2
|
||||||
os: ubuntu-22.04
|
os: ubuntu-22.04
|
||||||
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
|
||||||
github-env: $GITHUB_ENV
|
|
||||||
- pytorch: linux-cpu
|
- pytorch: linux-cpu
|
||||||
os: ubuntu-22.04
|
os: ubuntu-22.04
|
||||||
extra-index-url: 'https://download.pytorch.org/whl/cpu'
|
|
||||||
github-env: $GITHUB_ENV
|
|
||||||
- pytorch: macos-default
|
- pytorch: macos-default
|
||||||
os: macOS-12
|
os: macOS-12
|
||||||
github-env: $GITHUB_ENV
|
|
||||||
- pytorch: windows-cpu
|
- pytorch: windows-cpu
|
||||||
os: windows-2022
|
os: windows-2022
|
||||||
github-env: $env:GITHUB_ENV
|
|
||||||
# - pytorch: windows-cuda-11_6
|
|
||||||
# os: windows-2022
|
|
||||||
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
|
|
||||||
# github-env: $env:GITHUB_ENV
|
|
||||||
# - pytorch: windows-cuda-11_7
|
|
||||||
# os: windows-2022
|
|
||||||
# extra-index-url: 'https://download.pytorch.org/whl/cu117'
|
|
||||||
# github-env: $env:GITHUB_ENV
|
|
||||||
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
|
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
steps:
|
steps:
|
||||||
- run: 'echo "No build required"'
|
- name: skip
|
||||||
|
run: echo "no build required"
|
||||||
|
84
.github/workflows/test-invoke-pip.yml
vendored
84
.github/workflows/test-invoke-pip.yml
vendored
@ -11,6 +11,7 @@ on:
|
|||||||
paths:
|
paths:
|
||||||
- 'pyproject.toml'
|
- 'pyproject.toml'
|
||||||
- 'invokeai/**'
|
- 'invokeai/**'
|
||||||
|
- 'tests/**'
|
||||||
- '!invokeai/frontend/web/**'
|
- '!invokeai/frontend/web/**'
|
||||||
types:
|
types:
|
||||||
- 'ready_for_review'
|
- 'ready_for_review'
|
||||||
@ -32,19 +33,12 @@ jobs:
|
|||||||
# - '3.9'
|
# - '3.9'
|
||||||
- '3.10'
|
- '3.10'
|
||||||
pytorch:
|
pytorch:
|
||||||
# - linux-cuda-11_6
|
|
||||||
- linux-cuda-11_7
|
- linux-cuda-11_7
|
||||||
- linux-rocm-5_2
|
- linux-rocm-5_2
|
||||||
- linux-cpu
|
- linux-cpu
|
||||||
- macos-default
|
- macos-default
|
||||||
- windows-cpu
|
- windows-cpu
|
||||||
# - windows-cuda-11_6
|
|
||||||
# - windows-cuda-11_7
|
|
||||||
include:
|
include:
|
||||||
# - pytorch: linux-cuda-11_6
|
|
||||||
# os: ubuntu-22.04
|
|
||||||
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
|
|
||||||
# github-env: $GITHUB_ENV
|
|
||||||
- pytorch: linux-cuda-11_7
|
- pytorch: linux-cuda-11_7
|
||||||
os: ubuntu-22.04
|
os: ubuntu-22.04
|
||||||
github-env: $GITHUB_ENV
|
github-env: $GITHUB_ENV
|
||||||
@ -62,14 +56,6 @@ jobs:
|
|||||||
- pytorch: windows-cpu
|
- pytorch: windows-cpu
|
||||||
os: windows-2022
|
os: windows-2022
|
||||||
github-env: $env:GITHUB_ENV
|
github-env: $env:GITHUB_ENV
|
||||||
# - pytorch: windows-cuda-11_6
|
|
||||||
# os: windows-2022
|
|
||||||
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
|
|
||||||
# github-env: $env:GITHUB_ENV
|
|
||||||
# - pytorch: windows-cuda-11_7
|
|
||||||
# os: windows-2022
|
|
||||||
# extra-index-url: 'https://download.pytorch.org/whl/cu117'
|
|
||||||
# github-env: $env:GITHUB_ENV
|
|
||||||
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
|
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
env:
|
env:
|
||||||
@ -100,40 +86,38 @@ jobs:
|
|||||||
id: run-pytest
|
id: run-pytest
|
||||||
run: pytest
|
run: pytest
|
||||||
|
|
||||||
- name: run invokeai-configure
|
# - name: run invokeai-configure
|
||||||
id: run-preload-models
|
# env:
|
||||||
env:
|
# HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }}
|
||||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }}
|
# run: >
|
||||||
run: >
|
# invokeai-configure
|
||||||
invokeai-configure
|
# --yes
|
||||||
--yes
|
# --default_only
|
||||||
--default_only
|
# --full-precision
|
||||||
--full-precision
|
# # can't use fp16 weights without a GPU
|
||||||
# can't use fp16 weights without a GPU
|
|
||||||
|
|
||||||
- name: run invokeai
|
# - name: run invokeai
|
||||||
id: run-invokeai
|
# id: run-invokeai
|
||||||
env:
|
# env:
|
||||||
# Set offline mode to make sure configure preloaded successfully.
|
# # Set offline mode to make sure configure preloaded successfully.
|
||||||
HF_HUB_OFFLINE: 1
|
# HF_HUB_OFFLINE: 1
|
||||||
HF_DATASETS_OFFLINE: 1
|
# HF_DATASETS_OFFLINE: 1
|
||||||
TRANSFORMERS_OFFLINE: 1
|
# TRANSFORMERS_OFFLINE: 1
|
||||||
INVOKEAI_OUTDIR: ${{ github.workspace }}/results
|
# INVOKEAI_OUTDIR: ${{ github.workspace }}/results
|
||||||
run: >
|
# run: >
|
||||||
invokeai
|
# invokeai
|
||||||
--no-patchmatch
|
# --no-patchmatch
|
||||||
--no-nsfw_checker
|
# --no-nsfw_checker
|
||||||
--precision=float32
|
# --precision=float32
|
||||||
--always_use_cpu
|
# --always_use_cpu
|
||||||
--use_memory_db
|
# --use_memory_db
|
||||||
--outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
|
# --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
|
||||||
--from_file ${{ env.TEST_PROMPTS }}
|
# --from_file ${{ env.TEST_PROMPTS }}
|
||||||
|
|
||||||
- name: Archive results
|
# - name: Archive results
|
||||||
id: archive-results
|
# env:
|
||||||
env:
|
# INVOKEAI_OUTDIR: ${{ github.workspace }}/results
|
||||||
INVOKEAI_OUTDIR: ${{ github.workspace }}/results
|
# uses: actions/upload-artifact@v3
|
||||||
uses: actions/upload-artifact@v3
|
# with:
|
||||||
with:
|
# name: results
|
||||||
name: results
|
# path: ${{ env.INVOKEAI_OUTDIR }}
|
||||||
path: ${{ env.INVOKEAI_OUTDIR }}
|
|
||||||
|
@ -279,8 +279,8 @@ def _convert_ckpt_and_cache(
|
|||||||
raise Exception(f"Model variant {model_config.variant} not supported for {version}")
|
raise Exception(f"Model variant {model_config.variant} not supported for {version}")
|
||||||
|
|
||||||
|
|
||||||
weights = app_config.root_dir / model_config.path
|
weights = app_config.root_path / model_config.path
|
||||||
config_file = app_config.root_dir / model_config.config
|
config_file = app_config.root_path / model_config.config
|
||||||
output_path = Path(output_path)
|
output_path = Path(output_path)
|
||||||
|
|
||||||
if version == BaseModelType.StableDiffusion1:
|
if version == BaseModelType.StableDiffusion1:
|
||||||
|
30
tests/conftest.py
Normal file
30
tests/conftest.py
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
import pytest
|
||||||
|
from invokeai.app.services.invocation_services import InvocationServices
|
||||||
|
from invokeai.app.services.invocation_queue import MemoryInvocationQueue
|
||||||
|
from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory
|
||||||
|
from invokeai.app.services.graph import LibraryGraph, GraphExecutionState
|
||||||
|
from invokeai.app.services.processor import DefaultInvocationProcessor
|
||||||
|
|
||||||
|
# Ignore these files as they need to be rewritten following the model manager refactor
|
||||||
|
collect_ignore = ["nodes/test_graph_execution_state.py", "nodes/test_node_graph.py", "test_textual_inversion.py"]
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session", autouse=True)
|
||||||
|
def mock_services():
|
||||||
|
# NOTE: none of these are actually called by the test invocations
|
||||||
|
return InvocationServices(
|
||||||
|
model_manager = None, # type: ignore
|
||||||
|
events = None, # type: ignore
|
||||||
|
logger = None, # type: ignore
|
||||||
|
images = None, # type: ignore
|
||||||
|
latents = None, # type: ignore
|
||||||
|
board_images=None, # type: ignore
|
||||||
|
boards=None, # type: ignore
|
||||||
|
queue = MemoryInvocationQueue(),
|
||||||
|
graph_library=SqliteItemStorage[LibraryGraph](
|
||||||
|
filename=sqlite_memory, table_name="graphs"
|
||||||
|
),
|
||||||
|
graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = sqlite_memory, table_name = 'graph_executions'),
|
||||||
|
processor = DefaultInvocationProcessor(),
|
||||||
|
restoration = None, # type: ignore
|
||||||
|
configuration = None, # type: ignore
|
||||||
|
)
|
@ -1,14 +1,18 @@
|
|||||||
from .test_invoker import create_edge
|
import pytest
|
||||||
from .test_nodes import ImageTestInvocation, ListPassThroughInvocation, PromptTestInvocation, PromptCollectionTestInvocation
|
|
||||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
|
from invokeai.app.invocations.baseinvocation import (BaseInvocation,
|
||||||
|
BaseInvocationOutput,
|
||||||
|
InvocationContext)
|
||||||
from invokeai.app.invocations.collections import RangeInvocation
|
from invokeai.app.invocations.collections import RangeInvocation
|
||||||
from invokeai.app.invocations.math import AddInvocation, MultiplyInvocation
|
from invokeai.app.invocations.math import AddInvocation, MultiplyInvocation
|
||||||
from invokeai.app.services.processor import DefaultInvocationProcessor
|
from invokeai.app.services.graph import (CollectInvocation, Graph,
|
||||||
from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory
|
GraphExecutionState,
|
||||||
from invokeai.app.services.invocation_queue import MemoryInvocationQueue
|
IterateInvocation)
|
||||||
from invokeai.app.services.invocation_services import InvocationServices
|
from invokeai.app.services.invocation_services import InvocationServices
|
||||||
from invokeai.app.services.graph import Graph, GraphInvocation, InvalidEdgeError, LibraryGraph, NodeAlreadyInGraphError, NodeNotFoundError, are_connections_compatible, EdgeConnection, CollectInvocation, IterateInvocation, GraphExecutionState
|
|
||||||
import pytest
|
from .test_invoker import create_edge
|
||||||
|
from .test_nodes import (ImageTestInvocation, PromptCollectionTestInvocation,
|
||||||
|
PromptTestInvocation)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
@ -19,30 +23,11 @@ def simple_graph():
|
|||||||
g.add_edge(create_edge("1", "prompt", "2", "prompt"))
|
g.add_edge(create_edge("1", "prompt", "2", "prompt"))
|
||||||
return g
|
return g
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def mock_services():
|
|
||||||
# NOTE: none of these are actually called by the test invocations
|
|
||||||
return InvocationServices(
|
|
||||||
model_manager = None, # type: ignore
|
|
||||||
events = None, # type: ignore
|
|
||||||
logger = None, # type: ignore
|
|
||||||
images = None, # type: ignore
|
|
||||||
latents = None, # type: ignore
|
|
||||||
queue = MemoryInvocationQueue(),
|
|
||||||
graph_library=SqliteItemStorage[LibraryGraph](
|
|
||||||
filename=sqlite_memory, table_name="graphs"
|
|
||||||
),
|
|
||||||
graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = sqlite_memory, table_name = 'graph_executions'),
|
|
||||||
processor = DefaultInvocationProcessor(),
|
|
||||||
restoration = None, # type: ignore
|
|
||||||
configuration = None, # type: ignore
|
|
||||||
)
|
|
||||||
|
|
||||||
def invoke_next(g: GraphExecutionState, services: InvocationServices) -> tuple[BaseInvocation, BaseInvocationOutput]:
|
def invoke_next(g: GraphExecutionState, services: InvocationServices) -> tuple[BaseInvocation, BaseInvocationOutput]:
|
||||||
n = g.next()
|
n = g.next()
|
||||||
if n is None:
|
if n is None:
|
||||||
return (None, None)
|
return (None, None)
|
||||||
|
|
||||||
print(f'invoking {n.id}: {type(n)}')
|
print(f'invoking {n.id}: {type(n)}')
|
||||||
o = n.invoke(InvocationContext(services, "1"))
|
o = n.invoke(InvocationContext(services, "1"))
|
||||||
g.complete(n.id, o)
|
g.complete(n.id, o)
|
||||||
@ -51,7 +36,7 @@ def invoke_next(g: GraphExecutionState, services: InvocationServices) -> tuple[B
|
|||||||
|
|
||||||
def test_graph_state_executes_in_order(simple_graph, mock_services):
|
def test_graph_state_executes_in_order(simple_graph, mock_services):
|
||||||
g = GraphExecutionState(graph = simple_graph)
|
g = GraphExecutionState(graph = simple_graph)
|
||||||
|
|
||||||
n1 = invoke_next(g, mock_services)
|
n1 = invoke_next(g, mock_services)
|
||||||
n2 = invoke_next(g, mock_services)
|
n2 = invoke_next(g, mock_services)
|
||||||
n3 = g.next()
|
n3 = g.next()
|
||||||
@ -88,11 +73,11 @@ def test_graph_state_expands_iterator(mock_services):
|
|||||||
graph.add_edge(create_edge("0", "collection", "1", "collection"))
|
graph.add_edge(create_edge("0", "collection", "1", "collection"))
|
||||||
graph.add_edge(create_edge("1", "item", "2", "a"))
|
graph.add_edge(create_edge("1", "item", "2", "a"))
|
||||||
graph.add_edge(create_edge("2", "a", "3", "a"))
|
graph.add_edge(create_edge("2", "a", "3", "a"))
|
||||||
|
|
||||||
g = GraphExecutionState(graph = graph)
|
g = GraphExecutionState(graph = graph)
|
||||||
while not g.is_complete():
|
while not g.is_complete():
|
||||||
invoke_next(g, mock_services)
|
invoke_next(g, mock_services)
|
||||||
|
|
||||||
prepared_add_nodes = g.source_prepared_mapping['3']
|
prepared_add_nodes = g.source_prepared_mapping['3']
|
||||||
results = set([g.results[n].a for n in prepared_add_nodes])
|
results = set([g.results[n].a for n in prepared_add_nodes])
|
||||||
expected = set([1, 11, 21])
|
expected = set([1, 11, 21])
|
||||||
@ -109,7 +94,7 @@ def test_graph_state_collects(mock_services):
|
|||||||
graph.add_edge(create_edge("1", "collection", "2", "collection"))
|
graph.add_edge(create_edge("1", "collection", "2", "collection"))
|
||||||
graph.add_edge(create_edge("2", "item", "3", "prompt"))
|
graph.add_edge(create_edge("2", "item", "3", "prompt"))
|
||||||
graph.add_edge(create_edge("3", "prompt", "4", "item"))
|
graph.add_edge(create_edge("3", "prompt", "4", "item"))
|
||||||
|
|
||||||
g = GraphExecutionState(graph = graph)
|
g = GraphExecutionState(graph = graph)
|
||||||
n1 = invoke_next(g, mock_services)
|
n1 = invoke_next(g, mock_services)
|
||||||
n2 = invoke_next(g, mock_services)
|
n2 = invoke_next(g, mock_services)
|
||||||
|
@ -1,13 +1,12 @@
|
|||||||
from .test_nodes import ErrorInvocation, ImageTestInvocation, ListPassThroughInvocation, PromptTestInvocation, PromptCollectionTestInvocation, TestEventService, create_edge, wait_until
|
|
||||||
from invokeai.app.services.processor import DefaultInvocationProcessor
|
|
||||||
from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory
|
|
||||||
from invokeai.app.services.invocation_queue import MemoryInvocationQueue
|
|
||||||
from invokeai.app.services.invoker import Invoker
|
|
||||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
|
|
||||||
from invokeai.app.services.invocation_services import InvocationServices
|
|
||||||
from invokeai.app.services.graph import Graph, GraphInvocation, InvalidEdgeError, LibraryGraph, NodeAlreadyInGraphError, NodeNotFoundError, are_connections_compatible, EdgeConnection, CollectInvocation, IterateInvocation, GraphExecutionState
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
|
from invokeai.app.services.graph import Graph, GraphExecutionState
|
||||||
|
from invokeai.app.services.invocation_services import InvocationServices
|
||||||
|
from invokeai.app.services.invoker import Invoker
|
||||||
|
|
||||||
|
from .test_nodes import (ErrorInvocation, ImageTestInvocation,
|
||||||
|
PromptTestInvocation, create_edge, wait_until)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def simple_graph():
|
def simple_graph():
|
||||||
@ -17,25 +16,6 @@ def simple_graph():
|
|||||||
g.add_edge(create_edge("1", "prompt", "2", "prompt"))
|
g.add_edge(create_edge("1", "prompt", "2", "prompt"))
|
||||||
return g
|
return g
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def mock_services() -> InvocationServices:
|
|
||||||
# NOTE: none of these are actually called by the test invocations
|
|
||||||
return InvocationServices(
|
|
||||||
model_manager = None, # type: ignore
|
|
||||||
events = TestEventService(),
|
|
||||||
logger = None, # type: ignore
|
|
||||||
images = None, # type: ignore
|
|
||||||
latents = None, # type: ignore
|
|
||||||
queue = MemoryInvocationQueue(),
|
|
||||||
graph_library=SqliteItemStorage[LibraryGraph](
|
|
||||||
filename=sqlite_memory, table_name="graphs"
|
|
||||||
),
|
|
||||||
graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = sqlite_memory, table_name = 'graph_executions'),
|
|
||||||
processor = DefaultInvocationProcessor(),
|
|
||||||
restoration = None, # type: ignore
|
|
||||||
configuration = None, # type: ignore
|
|
||||||
)
|
|
||||||
|
|
||||||
@pytest.fixture()
|
@pytest.fixture()
|
||||||
def mock_invoker(mock_services: InvocationServices) -> Invoker:
|
def mock_invoker(mock_services: InvocationServices) -> Invoker:
|
||||||
return Invoker(
|
return Invoker(
|
||||||
@ -57,6 +37,7 @@ def test_can_create_graph_state_from_graph(mock_invoker: Invoker, simple_graph):
|
|||||||
assert isinstance(g, GraphExecutionState)
|
assert isinstance(g, GraphExecutionState)
|
||||||
assert g.graph == simple_graph
|
assert g.graph == simple_graph
|
||||||
|
|
||||||
|
@pytest.mark.xfail(reason = "Requires fixing following the model manager refactor")
|
||||||
def test_can_invoke(mock_invoker: Invoker, simple_graph):
|
def test_can_invoke(mock_invoker: Invoker, simple_graph):
|
||||||
g = mock_invoker.create_execution_state(graph = simple_graph)
|
g = mock_invoker.create_execution_state(graph = simple_graph)
|
||||||
invocation_id = mock_invoker.invoke(g)
|
invocation_id = mock_invoker.invoke(g)
|
||||||
@ -72,6 +53,7 @@ def test_can_invoke(mock_invoker: Invoker, simple_graph):
|
|||||||
g = mock_invoker.services.graph_execution_manager.get(g.id)
|
g = mock_invoker.services.graph_execution_manager.get(g.id)
|
||||||
assert len(g.executed) > 0
|
assert len(g.executed) > 0
|
||||||
|
|
||||||
|
@pytest.mark.xfail(reason = "Requires fixing following the model manager refactor")
|
||||||
def test_can_invoke_all(mock_invoker: Invoker, simple_graph):
|
def test_can_invoke_all(mock_invoker: Invoker, simple_graph):
|
||||||
g = mock_invoker.create_execution_state(graph = simple_graph)
|
g = mock_invoker.create_execution_state(graph = simple_graph)
|
||||||
invocation_id = mock_invoker.invoke(g, invoke_all = True)
|
invocation_id = mock_invoker.invoke(g, invoke_all = True)
|
||||||
@ -87,6 +69,7 @@ def test_can_invoke_all(mock_invoker: Invoker, simple_graph):
|
|||||||
g = mock_invoker.services.graph_execution_manager.get(g.id)
|
g = mock_invoker.services.graph_execution_manager.get(g.id)
|
||||||
assert g.is_complete()
|
assert g.is_complete()
|
||||||
|
|
||||||
|
@pytest.mark.xfail(reason = "Requires fixing following the model manager refactor")
|
||||||
def test_handles_errors(mock_invoker: Invoker):
|
def test_handles_errors(mock_invoker: Invoker):
|
||||||
g = mock_invoker.create_execution_state()
|
g = mock_invoker.create_execution_state()
|
||||||
g.graph.add_node(ErrorInvocation(id = "1"))
|
g.graph.add_node(ErrorInvocation(id = "1"))
|
||||||
|
Loading…
Reference in New Issue
Block a user