Remove @slow decorator in favor of @pytest.mark.slow.

This commit is contained in:
Ryan Dick 2023-10-06 18:26:06 -04:00
parent 7870b90717
commit e0e001758a
3 changed files with 6 additions and 15 deletions

View File

@@ -3,7 +3,7 @@
We use `pytest` to run the backend python tests. (See [pyproject.toml](/pyproject.toml) for the default `pytest` options.)
## Fast vs. Slow
All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@pytest.mark.slow` decorator).
'Fast' tests are run to validate every PR, and are fast enough that they can be run routinely during development.
@@ -47,12 +47,13 @@ There are a few things to keep in mind when adding tests that depend on models.
There are several utilities to help with model setup for tests. Here is a sample test that depends on a model:
```python
import pytest
import torch
from invokeai.backend.model_management.models.base import BaseModelType, ModelType
from invokeai.backend.util.test_utils import install_and_load_model
@pytest.mark.slow
def test_model(model_installer, torch_device):
    model_info = install_and_load_model(
        model_installer=model_installer,

View File

@@ -11,16 +11,6 @@ from invokeai.backend.model_management.model_manager import ModelInfo
from invokeai.backend.model_management.models.base import BaseModelType, ModelNotFoundException, ModelType, SubModelType
def slow(test_case):
"""Decorator for slow tests.
Tests should be marked as slow if they download a model, run model inference, or do anything else slow. To judge
whether a test is 'slow', consider how it would perform in a CPU-only environment with a low-bandwidth internet
connection.
"""
return pytest.mark.slow(test_case)
@pytest.fixture(scope="session")
def torch_device():
    return "cuda" if torch.cuda.is_available() else "cpu"

View File

@@ -2,7 +2,7 @@ import pytest
import torch
from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType
from invokeai.backend.util.test_utils import install_and_load_model
def build_dummy_sd15_unet_input(torch_device):
@@ -38,7 +38,7 @@ def build_dummy_sd15_unet_input(torch_device):
        },
    ],
)
@pytest.mark.slow
def test_ip_adapter_unet_patch(model_params, model_installer, torch_device):
    """Smoke test that IP-Adapter weights can be loaded and used to patch a UNet."""
    ip_adapter_info = install_and_load_model(