mirror of https://github.com/invoke-ai/InvokeAI
Remove @slow decorator in favor of @pytest.mark.slow.
commit e0e001758a
parent 7870b90717
@@ -3,7 +3,7 @@
 We use `pytest` to run the backend python tests. (See [pyproject.toml](/pyproject.toml) for the default `pytest` options.)
 
 ## Fast vs. Slow
 
-All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@slow` decorator).
+All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@pytest.mark.slow` decorator).
 
 'Fast' tests are run to validate every PR, and are fast enough that they can be run routinely during development.
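
A marker like `slow` only becomes selectable (and warning-free) once it is registered with pytest. InvokeAI keeps its pytest options in `pyproject.toml`, as the docs above note; the hook-based registration below is a minimal equivalent sketch, not the project's actual configuration.

```python
# conftest.py -- a minimal sketch of marker registration. InvokeAI registers
# its markers via pyproject.toml, so treat this hook-based version as an
# illustrative equivalent, not the project's actual setup.
def pytest_configure(config):
    # Registering the marker documents it under `pytest --markers` and
    # avoids PytestUnknownMarkWarning when tests use @pytest.mark.slow.
    config.addinivalue_line("markers", "slow: slow tests (model downloads, inference, etc.)")
```

Once the marker is registered, `pytest -m "not slow"` runs only the fast suite and `pytest -m slow` runs only the slow tests.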
@@ -47,12 +47,13 @@ There are a few things to keep in mind when adding tests that depend on models.
 There are several utilities to help with model setup for tests. Here is a sample test that depends on a model:
 
 ```python
 import pytest
 import torch
 
 from invokeai.backend.model_management.models.base import BaseModelType, ModelType
-from invokeai.backend.util.test_utils import install_and_load_model, slow
+from invokeai.backend.util.test_utils import install_and_load_model
 
-@slow
+
+@pytest.mark.slow
 def test_model(model_installer, torch_device):
     model_info = install_and_load_model(
         model_installer=model_installer,
@@ -11,16 +11,6 @@ from invokeai.backend.model_management.model_manager import ModelInfo
 from invokeai.backend.model_management.models.base import BaseModelType, ModelNotFoundException, ModelType, SubModelType
 
 
-def slow(test_case):
-    """Decorator for slow tests.
-
-    Tests should be marked as slow if they download a model, run model inference, or do anything else slow. To judge
-    whether a test is 'slow', consider how it would perform in a CPU-only environment with a low-bandwidth internet
-    connection.
-    """
-    return pytest.mark.slow(test_case)
-
-
 @pytest.fixture(scope="session")
 def torch_device():
     return "cuda" if torch.cuda.is_available() else "cpu"
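
With the wrapper gone, the built-in marker is all that remains; this commit does not change how slow tests are selected. If slow tests were ever to become opt-in rather than filtered with `-m`, the stock pytest recipe looks roughly like the sketch below. The `--run-slow` flag is a hypothetical name for illustration, not part of InvokeAI.

```python
# conftest.py -- hypothetical opt-in gate for slow tests. This is the
# standard pytest pattern, not InvokeAI's configuration.
import pytest


def pytest_addoption(parser):
    # Hypothetical flag; slow tests are skipped unless it is passed.
    parser.addoption("--run-slow", action="store_true", default=False,
                     help="also run tests marked with @pytest.mark.slow")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--run-slow"):
        return  # the user asked for slow tests; leave collection untouched
    skip_slow = pytest.mark.skip(reason="needs --run-slow to run")
    for item in items:
        if "slow" in item.keywords:  # i.e. the test carries @pytest.mark.slow
            item.add_marker(skip_slow)
```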
@@ -2,7 +2,7 @@ import pytest
 import torch
 
 from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType
-from invokeai.backend.util.test_utils import install_and_load_model, slow
+from invokeai.backend.util.test_utils import install_and_load_model
 
 
 def build_dummy_sd15_unet_input(torch_device):
@@ -38,7 +38,7 @@ def build_dummy_sd15_unet_input(torch_device):
         },
     ],
 )
-@slow
+@pytest.mark.slow
 def test_ip_adapter_unet_patch(model_params, model_installer, torch_device):
     """Smoke test that IP-Adapter weights can be loaded and used to patch a UNet."""
     ip_adapter_info = install_and_load_model(
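
Here the mark sits on the whole parametrized test. If only some parameter sets were slow (say, a full-size checkpoint alongside a tiny dummy one), pytest also supports per-parameter marks via `pytest.param(..., marks=...)`. A sketch with made-up parameter values, not taken from the InvokeAI test suite:

```python
import pytest


@pytest.mark.parametrize(
    "model_params",
    [
        # Hypothetical parameter sets for illustration; only the second is
        # marked slow, so `pytest -m "not slow"` still collects the first.
        {"variant": "dummy"},
        pytest.param({"variant": "full"}, marks=pytest.mark.slow),
    ],
)
def test_variant(model_params):
    assert "variant" in model_params
```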