From 1c8b1fbc532c3958f2e89c29fca060953e52b553 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Fri, 22 Sep 2023 18:44:10 -0400
Subject: [PATCH 1/5] POC of a test that depends on models.

---
 invokeai/backend/util/test_utils.py         | 77 +++++++++++++++++++++
 pyproject.toml                              |  5 +-
 tests/backend/__init__.py                   |  0
 tests/backend/ip_adapter/__init__.py        |  0
 tests/backend/ip_adapter/test_ip_adapter.py | 71 +++++++++++++++++++
 5 files changed, 152 insertions(+), 1 deletion(-)
 create mode 100644 invokeai/backend/util/test_utils.py
 create mode 100644 tests/backend/__init__.py
 create mode 100644 tests/backend/ip_adapter/__init__.py
 create mode 100644 tests/backend/ip_adapter/test_ip_adapter.py

diff --git a/invokeai/backend/util/test_utils.py b/invokeai/backend/util/test_utils.py
new file mode 100644
index 0000000000..a41bdde8f8
--- /dev/null
+++ b/invokeai/backend/util/test_utils.py
@@ -0,0 +1,77 @@
+import contextlib
+from pathlib import Path
+from typing import Optional, Union
+
+import pytest
+import torch
+
+from invokeai.app.services.config.invokeai_config import InvokeAIAppConfig
+from invokeai.backend.install.model_install_backend import ModelInstall
+from invokeai.backend.model_management.model_manager import ModelInfo
+from invokeai.backend.model_management.models.base import BaseModelType, ModelNotFoundException, ModelType, SubModelType
+
+
+def slow(test_case):
+    """Decorator for slow tests.
+
+    Tests should be marked as slow if they download a model, run model inference, or do anything else slow. To judge
+    whether a test is 'slow', consider how it would perform in a CPU-only environment with a low-bandwidth internet
+    connection.
+    """
+    return pytest.mark.slow(test_case)
+
+
+@pytest.fixture(scope="session")
+def torch_device():
+    return "cuda" if torch.cuda.is_available() else "cpu"
+
+
+@pytest.fixture(scope="module")
+def model_installer():
+    """A global ModelInstall pytest fixture to be used by many tests."""
+    # HACK(ryand): InvokeAIAppConfig.get_config() returns a singleton config object. This can lead to weird
+    # interactions between tests that need to alter the config. For example, some tests change the 'root' directory in
+    # the config, which can cause `install_and_load_model(...)` to re-download the model unnecessarily. As a temporary
+    # workaround, we pass a kwarg to get_config, which causes the config to be re-loaded. To fix this properly, we
+    # should stop using a singleton.
+    return ModelInstall(InvokeAIAppConfig.get_config(log_level="info"))
+
+
+def install_and_load_model(
+    model_installer: ModelInstall,
+    model_path_id_or_url: Union[str, Path],
+    model_name: str,
+    base_model: BaseModelType,
+    model_type: ModelType,
+    submodel_type: Optional[SubModelType] = None,
+) -> ModelInfo:
+    """Install a model if it is not already installed, then get the ModelInfo for that model.
+
+    This is intended as a utility function for tests.
+
+    Args:
+        model_installer (ModelInstall): The model installer.
+        model_path_id_or_url (Union[str, Path]): The path, HF ID, URL, etc. where the model can be installed from if
+            it is not already installed.
+        model_name (str): The model name, forwarded to ModelManager.get_model(...).
+        base_model (BaseModelType): The base model, forwarded to ModelManager.get_model(...).
+        model_type (ModelType): The model type, forwarded to ModelManager.get_model(...).
+        submodel_type (Optional[SubModelType]): The submodel type, forwarded to ModelManager.get_model(...).
+
+    Returns:
+        ModelInfo
+    """
+    # If the requested model is already installed, return its ModelInfo.
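+    # (A ModelNotFoundException here simply means the model is not installed yet; in that case, fall through and
+    # install it below.)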
+    with contextlib.suppress(ModelNotFoundException):
+        return model_installer.mgr.get_model(model_name, base_model, model_type, submodel_type)
+
+    # Install the requested model.
+    model_installer.heuristic_import(model_path_id_or_url)
+
+    try:
+        return model_installer.mgr.get_model(model_name, base_model, model_type, submodel_type)
+    except ModelNotFoundException as e:
+        raise Exception(
+            "Failed to get model info after installing it. There could be a mismatch between the requested model and"
+            f" the installation id ('{model_path_id_or_url}'). Error: {e}"
+        )
diff --git a/pyproject.toml b/pyproject.toml
index da9dccd71d..f4bbf01102 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -178,7 +178,10 @@ version = { attr = "invokeai.version.__version__" }
 
 #=== Begin: PyTest and Coverage
 [tool.pytest.ini_options]
-addopts = "--cov-report term --cov-report html --cov-report xml"
+addopts = "--cov-report term --cov-report html --cov-report xml --strict-markers -m \"not slow\""
+markers = [
+    "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\"."
+]
 [tool.coverage.run]
 branch = true
 source = ["invokeai"]
diff --git a/tests/backend/__init__.py b/tests/backend/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/backend/ip_adapter/__init__.py b/tests/backend/ip_adapter/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/backend/ip_adapter/test_ip_adapter.py b/tests/backend/ip_adapter/test_ip_adapter.py
new file mode 100644
index 0000000000..d8be45bae5
--- /dev/null
+++ b/tests/backend/ip_adapter/test_ip_adapter.py
@@ -0,0 +1,71 @@
+import pytest
+import torch
+
+from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType
+from invokeai.backend.util.test_utils import install_and_load_model, model_installer, slow, torch_device
+
+
+def build_dummy_sd15_unet_input(torch_device):
+    batch_size = 1
+    num_channels = 4
+    sizes = (32, 32)
+
+    noise = torch.randn((batch_size, num_channels) + sizes).to(torch_device)
+    time_step = torch.tensor([10]).to(torch_device)
+    encoder_hidden_states = torch.randn((batch_size, 77, 768)).to(torch_device)
+
+    return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states}
+
+
+@pytest.mark.parametrize(
+    "model_params",
+    [
+        # SD1.5, IPAdapter
+        {
+            "ip_adapter_model_id": "InvokeAI/ip_adapter_sd15",
+            "ip_adapter_model_name": "ip_adapter_sd15",
+            "base_model": BaseModelType.StableDiffusion1,
+            "unet_model_id": "runwayml/stable-diffusion-v1-5",
+            "unet_model_name": "stable-diffusion-v1-5",
+        },
+        # SD1.5, IPAdapterPlus
+        {
+            "ip_adapter_model_id": "InvokeAI/ip_adapter_plus_sd15",
+            "ip_adapter_model_name": "ip_adapter_plus_sd15",
+            "base_model": BaseModelType.StableDiffusion1,
+            "unet_model_id": "runwayml/stable-diffusion-v1-5",
+            "unet_model_name": "stable-diffusion-v1-5",
+        },
+    ],
+)
+@slow
+def test_ip_adapter_unet_patch(model_params, model_installer, torch_device):
+    """Smoke test that IP-Adapter weights can be loaded and used to patch a UNet."""
+    ip_adapter_info = install_and_load_model(
+        model_installer=model_installer,
+        model_path_id_or_url=model_params["ip_adapter_model_id"],
+        model_name=model_params["ip_adapter_model_name"],
+        base_model=model_params["base_model"],
+        model_type=ModelType.IPAdapter,
+    )
+
+    unet_info = install_and_load_model(
+        model_installer=model_installer,
+        model_path_id_or_url=model_params["unet_model_id"],
+        model_name=model_params["unet_model_name"],
+        base_model=model_params["base_model"],
+        model_type=ModelType.Main,
+        submodel_type=SubModelType.UNet,
+    )
+
+    dummy_unet_input = build_dummy_sd15_unet_input(torch_device)
+
+    with torch.no_grad(), ip_adapter_info as ip_adapter, unet_info as unet:
+        ip_adapter.to(torch_device, dtype=torch.float32)
+        unet.to(torch_device, dtype=torch.float32)
+
+        cross_attention_kwargs = {"ip_adapter_image_prompt_embeds": torch.randn((1, 4, 768)).to(torch_device)}
+        with ip_adapter.apply_ip_adapter_attention(unet, 1.0):
+            output = unet(**dummy_unet_input, cross_attention_kwargs=cross_attention_kwargs).sample
+
+    assert output.shape == dummy_unet_input["sample"].shape

From 7d800e1ce363e227a6b1478ccb006313f0e51227 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Thu, 5 Oct 2023 11:16:43 -0400
Subject: [PATCH 2/5] Fix broken link in documentation to 'Frontend Documentation'.

---
 docs/contributing/contribution_guides/development.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/contributing/contribution_guides/development.md b/docs/contributing/contribution_guides/development.md
index 10a390c6e4..086fd6e90d 100644
--- a/docs/contributing/contribution_guides/development.md
+++ b/docs/contributing/contribution_guides/development.md
@@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh
 Once you're set up, you can review the documentation specific to your area of interest:
 
 * #### [InvokeAI Architecture](../ARCHITECTURE.md)
-* #### [Frontend Documentation](development_guides/contributingToFrontend.md)
+* #### [Frontend Documentation](./contributingToFrontend.md)
 * #### [Node Documentation](../INVOCATIONS.md)
 * #### [Local Development](../LOCAL_DEVELOPMENT.md)

From 9854b244fd2bf7f0a766143a23a196abd3372605 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Thu, 5 Oct 2023 12:11:21 -0400
Subject: [PATCH 3/5] Fix Flake8 errors by using a pytest conftest.py file.

---
 tests/backend/ip_adapter/test_ip_adapter.py | 2 +-
 tests/conftest.py                           | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)
 create mode 100644 tests/conftest.py

diff --git a/tests/backend/ip_adapter/test_ip_adapter.py b/tests/backend/ip_adapter/test_ip_adapter.py
index d8be45bae5..b84f5836be 100644
--- a/tests/backend/ip_adapter/test_ip_adapter.py
+++ b/tests/backend/ip_adapter/test_ip_adapter.py
@@ -2,7 +2,7 @@ import pytest
 import torch
 
 from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType
-from invokeai.backend.util.test_utils import install_and_load_model, model_installer, slow, torch_device
+from invokeai.backend.util.test_utils import install_and_load_model, slow
 
 
 def build_dummy_sd15_unet_input(torch_device):
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000000..8618f5e102
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,6 @@
+# conftest.py is a special pytest file. Fixtures defined in this file will be accessible to all tests in this directory
+# without needing to explicitly import them. (https://docs.pytest.org/en/6.2.x/fixture.html)
+
+# We import the model_installer and torch_device fixtures here so that they can be used by all tests. Flake8 does not
+# play well with fixtures (F401 and F811), so this is cleaner than importing in all files that use these fixtures.
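+# (The `# noqa: F401` on the import below suppresses Flake8's unused-import error; the import exists solely so that
+# pytest registers these fixtures for the whole test tree.)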
+from invokeai.backend.util.test_utils import model_installer, torch_device  # noqa: F401

From 7870b9071768375f1d064d1d5a26ed6a70bd849a Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Wed, 27 Sep 2023 21:05:40 -0400
Subject: [PATCH 4/5] Add TESTS.md documentation.

---
 docs/contributing/LOCAL_DEVELOPMENT.md | 29 +--------
 docs/contributing/TESTS.md             | 88 ++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 27 deletions(-)
 create mode 100644 docs/contributing/TESTS.md

diff --git a/docs/contributing/LOCAL_DEVELOPMENT.md b/docs/contributing/LOCAL_DEVELOPMENT.md
index 01b65923f4..0136fbfcd0 100644
--- a/docs/contributing/LOCAL_DEVELOPMENT.md
+++ b/docs/contributing/LOCAL_DEVELOPMENT.md
@@ -47,34 +47,9 @@ pip install ".[dev,test]"
 These are optional groups of packages which are defined within the `pyproject.toml`
 and will be required for testing the changes you make to the code.
 
-### Running Tests
-
-We use [pytest](https://docs.pytest.org/en/7.2.x/) for our test suite. Tests can
-be found under the `./tests` folder and can be run with a single `pytest`
-command. Optionally, to review test coverage you can append `--cov`.
-
-```zsh
-pytest --cov
-```
-
-Test outcomes and coverage will be reported in the terminal. In addition a more
-detailed report is created in both XML and HTML format in the `./coverage`
-folder. The HTML one in particular can help identify missing statements
-requiring tests to ensure coverage. This can be run by opening
-`./coverage/html/index.html`.
-
-For example.
-
-```zsh
-pytest --cov; open ./coverage/html/index.html
-```
-
-??? info "HTML coverage report output"
-
-    ![html-overview](../assets/contributing/html-overview.png)
-
-    ![html-detail](../assets/contributing/html-detail.png)
+### Tests
+
+See the [tests documentation](./TESTS.md) for information about running and writing tests.
 
 ### Reloading Changes
 
 Experimenting with changes to the Python source code is a drag if you have to re-start the server —
diff --git a/docs/contributing/TESTS.md b/docs/contributing/TESTS.md
new file mode 100644
index 0000000000..aab6cfa635
--- /dev/null
+++ b/docs/contributing/TESTS.md
@@ -0,0 +1,88 @@
+# InvokeAI Backend Tests
+
+We use `pytest` to run the backend Python tests. (See [pyproject.toml](/pyproject.toml) for the default `pytest` options.)
+
+## Fast vs. Slow
+All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@slow` decorator).
+
+'Fast' tests are run to validate every PR, and are fast enough that they can be run routinely during development.
+
+'Slow' tests are currently only run manually on an ad-hoc basis. In the future, they may be automated to run nightly. Most developers are only expected to run the 'slow' tests that directly relate to the feature(s) that they are working on.
+
+As a rule of thumb, tests should be marked as 'slow' if there is a chance that they take >1s (e.g. on a CPU-only machine with a slow internet connection). Common examples of slow tests are tests that depend on downloading a model, or running model inference.
+
+## Running Tests
+
+Below are some common test commands:
+```bash
+# Run the fast tests. (This implicitly uses the configured default option: `-m "not slow"`.)
+pytest tests/
+
+# Equivalent command to run the fast tests.
+pytest tests/ -m "not slow"
+
+# Run the slow tests.
+pytest tests/ -m "slow"
+
+# Run the slow tests from a specific file.
+pytest tests/path/to/slow_test.py -m "slow"
+
+# Run all tests (fast and slow).
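+# (An empty -m expression overrides the default `-m "not slow"` filter configured in pyproject.toml's addopts, so
+# nothing is deselected.)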
+pytest tests/ -m ""
+```
+
+## Test Organization
+
+All backend tests are in the [`tests/`](/tests/) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`.
+
+TODO: The above statement is aspirational. A re-organization of legacy tests is required to make it true.
+
+## Tests that depend on models
+
+There are a few things to keep in mind when adding tests that depend on models.
+
+1. If a required model is not already present, it should automatically be downloaded as part of the test setup.
+2. If a model is already downloaded, it should not be re-downloaded unnecessarily.
+3. Take reasonable care to keep the total number of models required for the tests low. Whenever possible, re-use models that are already required for other tests. If you are adding a new model, consider including a comment to explain why it is required/unique.
+
+There are several utilities to help with model setup for tests. Here is a sample test that depends on a model:
+```python
+import torch
+
+from invokeai.backend.model_management.models.base import BaseModelType, ModelType
+from invokeai.backend.util.test_utils import install_and_load_model, slow
+
+@slow
+def test_model(model_installer, torch_device):
+    model_info = install_and_load_model(
+        model_installer=model_installer,
+        model_path_id_or_url="HF/dummy_model_id",
+        model_name="dummy_model",
+        base_model=BaseModelType.StableDiffusion1,
+        model_type=ModelType.Dummy,
+    )
+
+    dummy_input = build_dummy_input(torch_device)
+
+    with torch.no_grad(), model_info as model:
+        model.to(torch_device, dtype=torch.float32)
+        output = model(dummy_input)
+
+    # Validate output...
+```
+
+## Test Coverage
+
+To review test coverage, append `--cov` to your pytest command:
+```bash
+pytest tests/ --cov
+```
+
+Test outcomes and coverage will be reported in the terminal. In addition, a more detailed report is created in both XML and HTML format in the `./coverage` folder. The HTML output is particularly helpful in identifying untested statements where coverage should be improved. The HTML report can be viewed by opening `./coverage/html/index.html`.
+
+??? info "HTML coverage report output"
+
+    ![html-overview](../assets/contributing/html-overview.png)
+
+    ![html-detail](../assets/contributing/html-detail.png)

From e0e001758a88932e5b708a28a499fe5fe8224b0b Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Fri, 6 Oct 2023 18:26:06 -0400
Subject: [PATCH 5/5] Remove @slow decorator in favor of @pytest.mark.slow.

---
 docs/contributing/TESTS.md                  |  7 ++++---
 invokeai/backend/util/test_utils.py         | 10 ----------
 tests/backend/ip_adapter/test_ip_adapter.py |  4 ++--
 3 files changed, 6 insertions(+), 15 deletions(-)

diff --git a/docs/contributing/TESTS.md b/docs/contributing/TESTS.md
index aab6cfa635..8d823bb4e9 100644
--- a/docs/contributing/TESTS.md
+++ b/docs/contributing/TESTS.md
@@ -3,7 +3,7 @@
 We use `pytest` to run the backend Python tests. (See [pyproject.toml](/pyproject.toml) for the default `pytest` options.)
 
 ## Fast vs. Slow
-All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@slow` decorator).
+All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@pytest.mark.slow` decorator).
 
 'Fast' tests are run to validate every PR, and are fast enough that they can be run routinely during development.
@@ -47,12 +47,13 @@ There are a few things to keep in mind when adding tests that depend on models.
 
 There are several utilities to help with model setup for tests. Here is a sample test that depends on a model:
 ```python
+import pytest
 import torch
 
 from invokeai.backend.model_management.models.base import BaseModelType, ModelType
-from invokeai.backend.util.test_utils import install_and_load_model, slow
+from invokeai.backend.util.test_utils import install_and_load_model
 
-@slow
+@pytest.mark.slow
 def test_model(model_installer, torch_device):
     model_info = install_and_load_model(
         model_installer=model_installer,
diff --git a/invokeai/backend/util/test_utils.py b/invokeai/backend/util/test_utils.py
index a41bdde8f8..1c7b538882 100644
--- a/invokeai/backend/util/test_utils.py
+++ b/invokeai/backend/util/test_utils.py
@@ -11,16 +11,6 @@ from invokeai.backend.model_management.model_manager import ModelInfo
 from invokeai.backend.model_management.models.base import BaseModelType, ModelNotFoundException, ModelType, SubModelType
 
 
-def slow(test_case):
-    """Decorator for slow tests.
-
-    Tests should be marked as slow if they download a model, run model inference, or do anything else slow. To judge
-    whether a test is 'slow', consider how it would perform in a CPU-only environment with a low-bandwidth internet
-    connection.
-    """
-    return pytest.mark.slow(test_case)
-
-
 @pytest.fixture(scope="session")
 def torch_device():
     return "cuda" if torch.cuda.is_available() else "cpu"
diff --git a/tests/backend/ip_adapter/test_ip_adapter.py b/tests/backend/ip_adapter/test_ip_adapter.py
index b84f5836be..1248ead98b 100644
--- a/tests/backend/ip_adapter/test_ip_adapter.py
+++ b/tests/backend/ip_adapter/test_ip_adapter.py
@@ -2,7 +2,7 @@ import pytest
 import torch
 
 from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType
-from invokeai.backend.util.test_utils import install_and_load_model, slow
+from invokeai.backend.util.test_utils import install_and_load_model
 
 
 def build_dummy_sd15_unet_input(torch_device):
@@ -38,7 +38,7 @@ def build_dummy_sd15_unet_input(torch_device):
         },
     ],
 )
-@slow
+@pytest.mark.slow
 def test_ip_adapter_unet_patch(model_params, model_installer, torch_device):
     """Smoke test that IP-Adapter weights can be loaded and used to patch a UNet."""
     ip_adapter_info = install_and_load_model(
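With the full series applied, the marker setup can be sanity-checked from the repository root. This is a minimal check rather than part of the patches themselves; the commands simply mirror the examples in the new TESTS.md:

```bash
# Fast tests only: the default `-m "not slow"` filter from pyproject.toml applies.
pytest tests/

# Run just the new slow-marked IP-Adapter smoke test (downloads models on first run).
pytest tests/backend/ip_adapter/test_ip_adapter.py -m "slow"
```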