diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index aea34402aac..170f05ffed5 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -12,7 +12,7 @@ _Short description of the approach_ - [ ] Added appropriate release note label - [ ] Commit history is consistent and clean, in line with the [contribution guidelines](https://github.com/equinor/ert/blob/main/CONTRIBUTING.md). - [ ] Make sure unit tests pass locally after every commit (`git rebase -i main - --exec 'pytest tests/ert/unit_tests -n auto --hypothesis-profile=fast -m "not integration_test"'`) + --exec 'pytest tests/ert/unit_tests tests/everest -n auto --hypothesis-profile=fast -m "not integration_test"'`) ## When applicable - [ ] **When there are user facing changes**: Updated documentation diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a52dca55a7a..ffb4ff25131 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,7 +23,7 @@ repos: - id: pytest name: pytest entry: pytest - args: [-n, auto, tests/ert/unit_tests, --hypothesis-profile=fast, -m, not integration_test] + args: [-n, auto, tests/ert/unit_tests, tests/everest, --hypothesis-profile=fast, -m, not integration_test] language: system types: [python] stages: [pre-push] diff --git a/README.md b/README.md index 0572fa41b1f..714560d6e3c 100644 --- a/README.md +++ b/README.md @@ -77,7 +77,7 @@ There are many kinds of tests in the `tests` directory, while iterating on your code you can run a fast subset of the tests with ```sh -pytest -n auto --hypothesis-profile=fast tests/ert/unit_tests -m "not integration_test" +pytest -n auto --hypothesis-profile=fast tests/ert/unit_tests tests/everest -m "not integration_test" ``` [Git LFS](https://git-lfs.com/) must be installed to get all the files. 
diff --git a/justfile b/justfile index d10cdf7de7b..810d6793548 100644 --- a/justfile +++ b/justfile @@ -10,7 +10,7 @@ snake_oil: # execute rapid unittests rapid-tests: - nice pytest -n auto tests/ert/unit_tests --hypothesis-profile=fast -m "not integration_test" + nice pytest -n auto tests/ert/unit_tests tests/everest --hypothesis-profile=fast -m "not integration_test" check-all: mypy src/ert src/everest diff --git a/tests/everest/test_api_snapshots.py b/tests/everest/test_api_snapshots.py index aa742a6764b..cac5fdac177 100644 --- a/tests/everest/test_api_snapshots.py +++ b/tests/everest/test_api_snapshots.py @@ -77,6 +77,7 @@ def test_api_snapshots(config_file, snapshot, cached_example): snapshot.assert_match(snapshot_str, "snapshot.json") +@pytest.mark.integration_test @pytest.mark.parametrize( "config_file", ["config_advanced.yml", "config_minimal.yml", "config_multiobj.yml"], diff --git a/tests/everest/test_detached.py b/tests/everest/test_detached.py index 064c881301d..e3cba39d7fc 100644 --- a/tests/everest/test_detached.py +++ b/tests/everest/test_detached.py @@ -140,6 +140,7 @@ def test_server_status(copy_math_func_test_data_to_tmp): assert status["message"] == f"{err_msg_1}\n{err_msg_2}" +@pytest.mark.integration_test @patch("everest.detached.server_is_running", return_value=False) def test_wait_for_server(server_is_running_mock, caplog, monkeypatch): config = EverestConfig.with_defaults() diff --git a/tests/everest/test_egg_simulation.py b/tests/everest/test_egg_simulation.py index d20de3a3f9d..96d466cf74c 100644 --- a/tests/everest/test_egg_simulation.py +++ b/tests/everest/test_egg_simulation.py @@ -553,6 +553,7 @@ def _generate_exp_ert_config(config_path, output_dir): } +@pytest.mark.integration_test @skipif_no_opm def test_egg_model_convert(copy_egg_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) @@ -667,6 +668,7 @@ def test_init_egg_model(copy_egg_test_data_to_tmp): ErtConfig.with_plugins().from_dict(config_dict=ert_config) 
+@pytest.mark.integration_test @skipif_no_everest_models @pytest.mark.everest_models_test @skipif_no_opm diff --git a/tests/everest/test_everlint.py b/tests/everest/test_everlint.py index a64b165c91d..d76c74f89e7 100644 --- a/tests/everest/test_everlint.py +++ b/tests/everest/test_everlint.py @@ -246,6 +246,7 @@ def test_control_ref_validation(min_config): EverestConfig(**min_config) +@pytest.mark.integration_test def test_init_context_controls(): test_configs = [ "test_data/mocked_test_case/config_input_constraints.yml", diff --git a/tests/everest/test_everserver.py b/tests/everest/test_everserver.py index 4075e3fbc41..fa95bae4126 100644 --- a/tests/everest/test_everserver.py +++ b/tests/everest/test_everserver.py @@ -5,6 +5,7 @@ from pathlib import Path from unittest.mock import patch +import pytest from seba_sqlite.snapshot import SebaSnapshot from ert.run_models.everest_run_model import EverestExitCode @@ -52,6 +53,7 @@ def set_shared_status(*args, progress, shared_data): } +@pytest.mark.integration_test def test_certificate_generation(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file("config_minimal.yml") cert, key, pw = everserver._generate_certificate( @@ -194,6 +196,7 @@ def test_everserver_status_exception( assert "Exception: Failed optimization" in status["message"] +@pytest.mark.integration_test @patch("sys.argv", ["name", "--config-file", "config_minimal.yml"]) @patch( "everest.detached.jobs.everserver._sim_monitor", @@ -223,6 +226,7 @@ def test_everserver_status_max_batch_num( assert {data.batch for data in snapshot.simulation_data} == {0} +@pytest.mark.integration_test @patch("sys.argv", ["name", "--config-file", "config_minimal.yml"]) def test_everserver_status_contains_max_runtime_failure( mock_server, change_to_tmpdir, min_config diff --git a/tests/everest/test_fix_control.py b/tests/everest/test_fix_control.py index bcf75e5a333..11b75bed06c 100644 --- a/tests/everest/test_fix_control.py +++ b/tests/everest/test_fix_control.py @@ 
-1,9 +1,12 @@ +import pytest + from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig CONFIG_FILE_ADVANCED = "config_advanced.yml" +@pytest.mark.integration_test def test_fix_control( copy_math_func_test_data_to_tmp, evaluator_server_config_generator ): diff --git a/tests/everest/test_samplers.py b/tests/everest/test_samplers.py index 663db860c70..418a199df9d 100644 --- a/tests/everest/test_samplers.py +++ b/tests/everest/test_samplers.py @@ -7,6 +7,7 @@ CONFIG_FILE_ADVANCED = "config_advanced.yml" +@pytest.mark.integration_test def test_sampler_uniform( copy_math_func_test_data_to_tmp, evaluator_server_config_generator ): @@ -38,6 +39,7 @@ def test_sampler_uniform( assert expected_opt == pytest.approx(run_model.result.total_objective, abs=0.001) +@pytest.mark.integration_test def test_sampler_mixed( copy_math_func_test_data_to_tmp, evaluator_server_config_generator ): diff --git a/tests/everest/test_simulator_cache.py b/tests/everest/test_simulator_cache.py index 14765b49b14..8a61b6bca65 100644 --- a/tests/everest/test_simulator_cache.py +++ b/tests/everest/test_simulator_cache.py @@ -1,6 +1,7 @@ from pathlib import Path import numpy as np +import pytest from ert.config import QueueSystem from ert.ensemble_evaluator import EvaluatorServerConfig @@ -8,6 +9,7 @@ from everest.config import EverestConfig, SimulatorConfig +@pytest.mark.integration_test def test_simulator_cache(copy_math_func_test_data_to_tmp): n_evals = 0 diff --git a/tests/everest/test_templating.py b/tests/everest/test_templating.py index f023752ce62..6d7e0101cc5 100644 --- a/tests/everest/test_templating.py +++ b/tests/everest/test_templating.py @@ -94,6 +94,7 @@ def test_render_multiple_input(copy_template_test_data_to_tmp): assert output == ["0.2 vs 0.8"] +@pytest.mark.integration_test def test_render_executable(copy_template_test_data_to_tmp): assert os.access(everest.jobs.render, os.X_OK)