Add everest to rapid tests #9831

Merged: 1 commit, Jan 22, 2025

This pull request adds `tests/everest` to every invocation of the rapid test suite (PR template, pre-commit hook, README, and justfile) and marks the slower Everest tests with `@pytest.mark.integration_test`, so the existing `-m "not integration_test"` filter keeps the rapid run fast.
2 changes: 1 addition & 1 deletion .github/PULL_REQUEST_TEMPLATE.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ _Short description of the approach_
- [ ] Added appropriate release note label
- [ ] Commit history is consistent and clean, in line with the [contribution guidelines](https://github.com/equinor/ert/blob/main/CONTRIBUTING.md).
- [ ] Make sure unit tests pass locally after every commit (`git rebase -i main
--exec 'pytest tests/ert/unit_tests -n auto --hypothesis-profile=fast -m "not integration_test"'`)
--exec 'pytest tests/ert/unit_tests tests/everest -n auto --hypothesis-profile=fast -m "not integration_test"'`)

## When applicable
- [ ] **When there are user facing changes**: Updated documentation
Expand Down
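The checklist command above relies on pytest marker expressions to keep the rapid run fast. A minimal sketch of the mechanism (illustrative code, not from this PR; the marker registration shown in the comment is an assumption about the repository's pytest configuration):

```python
import pytest

# Assumes the marker is registered, e.g. in pyproject.toml:
#   [tool.pytest.ini_options]
#   markers = ["integration_test: slower end-to-end tests"]


def test_fast_path():
    # Unmarked: selected by -m "not integration_test", runs in the rapid suite.
    assert 1 + 1 == 2


@pytest.mark.integration_test
def test_slow_path():
    # Marked: deselected by -m "not integration_test", full suite only.
    assert sum(range(1000)) == 499500
```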
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ repos:
- id: pytest
name: pytest
entry: pytest
args: [-n, auto, tests/ert/unit_tests, --hypothesis-profile=fast, -m, not integration_test]
args: [-n, auto, tests/ert/unit_tests, tests/everest, --hypothesis-profile=fast, -m, not integration_test]
language: system
types: [python]
stages: [pre-push]
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ There are many kinds of tests in the `tests` directory, while iterating on your
code you can run a fast subset of the tests with

```sh
pytest -n auto --hypothesis-profile=fast tests/ert/unit_tests -m "not integration_test"
pytest -n auto --hypothesis-profile=fast tests/ert/unit_tests tests/everest -m "not integration_test"
```

[Git LFS](https://git-lfs.com/) must be installed to get all the files. This is
Expand Down
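The same command also selects a Hypothesis settings profile via `--hypothesis-profile=fast`. For context, profiles are registered through Hypothesis's settings API, typically in a `conftest.py`; a minimal sketch, with parameter values that are assumptions rather than the repository's actual settings:

```python
from hypothesis import settings

# Register a lightweight profile; --hypothesis-profile=fast (provided by
# Hypothesis's pytest plugin) selects it at collection time.
# The max_examples/deadline values here are illustrative assumptions.
settings.register_profile("fast", max_examples=10, deadline=None)
```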
2 changes: 1 addition & 1 deletion justfile
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ snake_oil:

# execute rapid unittests
rapid-tests:
nice pytest -n auto tests/ert/unit_tests --hypothesis-profile=fast -m "not integration_test"
nice pytest -n auto tests/ert/unit_tests tests/everest --hypothesis-profile=fast -m "not integration_test"

check-all:
mypy src/ert src/everest
Expand Down
1 change: 1 addition & 0 deletions tests/everest/test_api_snapshots.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ def test_api_snapshots(config_file, snapshot, cached_example):
snapshot.assert_match(snapshot_str, "snapshot.json")


@pytest.mark.integration_test
@pytest.mark.parametrize(
"config_file",
["config_advanced.yml", "config_minimal.yml", "config_multiobj.yml"],
Expand Down
1 change: 1 addition & 0 deletions tests/everest/test_detached.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,7 @@ def test_server_status(copy_math_func_test_data_to_tmp):
assert status["message"] == f"{err_msg_1}\n{err_msg_2}"


@pytest.mark.integration_test
@patch("everest.detached.server_is_running", return_value=False)
def test_wait_for_server(server_is_running_mock, caplog, monkeypatch):
config = EverestConfig.with_defaults()
Expand Down
2 changes: 2 additions & 0 deletions tests/everest/test_egg_simulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -553,6 +553,7 @@ def _generate_exp_ert_config(config_path, output_dir):
}


@pytest.mark.integration_test
@skipif_no_opm
def test_egg_model_convert(copy_egg_test_data_to_tmp):
config = EverestConfig.load_file(CONFIG_FILE)
Expand Down Expand Up @@ -667,6 +668,7 @@ def test_init_egg_model(copy_egg_test_data_to_tmp):
ErtConfig.with_plugins().from_dict(config_dict=ert_config)


@pytest.mark.integration_test
@skipif_no_everest_models
@pytest.mark.everest_models_test
@skipif_no_opm
Expand Down
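The hunks above stack the new marker with conditional-skip decorators such as `@skipif_no_opm`. Those helpers are defined elsewhere in the repo and not shown in this diff; a rough sketch of the usual pattern (the condition and names below are assumptions, not this repository's code):

```python
import importlib.util

import pytest

# Hypothetical stand-in for a helper like skipif_no_opm: skip the test
# when the optional dependency cannot be imported.
skipif_no_opm = pytest.mark.skipif(
    importlib.util.find_spec("opm") is None,
    reason="requires the opm package",
)


@pytest.mark.integration_test
@skipif_no_opm
def test_needs_opm():
    import opm  # noqa: F401  # runs only in the full suite, with opm installed
```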
1 change: 1 addition & 0 deletions tests/everest/test_everlint.py
Original file line number Diff line number Diff line change
Expand Up @@ -246,6 +246,7 @@ def test_control_ref_validation(min_config):
EverestConfig(**min_config)


@pytest.mark.integration_test
def test_init_context_controls():
test_configs = [
"test_data/mocked_test_case/config_input_constraints.yml",
Expand Down
tests/everest/test_everserver.py — 4 additions

@@ -5,6 +5,7 @@
 from pathlib import Path
 from unittest.mock import patch

+import pytest
 from seba_sqlite.snapshot import SebaSnapshot

 from ert.run_models.everest_run_model import EverestExitCode
@@ -52,6 +53,7 @@ def set_shared_status(*args, progress, shared_data):
     }


+@pytest.mark.integration_test
 def test_certificate_generation(copy_math_func_test_data_to_tmp):
     config = EverestConfig.load_file("config_minimal.yml")
     cert, key, pw = everserver._generate_certificate(
@@ -194,6 +196,7 @@ def test_everserver_status_exception(
     assert "Exception: Failed optimization" in status["message"]


+@pytest.mark.integration_test
 @patch("sys.argv", ["name", "--config-file", "config_minimal.yml"])
 @patch(
     "everest.detached.jobs.everserver._sim_monitor",
@@ -223,6 +226,7 @@ def test_everserver_status_max_batch_num(
     assert {data.batch for data in snapshot.simulation_data} == {0}


+@pytest.mark.integration_test
 @patch("sys.argv", ["name", "--config-file", "config_minimal.yml"])
 def test_everserver_status_contains_max_runtime_failure(
     mock_server, change_to_tmpdir, min_config
tests/everest/test_fix_control.py — 3 additions

@@ -1,9 +1,12 @@
+import pytest
+
 from ert.run_models.everest_run_model import EverestRunModel
 from everest.config import EverestConfig

 CONFIG_FILE_ADVANCED = "config_advanced.yml"


+@pytest.mark.integration_test
 def test_fix_control(
     copy_math_func_test_data_to_tmp, evaluator_server_config_generator
 ):
2 changes: 2 additions & 0 deletions tests/everest/test_samplers.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
CONFIG_FILE_ADVANCED = "config_advanced.yml"


@pytest.mark.integration_test
def test_sampler_uniform(
copy_math_func_test_data_to_tmp, evaluator_server_config_generator
):
Expand Down Expand Up @@ -38,6 +39,7 @@ def test_sampler_uniform(
assert expected_opt == pytest.approx(run_model.result.total_objective, abs=0.001)


@pytest.mark.integration_test
def test_sampler_mixed(
copy_math_func_test_data_to_tmp, evaluator_server_config_generator
):
Expand Down
2 changes: 2 additions & 0 deletions tests/everest/test_simulator_cache.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
from pathlib import Path

import numpy as np
import pytest

from ert.config import QueueSystem
from ert.ensemble_evaluator import EvaluatorServerConfig
from ert.run_models.everest_run_model import EverestRunModel
from everest.config import EverestConfig, SimulatorConfig


@pytest.mark.integration_test
def test_simulator_cache(copy_math_func_test_data_to_tmp):
n_evals = 0

Expand Down
1 change: 1 addition & 0 deletions tests/everest/test_templating.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,7 @@ def test_render_multiple_input(copy_template_test_data_to_tmp):
assert output == ["0.2 vs 0.8"]


@pytest.mark.integration_test
def test_render_executable(copy_template_test_data_to_tmp):
assert os.access(everest.jobs.render, os.X_OK)

Expand Down