Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add unit tests for the benchmark repo #147

Merged
merged 14 commits into from
Feb 27, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .coveragerc
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
[run]
# Measure coverage only for the helper scripts that have unit tests.
include = scripts/*

[report]
# Fail the test run when total coverage drops below 80%.
fail_under = 80
15 changes: 15 additions & 0 deletions .github/workflows/pull_request.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,21 @@ jobs:
run: pipenv install --dev
- name: Lint scripts
run: make lint
# Run the unit test suite (make test) on every pull request.
test:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
# Export the pinned interpreter version from .python-version for setup-python.
- run:
echo "PYTHON_VERSION=$(cat .python-version)" >> $GITHUB_ENV
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install pipenv
run: pip install pipenv==2023.8.22
- name: Install virtual environment
run: pipenv install --dev
- name: Running unit tests
run: make test
docker-push:
runs-on: ubuntu-22.04
steps:
Expand Down
4 changes: 4 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,11 @@ flake8:
pipenv run flake8 --max-complexity 10 --count

# Auto-format the codebase: isort orders imports, black formats the code.
format:
pipenv run isort .
pipenv run black .

# Run the benchmark locally against the test checkbox questionnaire requests.
run:
pipenv run ./run.sh requests/test_checkbox.json

# Run the unit tests (with coverage) via the helper script.
test:
pipenv run ./scripts/run_tests.sh
5 changes: 5 additions & 0 deletions Pipfile
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,11 @@ name = "pypi"
[dev-packages]
black = "*"
"flake8" = "*"
isort = "*"
pytest = "*"
pytest-cov = "*"
pytest-mock = "*"
freezegun = "*"

[packages]
haralyzer = "*"
Expand Down
3,320 changes: 1,755 additions & 1,565 deletions Pipfile.lock

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,10 @@ helm tiller run \
- --set container.image=europe-west2-docker.pkg.dev/ons-eq-ci/docker-images/eq-benchmark-deploy-image:latest
```

## Running Tests

Tests for our Python files in the `scripts` directory can be run using `make test`.

## Visualising Benchmark Results

You can use the `visualise_results.py` script to visualise benchmark results over time.
Expand Down
6 changes: 3 additions & 3 deletions generate_requests.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,9 +55,9 @@ def parse_har_file(har_file):
def generate_requests(har_file, requests_file, schema_name):
    """Convert a HAR capture into a benchmark requests file.

    :param har_file: open HAR file the requests are parsed from.
    :param requests_file: open writable file the JSON output is dumped to.
    :param schema_name: questionnaire schema name; also used to build the
        public ``schema_url`` the schema is served from.
    """
    requests = parse_har_file(har_file)
    requests['schema_name'] = schema_name
    requests['schema_url'] = (
        f"https://storage.googleapis.com/eq-questionnaire-schemas/{schema_name}.json"
    )
    json.dump(requests, requests_file, indent=4)


Expand Down
2 changes: 1 addition & 1 deletion locustfile.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import os

from locust import constant, HttpUser
from locust import HttpUser, constant

from runner_benchmark.taskset import SurveyRunnerTaskSet

Expand Down
2 changes: 1 addition & 1 deletion runner_benchmark/taskset.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@

from locust import TaskSet, task

from .utils import parse_params_from_location
from .questionnaire_mixins import QuestionnaireMixins
from .token_generator import create_token
from .utils import parse_params_from_location

r = random.Random()

Expand Down
3 changes: 2 additions & 1 deletion runner_benchmark/token_generator.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import os
import time
from uuid import uuid4
from datetime import datetime, timedelta, timezone
from uuid import uuid4

from sdc.crypto.encrypter import encrypt
from sdc.crypto.key_store import KeyStore

Expand Down
2 changes: 1 addition & 1 deletion runner_benchmark/utils.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from urllib.parse import urlparse, parse_qs
from urllib.parse import parse_qs, urlparse


def parse_params_from_location(url, route):
Expand Down
2 changes: 1 addition & 1 deletion scripts/get_aggregated_summary.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from glob import glob
import os
import sys
from glob import glob
from typing import List

from scripts.benchmark_stats import BenchmarkStats
Expand Down
2 changes: 1 addition & 1 deletion scripts/get_benchmark_results.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import os
import sys

from datetime import datetime, timedelta

from dateutil.tz import tzutc

from scripts.get_summary import parse_environment_variables
Expand Down
2 changes: 1 addition & 1 deletion scripts/google_cloud_storage.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import os
import json
import os

from google.cloud import storage

Expand Down
24 changes: 24 additions & 0 deletions scripts/run_tests.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
#!/bin/bash
#
# Run unit tests
#
# NOTE: This script expects to be run from the project root with
# ./scripts/run_tests.sh
set -o pipefail

# Print a coloured pass/fail message for a test stage.
#   $1 - result code of the stage (0 = success)
#   $2 - exit status to terminate with when the stage failed
#   $3 - human-readable name of the stage
function display_result {
  # Quote all expansions so empty/whitespace arguments cannot break the test.
  RESULT="$1"
  EXIT_STATUS="$2"
  TEST="$3"

  if [ "$RESULT" -ne 0 ]; then
    echo -e "\033[31m$TEST failed\033[0m"
    exit "$EXIT_STATUS"
  else
    echo -e "\033[32m$TEST passed\033[0m"
  fi
}

py.test --cov-config=.coveragerc --cov --cov-report html

display_result $? 3 "Unit tests"
58 changes: 34 additions & 24 deletions scripts/visualise_results.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,35 +8,36 @@
PERCENTILES_TO_GRAPH = (50, 90, 95, 99)


def plot_data(df, number_of_days_to_plot):
plt.style.use('fast')
class GraphGenerationFailed(Exception):
    """Raised when the performance graph could not be generated."""

if (
number_of_days_to_plot and number_of_days_to_plot <= 45
): # To make the chart still easily digestible
df.plot.line(marker="o", markersize=8)
plt.grid(True, axis="both", alpha=0.3)
else:
df.plot.line()

plt.margins(0.03, 0.07)
plt.legend(frameon=True, prop={"size": 17})
plt.xticks(df.index, df["DATE"], size="small", rotation=90)
plt.yticks(size="small")
plt.ylabel("Average Response Time (ms)")
plt.xlabel("Run Date (YYYY-MM-DD)", labelpad=13)
def plot_data(df, number_of_days_to_plot):
    """Render the benchmark results dataframe as a line graph.

    Saves the graph as ``performance_graph.png`` in the working directory.

    :param df: DataFrame with a "DATE" column and one column per percentile
        (assumed — TODO confirm against get_data_frame's output).
    :param number_of_days_to_plot: number of daily results being plotted;
        when 45 or fewer, point markers and a grid are added.
    :raises GraphGenerationFailed: if any step of the rendering fails.
    """
    try:
        plt.style.use('fast')

        if (
            number_of_days_to_plot and number_of_days_to_plot <= 45
        ):  # To make the chart still easily digestible
            df.plot.line(marker="o", markersize=8)
            plt.grid(True, axis="both", alpha=0.3)
        else:
            df.plot.line()

        plt.margins(0.03, 0.07)
        plt.legend(frameon=True, prop={"size": 17})
        plt.xticks(df.index, df["DATE"], size="small", rotation=90)
        plt.yticks(size="small")
        plt.ylabel("Average Response Time (ms)")
        plt.xlabel("Run Date (YYYY-MM-DD)", labelpad=13)

        plt.savefig('performance_graph.png', bbox_inches="tight")
        print("Graph saved as performance_graph.png")
    except Exception as e:
        # Wrap any rendering failure in the module's domain exception,
        # preserving the original cause for debugging.
        raise GraphGenerationFailed from e

folders = sorted(glob(f"{parsed_variables['output_dir']}/*"))
results = get_results(folders, number_of_days)

def get_data_frame(results):
result_fields = [
[
result.date,
Expand All @@ -49,6 +50,15 @@ def plot_data(df, number_of_days_to_plot):
]

percentile_columns = (f"{percentile}th" for percentile in PERCENTILES_TO_GRAPH)
data_frame = DataFrame(result_fields, columns=["DATE", *percentile_columns])
return DataFrame(result_fields, columns=["DATE", *percentile_columns])


if __name__ == '__main__':
    # Entry point: read configuration from environment variables, load the
    # benchmark results from the output directory and plot them.
    parsed_variables = parse_environment_variables()
    number_of_days = parsed_variables['number_of_days']

    folders = sorted(glob(f"{parsed_variables['output_dir']}/*"))
    results = get_results(folders, number_of_days)
    dataframe = get_data_frame(results)

    plot_data(dataframe, number_of_days)
Empty file added tests/__init__.py
Empty file.
42 changes: 42 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
import pytest

from scripts.get_summary import get_results

# Expected formatted summary for a single mock results folder
# (presumably compared against get_summary's output — confirm in the tests).
EXPECTED_OUTPUT_SINGLE_FOLDER = (
    '---\n'
    'Percentile Averages:\n'
    '50th: 58ms\n'
    '90th: 96ms\n'
    '95th: 173ms\n'
    '99th: 301ms\n'
    '99.9th: 477ms\n'
    '---\n'
    'GETs (99th): 380ms\n'
    'POSTs (99th): 211ms\n'
    '---\n'
    'Total Requests: 70,640\n'
    'Total Failures: 1\n'
    'Error Percentage: 0.0%\n'
)

# Expected formatted summary when aggregating multiple mock results folders.
EXPECTED_OUTPUT_MULTIPLE_FOLDERS = (
    '---\n'
    'Percentile Averages:\n'
    '50th: 58ms\n'
    '90th: 98ms\n'
    '95th: 176ms\n'
    '99th: 313ms\n'
    '99.9th: 595ms\n'
    '---\n'
    'GETs (99th): 383ms\n'
    'POSTs (99th): 234ms\n'
    '---\n'
    'Total Requests: 211,841\n'
    'Total Failures: 2\n'
    'Error Percentage: 0.0%\n'
)


@pytest.fixture
def get_results_single_file():
    """Benchmark results loaded from a single mock stats folder."""
    return get_results(folders=["./tests/mock_stats/2024-02-07T03:09:41"])
Loading
Loading