diff --git a/.github/workflows/performance.yaml b/.github/workflows/performance.yaml
index a5af1967..dc59c4f1 100644
--- a/.github/workflows/performance.yaml
+++ b/.github/workflows/performance.yaml
@@ -77,10 +77,19 @@ jobs:
           TEST_INSPECTION_REPORTS_DIR: ${{ github.workspace }}/inspection-reports
           TEST_METRICS_DIR: ${{ github.workspace }}/test/performance/results/head
           TEST_RUN_NAME: head
+          TEST_DQLITE_TRACE_LEVEL: 1
+          TEST_RAFT_TRACE_LEVEL: 1
+          TEST_K8S_DQLITE_DEBUG: 1
         run: |
           cd test/performance
           mkdir -p ./results/head
-          sg lxd -c 'tox -e performance'
+          set -ex
+          iteration=0
+          while [[ $iteration -lt 5 ]]; do
+            echo "iteration: $iteration"
+            sg lxd -c 'tox -e performance'
+            iteration=$(( $iteration + 1 ))
+          done
       - name: Run Performance test for base code snap
         env:
           TEST_SNAP: ${{ github.workspace }}/base-code.snap
@@ -89,16 +98,25 @@
           TEST_INSPECTION_REPORTS_DIR: ${{ github.workspace }}/inspection-reports
           TEST_METRICS_DIR: ${{ github.workspace }}/test/performance/results/base-code
           TEST_RUN_NAME: base-code
+          TEST_DQLITE_TRACE_LEVEL: 1
+          TEST_RAFT_TRACE_LEVEL: 1
+          TEST_K8S_DQLITE_DEBUG: 1
         run: |
           cd test/performance
           mkdir -p ./results/base-code
-          sg lxd -c 'tox -e performance'
-      - name: Generate 3 node Graphs
-        if: always()
-        run: |
-          cd test/performance
-          sudo Rscript parse-performance-metrics.R -p ./results/head -o ./results/head -f *three-node.log
-          sudo Rscript parse-performance-metrics.R -p ./results/base-code -o ./results/base-code -f *three-node.log
+          set -ex
+          iteration=0
+          while [[ $iteration -lt 5 ]]; do
+            echo "iteration: $iteration"
+            sg lxd -c 'tox -e performance'
+            iteration=$(( $iteration + 1 ))
+          done
+      # - name: Generate 3 node Graphs
+      #   if: always()
+      #   run: |
+      #     cd test/performance
+      #     sudo Rscript parse-performance-metrics.R -p ./results/head -o ./results/head -f *three-node.log
+      #     sudo Rscript parse-performance-metrics.R -p ./results/base-code -o ./results/base-code -f *three-node.log
       - name: Generate single node Graphs
         if: always()
         run: |
diff --git a/test/performance/tests/test_multi_node.py b/test/performance/tests/test_multi_node.py
index 439928bc..a0259226 100644
--- a/test/performance/tests/test_multi_node.py
+++ b/test/performance/tests/test_multi_node.py
@@ -9,6 +9,7 @@
 
 @pytest.mark.node_count(3)
 def test_three_node_load(instances: List[harness.Instance]):
+    pytest.xfail("donotmerge: disabled")
     cluster_node = instances[0]
     joining_node = instances[1]
     joining_node_2 = instances[2]
diff --git a/test/performance/tests/test_single_node.py b/test/performance/tests/test_single_node.py
index 1c7b93ac..9c368457 100644
--- a/test/performance/tests/test_single_node.py
+++ b/test/performance/tests/test_single_node.py
@@ -9,7 +9,8 @@ def test_single_node_load(session_instance: harness.Instance):
     metrics.configure_kube_burner(session_instance)
     process_dict = metrics.collect_metrics([session_instance])
     try:
-        metrics.run_kube_burner(session_instance)
+        for iteration in range(10):
+            metrics.run_kube_burner(session_instance)
     finally:
         # Collect the metrics even if kube-burner fails.
         metrics.stop_metrics([session_instance], process_dict)
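
For reference, a minimal sketch (with a hypothetical test name, not taken from this repository) of the `pytest.xfail(...)` pattern used above in `test_multi_node.py`: calling `pytest.xfail()` raises immediately, so the rest of the test body never executes and the test is reported as xfailed rather than failed.

```python
import pytest


def test_placeholder():
    # pytest.xfail() raises right away; the test stops here, is reported as
    # "xfailed", and nothing below this line ever runs.
    pytest.xfail("donotmerge: disabled")
    raise AssertionError("never reached")
```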