Unit Test All Core Packages #46
name: Unit Test All Core Packages

on:
#  push:
#    branches: [main]
  pull_request:
    branches: [main]
  schedule:
    - cron: '0 0 * * *' # daily
  workflow_dispatch:
  pull_request_target:
    types: [opened, synchronize, reopened]

jobs:
  test:
    runs-on: ubuntu-latest # TODO: make this `["ubuntu-latest", "windows-latest", "macos-latest"]`?
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"] # TODO: add 3.13 eventually
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install core PyHC packages # Install all dependencies listed in pyproject.toml
        run: |
          python -m pip install --upgrade pip
          pip install .[tests] # include optional test dependencies
      # ========== HAPI Client ===============================
      - name: Get hapiclient version
        id: hapiclient_version
        run: |
          echo "HAPICLIENT_VERSION=$(python -c 'import hapiclient; print(hapiclient.__version__)')" >> $GITHUB_ENV
      # Mark tests as successful if 85%+ pass (not 98% because of known failing tests)
      - name: Clone and test HAPI Client
        shell: bash
        env:
          HAPICLIENT_VERSION: ${{ env.HAPICLIENT_VERSION }}
        run: |
          # Clone hapiclient repository
          hapiclient_version="${HAPICLIENT_VERSION}"
          if [ -z "$hapiclient_version" ]; then
            echo "hapiclient version not found."
            exit 1
          fi
          echo "Cloning hapiclient version $hapiclient_version"
          hapiclient_version_tag="${hapiclient_version%%.dev*}"
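          # The %%.dev* expansion strips any ".devN" suffix, so a dev install still maps
          # to its corresponding release tag.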
git clone --branch "v$hapiclient_version_tag" "https://github.com/hapi-server/client-python.git"
cd client-python
# Run the tests
# --deselect hapiclient/test/test_hapitime_reformat.py::test_hapitime_reformat \
python -m pytest \
-v hapiclient/test/ \
--junitxml=test-results.xml \
|| true
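          # "|| true" keeps a pytest failure from ending the step here; the pass-rate
          # check below decides whether the step succeeds.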
          # Analyze test results
          python <<EOF
          import sys
          import xml.etree.ElementTree as ET
          # Parse the XML file
          tree = ET.parse('test-results.xml')
          root = tree.getroot()
          # Initialize counters
          total = errors = failures = skipped = 0
          if root.tag == 'testsuites':
              # Sum over all testsuite elements
              for testsuite in root.findall('testsuite'):
                  total += int(testsuite.attrib.get('tests', 0))
                  errors += int(testsuite.attrib.get('errors', 0))
                  failures += int(testsuite.attrib.get('failures', 0))
                  skipped += int(testsuite.attrib.get('skipped', 0))
          elif root.tag == 'testsuite':
              # Single testsuite
              total = int(root.attrib.get('tests', 0))
              errors = int(root.attrib.get('errors', 0))
              failures = int(root.attrib.get('failures', 0))
              skipped = int(root.attrib.get('skipped', 0))
          else:
              print(f'Unexpected root tag: {root.tag}')
              sys.exit(1)
          # Count skipped tests as passed
          passed = total - errors - failures
          pass_rate = (passed / total) * 100 if total > 0 else 0
          print(f'Total tests: {total}')
          print(f'Passed: {passed} (including skipped)')
          print(f'Failures: {failures}')
          print(f'Errors: {errors}')
          print(f'Skipped: {skipped}')
          print(f'Pass rate: {pass_rate:.2f}%')
          if pass_rate >= 85:
              print('Pass rate is at or above 85%, acceptable.')
              sys.exit(0)
          else:
              print('Pass rate is below the 85% threshold.')
              sys.exit(1)
          EOF
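      # The same JUnit-XML parse + pass-rate check repeats for each package below,
      # with a package-specific threshold.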
      # ========== Kamodo ===============================
      # Kamodo package has no unit tests
      # - name: Clone and test kamodo
      #   run: |
      #     git clone https://github.com/nasa/Kamodo.git
      #     cd Kamodo
      #     pip install -r requirements.txt
      #     # pip install .
      #     pytest
      # ========== PlasmaPy ===============================
      - name: Get PlasmaPy version
        id: plasmapy_version
        run: |
          echo "PLASMAPY_VERSION=$(python -c 'import plasmapy; print(plasmapy.__version__)')" >> $GITHUB_ENV
      # Mark tests as successful if 98%+ pass
      - name: Clone and test PlasmaPy
        if: matrix.python-version != '3.9' # Exclude PlasmaPy for Python 3.9
        shell: bash
        env:
          PLASMAPY_VERSION: ${{ env.PLASMAPY_VERSION }}
        run: |
          # Clone matching version of PlasmaPy repository
          plasmapy_version="${PLASMAPY_VERSION}"
          if [ -z "$plasmapy_version" ]; then
            echo "PlasmaPy version not found."
            exit 1
          fi
          echo "Cloning PlasmaPy version $plasmapy_version"
          plasmapy_version_tag="${plasmapy_version%%.dev*}"
          git clone --branch "v$plasmapy_version_tag" "https://github.com/PlasmaPy/PlasmaPy.git"
          cd PlasmaPy
          # Run the tests (ignoring tests that require GH_TOKEN)
          python -m pytest \
            --junitxml=test-results.xml \
            --continue-on-collection-errors \
            --ignore=tests/utils/data/test_downloader.py || true
          # Analyze test results
          python <<EOF
          import sys
          import xml.etree.ElementTree as ET
          # Parse the XML file
          tree = ET.parse('test-results.xml')
          root = tree.getroot()
          # Initialize counters
          total = errors = failures = skipped = 0
          if root.tag == 'testsuites':
              # Sum over all testsuite elements
              for testsuite in root.findall('testsuite'):
                  total += int(testsuite.attrib.get('tests', 0))
                  errors += int(testsuite.attrib.get('errors', 0))
                  failures += int(testsuite.attrib.get('failures', 0))
                  skipped += int(testsuite.attrib.get('skipped', 0))
          elif root.tag == 'testsuite':
              # Single testsuite
              total = int(root.attrib.get('tests', 0))
              errors = int(root.attrib.get('errors', 0))
              failures = int(root.attrib.get('failures', 0))
              skipped = int(root.attrib.get('skipped', 0))
          else:
              print(f'Unexpected root tag: {root.tag}')
              sys.exit(1)
          # Count skipped tests as passed
          passed = total - errors - failures
          pass_rate = (passed / total) * 100 if total > 0 else 0
          print(f'Total tests: {total}')
          print(f'Passed: {passed} (including skipped)')
          print(f'Failures: {failures}')
          print(f'Errors: {errors}')
          print(f'Skipped: {skipped}')
          print(f'Pass rate: {pass_rate:.2f}%')
          if pass_rate >= 98:
              print('Pass rate is at or above 98%, acceptable.')
              sys.exit(0)
          else:
              print('Pass rate is below the 98% threshold.')
              sys.exit(1)
          EOF
      # ========== pysat ===============================
      # Mark tests as successful if 98%+ pass
      - name: Test pysat
        continue-on-error: false
        shell: bash
        run: |
          # Create a directory for pysat data
          mkdir -p pysatData
          # Set the directory for pysat data
          python -c "import pysat; pysat.params['data_dirs'] = ['$(pwd)/pysatData']"
          # Run pysat's unit tests from the installed package and output results to XML
          pytest --pyargs pysat.tests --junitxml=test-results.xml || true
          # Calculate pass rate and determine exit code
          python <<EOF
          import sys
          import xml.etree.ElementTree as ET
          # Parse the XML file
          tree = ET.parse('test-results.xml')
          root = tree.getroot()
          # Initialize counters
          total = errors = failures = skipped = 0
          if root.tag == 'testsuites':
              # Sum over all testsuite elements
              for testsuite in root.findall('testsuite'):
                  total += int(testsuite.attrib.get('tests', 0))
                  errors += int(testsuite.attrib.get('errors', 0))
                  failures += int(testsuite.attrib.get('failures', 0))
                  skipped += int(testsuite.attrib.get('skipped', 0))
          elif root.tag == 'testsuite':
              # Single testsuite
              total = int(root.attrib.get('tests', 0))
              errors = int(root.attrib.get('errors', 0))
              failures = int(root.attrib.get('failures', 0))
              skipped = int(root.attrib.get('skipped', 0))
          else:
              print(f'Unexpected root tag: {root.tag}')
              sys.exit(1)
          # Count skipped tests as passed
          passed = total - errors - failures
          pass_rate = (passed / total) * 100 if total > 0 else 0
          print(f'Total tests: {total}')
          print(f'Passed: {passed} (including skipped)')
          print(f'Failures: {failures}')
          print(f'Errors: {errors}')
          print(f'Skipped: {skipped}')
          print(f'Pass rate: {pass_rate:.2f}%')
          if pass_rate >= 98:
              print('Pass rate is at or above 98%, acceptable.')
              sys.exit(0)
          else:
              print('Pass rate is below the 98% threshold.')
              sys.exit(1)
          EOF
      # ========== pySPEDAS ===============================
      # Mark tests as successful if 90%+ pass (not 98% because of known failing tests)
      - name: Test pySPEDAS
        shell: bash
        run: |
          # Run pySPEDAS's unit tests from the installed package and output results to XML
          # Ignore 5 known failing tests, for now
          # pytest \
          #   --deselect pyspedas/analysis/tests/test_twavpol.py::TwavpolDataValidation::test_degpol \
          #   --deselect pyspedas/analysis/tests/test_twavpol.py::TwavpolDataValidation::test_elliptict \
          #   --deselect pyspedas/analysis/tests/test_twavpol.py::TwavpolDataValidation::test_helict \
          #   --deselect pyspedas/analysis/tests/test_twavpol.py::TwavpolDataValidation::test_powspec \
          #   --deselect pyspedas/analysis/tests/test_twavpol.py::TwavpolDataValidation::test_waveangle
          pytest --pyargs pyspedas --junitxml=test-results.xml || true
          # Calculate pass rate and determine exit code
          python <<EOF
          import sys
          import xml.etree.ElementTree as ET
          # Parse the XML file
          tree = ET.parse('test-results.xml')
          root = tree.getroot()
          # Initialize counters
          total = errors = failures = skipped = 0
          if root.tag == 'testsuites':
              # Sum over all testsuite elements
              for testsuite in root.findall('testsuite'):
                  total += int(testsuite.attrib.get('tests', 0))
                  errors += int(testsuite.attrib.get('errors', 0))
                  failures += int(testsuite.attrib.get('failures', 0))
                  skipped += int(testsuite.attrib.get('skipped', 0))
          elif root.tag == 'testsuite':
              # Single testsuite
              total = int(root.attrib.get('tests', 0))
              errors = int(root.attrib.get('errors', 0))
              failures = int(root.attrib.get('failures', 0))
              skipped = int(root.attrib.get('skipped', 0))
          else:
              print(f'Unexpected root tag: {root.tag}')
              sys.exit(1)
          # Count skipped tests as passed
          passed = total - errors - failures
          pass_rate = (passed / total) * 100 if total > 0 else 0
          print(f'Total tests: {total}')
          print(f'Passed: {passed} (including skipped)')
          print(f'Failures: {failures}')
          print(f'Errors: {errors}')
          print(f'Skipped: {skipped}')
          print(f'Pass rate: {pass_rate:.2f}%')
          if pass_rate >= 90:
              print('Pass rate is at or above 90%, acceptable.')
              sys.exit(0)
          else:
              print('Pass rate is below the 90% threshold.')
              sys.exit(1)
          EOF
      # ========== SpacePy ===============================
      - name: Get SpacePy version
        id: spacepy_version
        run: |
          echo "SPACEPY_VERSION=$(python -c 'import spacepy; print(spacepy.__version__)')" >> $GITHUB_ENV
      - name: Clone and test SpacePy
        continue-on-error: false
        shell: bash
        env:
          SPACEPY_VERSION: ${{ env.SPACEPY_VERSION }}
        run: |
          # Clone SpacePy repository
          spacepy_version="${SPACEPY_VERSION}"
          if [ -z "$spacepy_version" ]; then
            echo "SpacePy version not found."
            exit 1
          fi
          echo "Cloning SpacePy version $spacepy_version"
          spacepy_version_tag="${spacepy_version%%.dev*}"
          git clone --branch "release-$spacepy_version_tag" "https://github.com/spacepy/spacepy.git"
          cd spacepy
          # Run the tests (all test files except one that always fails)
          cd tests
          # python test_all.py # would run all files below
          python test_ae9ap9.py
          # python test_base.py # version "UNRELEASED" never matches installed version
          python test_coordinates.py
          python test_ctrans.py
          python test_datamanager.py
          python test_datamodel.py
          python test_empiricals.py
          python test_igrf.py
          python test_irbempy.py
          python test_lanlstar.py
          python test_lib.py
          python test_omni.py
          python test_plot.py
          python test_plot_utils.py
          python test_poppy.py
          python test_pybats.py
          python test_pycdf.py
          python test_pycdf_istp.py
          python test_rst.py
          python test_seapy.py
          python test_spectrogram.py
          python test_testing.py
          python test_time.py
          python test_toolbox.py
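          # These suites run as individual scripts (no JUnit XML or pass-rate threshold),
          # so with bash's -e mode any failing script fails this step immediately.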
      # ========== SunPy ===============================
      # Mark tests as successful if 98%+ pass
      - name: Test SunPy
        if: matrix.python-version != '3.9' # Exclude SunPy for Python 3.9
        shell: bash
        run: |
          # Run SunPy's unit tests from the installed package and output results to XML
          pytest --pyargs sunpy --junitxml=test-results.xml || true
          # Calculate pass rate and determine exit code
          python <<EOF
          import sys
          import xml.etree.ElementTree as ET
          # Parse the XML file
          tree = ET.parse('test-results.xml')
          root = tree.getroot()
          # Initialize counters
          total = errors = failures = skipped = 0
          if root.tag == 'testsuites':
              # Sum over all testsuite elements
              for testsuite in root.findall('testsuite'):
                  total += int(testsuite.attrib.get('tests', 0))
                  errors += int(testsuite.attrib.get('errors', 0))
                  failures += int(testsuite.attrib.get('failures', 0))
                  skipped += int(testsuite.attrib.get('skipped', 0))
          elif root.tag == 'testsuite':
              # Single testsuite
              total = int(root.attrib.get('tests', 0))
              errors = int(root.attrib.get('errors', 0))
              failures = int(root.attrib.get('failures', 0))
              skipped = int(root.attrib.get('skipped', 0))
          else:
              print(f'Unexpected root tag: {root.tag}')
              sys.exit(1)
          # Count skipped tests as passed
          passed = total - errors - failures
          pass_rate = (passed / total) * 100 if total > 0 else 0
          print(f'Total tests: {total}')
          print(f'Passed: {passed} (including skipped)')
          print(f'Failures: {failures}')
          print(f'Errors: {errors}')
          print(f'Skipped: {skipped}')
          print(f'Pass rate: {pass_rate:.2f}%')
          if pass_rate >= 98:
              print('Pass rate is at or above 98%, acceptable.')
              sys.exit(0)
          else:
              print('Pass rate is below the 98% threshold.')
              sys.exit(1)
          EOF