Add performance benchmarks testing via perf tool #431

Merged: 1 commit, Dec 18, 2024
20 changes: 20 additions & 0 deletions BM/common/check_perf.py
@@ -0,0 +1,20 @@
#!/usr/bin/env python3

import subprocess

def check_perf_installed():
    """Check whether the perf tool is available by running 'perf --version'."""
    try:
        result = subprocess.run(['perf', '--version'],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                check=False)

        if result.returncode == 0:
            print("Version:", result.stdout.decode().strip())
            return True
        print("Perf tool is not installed or not working")
        return False
    except FileNotFoundError:
        print("Perf tool is not installed")
        return False
    except Exception as e:
        print("An error occurred:", str(e))
        return False

if __name__ == '__main__':
check_perf_installed()
30 changes: 30 additions & 0 deletions BM/performance/README.md
@@ -0,0 +1,30 @@
# Release Notes

The performance suite runs the predefined benchmarks via the perf tool,
covering CPU, memory, I/O, and algorithm performance.
If failures are detected, consider reading the debug logs, then
using perf's top-down analysis for additional investigation.
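
For example, a quick top-down snapshot can be collected with something like the
following (a sketch; the '--topdown' option requires a perf build and CPU that
support top-down events):
```
perf stat --topdown -a -- sleep 5
```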

The Python script utilizes the Avocado Test Framework, so it needs to be installed first.

## The command to install avocado from source code
```
git clone git://github.com/avocado-framework/avocado.git
cd avocado
pip install .
```

or

## Installing avocado via pip:
```
pip3 install --user avocado-framework
```

## The command to run the case
### Running with 'runtests.py'
```
cd ..
./runtests.py -f performance -t performance/tests
```

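### Running a single benchmark directly
Each entry in 'performance/tests' invokes 'perf_bench.py' on its own, so a single
benchmark can also be run directly, for example:
```
python3 -u perf_bench.py --bench_feature mem --feature_option memcpy
```
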
197 changes: 197 additions & 0 deletions BM/performance/perf_bench.py
@@ -0,0 +1,197 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only

"""
This script performs the predefined benchmark testing via the perf tool.
It covers CPU, memory, I/O, and algorithm performance.

Prerequisites:
Install the avocado framework and the required dependencies with the commands below:
git clone git://github.com/avocado-framework/avocado.git
cd avocado
pip install .
"""

import subprocess
import re
import os
import argparse
import pty
import select
import signal
import sys

__author__ = "Wendy Wang"
__copyright__ = "GPL-2.0-only"
__license__ = "GPL version 2"

# Determine the directory of the current script
script_dir = os.path.dirname(os.path.abspath(__file__))

# Construct relative paths to the common.sh file
common_sh_path = os.path.join(script_dir, '../common/common.sh')


class ShellCommandRunnable():
    """Run a shell command in a pseudo-terminal (PTY) and analyze its output."""

def __init__(self, command):
self.command = command
self.stdout = ""
self.stderr = ""

def run(self):
"""Run the shell command in a pseudo-terminal(PTY) and capture its output.
It handles both standard output and error, waits for the process to finish,
and checks for any issues such as timeout or failures."""
try:
# Create a pseudo-terminal (PTY) pair
master_fd, slave_fd = pty.openpty()

# Define the environment to simulate a TTY for color support
env = os.environ.copy()
# Set TERM to a terminal that supports colors
env['TERM'] = 'xterm-256color'

# Run the perf command with the PTY (both stdout and stderr will be sent to the PTY)
            process = subprocess.Popen(
                self.command,
                shell=True,
                stdout=slave_fd,
                stderr=slave_fd,
                text=True,
                env=env,
                executable='/bin/bash',
                # Start a new session so the command's process group can be
                # terminated on timeout without affecting this script
                start_new_session=True
            )

# Read the output from the master end of the PTY
output = ""
try:
while True:
rlist, _, _ = select.select([master_fd], [], [], 0.1)
if rlist:
part_output = os.read(master_fd, 1024).decode()
if not part_output:
break
output += part_output
print(part_output, end="")
else:
if process.poll() is not None:
break
except Exception as e:
print(f"Error reading output: {e}")
sys.exit(1)

# Close the slave_fd to stop output to the PTY
os.close(slave_fd)

# Wait for the process to finish with timeout
try:
process.wait(timeout=60)
except subprocess.TimeoutExpired:
                print("Process took too long to complete, terminating it.")
                os.killpg(os.getpgid(process.pid), signal.SIGTERM)
process.wait()

# Ensure process has terminated
            return_code = process.returncode
            if return_code != 0:
                print(f"Perf command failed with return code {return_code}")

            # Save the captured output so callers (e.g. check_dmesg_error) can read it
            self.stdout = output

            # Analyze output for color codes
            self.analyze_output(output)

return return_code

except (subprocess.CalledProcessError, OSError) as e:
print(f"Error running perf bench: {e}")
sys.exit(1)


    def analyze_output(self, output):
        """Analyze the output of the perf command for potential failures.
        It checks each line of the output for ANSI color codes and flags them if found."""
        # Check for any ANSI color codes in the output
        ansi_color_pattern = re.compile(r'\033\[[0-9;]*m')
        fail_lines = []

        for line in output.splitlines():
            # Skip empty lines
            if not line.strip():
                continue

            print(f"Checking line: {line}")
            # If the line contains ANSI color characters
            if ansi_color_pattern.search(line):
                print(f"Found color output (potential failure): {line}")
                fail_lines.append(line)
        if fail_lines:
            print(f"Raw output:\n{output}\n")
            print("Failure detected in the following lines:")
            for line in fail_lines:
                print(line)
            sys.exit(1)


def check_dmesg_error():
"""Check the dmesg log for any failure or error messages."""
result = ShellCommandRunnable(
f"source {common_sh_path} && extract_case_dmesg")
result.run()
dmesg_log = result.stdout

    # Check for any failure, error, or bug in the dmesg log captured during the run
    if dmesg_log and any(keyword in dmesg_log for keyword in
                         ["fail", "error", "Call Trace", "Bug"]):
return dmesg_log
return None


def run_perf_bench(bench_feature, feature_option):
    """Run a perf stat check while running the perf bench benchmark testing."""
    try:
        # Define the perf command
        perf_command = (
            f"perf stat -e cycles,instructions,cache-references,cache-misses,"
            f"branches,branch-misses perf bench {bench_feature} {feature_option}"
        )
# Run the perf command using ShellCommandRunnable
result = ShellCommandRunnable(perf_command)
exit_code = result.run()
if exit_code != 0:
print("Perf benchmark failed")
return exit_code

except Exception as e:
print(f"Error running perf bench: {e}")
sys.exit(1)

# Check dmesg log
dmesg_log = check_dmesg_error()
if dmesg_log:
print(
f"Kernel dmesg shows failure after perf bench testing: {dmesg_log}")
sys.exit(1)

print("Perf benchmark and dmesg check completed successfully")
return 0


# Create an ArgumentParser object
parser = argparse.ArgumentParser(description="Running perf benchmark testing")

# Add the perf bench command arguments
parser.add_argument('--bench_feature', type=str,
default="mem", help="perf bench feature name")
parser.add_argument('--feature_option', type=str,
default="find_bit", help="perf bench feature option")

# Parse the command-line arguments
args = parser.parse_args()

# Run the perf bench command
if __name__ == '__main__':
run_perf_bench(args.bench_feature, args.feature_option)
62 changes: 62 additions & 0 deletions BM/performance/tests
@@ -0,0 +1,62 @@
# This file lists the predefined benchmark tests run via the perf tool

# memcpy: benchmark for memcpy() function
python3 -u perf_bench.py --bench_feature mem --feature_option memcpy
# memset: benchmark for memset() function
python3 -u perf_bench.py --bench_feature mem --feature_option memset
# find_bit: benchmark for find_bit() function
python3 -u perf_bench.py --bench_feature mem --feature_option find_bit
# mem: benchmark for NUMA workloads
python3 -u perf_bench.py --bench_feature numa --feature_option mem
# messaging: benchmark for scheduling and IPC
python3 -u perf_bench.py --bench_feature sched --feature_option messaging
# pipe: benchmark for pipe() between two processes
python3 -u perf_bench.py --bench_feature sched --feature_option pipe
# seccomp-notify: benchmark for seccomp user notify
python3 -u perf_bench.py --bench_feature sched --feature_option seccomp-notify
# basic: benchmark for basic getpgid(2) calls
python3 -u perf_bench.py --bench_feature syscall --feature_option basic
# getpgid: benchmark for getpgid(2) calls
python3 -u perf_bench.py --bench_feature syscall --feature_option getpgid
# fork: benchmark for fork(2) calls
python3 -u perf_bench.py --bench_feature syscall --feature_option fork
# execve: benchmark for execve(2) calls
python3 -u perf_bench.py --bench_feature syscall --feature_option execve
# synthesize: benchmark perf event synthesis
python3 -u perf_bench.py --bench_feature internals --feature_option synthesize
# kallsyms-parse: benchmark kallsyms parsing
python3 -u perf_bench.py --bench_feature internals --feature_option kallsyms-parse
# inject-build-id: benchmark build-id injection
python3 -u perf_bench.py --bench_feature internals --feature_option inject-build-id
# evlist-open-close: benchmark evlist open and close
python3 -u perf_bench.py --bench_feature internals --feature_option evlist-open-close
# pmu-scan: benchmark sysfs PMU info scanning
python3 -u perf_bench.py --bench_feature internals --feature_option pmu-scan
# thread: benchmark thread start/finish with breakpoints
python3 -u perf_bench.py --bench_feature breakpoint --feature_option thread
# enable: benchmark breakpoint enable/disable
python3 -u perf_bench.py --bench_feature breakpoint --feature_option enable
# baseline: baseline libc usleep(1000) call
python3 -u perf_bench.py --bench_feature uprobe --feature_option baseline
# empty: attach empty BPF prog to uprobe on usleep, system wide
python3 -u perf_bench.py --bench_feature uprobe --feature_option empty
# trace_printk: attach trace_printk BPF prog to uprobe on usleep syswide
python3 -u perf_bench.py --bench_feature uprobe --feature_option trace_printk
# empty_ret: attach empty BPF prog to uretprobe on usleep, system wide
python3 -u perf_bench.py --bench_feature uprobe --feature_option empty_ret
# trace_printk_ret: attach trace_printk BPF prog to uretprobe on usleep syswide
python3 -u perf_bench.py --bench_feature uprobe --feature_option trace_printk_ret
# hash: benchmark for futex hash table
python3 -u perf_bench.py --bench_feature futex --feature_option hash
# wake: benchmark for futex wake calls
python3 -u perf_bench.py --bench_feature futex --feature_option wake
# wake-parallel: benchmark for parallel futex wake calls
python3 -u perf_bench.py --bench_feature futex --feature_option wake-parallel
# requeue: benchmark for futex requeue calls
python3 -u perf_bench.py --bench_feature futex --feature_option requeue
# lock-pi: benchmark for futex lock_pi calls
python3 -u perf_bench.py --bench_feature futex --feature_option lock-pi
# wait: benchmark for epoll concurrent epoll_waits
python3 -u perf_bench.py --bench_feature epoll --feature_option wait
# ctl: benchmark for epoll concurrent epoll_ctls
python3 -u perf_bench.py --bench_feature epoll --feature_option ctl