Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Make test failure messages/tracebacks unique to PSM Interop #21

Merged
merged 9 commits into from
Jan 19, 2024
Merged
65 changes: 53 additions & 12 deletions framework/test_cases/base_testcase.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@
class BaseTestCase(absltest.TestCase):
def run(self, result: Optional[unittest.TestResult] = None) -> None:
super().run(result)
# TODO(sergiitk): should this method be returning result? See
# super().run and xds_k8s_testcase.XdsKubernetesBaseTestCase.subTest
test_errors = [error for test, error in result.errors if test is self]
test_failures = [
failure for test, failure in result.failures if test is self
Expand All @@ -36,30 +38,69 @@ def run(self, result: Optional[unittest.TestResult] = None) -> None:
)
# Assume one test case will only have one status.
if test_errors or test_failures:
logging.info("----- TestCase %s FAILED -----", self.id())
total_errors = len(test_errors) + len(test_failures)
logging.error(
"----- PSM Test Case FAILED: %s%s -----",
self.test_name,
f" | Errors count: {total_errors}" if total_errors > 1 else "",
sergiitk marked this conversation as resolved.
Show resolved Hide resolved
)
if test_errors:
self._print_error_list(test_errors, is_unexpected_error=True)
if test_failures:
self._print_error_list(test_failures)
elif test_unexpected_successes:
logging.info(
"----- TestCase %s UNEXPECTEDLY SUCCEEDED -----", self.id()
logging.error(
"----- PSM Test Case UNEXPECTEDLY SUCCEEDED: %s -----\n",
self.test_name,
)
elif test_skipped:
logging.info("----- TestCase %s SKIPPED -----", self.id())
logging.info("Reason for skipping: %s", test_skipped)
logging.info(
"----- PSM Test Case SKIPPED: %s -----"
"\nReason for skipping: %s",
self.test_name,
test_skipped,
)
else:
logging.info("----- TestCase %s PASSED -----", self.id())
logging.info(
"----- PSM Test Case PASSED: %s -----\n",
self.test_name,
)

@property
def test_name(self) -> str:
    """Pretty, log-friendly test name (details in the description).

    self.id() comes in one of two shapes:
      1. Regular: __main__.MyClassTest.test_method
      2. Parametrized: __main__.MyClassTest.test_method{case} ('param'),
         where {case} is an integer suffix for @parameterized.parameters
         (test_method0, test_method1, ...) or the testcase_name for
         @parameterized.named_parameters (test_method_pass, ...).

    Strips the "__main__." prefix (when present) and the trailing
    " ('param')" parametrization detail (when present).
    """
    pretty = self.id()
    if pretty.startswith("__main__."):
        pretty = pretty[len("__main__."):]
    # Everything after the first space is the " ('param')" detail.
    return pretty.partition(" ")[0]

def _print_error_list(
    self, errors: list[str], is_unexpected_error: bool = False
) -> None:
    """Log every collected traceback with a distinctive PSM banner.

    Args:
        errors: Traceback strings collected from the test result.
        is_unexpected_error: True for result.errors (uncaught
            exceptions, labeled ERROR); False for result.failures
            (explicit TestCase.assert*() failures, labeled FAILURE).
    """
    # A FAILURE is an error explicitly signalled via one of the
    # TestCase.assert*() methods; an ERROR is an unexpected exception.
    label: str = "ERROR" if is_unexpected_error else "FAILURE"
    # Mapping-style format string keeps the repeated test_id in sync.
    banner_template = (
        "(%(fail_type)s) PSM Interop Test Failed: %(test_id)s"
        "\n^^^^^"
        "\n[%(test_id)s] PSM Failed Test Traceback BEGIN"
        "\n%(error)s"
        "[%(test_id)s] PSM Failed Test Traceback END\n"
    )
    pretty_name = self.test_name
    for traceback_text in errors:
        logging.error(
            banner_template,
            {
                "test_id": pretty_name,
                "fail_type": label,
                "error": traceback_text,
            },
        )
13 changes: 10 additions & 3 deletions framework/xds_k8s_testcase.py
Original file line number Diff line number Diff line change
Expand Up @@ -252,6 +252,7 @@ def setUp(self):
def handle_sigint(
self, signalnum: _SignalNum, frame: Optional[FrameType]
) -> None:
# TODO(sergiitk): move to base_testcase.BaseTestCase
logger.info("Caught Ctrl+C, cleaning up...")
self._handling_sigint = True
# Force resource cleanup by their name. Addresses the case where ctrl-c
Expand All @@ -266,12 +267,18 @@ def handle_sigint(

@contextlib.contextmanager
def subTest(self, msg, **params):  # noqa pylint: disable=signature-differs
    """Log "Starting/Finished subTest" banners around a subtest body.

    Note: the yielded value is the context manager returned by
    super().subTest(msg, **params); it is created but never entered
    here, so a failure raised inside the caller's `with` body
    propagates out of the test method rather than being recorded as a
    subtest failure.
    """
    # TODO(sergiitk): move to base_testcase.BaseTestCase
    # Important side-effect: this halts test execution on first subtest
    # failure, even if failfast is enabled.
    # TODO(sergiitk): This is desired, but not understood. Figure out why.
    # NOTE(review): the halt is presumably because super().subTest() is
    # never entered (see docstring) — confirm before changing.
    logger.info("--- Starting subTest %s.%s ---", self.test_name, msg)
    try:
        yield super().subTest(msg, **params)
    finally:
        # Suppress the "Finished" banner while handling Ctrl+C so the
        # shutdown logs stay focused on resource cleanup.
        if not self._handling_sigint:
            logger.info(
                "--- Finished subTest %s.%s ---", self.test_name, msg
            )

def setupTrafficDirectorGrpc(self):
self.td.setup_for_grpc(
Expand Down Expand Up @@ -684,7 +691,7 @@ def initKubernetesClientRunner(self) -> KubernetesClientRunner:
raise NotImplementedError

def tearDown(self):
logger.info("----- TestMethod %s teardown -----", self.id())
logger.info("----- TestMethod %s teardown -----", self.test_name)
logger.debug("Getting pods restart times")
client_restarts: int = 0
server_restarts: int = 0
Expand Down
4 changes: 2 additions & 2 deletions tests/bootstrap_generator_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ def tearDownClass(cls):
super().tearDownClass()

def tearDown(self):
logger.info("----- TestMethod %s teardown -----", self.id())
logger.info("----- TestMethod %s teardown -----", self.test_name)
retryer = retryers.constant_retryer(
wait_fixed=_timedelta(seconds=10),
attempts=3,
Expand Down Expand Up @@ -175,7 +175,7 @@ class BootstrapGeneratorServerTest(
test_server: XdsTestServer

def tearDown(self):
logger.info("----- TestMethod %s teardown -----", self.id())
logger.info("----- TestMethod %s teardown -----", self.test_name)
retryer = retryers.constant_retryer(
wait_fixed=_timedelta(seconds=10),
attempts=3,
Expand Down
84 changes: 84 additions & 0 deletions tests/fake_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
# Copyright 2024 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from absl import flags
from absl.testing import absltest
from absl.testing import parameterized

from framework import xds_k8s_testcase

logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)


class FakeTest(xds_k8s_testcase.XdsKubernetesBaseTestCase):
    """A fake test class with known failures to debug BaseTestCase logs.

    Does no infra provisioning: only the flags get parsed and the k8s
    contexts file gets read.
    """

    # NOTE(review): in BaseTestCase terms an assert*() failure is logged
    # as FAILURE and an uncaught exception as ERROR, so the ERROR/FAILURE
    # words in the method names and messages below look swapped — confirm
    # whether that is intentional.

    def test_0_pass(self):
        # Passing bookend at the start of the suite.
        self.assertTrue(True, msg="I'm a passing test in the beginning")

    def test_1_error(self):
        # Explicit assertion failure (collected via result.failures).
        self.assertTrue(False, msg="Test 1 Assertion ERROR")

    def test_2_pass(self):
        self.assertTrue(True, msg="I'm a passing test in the middle")

    def test_3_failure(self):
        # Uncaught exception (collected via result.errors).
        raise RuntimeError("Test 3 Uncaught Exception FAILURE")

    def test_4_pass(self):
        self.assertTrue(True, msg="I'm a passing test at the end")


class FakeParametrizedTest(
    xds_k8s_testcase.XdsKubernetesBaseTestCase,
    parameterized.TestCase,
):
    """A fake class to debug BaseTestCase logs produced by parametrized tests.

    See FakeTest for notes on provisioning.

    Generated test names (see BaseTestCase.test_name):
      - @parameterized.parameters appends an index: test_true0, test_true1.
      - @parameterized.named_parameters appends the testcase_name:
        test_true_named_pass, test_true_named_fail.
    """

    # One passing (True) and one failing (False) generated case.
    @parameterized.parameters(True, False)
    def test_true(self, is_true):
        self.assertTrue(is_true)

    # Same pass/fail pair, but with readable case names in the test id.
    @parameterized.named_parameters(
        {"testcase_name": "pass", "is_true": True},
        {"testcase_name": "fail", "is_true": False},
    )
    def test_true_named(self, is_true):
        self.assertTrue(is_true)


class FakeSubtestTest(xds_k8s_testcase.XdsKubernetesBaseTestCase):
    """A fake class to debug BaseTestCase logs produced by tests with subtests.

    See FakeTest for notes on provisioning.
    """

    def test_even(self):
        # Six subtests: even numbers pass, odd numbers fail.
        for num in range(6):
            with self.subTest(i=num, msg=f"num_{num}"):
                if num % 2 == 1:
                    self.fail(f"Integer {num} is odd")


if __name__ == "__main__":
    # Entry point: absltest discovers and runs the test classes above.
    absltest.main()