From abdedb26c75f617d7343dadcd83989cd9a0aff56 Mon Sep 17 00:00:00 2001 From: Ryan de Kleer Date: Mon, 5 Sep 2022 10:52:33 -0700 Subject: [PATCH] Prevent logging interception; add log tests Prevents FatalLogger from re-logging warning & error level log records, which had resulted in masking the initial source of the log messages. Change to Interface =================== This change modifies the FatalLogger class attributes: - warnings_fatal - errors_fatal + fatal_log_level Motivation ---------- Intention is to clarify the behavior: raise RuntimeErrors on any desired log-level *or higher*. The previous implementation did not respond to higher log levels (eg. logging.CRITICAL) and allowed unintuitive states: for example, it was possible to set the logger to fail at warning level yet not error-level. Fix #3037 --- RELEASE.md | 13 +++++++++ pelican/log.py | 36 +++++++++++++++-------- pelican/tests/test_log.py | 61 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 12 deletions(-) create mode 100644 RELEASE.md diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000..a3c5f014a4 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,13 @@ +Release type: minor + +Rework `pelican.log.FatalLogger` to prevent overriding the source of `warning` +and `error` level logs. + +Changes the class attributes on `FatalLogger`: +``` +- warnings_fatal +- errors_fatal ++ fatal_log_level +``` +This allows logic to merely check whether the log record is at (or higher than) +whichever level is declared "fatal". 
diff --git a/pelican/log.py b/pelican/log.py index be176ea89d..5cac9caadd 100644 --- a/pelican/log.py +++ b/pelican/log.py @@ -84,18 +84,21 @@ def enable_filter(self): class FatalLogger(LimitLogger): - warnings_fatal = False - errors_fatal = False - def warning(self, *args, **kwargs): - super().warning(*args, **kwargs) - if FatalLogger.warnings_fatal: - raise RuntimeError('Warning encountered') + fatal_log_level = logging.NOTSET - def error(self, *args, **kwargs): - super().error(*args, **kwargs) - if FatalLogger.errors_fatal: - raise RuntimeError('Error encountered') + def callHandlers(self, record): + """ + Raise RuntimeError if log level is higher than acceptable. + + Level limit set via the ``--fatal`` flag when running pelican. + """ + super().callHandlers(record) + if FatalLogger.fatal_log_level == logging.NOTSET: + return + + if record.levelno >= FatalLogger.fatal_log_level: + raise RuntimeError('%s encountered.' % record.levelname) logging.setLoggerClass(FatalLogger) @@ -105,8 +108,17 @@ def error(self, *args, **kwargs): def init(level=None, fatal='', handler=RichHandler(console=console), name=None, logs_dedup_min_level=None): - FatalLogger.warnings_fatal = fatal.startswith('warning') - FatalLogger.errors_fatal = bool(fatal) + + try: + FatalLogger.fatal_log_level = { + '': logging.NOTSET, # no filtering + 'warning': logging.WARNING, # RuntimeError on warning() & greater + 'error': logging.ERROR # RuntimeError on error() & greater + }[fatal.strip().lower()] + except KeyError: + raise ValueError( + "Expected fatal level (warning|error); got: %s" % fatal + ) LOG_FORMAT = "%(message)s" logging.basicConfig( diff --git a/pelican/tests/test_log.py b/pelican/tests/test_log.py index 1f2fb83a84..ccfd1a04e4 100644 --- a/pelican/tests/test_log.py +++ b/pelican/tests/test_log.py @@ -24,6 +24,9 @@ def _reset_limit_filter(self): log.LimitFilter._threshold = 5 log.LimitFilter._group_count = defaultdict(int) + def _log_action(self, log_level_name): + return getattr(self.logger, log_level_name)
+ @contextmanager def reset_logger(self): try: @@ -80,3 +83,61 @@ def do_logging(): self.assertEqual( self.handler.count_logs('Another log \\d', logging.WARNING), 0) + + def test_fatal_logger_no_quit(self): + # log.FatalLogger.fatal_log_level = logging.NOTSET + log.init(logging.DEBUG, fatal='') + + for level in ('debug', 'info', 'warning', 'error', 'critical'): + self._log_action(level)('%s: FatalLogger.%s()' % (__name__, level)) + + def test_fatal_logger_warning(self): + # log.FatalLogger.fatal_log_level = logging.WARNING + log.init(logging.DEBUG, fatal='warning') + + for level in ('debug', 'info'): + self._log_action(level)('%s: FatalLogger.%s()' % (__name__, level)) + + for level in ('warning', 'error', 'critical'): + try: + self._log_action(level)("FAIL") + except RuntimeError: + continue + else: # should have raised RuntimeError, but didn't + raise AssertionError( + "Fatal set to %s but failed to exit on %s()." % ( + logging.getLevelName(log.FatalLogger.fatal_log_level), + level + ) + ) + + def test_fatal_logger_error(self): + # log.FatalLogger.fatal_log_level = logging.ERROR + log.init(logging.DEBUG, fatal='error') + + for level in ('debug', 'info', 'warning'): + self._log_action(level)('%s: FatalLogger.%s()' % (__name__, level)) + + for level in ('error', 'critical'): + try: + self._log_action(level)("FAIL") + except RuntimeError: + continue + else: # should have raised RuntimeError, but didn't + raise AssertionError( + "Fatal set to %s but failed to exit on %s()." % ( + logging.getLevelName(log.FatalLogger.fatal_log_level), + level + ) + ) + + def test_fatal_logger_invalid_level(self): + # log.FatalLogger.fatal_log_level = logging.NOTSET + try: + log.init(logging.DEBUG, fatal='spam') + except ValueError: + pass + else: + raise AssertionError( + "An invalid log-level should have failed here." + )