[PATCH 5/5] patman: test_util: Print test stdout/stderr within test summaries

Alper Nebi Yasak alpernebiyasak at gmail.com
Sat Apr 2 19:06:08 CEST 2022


While running tests for a Python tool, the tests' outputs get printed
in whatever order the tests happen to run, with no indication of which
output belongs to which test. Unittest supports capturing these outputs
and printing them as part of the test summaries, but when a failure or
error occurs it switches back to printing output as the tests run.
Testtools and subunit tests can do the same since their components
inherit from unittest, but they do not expose this functionality
directly.
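
(For illustration, the stock unittest buffering this builds on works
roughly like the sketch below; the test names are made up and the code
is not part of this patch.)

    import unittest

    class ExampleTest(unittest.TestCase):
        def test_noisy_failure(self):
            print('diagnostic output from the test')
            self.fail('example failure')

    # With buffer=True the runner captures each test's stdout/stderr and
    # appends it to that test's entry in the failure summary, instead of
    # interleaving it with the output of whichever test runs next.
    unittest.main(buffer=True)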

On the unittest side, enable output buffering for the custom test result
class. To keep the output tidy, do not echo stdout/stderr before the
test summary at low verbosity levels or for successful tests.

On the subunit side, implement a custom TestProtocolClient that enables
the same underlying functionality and injects the captured streams as
additional test details. This causes them to be merged into their test's
error traceback message, which is later rebuilt into an exception and
passed to our unittest report class.
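
(Again for illustration only: the testtools content API involved looks
roughly like the sketch below. The attach_outputs() helper is
hypothetical, but TracebackContent and text_content are the real
wrappers used by the new client.)

    from testtools.content import TracebackContent, text_content

    # Hypothetical helper mirroring what BufferingTestProtocolClient
    # does: wrap the exc_info tuple and the captured streams as Content
    # objects keyed by name, so the receiving end can merge them into
    # the test's failure report.
    def attach_outputs(details, test, err, stdout, stderr):
        details['traceback'] = TracebackContent(err, test)
        if stdout:
            details['stdout'] = text_content(stdout)
        if stderr:
            details['stderr'] = text_content(stderr)
        return details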

Signed-off-by: Alper Nebi Yasak <alpernebiyasak at gmail.com>
---
The way I had to do this for concurrencytest feels hacky, so I am
ambivalent about this patch. I'm actually fine with re-running a
failing binman test alone with --debug -v6 to get better, isolated
output from it.

 tools/concurrencytest/concurrencytest.py | 83 +++++++++++++++++++++++-
 tools/patman/test_util.py                | 33 +++++++++-
 2 files changed, 112 insertions(+), 4 deletions(-)

diff --git a/tools/concurrencytest/concurrencytest.py b/tools/concurrencytest/concurrencytest.py
index 5e88b94f4156..1c4f03f37e5b 100644
--- a/tools/concurrencytest/concurrencytest.py
+++ b/tools/concurrencytest/concurrencytest.py
@@ -31,6 +31,7 @@
 from subunit.test_results import AutoTimingTestResultDecorator
 
 from testtools import ConcurrentTestSuite, iterate_tests
+from testtools.content import TracebackContent, text_content
 
 
 _all__ = [
@@ -43,11 +44,81 @@
 CPU_COUNT = cpu_count()
 
 
-def fork_for_tests(concurrency_num=CPU_COUNT):
+class BufferingTestProtocolClient(TestProtocolClient):
+    """A TestProtocolClient which can buffer the test outputs
+
+    This class captures the stdout and stderr output streams of the
+    tests as it runs them, and includes the output texts in the subunit
+    stream as additional details.
+
+    Args:
+        stream: A file-like object to write a subunit stream to
+        buffer (bool): True to capture test stdout/stderr outputs and
+            include them in the test details
+    """
+    def __init__(self, stream, buffer=True):
+        super().__init__(stream)
+        self.buffer = buffer
+
+    def _addOutcome(self, outcome, test, error=None, details=None,
+            error_permitted=True):
+        """Report a test outcome to the subunit stream
+
+        The parent class uses this function as a common implementation
+        for various methods that report successes, errors, failures, etc.
+
+        This version automatically upgrades the error tracebacks to the
+        new 'details' format by wrapping them in a Content object, so
+        that we can include the captured test output in the test result
+        details.
+
+        Args:
+            outcome: A string describing the outcome - used as the
+                event name in the subunit stream.
+            test: The test case whose outcome is to be reported
+            error: Standard unittest positional argument form - an
+                exc_info tuple.
+            details: New Testing-in-python drafted API; a dict from
+                string to subunit.Content objects.
+            error_permitted: If True then one and only one of error or
+                details must be supplied. If False then error must not
+                be supplied and details is still optional.
+        """
+        if details is None:
+            details = {}
+
+        # Parent will raise an exception if error_permitted is False but
+        # error is not None. We want that exception in that case, so
+        # don't touch error when error_permitted is explicitly False.
+        if error_permitted and error is not None:
+            # Parent class prefers error over details
+            details['traceback'] = TracebackContent(error, test)
+            error_permitted = False
+            error = None
+
+        if self.buffer:
+            stdout = sys.stdout.getvalue()
+            if stdout:
+                details['stdout'] = text_content(stdout)
+
+            stderr = sys.stderr.getvalue()
+            if stderr:
+                details['stderr'] = text_content(stderr)
+
+        return super()._addOutcome(outcome, test, error=error,
+                details=details, error_permitted=error_permitted)
+
+
+def fork_for_tests(concurrency_num=CPU_COUNT, buffer=False):
     """Implementation of `make_tests` used to construct `ConcurrentTestSuite`.
 
     :param concurrency_num: number of processes to use.
     """
+    if buffer:
+        test_protocol_client_class = BufferingTestProtocolClient
+    else:
+        test_protocol_client_class = TestProtocolClient
+
     def do_fork(suite):
         """Take suite and start up multiple runners by forking (Unix only).
 
@@ -76,7 +147,7 @@ def do_fork(suite):
                     # child actually gets keystrokes for pdb etc).
                     sys.stdin.close()
                     subunit_result = AutoTimingTestResultDecorator(
-                        TestProtocolClient(stream)
+                        test_protocol_client_class(stream)
                     )
                     process_suite.run(subunit_result)
                 except:
@@ -93,7 +164,13 @@ def do_fork(suite):
             else:
                 os.close(c2pwrite)
                 stream = os.fdopen(c2pread, 'rb')
-                test = ProtocolTestCase(stream)
+                # If we don't pass the second argument here, it defaults
+                # to sys.stdout.buffer down the line. But if we don't
+                # pass it *now*, it may be resolved after sys.stdout is
+                # replaced with a StringIO (to capture tests' outputs)
+                # which doesn't have a buffer attribute and can end up
+                # occasionally causing a 'broken-runner' error.
+                test = ProtocolTestCase(stream, sys.stdout.buffer)
                 result.append(test)
         return result
     return do_fork
diff --git a/tools/patman/test_util.py b/tools/patman/test_util.py
index 130d91409140..c27e0b39e5fa 100644
--- a/tools/patman/test_util.py
+++ b/tools/patman/test_util.py
@@ -15,6 +15,7 @@
 
 from io import StringIO
 
+buffer_outputs = True
 use_concurrent = True
 try:
     from concurrencytest.concurrencytest import ConcurrentTestSuite
@@ -120,6 +121,7 @@ class FullTextTestResult(unittest.TextTestResult):
             0: Print nothing
             1: Print a dot per test
             2: Print test names
+            3: Print test names, and buffered outputs for failing tests
     """
     def __init__(self, stream, descriptions, verbosity):
         self.verbosity = verbosity
@@ -139,12 +141,39 @@ def printErrors(self):
         self.printErrorList('XFAIL', self.expectedFailures)
         self.printErrorList('XPASS', unexpected_successes)
 
+    def addError(self, test, err):
+        """Called when an error has occurred."""
+        super().addError(test, err)
+        self._mirrorOutput &= self.verbosity >= 3
+
+    def addFailure(self, test, err):
+        """Called when a test has failed."""
+        super().addFailure(test, err)
+        self._mirrorOutput &= self.verbosity >= 3
+
+    def addSubTest(self, test, subtest, err):
+        """Called at the end of a subtest."""
+        super().addSubTest(test, subtest, err)
+        self._mirrorOutput &= self.verbosity >= 3
+
+    def addSuccess(self, test):
+        """Called when a test has completed successfully"""
+        super().addSuccess(test)
+        # Don't print stdout/stderr for successful tests
+        self._mirrorOutput = False
+
     def addSkip(self, test, reason):
         """Called when a test is skipped."""
         # Add empty line to keep spacing consistent with other results
         if not reason.endswith('\n'):
             reason += '\n'
         super().addSkip(test, reason)
+        self._mirrorOutput &= self.verbosity >= 3
+
+    def addExpectedFailure(self, test, err):
+        """Called when an expected failure/error occurred."""
+        super().addExpectedFailure(test, err)
+        self._mirrorOutput &= self.verbosity >= 3
 
 
 def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
@@ -180,12 +209,14 @@ def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
     runner = unittest.TextTestRunner(
         stream=sys.stdout,
         verbosity=(1 if verbosity is None else verbosity),
+        buffer=buffer_outputs,
         resultclass=FullTextTestResult,
     )
 
     if use_concurrent and processes != 1:
         suite = ConcurrentTestSuite(suite,
-                fork_for_tests(processes or multiprocessing.cpu_count()))
+                fork_for_tests(processes or multiprocessing.cpu_count(),
+                               buffer=buffer_outputs))
 
     for module in class_and_module_list:
         if isinstance(module, str) and (not test_name or test_name == module):
-- 
2.35.1


