[PATCH 02/15] Revert "patman: test_util: Print test stdout/stderr within test summaries"

Simon Glass sjg at chromium.org
Sun Feb 19 15:51:10 CET 2023


Unfortunately this added a new feature to concurrencytest which has not
made it into the upstream project[1].

Drop it for now so we can use the upstream module. Once the feature is
accepted upstream we can bring this functionality back.

[1] https://github.com/cgoldberg/concurrencytest

This reverts commit ebcaafcded40da8ae6cb4234c2ba9901c7bee644.

Signed-off-by: Simon Glass <sjg at chromium.org>
---

 tools/concurrencytest/concurrencytest.py | 83 +-----------------------
 tools/patman/test_util.py                | 33 +---------
 2 files changed, 4 insertions(+), 112 deletions(-)

diff --git a/tools/concurrencytest/concurrencytest.py b/tools/concurrencytest/concurrencytest.py
index 1c4f03f37e5..5e88b94f415 100644
--- a/tools/concurrencytest/concurrencytest.py
+++ b/tools/concurrencytest/concurrencytest.py
@@ -31,7 +31,6 @@ from subunit import ProtocolTestCase, TestProtocolClient
 from subunit.test_results import AutoTimingTestResultDecorator
 
 from testtools import ConcurrentTestSuite, iterate_tests
-from testtools.content import TracebackContent, text_content
 
 
 _all__ = [
@@ -44,81 +43,11 @@ _all__ = [
 CPU_COUNT = cpu_count()
 
 
-class BufferingTestProtocolClient(TestProtocolClient):
-    """A TestProtocolClient which can buffer the test outputs
-
-    This class captures the stdout and stderr output streams of the
-    tests as it runs them, and includes the output texts in the subunit
-    stream as additional details.
-
-    Args:
-        stream: A file-like object to write a subunit stream to
-        buffer (bool): True to capture test stdout/stderr outputs and
-            include them in the test details
-    """
-    def __init__(self, stream, buffer=True):
-        super().__init__(stream)
-        self.buffer = buffer
-
-    def _addOutcome(self, outcome, test, error=None, details=None,
-            error_permitted=True):
-        """Report a test outcome to the subunit stream
-
-        The parent class uses this function as a common implementation
-        for various methods that report successes, errors, failures, etc.
-
-        This version automatically upgrades the error tracebacks to the
-        new 'details' format by wrapping them in a Content object, so
-        that we can include the captured test output in the test result
-        details.
-
-        Args:
-            outcome: A string describing the outcome - used as the
-                event name in the subunit stream.
-            test: The test case whose outcome is to be reported
-            error: Standard unittest positional argument form - an
-                exc_info tuple.
-            details: New Testing-in-python drafted API; a dict from
-                string to subunit.Content objects.
-            error_permitted: If True then one and only one of error or
-                details must be supplied. If False then error must not
-                be supplied and details is still optional.
-        """
-        if details is None:
-            details = {}
-
-        # Parent will raise an exception if error_permitted is False but
-        # error is not None. We want that exception in that case, so
-        # don't touch error when error_permitted is explicitly False.
-        if error_permitted and error is not None:
-            # Parent class prefers error over details
-            details['traceback'] = TracebackContent(error, test)
-            error_permitted = False
-            error = None
-
-        if self.buffer:
-            stdout = sys.stdout.getvalue()
-            if stdout:
-                details['stdout'] = text_content(stdout)
-
-            stderr = sys.stderr.getvalue()
-            if stderr:
-                details['stderr'] = text_content(stderr)
-
-        return super()._addOutcome(outcome, test, error=error,
-                details=details, error_permitted=error_permitted)
-
-
-def fork_for_tests(concurrency_num=CPU_COUNT, buffer=False):
+def fork_for_tests(concurrency_num=CPU_COUNT):
     """Implementation of `make_tests` used to construct `ConcurrentTestSuite`.
 
     :param concurrency_num: number of processes to use.
     """
-    if buffer:
-        test_protocol_client_class = BufferingTestProtocolClient
-    else:
-        test_protocol_client_class = TestProtocolClient
-
     def do_fork(suite):
         """Take suite and start up multiple runners by forking (Unix only).
 
@@ -147,7 +76,7 @@ def fork_for_tests(concurrency_num=CPU_COUNT, buffer=False):
                     # child actually gets keystrokes for pdb etc).
                     sys.stdin.close()
                     subunit_result = AutoTimingTestResultDecorator(
-                        test_protocol_client_class(stream)
+                        TestProtocolClient(stream)
                     )
                     process_suite.run(subunit_result)
                 except:
@@ -164,13 +93,7 @@ def fork_for_tests(concurrency_num=CPU_COUNT, buffer=False):
             else:
                 os.close(c2pwrite)
                 stream = os.fdopen(c2pread, 'rb')
-                # If we don't pass the second argument here, it defaults
-                # to sys.stdout.buffer down the line. But if we don't
-                # pass it *now*, it may be resolved after sys.stdout is
-                # replaced with a StringIO (to capture tests' outputs)
-                # which doesn't have a buffer attribute and can end up
-                # occasionally causing a 'broken-runner' error.
-                test = ProtocolTestCase(stream, sys.stdout.buffer)
+                test = ProtocolTestCase(stream)
                 result.append(test)
         return result
     return do_fork
diff --git a/tools/patman/test_util.py b/tools/patman/test_util.py
index 0f6d1aa902d..4ee58f9fbb9 100644
--- a/tools/patman/test_util.py
+++ b/tools/patman/test_util.py
@@ -15,7 +15,6 @@ from patman import command
 
 from io import StringIO
 
-buffer_outputs = True
 use_concurrent = True
 try:
     from concurrencytest.concurrencytest import ConcurrentTestSuite
@@ -120,7 +119,6 @@ class FullTextTestResult(unittest.TextTestResult):
             0: Print nothing
             1: Print a dot per test
             2: Print test names
-            3: Print test names, and buffered outputs for failing tests
     """
     def __init__(self, stream, descriptions, verbosity):
         self.verbosity = verbosity
@@ -140,39 +138,12 @@ class FullTextTestResult(unittest.TextTestResult):
         self.printErrorList('XFAIL', self.expectedFailures)
         self.printErrorList('XPASS', unexpected_successes)
 
-    def addError(self, test, err):
-        """Called when an error has occurred."""
-        super().addError(test, err)
-        self._mirrorOutput &= self.verbosity >= 3
-
-    def addFailure(self, test, err):
-        """Called when a test has failed."""
-        super().addFailure(test, err)
-        self._mirrorOutput &= self.verbosity >= 3
-
-    def addSubTest(self, test, subtest, err):
-        """Called at the end of a subtest."""
-        super().addSubTest(test, subtest, err)
-        self._mirrorOutput &= self.verbosity >= 3
-
-    def addSuccess(self, test):
-        """Called when a test has completed successfully"""
-        super().addSuccess(test)
-        # Don't print stdout/stderr for successful tests
-        self._mirrorOutput = False
-
     def addSkip(self, test, reason):
         """Called when a test is skipped."""
         # Add empty line to keep spacing consistent with other results
         if not reason.endswith('\n'):
             reason += '\n'
         super().addSkip(test, reason)
-        self._mirrorOutput &= self.verbosity >= 3
-
-    def addExpectedFailure(self, test, err):
-        """Called when an expected failure/error occurred."""
-        super().addExpectedFailure(test, err)
-        self._mirrorOutput &= self.verbosity >= 3
 
 
 def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
@@ -208,14 +179,12 @@ def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
     runner = unittest.TextTestRunner(
         stream=sys.stdout,
         verbosity=(1 if verbosity is None else verbosity),
-        buffer=False if test_name else buffer_outputs,
         resultclass=FullTextTestResult,
     )
 
     if use_concurrent and processes != 1:
         suite = ConcurrentTestSuite(suite,
-                fork_for_tests(processes or multiprocessing.cpu_count(),
-                               buffer=False if test_name else buffer_outputs))
+                fork_for_tests(processes or multiprocessing.cpu_count()))
 
     for module in class_and_module_list:
         if isinstance(module, str) and (not test_name or test_name == module):
-- 
2.39.2.637.g21b0678d19-goog



More information about the U-Boot mailing list