
[mickledore,27/37] scripts/resulttool: add mention about new detected tests

Message ID 7e393ea6e46060daf3242f2f7721f7f968945122.1691683295.git.steve@sakoman.com
State New
Series [mickledore,01/37] libarchive: ignore CVE-2023-30571

Commit Message

Steve Sakoman Aug. 10, 2023, 4:04 p.m. UTC
From: Alexis Lothoré <alexis.lothore@bootlin.com>

Some regression reports show a lot of "PASSED->None" transitions. When such a
large batch of identical transitions is observed, it could mean that the tests
are now failing, but it could also mean that some tests have been renamed.

To detect such cases, add a line to the regression report giving the number of
new tests (i.e. tests that are present in the target results but not in the
base results). This new log line also makes newly added test bases visible.

Signed-off-by: Alexis Lothoré <alexis.lothore@bootlin.com>
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
(cherry picked from commit 01b5cefd07e01c7407bc663842b8a8d502358a6d)
Signed-off-by: Steve Sakoman <steve@sakoman.com>
---
 scripts/lib/resulttool/regression.py | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)
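
The check the patch adds boils down to counting the test names that appear in
the target results but not in the base results. A minimal standalone sketch of
that idea, using made-up result dictionaries rather than the real resulttool
structures:

    # Made-up result dictionaries keyed by test name; the real structures
    # handled by resulttool are richer, this only illustrates the check.
    base_result = {"ptest.glib-2.0.a": "PASSED", "ptest.glib-2.0.b": "PASSED"}
    target_result = {"ptest.glib-2.0.b": "PASSED", "ptest.glib-2.0.c": "PASSED"}

    # Count tests present in the target run but absent from the base run
    new_tests = sum(1 for k in target_result if k not in base_result)
    print(f"{new_tests} new test(s) is/are present")  # -> 1 new test(s) is/are present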

Patch

diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index 1facbcd85e..f80a9182a9 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -178,6 +178,8 @@  def compare_result(logger, base_name, target_name, base_result, target_result):
     base_result = base_result.get('result')
     target_result = target_result.get('result')
     result = {}
+    new_tests = 0
+
     if base_result and target_result:
         for k in base_result:
             base_testcase = base_result[k]
@@ -189,6 +191,13 @@  def compare_result(logger, base_name, target_name, base_result, target_result):
                     result[k] = {'base': base_status, 'target': target_status}
             else:
                 logger.error('Failed to retrieved base test case status: %s' % k)
+
+        # Also count new tests that were not present in base results: these
+        # could be newly added tests, but they could also highlight some test
+        # renames or fixed faulty ptests
+        for k in target_result:
+            if k not in base_result:
+                new_tests += 1
     if result:
         new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values())
         # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...)
@@ -200,10 +209,13 @@  def compare_result(logger, base_name, target_name, base_result, target_result):
             if new_pass_count > 0:
                 resultstring += f'    Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
         else:
-            resultstring = "Improvement: %s\n             %s\n             (+%d test(s) passing)" % (base_name, target_name, new_pass_count)
+            resultstring = "Improvement: %s\n             %s\n             (+%d test(s) passing)\n" % (base_name, target_name, new_pass_count)
             result = None
     else:
-        resultstring = "Match:       %s\n             %s" % (base_name, target_name)
+        resultstring = "Match:       %s\n             %s\n" % (base_name, target_name)
+
+    if new_tests > 0:
+        resultstring += f'    Additionally, {new_tests} new test(s) is/are present\n'
     return result, resultstring
 
 def get_results(logger, source):
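
To illustrate where the extra line lands in the report, the string building
after this change looks roughly like the following. This is a simplified
stand-in for compare_result, not the actual function, and the names and counts
are purely illustrative:

    def build_match_string(base_name, target_name, new_tests):
        # The trailing '\n' added by the patch lets the extra line below
        # start on its own line instead of being glued to the target name.
        resultstring = "Match:       %s\n             %s\n" % (base_name, target_name)
        if new_tests > 0:
            resultstring += f'    Additionally, {new_tests} new test(s) is/are present\n'
        return resultstring

    print(build_match_string("oeselftest_base_result", "oeselftest_target_result", 3))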