
[3/4] scripts/resulttool/regression: add metadata filtering for oeselftest

Message ID 20230214135214.42413-4-alexis.lothore@bootlin.com
State Accepted, archived
Commit 94ab7c2b892bf292dd86619ca9c63ddd7bf53f3c
Series scripts/resulttool/regression: add metadata filtering

Commit Message

Alexis Lothoré Feb. 14, 2023, 1:52 p.m. UTC
When generating regression reports, many false positives can be observed because
some test results are compared even though the corresponding test sets are not
the same, as can be seen for example with oeselftest tests (oeselftest is run
multiple times with different parameters, resulting in different test sets).

Add a filtering mechanism to the resulttool regression module to enable better
matching between tests. The METADATA_MATCH_TABLE defines that when the TEST_TYPE
is "oeselftest", resulttool should filter pairs based on the OESELFTEST_METADATA
appended to the test configuration. If the metadata is absent from the "base"
test results, tests are marked "comparable" to preserve compatibility with test
results that do not yet contain this new metadata.

Signed-off-by: Alexis Lothoré <alexis.lothore@bootlin.com>
---
 scripts/lib/resulttool/regression.py | 34 ++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

Patch

diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index 9f952951b3..64d1eeee37 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -12,6 +12,36 @@  import json
 from oeqa.utils.git import GitRepo
 import oeqa.utils.gitarchive as gitarchive
 
+METADATA_MATCH_TABLE={
+    "oeselftest": "OESELFTEST_METADATA"
+}
+
+
+def metadata_matches(base_configuration, target_configuration):
+    """
+    For passed base and target, check test type. If test type matches one of
+    properties described in METADATA_MATCH_TABLE, compare metadata if it is
+    present in base. Return true if metadata matches, or if base lacks some
+    data (either TEST_TYPE or the corresponding metadata)
+    """
+    test_type=base_configuration.get('TEST_TYPE')
+    metadata_key=METADATA_MATCH_TABLE.get(test_type)
+    if metadata_key not in base_configuration:
+        return True
+
+    if target_configuration.get(metadata_key) != base_configuration[metadata_key]:
+        return False
+
+    return True
+
+def can_be_compared(base_configuration, target_configuration):
+    """
+    Some tests are not relevant to be compared, for example some oeselftest
+    run with different tests sets or parameters. Return true if tests can be
+    compared
+    """
+    return metadata_matches(base_configuration, target_configuration)
+
 def compare_result(logger, base_name, target_name, base_result, target_result):
     base_result = base_result.get('result')
     target_result = target_result.get('result')
@@ -62,6 +92,8 @@  def regression_common(args, logger, base_results, target_results):
             # removing any pairs which match
             for c in base.copy():
                 for b in target.copy():
+                    if not can_be_compared(base_results[a][c]['configuration'], target_results[a][b]['configuration']):
+                        continue
                     res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
                     if not res:
                         matches.append(resstr)
@@ -71,6 +103,8 @@  def regression_common(args, logger, base_results, target_results):
             # Should only now see regressions, we may not be able to match multiple pairs directly
             for c in base:
                 for b in target:
+                    if not can_be_compared(base_results[a][c]['configuration'], target_results[a][b]['configuration']):
+                        continue
                     res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
                     if res:
                         regressions.append(resstr)