@@ -78,6 +78,8 @@ STATUS_STRINGS = {
"None": "No matching test result"
}
+REGRESSIONS_DISPLAY_LIMIT = 50
+
def test_has_at_least_one_matching_tag(test, tag_list):
return "oetags" in test and any(oetag in tag_list for oetag in test["oetags"])
@@ -181,11 +183,12 @@ def get_status_str(raw_status):
raw_status_lower = raw_status.lower() if raw_status else "None"
return STATUS_STRINGS.get(raw_status_lower, raw_status)
-def compare_result(logger, base_name, target_name, base_result, target_result):
+def compare_result(logger, base_name, target_name, base_result, target_result, display_limit):
base_result = base_result.get('result')
target_result = target_result.get('result')
result = {}
new_tests = 0
+ regressions_count = 0
if base_result and target_result:
for k in base_result:
@@ -212,7 +215,14 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
for k in sorted(result):
if not result[k]['target'] or not result[k]['target'].startswith("PASS"):
- resultstring += ' %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))
+ # Count regressions only if we have to limit the number of
+ # displayed regressions
+                    if display_limit > 0:
+                        regressions_count += 1
+                    if regressions_count <= display_limit:
+                        resultstring += ' %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))
+ if regressions_count > display_limit:
+ resultstring += f' [...]\n (In total, {regressions_count} regressions/status changes detected)\n'
if new_pass_count > 0:
resultstring += f' Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
else:
@@ -263,6 +273,10 @@ def regression_common(args, logger, base_results, target_results):
if args.target_result_id:
target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+    display_limit = REGRESSIONS_DISPLAY_LIMIT
+    if args.limit:
+        display_limit = int(args.limit)
+
fixup_ptest_names(base_results, logger)
fixup_ptest_names(target_results, logger)
@@ -280,7 +294,7 @@ def regression_common(args, logger, base_results, target_results):
for b in target.copy():
if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
continue
- res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], display_limit)
if not res:
matches.append(resstr)
base.remove(c)
@@ -291,7 +305,7 @@ def regression_common(args, logger, base_results, target_results):
for b in target:
if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
continue
- res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], display_limit)
if res:
regressions.append(resstr)
else:
@@ -403,4 +417,5 @@ def register_commands(subparsers):
parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
parser_build.add_argument('--commit2', help="Revision to compare with")
parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+ parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")