Diffstat (limited to 'scripts/lib/resulttool/regression.py')
 scripts/lib/resulttool/regression.py | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index 3d64b8f4af..d98504f4ce 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -78,6 +78,8 @@ STATUS_STRINGS = {
     "None": "No matching test result"
 }
 
+REGRESSIONS_DISPLAY_LIMIT=50
+
 def test_has_at_least_one_matching_tag(test, tag_list):
     return "oetags" in test and any(oetag in tag_list for oetag in test["oetags"])
 
@@ -181,11 +183,15 @@ def get_status_str(raw_status):
     raw_status_lower = raw_status.lower() if raw_status else "None"
     return STATUS_STRINGS.get(raw_status_lower, raw_status)
 
-def compare_result(logger, base_name, target_name, base_result, target_result):
+def compare_result(logger, base_name, target_name, base_result, target_result, display_limit=None):
     base_result = base_result.get('result')
     target_result = target_result.get('result')
     result = {}
     new_tests = 0
+    regressions = {}
+    resultstring = ""
+
+    display_limit = int(display_limit) if display_limit else REGRESSIONS_DISPLAY_LIMIT
 
     if base_result and target_result:
         for k in base_result:
@@ -212,7 +218,17 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
             resultstring = "Regression:  %s\n             %s\n" % (base_name, target_name)
             for k in sorted(result):
                 if not result[k]['target'] or not result[k]['target'].startswith("PASS"):
-                    resultstring += '        %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))
+                    # Differentiate each ptest kind when listing regressions
+                    key = '.'.join(k.split('.')[:2]) if k.startswith('ptest') else k
+                    # Append new regression to corresponding test family
+                    regressions[key] = regressions.setdefault(key, []) + ['        %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))]
+            resultstring += f"    Total: {sum([len(regressions[r]) for r in regressions])} new regression(s):\n"
+            for k in regressions:
+                resultstring += f"        {len(regressions[k])} regression(s) for {k}\n"
+                count_to_print=min([display_limit, len(regressions[k])]) if display_limit > 0 else len(regressions[k])
+                resultstring += ''.join(regressions[k][:count_to_print])
+                if count_to_print < len(regressions[k]):
+                    resultstring+='        [...]\n'
             if new_pass_count > 0:
                 resultstring += f'    Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
         else:
@@ -280,7 +296,7 @@ def regression_common(args, logger, base_results, target_results):
                 for b in target.copy():
                     if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
                         continue
-                    res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+                    res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
                     if not res:
                         matches.append(resstr)
                         base.remove(c)
@@ -291,7 +307,7 @@ def regression_common(args, logger, base_results, target_results):
                 for b in target:
                     if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
                         continue
-                    res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+                    res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
                     if res:
                         regressions.append(resstr)
                     else:
@@ -403,4 +419,5 @@ def register_commands(subparsers):
     parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
     parser_build.add_argument('--commit2', help="Revision to compare with")
     parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+    parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")
 
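For readers skimming the patch, the core of the change is in compare_result(): failing tests are grouped per test family (ptest results collapse to their first two dot-separated name components, i.e. the ptest suite), and only the first display_limit entries of each family are printed, with 0 meaning no truncation. The sketch below is a standalone illustration of that behaviour and is not code from resulttool; summarize_regressions(), the sample test names and the raw status strings are hypothetical placeholders, only the family-key and display-limit logic mirrors the diff above.

# Standalone sketch, not part of the patch: summarize_regressions() and the
# sample data are hypothetical; only the grouping/limit logic follows the diff.

REGRESSIONS_DISPLAY_LIMIT = 50

def summarize_regressions(changes, display_limit=None):
    """changes maps a test name to a (base_status, target_status) tuple."""
    display_limit = int(display_limit) if display_limit else REGRESSIONS_DISPLAY_LIMIT
    regressions = {}
    for name in sorted(changes):
        base, target = changes[name]
        if not target or not target.startswith("PASS"):
            # ptest results collapse to "ptest.<suite>"; other tests keep their full name
            key = '.'.join(name.split('.')[:2]) if name.startswith('ptest') else name
            regressions.setdefault(key, []).append("    %s: %s -> %s\n" % (name, base, target))
    out = "Total: %d new regression(s):\n" % sum(len(v) for v in regressions.values())
    for key, entries in regressions.items():
        out += "  %d regression(s) for %s\n" % (len(entries), key)
        # display_limit == 0 disables truncation, mirroring "--limit 0"
        count = min(display_limit, len(entries)) if display_limit > 0 else len(entries)
        out += ''.join(entries[:count])
        if count < len(entries):
            out += '    [...]\n'
    return out

if __name__ == "__main__":
    sample = {
        "ptest.glibc-tests.tst-a": ("PASSED", "FAILED"),
        "ptest.glibc-tests.tst-b": ("PASSED", "FAILED"),
        "oescripts.OEScriptTests.test_example": ("PASSED", None),
    }
    # With display_limit=1 only one entry per family is shown, the rest is elided
    print(summarize_regressions(sample, display_limit=1))

On the command line, the same cap is exposed through the new -l/--limit option of resulttool regression: it defaults to REGRESSIONS_DISPLAY_LIMIT (50) and can be set to 0 to print every change.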