| author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-16 18:13:00 +0000 |
|---|---|---|
| committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-21 12:34:00 +0000 |
| commit | 47eb3d00e9d6a66aee1283dab29af8117a006d6d (patch) | |
| tree | 9d574720d23da3edeab5648f05fb5d2ee95c9cd2 /scripts/lib/resulttool/report.py | |
| parent | beed7523b667affea71d37d88d2f5c19c935d159 (diff) | |
| download | poky-47eb3d00e9d6a66aee1283dab29af8117a006d6d.tar.gz | |
resulttool: Improvements to allow integration with the autobuilder
This is a combined patch of the various tweaks and improvements I
made to resulttool:
* Avoid subprocess.run() as it's a Python 3.6 feature and we
have autobuilder workers on 3.5.
* Avoid Python keywords as variable names
* Simplify dict accesses using .get()
* Rename resultsutils -> resultutils to match the resultstool ->
resulttool rename
* Formalised the handling of "file_name" as "TESTSERIES", which the code
will now add into the json configuration data if it's not present, based
on the directory name (a sketch of the idea follows the commit message)
* When we don't have failed test cases, print something saying so
instead of an empty table
* Tweak the table headers in the report to be more readable (reference
"Test Series" instead of file_id and ID instead of results_id)
* Improve/simplify the max string length handling
* Merge the counts and percentage data into one table in the report
since printing two reports of the same data confuses the user
* Removed the confusing header in the regression report
* Show matches, then regressions, then unmatched runs in the regression
report, and remove chatty, unneeded output
* Try harder to "pair" up matching configurations to reduce noise in
the regression report
* Abstracted the "mapping" table concept used for pairing in the
regression code into general code in resultutils
* Created multiple mappings for results analysis, results storage and
'flattening' results data in a merge
* Simplify the merge command to take a source and a destination,
letting the destination be a directory or a file, removing the need for
an output directory parameter
* Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression
mappings
* Have the store command place the testresults files in a layout from
the mapping, making commits into the git repo for results storage more
useful for simple comparison purposes
* Set the oe-git-archive tag format appropriately for oeqa results
storage (and simplify the commit messages closer to their defaults)
* Fix oe-git-archive to use the commit/branch data from the results file
* Cleaned up the command option help to match other changes
* Follow the model of git branch/tag processing used by oe-build-perf-report
and use that to read the data using git show, avoiding branch changes
(a sketch of this follows the diff below)
* Add ptest summary to the report command
* Update the tests to match the above changes
(From OE-Core rev: ff2c029b568f70aa9960dde04ddd207829812ea0)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
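For readers unfamiliar with the TESTSERIES handling described above, here is a rough sketch of the idea. It is illustrative only and not the code added by this patch (the real logic lives in resultutils); the `add_testseries` helper name and the exact json layout are assumptions:

```python
import json
import os

def add_testseries(path):
    # Illustrative sketch: if a result set's configuration carries no
    # TESTSERIES entry, derive one from the name of the directory holding
    # the testresults.json file.
    with open(path) as f:
        testresults = json.load(f)
    series = os.path.basename(os.path.dirname(os.path.abspath(path)))
    for result_id in testresults:
        configuration = testresults[result_id].setdefault('configuration', {})
        if 'TESTSERIES' not in configuration:
            configuration['TESTSERIES'] = series
    return testresults
```

The report code in the diff below can then rely on `result['configuration']['TESTSERIES']` being present regardless of how the results were produced.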
Diffstat (limited to 'scripts/lib/resulttool/report.py')
| -rw-r--r-- | scripts/lib/resulttool/report.py | 157 |
1 file changed, 89 insertions, 68 deletions
```diff
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index ab5de1f3a7..2f5ea308e2 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -1,6 +1,7 @@
 # test result tool - report text based test results
 #
 # Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -14,100 +15,120 @@
 import os
 import glob
 import json
-from resulttool.resultsutils import checkout_git_dir, load_json_file, get_dict_value, get_directory_files
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
 
 class ResultsTextReport(object):
+    def __init__(self):
+        self.ptests = {}
+        self.result_types = {'passed': ['PASSED', 'passed'],
+                             'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
+                             'skipped': ['SKIPPED', 'skipped']}
+
+
+    def handle_ptest_result(self, k, status, result):
+        if k == 'ptestresult.sections':
+            return
+        _, suite, test = k.split(".", 2)
+        # Handle 'glib-2.0'
+        if suite not in result['ptestresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                if suite + "." + suite1 in result['ptestresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+        if suite not in self.ptests:
+            self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ptests[suite][tk] += 1
+        if suite in result['ptestresult.sections']:
+            if 'duration' in result['ptestresult.sections'][suite]:
+                self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+            if 'timeout' in result['ptestresult.sections'][suite]:
+                self.ptests[suite]['duration'] += " T"
 
     def get_aggregated_test_result(self, logger, testresult):
         test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
-        result_types = {'passed': ['PASSED', 'passed'],
-                        'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
-                        'skipped': ['SKIPPED', 'skipped']}
-        result = get_dict_value(logger, testresult, 'result')
+        result = testresult.get('result', [])
         for k in result:
-            test_status = get_dict_value(logger, result[k], 'status')
-            for tk in result_types:
-                if test_status in result_types[tk]:
+            test_status = result[k].get('status', [])
+            for tk in self.result_types:
+                if test_status in self.result_types[tk]:
                     test_count_report[tk] += 1
-            if test_status in result_types['failed']:
+            if test_status in self.result_types['failed']:
                 test_count_report['failed_testcases'].append(k)
+            if k.startswith("ptestresult."):
+                self.handle_ptest_result(k, test_status, result)
         return test_count_report
 
-    def get_test_result_percentage(self, test_result_count):
-        total_tested = test_result_count['passed'] + test_result_count['failed'] + test_result_count['skipped']
-        test_percent_report = {'passed': 0, 'failed': 0, 'skipped': 0}
-        for k in test_percent_report:
-            test_percent_report[k] = format(test_result_count[k] / total_tested * 100, '.2f')
-        return test_percent_report
-
-    def add_test_configurations(self, test_report, source_dir, file, result_id):
-        test_report['file_dir'] = self._get_short_file_dir(source_dir, file)
-        test_report['result_id'] = result_id
-        test_report['test_file_dir_result_id'] = '%s_%s' % (test_report['file_dir'], test_report['result_id'])
-
-    def _get_short_file_dir(self, source_dir, file):
-        file_dir = os.path.dirname(file)
-        source_dir = source_dir[:-1] if source_dir[-1] == '/' else source_dir
-        if file_dir == source_dir:
-            return 'None'
-        return file_dir.replace(source_dir, '')
-
-    def get_max_string_len(self, test_result_list, key, default_max_len):
-        max_len = default_max_len
-        for test_result in test_result_list:
-            value_len = len(test_result[key])
-            if value_len > max_len:
-                max_len = value_len
-        return max_len
-
-    def print_test_report(self, template_file_name, test_count_reports, test_percent_reports,
-                          max_len_dir, max_len_result_id):
+    def print_test_report(self, template_file_name, test_count_reports):
         from jinja2 import Environment, FileSystemLoader
         script_path = os.path.dirname(os.path.realpath(__file__))
         file_loader = FileSystemLoader(script_path + '/template')
         env = Environment(loader=file_loader, trim_blocks=True)
         template = env.get_template(template_file_name)
-        output = template.render(test_count_reports=test_count_reports,
-                                 test_percent_reports=test_percent_reports,
-                                 max_len_dir=max_len_dir,
-                                 max_len_result_id=max_len_result_id)
-        print('Printing text-based test report:')
+        havefailed = False
+        haveptest = bool(self.ptests)
+        reportvalues = []
+        cols = ['passed', 'failed', 'skipped']
+        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
+        for line in test_count_reports:
+            total_tested = line['passed'] + line['failed'] + line['skipped']
+            vals = {}
+            vals['result_id'] = line['result_id']
+            vals['testseries'] = line['testseries']
+            vals['sort'] = line['testseries'] + "_" + line['result_id']
+            vals['failed_testcases'] = line['failed_testcases']
+            for k in cols:
+                vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+            for k in maxlen:
+                if k in vals and len(vals[k]) > maxlen[k]:
+                    maxlen[k] = len(vals[k])
+            reportvalues.append(vals)
+            if line['failed_testcases']:
+                havefailed = True
+        for ptest in self.ptests:
+            if len(ptest) > maxlen['ptest']:
+                maxlen['ptest'] = len(ptest)
+        output = template.render(reportvalues=reportvalues,
+                                 havefailed=havefailed,
+                                 haveptest=haveptest,
+                                 ptests=self.ptests,
+                                 maxlen=maxlen)
         print(output)
 
-    def view_test_report(self, logger, source_dir, git_branch):
-        if git_branch:
-            checkout_git_dir(source_dir, git_branch)
+    def view_test_report(self, logger, source_dir, tag):
         test_count_reports = []
-        test_percent_reports = []
-        for file in get_directory_files(source_dir, ['.git'], 'testresults.json'):
-            logger.debug('Computing result for test result file: %s' % file)
-            testresults = load_json_file(file)
-            for k in testresults:
-                test_count_report = self.get_aggregated_test_result(logger, testresults[k])
-                test_percent_report = self.get_test_result_percentage(test_count_report)
-                self.add_test_configurations(test_count_report, source_dir, file, k)
-                self.add_test_configurations(test_percent_report, source_dir, file, k)
+        if tag:
+            repo = GitRepo(source_dir)
+            testresults = resultutils.git_get_result(repo, [tag])
+        else:
+            testresults = resultutils.load_resultsdata(source_dir)
+        for testsuite in testresults:
+            for resultid in testresults[testsuite]:
+                result = testresults[testsuite][resultid]
+                test_count_report = self.get_aggregated_test_result(logger, result)
+                test_count_report['testseries'] = result['configuration']['TESTSERIES']
+                test_count_report['result_id'] = resultid
                 test_count_reports.append(test_count_report)
-                test_percent_reports.append(test_percent_report)
-        max_len_dir = self.get_max_string_len(test_count_reports, 'file_dir', len('file_dir'))
-        max_len_result_id = self.get_max_string_len(test_count_reports, 'result_id', len('result_id'))
-        self.print_test_report('test_report_full_text.txt', test_count_reports, test_percent_reports,
-                               max_len_dir, max_len_result_id)
+        self.print_test_report('test_report_full_text.txt', test_count_reports)
 
 def report(args, logger):
     report = ResultsTextReport()
-    report.view_test_report(logger, args.source_dir, args.git_branch)
+    report.view_test_report(logger, args.source_dir, args.tag)
     return 0
 
 def register_commands(subparsers):
     """Register subcommands from this plugin"""
-    parser_build = subparsers.add_parser('report', help='report test result summary',
-                                         description='report text-based test result summary from the source directory',
+    parser_build = subparsers.add_parser('report', help='summarise test results',
+                                         description='print a text-based summary of the test results',
                                          group='analysis')
     parser_build.set_defaults(func=report)
     parser_build.add_argument('source_dir',
-                              help='source directory that contain the test result files for reporting')
-    parser_build.add_argument('-b', '--git-branch', default='',
-                              help='(optional) default assume source directory contains all available files for '
-                                   'reporting unless a git branch was provided where it will try to checkout '
-                                   'the provided git branch assuming source directory was a git repository')
+                              help='source file/directory that contain the test result files to summarise')
+    parser_build.add_argument('-t', '--tag', default='',
+                              help='source_dir is a git repository, report on the tag specified from that repository')
```
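The tag-handling path above calls `resultutils.git_get_result()`, which is not part of this file. Conceptually it follows the oe-build-perf-report model mentioned in the commit message: read testresults.json content out of a tag with `git show` instead of checking a branch out. Below is a minimal sketch under that assumption; the function name and file discovery are illustrative, and it deliberately avoids `subprocess.run()` for Python 3.5 compatibility, as the commit message notes:

```python
import json
import os
import subprocess

def load_results_from_tag(repodir, tag, filename="testresults.json"):
    # Illustrative sketch: list the files reachable from a tag, then read any
    # testresults.json via 'git show <tag>:<path>' so no branch checkout or
    # working-tree change is needed.
    paths = subprocess.check_output(
        ["git", "ls-tree", "-r", "--name-only", tag],
        cwd=repodir, universal_newlines=True).splitlines()
    results = {}
    for path in paths:
        if os.path.basename(path) != filename:
            continue
        data = subprocess.check_output(
            ["git", "show", "%s:%s" % (tag, path)],
            cwd=repodir, universal_newlines=True)
        results[path] = json.loads(data)
    return results
```

With results stored this way, the new `-t`/`--tag` option added above lets `resulttool report -t <tag> <repository>` summarise a tagged run without disturbing the currently checked-out branch.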
