| author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-16 18:13:00 +0000 |
|---|---|---|
| committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-21 12:34:00 +0000 |
| commit | 47eb3d00e9d6a66aee1283dab29af8117a006d6d | |
| tree | 9d574720d23da3edeab5648f05fb5d2ee95c9cd2 | /scripts/lib/resulttool/resultutils.py |
| parent | beed7523b667affea71d37d88d2f5c19c935d159 | |
| download | poky-47eb3d00e9d6a66aee1283dab29af8117a006d6d.tar.gz | |
resulttool: Improvements to allow integration into the autobuilder
This is a combined patch of the various tweaks and improvements I
made to resulttool:
* Avoid subprocess.run() as it's a Python 3.6 feature and we
have autobuilder workers running 3.5.
* Avoid Python keywords as variable names
* Simplify dict accesses using .get()
* Rename resultsutils -> resultutils to match the resultstool ->
resulttool rename
* Formalised the handling of "file_name" to "TESTSERIES", which the code
will now add into the json configuration data if it's not present, based
on the directory name.
* When we don't have failed test cases, print something saying so
instead of an empty table
* Tweak the table headers in the report to be more readable (reference
"Test Series" instead of file_id and ID instead of results_id)
* Improve/simplify the max string length handling
* Merge the counts and percentage data into one table in the report
since printing two reports of the same data confuses the user
* Removed the confusing header in the regression report
* Show matches, then regressions, then unmatched runs in the regression
report, also removing chatty, unneeded output
* Try harder to "pair" up matching configurations to reduce noise in
the regressions report
* Abstracted the "mapping" table concept used to pairing in the
regression code to general code in resultutils
* Created multiple mappings for results analysis, results storage and
'flattening' results data in a merge
* Simplify the merge command to take a source and a destination,
letting the destination be a directory or a file, removing the need for
an output directory parameter (see the sketch after this commit message)
* Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression
mappings
* Have the store command place the testresults files in a layout from
the mapping, making commits into the git repo for results storage more
useful for simple comparison purposes
* Set the oe-git-archive tag format appropriately for oeqa results
storage (and simplify the commit messages closer to their defaults)
* Fix oe-git-archive to use the commit/branch data from the results file
* Cleaned up the command option help to match other changes
* Follow the model of git branch/tag processing used by oe-build-perf-report
and use that to read the data using git show, avoiding the need to switch branches
* Add ptest summary to the report command
* Update the tests to match the above changes
(From OE-Core rev: ff2c029b568f70aa9960dde04ddd207829812ea0)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
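As a rough illustration of the simplified merge flow described above, the sketch below loads existing results from a destination directory, appends the results from a source file, and writes the combined data back out using the new resultutils module added by this patch. The paths are hypothetical, it assumes the poky checkout's scripts/lib directory is on sys.path so the resulttool package and scriptpath helper can be imported, and the real merge command may organise things differently:

```python
import sys
sys.path.insert(0, "scripts/lib")   # hypothetical path to the poky checkout's scripts/lib

from resulttool import resultutils

# Load whatever is already present in the destination, keyed by the
# store_map-derived test path, then fold in the source results.
results = resultutils.load_resultsdata("destination-results/", configmap=resultutils.store_map)
resultutils.append_resultsdata(results, "source-results/testresults.json",
                               configmap=resultutils.store_map)

# Write one testresults.json per mapping-derived path under the destination.
resultutils.save_resultsdata(results, "destination-results/")
```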
Diffstat (limited to 'scripts/lib/resulttool/resultutils.py')
| -rw-r--r-- | scripts/lib/resulttool/resultutils.py | 127 |
1 file changed, 127 insertions, 0 deletions
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000000..06cceef796
--- /dev/null
+++ b/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,127 @@
# resulttool - common library/utility functions
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import os
import json
import scriptpath
scriptpath.add_oe_lib_path()

flatten_map = {
    "oeselftest": [],
    "runtime": [],
    "sdk": [],
    "sdkext": []
}
regression_map = {
    "oeselftest": ['TEST_TYPE', 'MACHINE'],
    "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
    "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
    "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE']
}
store_map = {
    "oeselftest": ['TEST_TYPE'],
    "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
    "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
    "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME']
}
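# Each map lists the configuration keys selected for a given TEST_TYPE:
# flatten_map is used when flattening results during a merge, regression_map
# when pairing up runs for the regression report, and store_map when choosing
# the directory layout for stored testresults.json files.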

#
# Load the json file and append the results data into the provided results dict
#
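# Each entry in the data is expected to look roughly like (keys illustrative):
#   "<results_id>": {"configuration": {"TEST_TYPE": "runtime", ...},
#                    "result": {"<testcase>": {"status": "PASSED", ...}, ...}}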
def append_resultsdata(results, f, configmap=store_map):
    if type(f) is str:
        with open(f, "r") as filedata:
            data = json.load(filedata)
    else:
        data = f
    for res in data:
        if "configuration" not in data[res] or "result" not in data[res]:
            raise ValueError("Test results data without configuration or result section?")
        # Fall back to the name of the containing directory for TESTSERIES
        if "TESTSERIES" not in data[res]["configuration"]:
            data[res]["configuration"]["TESTSERIES"] = os.path.basename(os.path.dirname(f))
        testtype = data[res]["configuration"].get("TEST_TYPE")
        if testtype not in configmap:
            raise ValueError("Unknown test type %s" % testtype)
        configvars = configmap[testtype]
        testpath = "/".join(data[res]["configuration"].get(i) for i in configvars)
        if testpath not in results:
            results[testpath] = {}
        # Drop the bulky ptest raw logs and per-section logs
        if 'ptestresult.rawlogs' in data[res]['result']:
            del data[res]['result']['ptestresult.rawlogs']
        if 'ptestresult.sections' in data[res]['result']:
            for i in data[res]['result']['ptestresult.sections']:
                del data[res]['result']['ptestresult.sections'][i]['log']
        results[testpath][res] = data[res]

#
# Walk a directory and find/load results data
# or load directly from a file
#
def load_resultsdata(source, configmap=store_map):
    results = {}
    if os.path.isfile(source):
        append_resultsdata(results, source, configmap)
        return results
    for root, dirs, files in os.walk(source):
        for name in files:
            f = os.path.join(root, name)
            if name == "testresults.json":
                append_resultsdata(results, f, configmap)
    return results

def filter_resultsdata(results, resultid):
    newresults = {}
    for r in results:
        for i in results[r]:
            if i == resultid:
                newresults[r] = {}
                newresults[r][i] = results[r][i]
    return newresults

def save_resultsdata(results, destdir, fn="testresults.json"):
    for res in results:
        if res:
            dst = destdir + "/" + res + "/" + fn
        else:
            dst = destdir + "/" + fn
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        with open(dst, 'w') as f:
            f.write(json.dumps(results[res], sort_keys=True, indent=4))
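# For example, with store_map a runtime result whose configuration contains
# DISTRO=poky, MACHINE=qemux86-64 and IMAGE_BASENAME=core-image-sato (values
# purely illustrative) is written to:
#   <destdir>/runtime/poky/qemux86-64/core-image-sato/testresults.json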

def git_get_result(repo, tags):
    git_objs = []
    for tag in tags:
        files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
        git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])

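    # "git show" of several blobs concatenates the stored JSON documents; each
    # stored file ends with "}" and the next begins with "{", so the boundary
    # appears as a literal "}{" line which parse_json_stream() splits on.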
    def parse_json_stream(data):
        """Parse multiple concatenated JSON objects"""
        objs = []
        json_d = ""
        for line in data.splitlines():
            if line == '}{':
                json_d += '}'
                objs.append(json.loads(json_d))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d))
        return objs

    # Optimize by reading all data with one git command
    results = {}
    for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
        append_resultsdata(results, obj)

    return results
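A minimal sketch of how git_get_result() might be driven, assuming the GitRepo helper from oeqa.utils.git (the class oe-build-perf-report uses) is importable and using hypothetical repository and tag names; the real report and regression commands wrap this differently:

```python
import sys
sys.path.insert(0, "scripts/lib")   # hypothetical checkout-relative paths
sys.path.insert(0, "meta/lib")

from oeqa.utils.git import GitRepo  # assumption: same helper oe-build-perf-report relies on
from resulttool import resultutils

# Read testresults.json blobs for two tags straight from the results repository,
# without checking out any branch, then count the result sets per test path.
repo = GitRepo("/path/to/results-repo", is_topdir=True)                          # hypothetical repo path
results = resultutils.git_get_result(repo, ["example/tag-1", "example/tag-2"])   # hypothetical tags
for testpath in results:
    print("%s: %d result sets" % (testpath, len(results[testpath])))
```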
