#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates dashboard from multiple run_remote_tests.sh invocations.
Uses single-file Bottle (bottle.py) as a web framework.
"""
import json
import logging
import optparse
import os
import subprocess
import sys
import tempfile
from operator import itemgetter
logging.basicConfig()
LOG = logging.getLogger('local_dash')
UNKNOWN = 'unknown'
class LocalDashException(StandardError):
"""Base exception class for this utility.
This exception should be used to signal user errors or system failures
(like timeouts), not bugs (like an incorrect param value). For the
latter you should raise Exception so we can see where/how it happened.
"""
def __init__(self, reason):
"""Instantiate a LocalDashException..
Args:
reason: text describing the problem.
"""
StandardError.__init__(self)
self.reason = reason
def __repr__(self):
return 'LocalDashException: %s' % self.reason
def __str__(self):
return 'LocalDashException: %s' % self.reason
class ConfigException(LocalDashException):
"""Raised when issues with local config file."""
def simple_system_output(cmd):
"""Replace autotest utils.system_output() locally.
Args:
cmd: A shell command to execute.
Returns:
A list of lines returned from running the command.
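
  For example (illustrative), simple_system_output('echo hi') would
  return ['hi'].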
"""
LOG.debug('Executing: %s', cmd)
try:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
# Ignore p.returncode - generate_test_report can return valid
# output while exiting nonzero to indicate failed tests.
return stdout.strip().split('\n')
  except Exception as e:
    LOG.warning('Command (%s) failed (%s).', cmd, e)
    return None
def parse_args():
"""Handle command line arguments.
Also sets the logging level based on verbosity specified:
0: Shows ERROR logged messages.
1: Shows ERROR, INFO logged messages.
2: Shows ERROR, INFO and DEBUG logged messages.
Returns:
Tuple of (options, args) parsed.
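
  A typical invocation (paths hypothetical) might be:
    ./local_dash.py --config-file config_dash.json -v 1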
"""
parser = optparse.OptionParser()
parser.add_option('-c', '--config-file',
help='config file [default: %default]',
dest='config_file', default='config_dash.json')
parser.add_option('-p', '--print-model',
                    help='print test result model [default: %default]',
dest='print_model', action='store_true', default=False)
parser.add_option('-v', '--verbosity-level',
help='1=debug, 2=most verbose [default: %default]',
dest='verbosity', type='int', default=0)
options, args = parser.parse_args()
logging_level = logging.ERROR
if options.verbosity == 1:
logging_level = logging.INFO
elif options.verbosity > 1:
logging_level = logging.DEBUG
LOG.setLevel(logging_level)
return options, args
def get_json_config(current_dir, json_file):
"""Retrieve a json config file with program options.
Args:
current_dir: directory in which this executable resides.
json_file: file name as provided by the user (or default).
Returns:
A valid Python dictionary that may be used for setup details/defaults.
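
  A minimal config file (values illustrative) might contain:
    {"result_folder": "/tmp"}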
"""
if not os.path.isfile(json_file):
json_file = os.path.join(current_dir, json_file)
if not os.path.isfile(json_file):
return {'result_folder': '/tmp'}
  try:
    with open(json_file) as f:
      json_contents = json.load(f)
  except ValueError as e:
    raise ConfigException('Invalid json in %s. <%s>' % (json_file, e))
return json_contents
def write_json_file(json_data):
"""Write data-structure output to a file in json format.
Write the json to a named temp file (with a randomized name).
Then, update a sym-link so the viewer can find it.
Args:
json_data: data-structure to be serialized out.
Returns:
The file name of the file written.
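
  For example (names illustrative), this might write
  /tmp/local_dash_test_results_AbC123.json and re-point the
  /tmp/local_dash_test_results.latest sym-link at it.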
"""
f = tempfile.NamedTemporaryFile('w', prefix='local_dash_test_results_',
suffix='.json', delete=False)
output_file = f.name
json.dump(json_data, f)
f.close()
result_link = os.path.join('/tmp', 'local_dash_test_results.latest')
  if os.path.islink(result_link):
    # Remove the previous results file (if it still exists), then the link.
    old_target = os.readlink(result_link)
    if os.path.isfile(old_target):
      os.remove(old_target)
    os.unlink(result_link)
os.symlink(output_file, result_link)
return output_file
class ResultModel(object):
"""A data structure that shows test results from run_remote_tests runs.
Results are summarized into data useful for presenting a simple
result dashboard. Each run_remote_tests.XXX folder will be an
individual dictionary entry in the list.
The result model is made up of actual _results, some hardware details,
a list of tests executed (for building report headers) and the
details of the command line execution for aiding re-runs of local_dash.
The _results data structure is organized as follows.
  [{'path': path1,  # path to the result folder
    'localtime': time suite ended, e.g. Sep 05 13:55:29,
    'timestamp': time suite ended, e.g. 1348856263,
    'dut': {'ec': EC fw version, e.g. snow_v1.3.74-f01e91d,
            'bios': BIOS fw version, e.g. Google_Snow.2695.65.0,
            'hwid': hardware version info, e.g. DAISY TEST A-A 9382},
    'tests': [{'name': test1_name,
               'status': PASS | FAIL,
               'localtime': time test ended, e.g. Sep 05 13:55:29,
               'timestamp': time test ended, e.g. 1348856263,
               'reason': explanatory text for the failure},
              {'name': test2_name,
               'status': PASS | FAIL,
               'localtime': time test ended, e.g. Sep 05 13:55:29,
               'timestamp': time test ended, e.g. 1348856263,
               'reason': explanatory text for the failure}]
   },
   {'path': path2, ...},
   {'path': path3, ...}
  ]
"""
# Constants for line parsing of csv output from generate_test_report --csv
PATH_FIELD = 0
STATUS_FIELD = 1
def __init__(self):
"""Initialize dictionary that will contain result data model."""
# A list of dictionaries each the results from one run of
# run_remote_tests.sh.
self._results = []
    # A set of unique hardware identifiers. Could be a board name but is
    # the hwid at this time.
    self._hardware_set = set([UNKNOWN])
    # A set of unique test names discovered.
    self._test_set = set()
def _parse_path(self, fields):
"""The suite will always be the toplevel-constant part of the path.
There may be some arbitrary path information in front of the
run_remote_tests.XXX directory but the suite is always represented
by the folder immediately below the run_remote_tests.XXX directory.
It is either a test_name (for a 1-test suite run) or a control file
of tests (e.g. tmp.combined-control.cHThH) for a multi-test suite.
Returns:
A tuple of the full path, actual suite_path and a boolean indicating if
this path_ reflects a suite summary line (instead of a test line).
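
    For example (path hypothetical), the field
      /tmp/run_remote_tests.1234/login_LoginSuccess/login_LoginSuccess
    yields
      ('run_remote_tests.1234/login_LoginSuccess/login_LoginSuccess',
       'run_remote_tests.1234/login_LoginSuccess', False).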
"""
path_ = fields[self.PATH_FIELD]
start = path_.find('run_remote_tests')
if start < 0:
return None, None, False
path_ = path_[start:]
suite_parts = path_.split('/')
is_suite = (len(suite_parts) <= 2)
suite_path = '/'.join(suite_parts[:2])
return path_, suite_path, is_suite
def get_model(self, cmd=None, config=None, args=None):
"""Some final post-processing before the data is returned.
The model will commonly be viewed in date-descending order.
Args:
cmd: if present, added to the model to aid re-runs.
config: if present, added to the model to aid re-runs.
args: if present, added to the model to aid re-runs.
Returns:
A dictionary with 3 elements: the test-result-list-of-dictionaries, a
list of unique hardware on which the tests were attempted and a list of
unique tests that were attempted. These 3 things allow the recipient to
format some organized tables by hardware and test.
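
    A skeletal (illustrative) return value:
      {'results': [...], 'hardware': ['unknown', ...], 'tests': [...]}
    plus optional 'cmd', 'config' and 'args' entries when provided.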
"""
model = {'results': sorted(self._results, key=itemgetter('timestamp'),
reverse=True),
'hardware': sorted(self._hardware_set),
'tests': sorted(self._test_set)}
for k, var in [('cmd', cmd), ('config', config), ('args', args)]:
if var:
model[k] = var
return model
def print_result_model(self):
"""For diagnostic purposes allow the model to be reviewed."""
import pprint
pprint.pprint(self.get_model())
def _parse_test_line(self, fields):
"""Extract the test_name, status and info/time fields from a single test.
Lines are expected to begin with the path and test_status first then
optional key-value pairs may be present with added info. The two time
fields: timestamp and localtime are expected to be present.
Args:
      fields: A list of fields extracted from a csv line to be inspected.

    Returns:
      A tuple of (result_dict, dut_dict): result_dict holds the name,
      status, time and reason fields for one test; dut_dict holds
      hardware info (e.g. hwid) for the device under test.
"""
result_dict = {}
dut_dict = {}
path_, suite_path, is_suite = self._parse_path(fields)
if not path_:
      LOG.warning('Unexpected path format (%s).', ','.join(fields))
      # Return a tuple so the caller's unpacking does not fail.
      return result_dict, dut_dict
if is_suite:
result_dict['path'] = suite_path
else:
result_dict['path'] = path_
test_name = os.path.basename(path_)
self._test_set.add(test_name)
result_dict['name'] = test_name
    for i, f in enumerate(fields[self.STATUS_FIELD:],
                          start=self.STATUS_FIELD):
if f in ['PASS', 'FAIL']:
result_dict['status'] = f
continue
      # The reason text may itself contain commas, so rejoin the remaining
      # fields and stop.
if f.startswith('reason'):
result_dict['reason'] = ','.join(fields[i:])
break
info_kv = f.split('=')
if len(info_kv) != 2:
        LOG.warning('Unexpected field format: %s.', f)
continue
info_key, info_value = info_kv
if info_key in ['localtime', 'timestamp']:
result_dict[info_key] = info_value
else:
# Save the unique hwid's to track the hardware/boards.
if info_key == 'hwid':
self._hardware_set.add(info_value)
# Save all non-time-based useful keys under dut.
dut_dict[info_key] = info_value
return result_dict, dut_dict
def add_run_lines(self, dir_name, lines):
""" Parse lines from a single test run (folder) into a data model.
The output format from ordinary generate_test_report is:
line1: suite-result (if only 1 test run, the suite is the test)
subsequent lines: test results
The suite-lines are of two formats depending on the test status:
Suites with no failures:
suite_folder/suite_name,PASS[,info keyvals]
Suites with at least one failing test:
suite_folder/suite_name,FAIL[,info keyvals][,reason="failure text"]
Of the subsequent lines, there are two types of line:
1. test-results
2. performance-keyvals [we skip these]
The test-result lines are formatted as the suite-lines above:
suite_folder/test_name,status[,info keyvals][,reason="..."]
The optional info can be any of (comma-separated):
EC version info: fw_version=snow_v1.3.74-f01e91d
BIOS version info: fwid=Google_Snow.2695.65.0
hardware version info: hwid=DAISY TEST A-A 9382
test_datetime: localtime=Sep 05 13:55:29
timestamp=1348856263
failure text: reason="Autotest failure reason text here."
Unfortunately, when tests fail early due to setup/config issues a lot
of data is not collected. So, it's not uncommon for the ec/bios/hwid
info to be unavailable.
The performance-keyval lines are recognized because they do not include
the PASS|FAIL field in the second position.
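
    A hypothetical example of the csv output for a 1-test run:
      run_remote_tests.1234/login_LoginSuccess,PASS,localtime=Sep 05 13:55:29,timestamp=1348856263
      run_remote_tests.1234/login_LoginSuccess/login_LoginSuccess,PASS,hwid=DAISY TEST A-A 9382,localtime=Sep 05 13:55:29,timestamp=1348856263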
Args:
dir_name: Directory traversed. Used to default the path.
lines: List of the lines produced by directory analysis.
"""
suite = {'path': [], 'localtime': None, 'timestamp': None,
'dut': {'ec': UNKNOWN, 'bios': UNKNOWN, 'hwid': UNKNOWN},
'tests': []}
folder, last_test_path = os.path.split(dir_name)
    for line in lines:
      # Skip over generate_test_report warning lines.
      if line.startswith('WARNING'):
        continue
      if not line.strip():
        continue
      LOG.debug(' %s', line)
      f = line.split(',')
      if len(f) < 2:
        LOG.warning('Unexpected line format: %s.', line)
        break
# Skip performance-keyval lines; they have no status.
      if f[self.STATUS_FIELD] not in ('PASS', 'FAIL'):
continue
result_dict, dut_dict = self._parse_test_line(fields=f)
# Save the path in case the logs are truncated and cannot find a suite.
if result_dict.get('path'):
last_test_path = result_dict['path']
timestamp = result_dict.get('timestamp')
suite_timestamp = suite.get('timestamp')
if timestamp and (not suite_timestamp or timestamp > suite_timestamp):
suite['timestamp'] = timestamp
        suite['localtime'] = result_dict.get('localtime')
suite['dut'].update(dut_dict)
if result_dict.get('name'):
# Test line was parsed.
suite['tests'].append(result_dict)
elif result_dict.get('path'):
# Suite line was parsed. Save all suite paths in case multiple
# suites (combined-control files) were run.
suite['path'].append(result_dict['path'])
# Aborted tests sometimes emit poor status. Choose valid, related path.
if not suite.get('path'):
suite['path'] = [last_test_path]
      LOG.warning('Unexpected: suite has no path! Using %s.', suite['path'])
    # Choose the common parent of combined-control suite dirs.
suite['path'] = os.path.commonprefix(suite['path'])
if not os.path.isdir(os.path.join(folder, suite['path'])):
suite['path'] = os.path.dirname(suite['path'])
self._results.append(suite)
def retrieve_test_results(gtr_dir, folder):
"""Run generate_test_report (gtr) and parse its results into a data model.
Args:
gtr_dir: directory where generate_test_report should reside.
folder: a containing folder in which top-level folders named
run_remote_tests* will be traversed for test results.
This pattern is based on copying the contents of a
ChromeOS developer's chroot /tmp directory after
running local tests with run_remote_tests.sh.
Returns:
A ResultModel object with valid results data else None.
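
  The per-folder report command is roughly (path hypothetical):
    generate_test_report.py --csv --info --attr --sort-chron \
        /tmp/run_remote_tests.1234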
"""
generate_test_report = os.path.join(gtr_dir, 'generate_test_report.py')
if not os.path.isfile(generate_test_report):
LOG.error('Cannot find generate_test_report where expected: %s.', gtr_dir)
return None
if not folder:
LOG.error('No test result folder specified.')
return None
print 'Searching for results in folders under: %s' % folder
all_run_dirs = [os.path.join(folder, d) for d in os.listdir(folder)
if d.startswith('run_remote_tests.')]
  LOG.debug('Found:\n%s', ',\n'.join(all_run_dirs))
  LOG.info('%d dirs.', len(all_run_dirs))
print 'Retrieving test results (this can take a while for large folders)...'
result_model = ResultModel()
for d in all_run_dirs:
if os.path.islink(d):
      # Skip sym-links like run_remote_tests.latest.
continue
    cmd = ' '.join([generate_test_report, '--csv', '--info', '--attr',
                    '--sort-chron', d])
    lines = simple_system_output(cmd)
if not lines:
LOG.warning('No results under %s.', d)
continue
result_model.add_run_lines(d, lines)
return result_model
def main(argv):
"""Automate end-to-end extract and upload workflow.
Runs the steps required to get a single Chrome OS test job result
from Autoest and into a data repository with enhanced reporting
in AppEngine.
"""
base_dir = os.path.dirname(os.path.abspath(argv[0]))
# Parse options
options, args = parse_args()
try:
config_dash = get_json_config(base_dir, options.config_file)
except ConfigException as e:
print e.reason
return
# Default to /tmp from local chroot.
test_result_folder = config_dash.get('result_folder', '/tmp')
test_results = retrieve_test_results(gtr_dir=os.path.dirname(base_dir),
folder=test_result_folder)
if test_results:
if options.print_model:
test_results.print_result_model()
    # To enable re-running this command from a web-ui, stash the
    # details of this command invocation in the results.
    result_model = test_results.get_model(os.path.abspath(argv[0]),
                                          options.config_file, args)
file_name = write_json_file(result_model)
print 'Wrote %s.' % file_name
viewer_path = os.path.join(base_dir, 'dash_template', 'result_viewer')
# Touch forces the bottle web server to refresh.
os.utime(viewer_path, None)
    print 'Run %s to serve the test results locally.' % viewer_path
if __name__ == '__main__':
main(sys.argv)