blob: 7b53b993dc5dc33c108a5c0d335a35d115aaef07 [file] [log] [blame]
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module providing the summary for multiple test results.
This firmware_summary module is used to collect the test results of
multiple rounds from the logs generated by different firmware versions.
The test results of the various validators of every gesture are displayed.
In addition, the test results of every validator across all gestures are
also summarized.
Usage:
$ python firmware_summary -d log_directory
A typical summary output looks like
Test Summary (by gesture) : fw_2.41 fw_2.42 count
---------------------------------------------------------------------
one_finger_tracking
CountTrackingIDValidator : 1.00 0.90 12
LinearityBothEndsValidator : 0.97 0.89 12
LinearityMiddleValidator : 1.00 1.00 12
NoGapValidator : 0.74 0.24 12
NoReversedMotionBothEndsValidator : 0.68 0.34 12
NoReversedMotionMiddleValidator : 1.00 1.00 12
ReportRateValidator : 1.00 1.00 12
one_finger_to_edge
CountTrackingIDValidator : 1.00 1.00 4
LinearityBothEndsValidator : 0.88 0.89 4
LinearityMiddleValidator : 1.00 1.00 4
NoGapValidator : 0.50 0.00 4
NoReversedMotionMiddleValidator : 1.00 1.00 4
RangeValidator : 1.00 1.00 4
...
Test Summary (by validator) : fw_2.4 fw_2.4.a count
---------------------------------------------------------------------
CountPacketsValidator : 1.00 0.82 6
CountTrackingIDValidator : 0.92 0.88 84
...
"""
import getopt
import os
import sys
import test_conf as conf
import firmware_log
from collections import defaultdict
from common_util import print_and_exit
from firmware_constants import OPTIONS
from test_conf import (log_root_dir, segment_weights, validator_weights)
class OptionsDisplayMetrics:
    """Encapsulate the command-line choice of how metrics are displayed.

    The recognized option letters are:
      'p': display only the primary metrics statistics (the default).
      'a': display all metrics statistics.
      'f': display all metrics statistics plus the raw metrics values.
    Any unrecognized letter silently falls back to the default.
    """
    # The option letters accepted for metrics display.
    DISPLAY_METRICS_PRIMARY_STATS = 'p'
    DISPLAY_METRICS_ALL_STATS = 'a'
    DISPLAY_METRICS_RAW_VALUES = 'f'
    DISPLAY_METRICS_OPTIONS = [DISPLAY_METRICS_PRIMARY_STATS,
                               DISPLAY_METRICS_ALL_STATS,
                               DISPLAY_METRICS_RAW_VALUES]
    DISPLAY_METRICS_DEFAULT = DISPLAY_METRICS_PRIMARY_STATS

    def __init__(self, option):
        """Derive the display flags from the given option letter.

        @param option: the option letter controlling metrics display
        """
        # Unknown letters degrade gracefully to the default option.
        if option not in self.DISPLAY_METRICS_OPTIONS:
            option = self.DISPLAY_METRICS_DEFAULT
        # Both 'a' and 'f' request the full statistics listing.
        self.display_all_stats = option in (self.DISPLAY_METRICS_ALL_STATS,
                                            self.DISPLAY_METRICS_RAW_VALUES)
        # Only 'f' additionally requests the raw per-file metric values.
        self.display_raw_values = (option == self.DISPLAY_METRICS_RAW_VALUES)
class FirmwareSummary:
    """Summary for touch device firmware tests.

    Aggregates the per-round test logs found in log_dir (through
    firmware_log.SummaryLog) and prints score statistics grouped by
    gesture and by validator, optionally the metrics statistics and raw
    metric values, and finally the weighted average per firmware.
    """

    def __init__(self, log_dir, display_metrics=False, debug_flag=False,
                 segment_weights=segment_weights,
                 validator_weights=validator_weights):
        """Initialize the summary from the logs under log_dir.

        segment_weights and validator_weights are passed as arguments
        so that it is possible to assign arbitrary weights in unit tests.

        @param log_dir: the directory holding the test result logs
        @param display_metrics: an OptionsDisplayMetrics instance, or a
                falsy value to skip the metrics sections entirely
        @param debug_flag: forwarded to firmware_log.SummaryLog
        """
        if os.path.isdir(log_dir):
            self.log_dir = log_dir
        else:
            # Cannot continue without the logs; report and bail out.
            error_msg = 'Error: The test result directory does not exist: %s'
            print error_msg % log_dir
            sys.exit(1)

        self.display_metrics = display_metrics
        self.slog = firmware_log.SummaryLog(log_dir, segment_weights,
                                            validator_weights, debug_flag)

    def _print_summary_title(self, summary_title_str):
        """Print the two-line summary table title and a separator rule.

        @param summary_title_str: the text shown in the leftmost column
                of the title row (e.g. 'Test Summary (by gesture)')
        """
        # Create a flexible column title format according to the number of
        # firmware versions which could be 1, 2, or more.
        #
        # A typical summary title looks like
        # Test Summary () : fw_11.26 fw_11.23
        # mean ssd count mean ssd count
        # ----------------------------------------------------------------------
        #
        # The 1st line above is called title_fw.
        # The 2nd line above is called title_statistics.
        #
        # As an example for 2 firmwares, title_fw_format looks like:
        # '{0:<37}: {1:>12} {2:>21}'
        title_fw_format_list = ['{0:<37}:',]
        for i in range(len(self.slog.fws)):
            # The first firmware column is narrower; later columns absorb
            # the width of the preceding stats columns.
            format_space = 12 if i == 0 else (12 + 9)
            title_fw_format_list.append('{%d:>%d}' % (i + 1, format_space))
        title_fw_format = ' '.join(title_fw_format_list)

        # As an example for 2 firmwares, title_statistics_format looks like:
        # '{0:>47} {1:>6} {2:>5} {3:>8} {4:>6} {5:>5}'
        title_statistics_format_list = []
        for i in range(len(self.slog.fws)):
            # Each firmware contributes three columns: mean, ssd, count.
            format_space = (12 + 35) if i == 0 else 8
            title_statistics_format_list.append('{%d:>%d}' % (3 * i,
                                                              format_space))
            title_statistics_format_list.append('{%d:>%d}' % (3 * i + 1 , 6))
            title_statistics_format_list.append('{%d:>%d}' % (3 * i + 2 , 5))
        title_statistics_format = ' '.join(title_statistics_format_list)

        # Create title_fw_list
        # As an example for two firmware versions, it looks like
        # ['Test Summary (by gesture)', 'fw_2.4', 'fw_2.5']
        title_fw_list = [summary_title_str,] + self.slog.fws

        # Create title_statistics_list
        # As an example for two firmware versions, it looks like
        # ['mean', 'ssd', 'count', 'mean', 'ssd', 'count', ]
        title_statistics_list = ['mean', 'ssd', 'count'] * len(self.slog.fws)

        # Print the title.
        title_fw = title_fw_format.format(*title_fw_list)
        title_statistics = title_statistics_format.format(
                *title_statistics_list)
        print '\n\n', title_fw
        print title_statistics
        print '-' * len(title_statistics)

    def _print_statistics_score(self, stat):
        """Print one table row: validator name plus (mean, ssd, count)
        triples for every firmware.

        @param stat: a flat list: [validator_name, mean, ssd, count,
                mean, ssd, count, ...] with one triple per firmware
        """
        # Create a flexible format to print scores, ssd, and counts according to
        # the number of firmware versions which could be 1, 2, or more.
        # As an example with 2 firmware versions, the format looks like
        # ' {0:<35}: {1:>8.2f} {2:>6.2f} {3:>5} {4:>8.2f} {5:>6.2f} {6:>5}'
        if len(stat) <= 1:
            # Only the validator name is present: no results to print.
            return

        statistics_format_list = [' {0:<35}:',]
        score_ssd_count_format = '{%d:>8.2f} {%d:>6.2f} {%d:>5}'
        for i in range(len(self.slog.fws)):
            statistics_format_list.append(
                    score_ssd_count_format % (i * 3 + 1, i * 3 + 2, i * 3 + 3))
        statistics_format = ' '.join(statistics_format_list)
        print statistics_format.format(*tuple(stat))

    def _print_result_stats(self, gesture=None):
        """Print the result statistics of validators.

        @param gesture: restrict the statistics to this gesture; when
                None, combine the results of all gestures.
        """
        for validator in self.slog.validators:
            stat_scores_data = [validator,]
            for fw in self.slog.fws:
                result = self.slog.get_result(fw=fw, gesture=gesture,
                                              validator=validator)
                if result:
                    stat_scores_data += result.stat_scores.all_data
            # Print the score statistics of all firmwares on the same row.
            self._print_statistics_score(stat_scores_data)

    def _print_result_stats_by_gesture(self):
        """Print the summary of the test results by gesture."""
        self._print_summary_title('Test Summary (by gesture)')
        for gesture in self.slog.gestures:
            print gesture
            self._print_result_stats(gesture=gesture)

    def _print_result_stats_by_validator(self):
        """Print the summary of the test results by validator. The validator
        results of all gestures are combined to compute the statistics.
        """
        self._print_summary_title('Test Summary (by validator)')
        self._print_result_stats()

    def _get_metric_name_for_display(self, metric_name):
        """Get the metric name for display.

        We would like to shorten the metric name when displayed: only the
        part before the first '--' is shown.

        @param metric_name: a metric name
        """
        return metric_name.split('--')[0]

    def _print_statistics_of_metrics(self, gesture=None):
        """Print the statistics of metrics by gesture or by validator.

        @param gesture: print the statistics grouped by gesture
                if this argument is specified; otherwise, by validator.
        """
        # Print the complete title which looks like:
        # <title_str> <fw1> <fw2> ... <description>
        fws = self.slog.fws
        num_fws = len(fws)
        title_str = ('Metrics statistics by gesture: ' + gesture if gesture else
                     'Metrics statistics by validator')
        complete_title = ('{:<37}: '.format(title_str) +
                          ('{:>10}' * num_fws).format(*fws) +
                          ' {:<40}'.format('description'))
        print '\n' * 2 + complete_title
        print '-' * (38 + 1 + 10 * num_fws + 17)

        # Print the metric name and the metric stats values of every firmwares
        name_format = ' ' * 6 + '{:<31}:'
        value_format = '{:>10.2f}'
        values_format = value_format * num_fws
        description_format = ' {:<40}'
        for validator in self.slog.validators:
            # Collect per-firmware stats values keyed by metric name.
            fw_stats_values = defaultdict(dict)
            for fw in fws:
                result = self.slog.get_result(fw=fw, gesture=gesture,
                                              validator=validator)
                stat_metrics = result.stat_metrics
                for metric_name in stat_metrics.metrics_values:
                    fw_stats_values[metric_name][fw] = \
                            stat_metrics.stats_values[metric_name]

            fw_stats_values_printed = False
            for metric_name, fw_values_dict in sorted(fw_stats_values.items()):
                values = [fw_values_dict[fw] for fw in fws]
                # The metrics of some special validators will not be shown
                # unless the display_all_stats flag is True or any stats values
                # are non-zero.
                if (validator not in conf.validators_hidden_when_no_failures or
                        self.display_metrics.display_all_stats or any(values)):
                    if not fw_stats_values_printed:
                        # Print the validator header once, lazily, so that
                        # fully-hidden validators produce no output at all.
                        fw_stats_values_printed = True
                        print ' ' * 2, validator
                    disp_name = self._get_metric_name_for_display(metric_name)
                    # Trailing commas: keep name, values, and description on
                    # one output line.
                    # NOTE(review): stat_metrics here is left over from the
                    # last firmware of the loop above; presumably the metric
                    # description is identical across firmwares -- confirm.
                    print name_format.format(disp_name),
                    print values_format.format(*values),
                    print description_format.format(
                            stat_metrics.metrics_props[metric_name].description)

    def _print_raw_metrics_values(self):
        """Print the raw metrics values, grouped by firmware, validator,
        and metric name, one line per (gesture, variation, round).
        """
        # The subkey() below extracts (gesture, variation, round) from
        # metric.key which is (fw, round, gesture, variation, validator)
        subkey = lambda key: (key[2], key[3], key[1])

        # The sum_len() below is used to calculate the sum of the length
        # of the elements in the subkey.
        sum_len = lambda lst: sum([len(str(l)) if l else 0 for l in lst])

        mnprops = firmware_log.MetricNameProps()
        print '\n\nRaw metrics values'
        print '-' * 80
        for fw in self.slog.fws:
            print '\n', fw
            for validator in self.slog.validators:
                result = self.slog.get_result(fw=fw, validator=validator)
                metrics_dict = result.stat_metrics.metrics_dict
                if metrics_dict:
                    print '\n' + ' ' * 3 + validator
                    for metric_name, metrics in sorted(metrics_dict.items()):
                        disp_name = \
                                self._get_metric_name_for_display(metric_name)
                        print ' ' * 6 + disp_name
                        metric_note = mnprops.metrics_props[metric_name].note
                        if metric_note:
                            msg = '** Note: value below represents '
                            print ' ' * 9 + msg + metric_note

                        # Make a metric value list sorted by
                        # (gesture, variation, round)
                        value_list = sorted([(subkey(metric.key), metric.value)
                                             for metric in metrics])

                        # Pad the key column so the values line up.
                        max_len = max([sum_len(value[0])
                                       for value in value_list])
                        template_prefix = ' ' * 9 + '{:<%d}: ' % (max_len + 5)
                        for (gesture, variation, round), value in value_list:
                            # Tuple values are printed verbatim; scalar
                            # values are formatted to two decimals.
                            template = template_prefix + (
                                    '{}' if isinstance(value, tuple)
                                    else '{:.2f}')
                            gvr_str = '%s.%s (%s)' % (gesture, variation, round)
                            print template.format(gvr_str, value)

    def _print_final_weighted_averages(self):
        """Print the final weighted averages of all validators."""
        title_str = 'Test Summary (final weighted averages)'
        print '\n\n' + title_str
        print '-' * len(title_str)
        weighted_average = self.slog.get_final_weighted_average()
        for fw in self.slog.fws:
            print '%s: %4.3f' % (fw, weighted_average[fw])

    def print_result_summary(self):
        """Print the summary of the test results.

        Sections: by-gesture stats, by-validator stats, then (only when
        display_metrics is set) metrics statistics and optionally the raw
        values, and finally the weighted averages.
        """
        self._print_result_stats_by_gesture()
        self._print_result_stats_by_validator()
        if self.display_metrics:
            self._print_statistics_of_metrics()
            if self.display_metrics.display_raw_values:
                self._print_raw_metrics_values()
        self._print_final_weighted_averages()
def _usage_and_exit():
"""Print the usage message and exit."""
prog = sys.argv[0]
print 'Usage: $ python %s [options]\n' % prog
print 'options:'
print ' -D, --%s' % OPTIONS.DEBUG
print ' enable debug flag'
print ' -d, --%s <directory>' % OPTIONS.DIR
print ' specify which log directory to derive the summary'
print ' -h, --%s' % OPTIONS.HELP
print ' show this help'
print ' -m, --%s <verbose_level>' % OPTIONS.METRICS
print ' display the summary metrics.'
print ' verbose_level:'
print ' p: display the primary metrics statistics (default)'
print ' s: display all metrics statistics'
print ' f: display all metrics statistics and ' \
'the detailed raw metrics values'
print
print 'Examples:'
print ' Specify the log root directory.'
print ' $ python %s -d /tmp' % prog
print ' Display the primary metrics statistics.'
print ' $ python %s -m p' % prog
print ' Display all metrics statistics.'
print ' $ python %s -m s' % prog
print ' Display all metrics statistics and detailed raw metrics values.'
print ' $ python %s -m f' % prog
sys.exit(1)
def _parsing_error(msg):
"""Print the usage and exit when encountering parsing error."""
print 'Error: %s' % msg
_usage_and_exit()
def _parse_options():
"""Parse the options."""
# Set the default values of options.
options = {OPTIONS.DEBUG: False,
OPTIONS.DIR: log_root_dir,
OPTIONS.METRICS: None,
}
try:
short_opt = 'Dd:hm:'
long_opt = [OPTIONS.DEBUG,
OPTIONS.DIR + '=',
OPTIONS.HELP,
OPTIONS.METRICS + '=',
]
opts, args = getopt.getopt(sys.argv[1:], short_opt, long_opt)
except getopt.GetoptError, err:
_parsing_error(str(err))
for opt, arg in opts:
if opt in ('-h', '--%s' % OPTIONS.HELP):
_usage_and_exit()
elif opt in ('-D', '--%s' % OPTIONS.DEBUG):
options[OPTIONS.DEBUG] = True
elif opt in ('-d', '--%s' % OPTIONS.DIR):
options[OPTIONS.DIR] = arg
if not os.path.isdir(arg):
print 'Error: the log directory %s does not exist.' % arg
_usage_and_exit()
elif opt in ('-m', '--%s' % OPTIONS.METRICS):
options[OPTIONS.METRICS] = OptionsDisplayMetrics(arg)
else:
msg = 'This option "%s" is not supported.' % opt
_parsing_error(opt)
return options
if __name__ == '__main__':
    # Parse the command-line options, build the summary from the chosen
    # log directory, and print the full result summary to stdout.
    options = _parse_options()
    summary = FirmwareSummary(options[OPTIONS.DIR],
                              display_metrics=options[OPTIONS.METRICS],
                              debug_flag=options[OPTIONS.DEBUG])
    summary.print_result_summary()