Fix test failures in results_report refactors.

I have absolutely no clue how I missed these failures (since I had to
run results_report_unittest to exercise the new perf parser test), but
apparently I did. The failure happened because _GetTestOutput named its
arg _chart_js, which didn't match the chart_js name used by the caller.
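
For context, this is the usual Python keyword-argument mismatch; a
minimal sketch (hypothetical values and a reduced signature, not the
exact crosperf call site) of why the rename matters:

  # Hedged illustration: renaming a parameter breaks callers that pass
  # the argument by keyword, even though positional calls keep working.
  def _GetTestOutput(perf_table, _chart_js, summary_table):
    return (perf_table, _chart_js, summary_table)

  _GetTestOutput(perf_table=1, chart_js=2, summary_table=3)
  # -> TypeError: _GetTestOutput() got an unexpected keyword argument
  #    'chart_js'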

This also fixes a few minor linter complaints that I introduced in
patch set 2 of the same review. All of the other files I touched are
lint-clean (except for a complaint about the use of _logger in one of
them, but that was there before my changes).

BUG=None
TEST=./run_tests.sh *actually* passes this time. I promise. :)

Change-Id: I98e43b29ff9cef5fc9cf33143adc26b98a861f3e
Reviewed-on: https://chrome-internal-review.googlesource.com/285436
Commit-Ready: George Burgess <gbiv@google.com>
Tested-by: George Burgess <gbiv@google.com>
Reviewed-by: Caroline Tice <cmtice@google.com>
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index 9a45496..cfd4eb9 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -179,18 +179,22 @@
 
   _TestOutput = collections.namedtuple('TestOutput', ['summary_table',
                                                       'perf_html',
+                                                      'chart_js',
                                                       'charts',
                                                       'full_table',
                                                       'experiment_file'])
 
   @staticmethod
-  def _GetTestOutput(perf_table, _chart_js, summary_table, print_table,
+  def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
                      chart_divs, full_table, experiment_file):
+    # N.B. Currently we don't check chart_js; it's just passed through because
+    # cros lint complains otherwise.
     summary_table = print_table(summary_table, 'HTML')
     perf_html = print_table(perf_table, 'HTML')
     full_table = print_table(full_table, 'HTML')
     return HTMLResultsReportTest._TestOutput(summary_table=summary_table,
                                              perf_html=perf_html,
+                                             chart_js=chart_js,
                                              charts=chart_divs,
                                              full_table=full_table,
                                              experiment_file=experiment_file)
@@ -369,6 +373,7 @@
 
 
 class PerfReportParserTest(unittest.TestCase):
+  """Tests for the perf report parser in results_report."""
   @staticmethod
   def _ReadRealPerfReport():
     my_dir = os.path.dirname(os.path.realpath(__file__))
@@ -393,10 +398,10 @@
       self.assertEqual(v, report_cycles[k])
 
     known_instrunctions_percentages = {
-      '0x0000115bb6c35d7a': 1.65,
-      '0x0000115bb7ba9b54': 0.67,
-      '0x0000000000024f56': 0.00,
-      '0xffffffffa4a0ee03': 0.00,
+        '0x0000115bb6c35d7a': 1.65,
+        '0x0000115bb7ba9b54': 0.67,
+        '0x0000000000024f56': 0.00,
+        '0xffffffffa4a0ee03': 0.00,
     }
     report_instructions = report['instructions']
     self.assertEqual(len(report_instructions), 492)