Update crosperf to run telemetry tests via autotest.

This updates crosperf to recognize the suite 'telemetry_Crosperf',
which lets users run telemetry performance tests via test_that
and the telemetry_Crosperf autotest.  It will not be generally usable
until CL 168491 (the telemetry_Crosperf CL) is committed.
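
As a rough illustration (the surrounding experiment-file syntax here is a
sketch and the values are placeholders, not part of this change), a
benchmark block that uses the new suite might look like:

    benchmark: smoothness_top25 {
      suite: telemetry_Crosperf
      iterations: 3
    }

The benchmark name should be one of the underscore-form names from the
telemetry_perf_tests table in experiment_factory.py, or 'all' to expand to
one benchmark per table entry; the underscore forms exist because a '.' in
a test name confuses the argument parser.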

BUG=None
TEST=Invoked telemetry tests using this change.

Note: Profilers do not yet work properly with telemetry_Crosperf, but
everything else does.
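
For reference, the test_that invocation this change builds inside the chroot
has roughly the following shape (angle brackets mark placeholders):

    /usr/bin/test_that --board=<board> \
        --args="iterations=<N> test=<telemetry_test> <profiler_args>" \
        <remote> telemetry_Crosperf

where <profiler_args> is empty unless perf_args is set, in which case it
expands to profiler=custom_perf plus a quoted profiler_args string.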

Change-Id: Id14edc89db79bc244496e8f6a5b614300a00be54
Reviewed-on: https://chrome-internal-review.googlesource.com/144440
Reviewed-by: Caroline Tice <cmtice@google.com>
Commit-Queue: Caroline Tice <cmtice@google.com>
Tested-by: Caroline Tice <cmtice@google.com>
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 9746371..da77d22 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -18,6 +18,23 @@
 from results_cache import CacheConditions
 import test_flag
 
+# Note: Telemetry benchmark test names sometimes include a ".", which
+# causes difficulties in the argument-parsing stage.  We therefore use the
+# translation dictionary below to map from a name the argument parser
+# will accept to the actual benchmark name.
+
+telemetry_perf_tests = {
+    'dromaeo_domcoreattr'     : 'dromaeo.domcoreattr',
+    'dromaeo_domcoremodify'   : 'dromaeo.domcoremodify',
+    'dromaeo_domcorequery'    : 'dromaeo.domcorequery',
+    'dromaeo_domcoretraverse' : 'dromaeo.domcoretraverse',
+    'kraken'                  : 'kraken',
+    'memory_top25'            : 'memory.top25',
+    'octane'                  : 'octane',
+    'robohornet_pro'          : 'robohornet_pro',
+    'smoothness_top25'        : 'smoothness.top25',
+    'sunspider'               : 'sunspider'
+}
 
 class ExperimentFactory(object):
   """Factory class for building an Experiment, given an ExperimentFile as input.
@@ -79,11 +96,33 @@
       suite = benchmark_settings.GetField("suite")
       use_test_that = benchmark_settings.GetField("use_test_that")
 
-      benchmark = Benchmark(benchmark_name, test_name, test_args,
-                            iterations, outlier_range,
-                            key_results_only, rm_chroot_tmp,
-                            perf_args, suite, use_test_that)
-      benchmarks.append(benchmark)
+      if suite == 'telemetry_Crosperf':
+        if test_name == 'all':
+          # Create and add one benchmark for each telemetry perf test.
+          for test in telemetry_perf_tests.keys():
+            telemetry_test_name = telemetry_perf_tests[test]
+            telemetry_benchmark = Benchmark(telemetry_test_name,
+                                            telemetry_test_name,
+                                            test_args, iterations,
+                                            outlier_range, key_results_only,
+                                            rm_chroot_tmp, perf_args, suite,
+                                            use_test_that)
+            benchmarks.append(telemetry_benchmark)
+        else:
+          # Get correct name of Telemetry benchmark test.
+          test_name = telemetry_perf_tests[test_name]
+          benchmark = Benchmark(test_name, test_name, test_args,
+                                iterations, outlier_range,
+                                key_results_only, rm_chroot_tmp,
+                                perf_args, suite, use_test_that)
+          benchmarks.append(benchmark)
+      else:
+        # Add the single benchmark.
+        benchmark = Benchmark(benchmark_name, test_name, test_args,
+                              iterations, outlier_range,
+                              key_results_only, rm_chroot_tmp,
+                              perf_args, suite, use_test_that)
+        benchmarks.append(benchmark)
 
     # Construct labels.
     labels = []
@@ -154,6 +193,3 @@
                       .format(default_remotes_file))
     else:
       raise Exception("There is not remote for {0}".format(board))
-
-
-
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index ee9c693..8ba2c81 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -9,10 +9,28 @@
 
 from utils import command_executer
 
+TEST_THAT_PATH = '/usr/bin/test_that'
+CHROME_MOUNT_DIR = '/tmp/chrome_root'
+
+def GetProfilerArgs(benchmark):
+  if benchmark.perf_args:
+    perf_args_list = benchmark.perf_args.split(" ")
+    if perf_args_list[0] not in ["record", "stat"]:
+      raise Exception("perf_args must start with either record or stat")
+    # Insert "-a" so perf collects data system-wide.
+    perf_args_list = [perf_args_list[0]] + ["-a"] + perf_args_list[1:]
+    perf_args = " ".join(perf_args_list)
+    extra_test_args = ["profiler=custom_perf",
+                       ("profiler_args=\"'%s'\"" % perf_args)]
+    return " ".join(extra_test_args)
+  else:
+    return ""
+
 
 class SuiteRunner(object):
   """ This defines the interface from crosperf to test script.
   """
+
   def __init__(self, logger_to_use=None):
     self._logger = logger_to_use
     self._ce = command_executer.GetCommandExecuter(self._logger)
@@ -21,13 +39,15 @@
   def Run(self, machine, label, benchmark, test_args):
     if benchmark.suite == "telemetry":
       return self.Telemetry_Run(machine, label, benchmark)
+    elif benchmark.suite == "telemetry_Crosperf":
+      return self.Telemetry_Crosperf_Run(machine, label, benchmark)
     elif benchmark.use_test_that:
       return self.Test_That_Run(machine, label, benchmark, test_args)
     else:
       return self.Pyauto_Run(machine, label, benchmark, test_args)
 
   def RebootMachine(self, machine_name, chromeos_root):
-    command ="reboot && exit"
+    command = "reboot && exit"
     self._ce.CrosRunCommand(command, machine=machine_name,
                       chromeos_root=chromeos_root)
     time.sleep(60)
@@ -66,13 +86,45 @@
 
     self.RebootMachine(machine, label.chromeos_root)
 
-    command = ("/usr/bin/test_that %s %s %s" %
-               (options, machine, benchmark.test_name))
+    command = ("%s %s %s %s" %
+               (TEST_THAT_PATH, options, machine, benchmark.test_name))
     return self._ce.ChrootRunCommand(label.chromeos_root,
                                      command,
                                      True,
                                      self._ct)
 
+
+  def Telemetry_Crosperf_Run(self, machine, label, benchmark):
+    if not os.path.isdir(label.chrome_src):
+      self._logger.LogFatal("Cannot find chrome src dir to"
+                            " run telemetry.")
+
+    profiler_args = GetProfilerArgs(benchmark)
+    chrome_root_options = ""
+
+    # If chrome_src is outside the chroot, mount it when entering the
+    # chroot.
+    if label.chrome_src.find(label.chromeos_root) == -1:
+      chrome_root_options = (" --chrome_root={0} --chrome_root_mount={1} "
+                             " FEATURES=\"-usersandbox\" "
+                             "CHROME_ROOT={2}".format(label.chrome_src,
+                                                      CHROME_MOUNT_DIR,
+                                                      CHROME_MOUNT_DIR))
+
+    cmd = ('{0} --board={1} --args="iterations={2} test={3} '
+           '{4}" {5} telemetry_Crosperf'.format(TEST_THAT_PATH,
+                                                label.board,
+                                                benchmark.iterations,
+                                                benchmark.test_name,
+                                                profiler_args,
+                                                machine))
+    return self._ce.ChrootRunCommand(label.chromeos_root,
+                                     cmd,
+                                     return_output=True,
+                                     command_terminator=self._ct,
+                                     cros_sdk_options=chrome_root_options)
+
+
   def Telemetry_Run(self, machine, label, benchmark):
     if not os.path.isdir(label.chrome_src):
       self._logger.LogFatal("Cannot find chrome src dir to"
@@ -99,7 +151,10 @@
 
 class MockSuiteRunner(object):
   def __init__(self):
-    pass
+    self._true = True
 
-  def Run(self, *args):
-    return ["", "", 0]
+  def Run(self, *_args):
+    if self._true:
+      return ["", "", 0]
+    else:
+      return ["", "", 0]