Adding support of telemetry to crosperf

BUG=None
TEST=run crosperf with pyauto/telemetry test with/without cache.
      all pass.

Change-Id: If07ac020a9107a79d5780a58fd6dcc924d07f07f
Reviewed-on: https://gerrit-int.chromium.org/36594
Reviewed-by: Luis Lozano <llozano@chromium.org>
Commit-Queue: Yunlian Jiang <yunlian@google.com>
Tested-by: Yunlian Jiang <yunlian@google.com>
diff --git a/crosperf/autotest_runner.py b/crosperf/autotest_runner.py
deleted file mode 100644
index 5611b65..0000000
--- a/crosperf/autotest_runner.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2011 Google Inc. All Rights Reserved.
-
-from utils import command_executer
-
-
-class AutotestRunner(object):
-  """ This defines the interface from crosperf to ./run_remote_tests.sh.
-  """
-  def __init__(self, logger_to_use=None):
-    self._logger = logger_to_use
-    self._ce = command_executer.GetCommandExecuter(self._logger)
-    self._ct = command_executer.CommandTerminator()
-
-  def Run(self, machine_name, chromeos_root, board, autotest_name,
-          autotest_args):
-    """Run the run_remote_test."""
-    options = ""
-    if board:
-      options += " --board=%s" % board
-    if autotest_args:
-      options += " %s" % autotest_args
-    command = "rm -rf /usr/local/autotest/results/*"
-    self._ce.CrosRunCommand(command, machine=machine_name, username="root",
-                            chromeos_root=chromeos_root)
-    command = ("./run_remote_tests.sh --remote=%s %s %s" %
-               (machine_name, options, autotest_name))
-    return self._ce.ChrootRunCommand(chromeos_root, command, True, self._ct)
-
-  def Terminate(self):
-    self._ct.Terminate()
-
-
-class MockAutotestRunner(object):
-  def __init__(self):
-    pass
-
-  def Run(self, *args):
-    return ["", "", 0]
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 8fe8a49..4a2d431 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,26 +1,32 @@
 #!/usr/bin/python
 
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 
 
 class Benchmark(object):
   """Class representing a benchmark to be run.
 
-  Contains details of the autotest, arguments to pass to the autotest,
-  iterations to run the autotest and so on. Note that the benchmark name
-  can be different to the autotest name. For example, you may want to have
-  two different benchmarks which run the same autotest with different
+  Contains details of the benchmark suite, arguments to pass to the suite,
+  iterations to run the benchmark suite and so on. Note that the benchmark name
+  can be different to the test suite name. For example, you may want to have
+  two different benchmarks which run the same test_name with different
   arguments.
   """
 
-  def __init__(self, name, autotest_name, autotest_args, iterations,
-               outlier_range, key_results_only, rm_chroot_tmp, perf_args):
+  def __init__(self, name, test_name, test_args, iterations,
+               outlier_range, key_results_only, rm_chroot_tmp, perf_args,
+               suite="pyauto"):
     self.name = name
-    self.autotest_name = autotest_name
-    self.autotest_args = autotest_args
+    # For telemetry, this is the benchmark name.
+    self.test_name = test_name
+    # For telemetry, this is the data.
+    self.test_args = test_args
     self.iterations = iterations
     self.outlier_range = outlier_range
     self.perf_args = perf_args
     self.key_results_only = key_results_only
     self.rm_chroot_tmp = rm_chroot_tmp
     self.iteration_adjusted = False
+    self.suite = suite
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index 80c95c4..da12afd 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
 
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 
 import datetime
 import os
@@ -11,9 +13,11 @@
 from utils import command_executer
 from utils import timeline
 
-from autotest_runner import AutotestRunner
+from suite_runner import SuiteRunner
 from results_cache import Result
 from results_cache import ResultsCache
+from results_cache import TelemetryResult
+
 
 STATUS_FAILED = "FAILED"
 STATUS_SUCCEEDED = "SUCCEEDED"
@@ -22,7 +26,6 @@
 STATUS_WAITING = "WAITING"
 STATUS_PENDING = "PENDING"
 
-
 class BenchmarkRun(threading.Thread):
   def __init__(self, name, benchmark,
                label,
@@ -42,39 +45,43 @@
     self.retval = None
     self.run_completed = False
     self.machine_manager = machine_manager
-    self.cache = ResultsCache()
-    self.autotest_runner = AutotestRunner(self._logger)
+    self.suite_runner = SuiteRunner(self._logger)
     self.machine = None
     self.cache_conditions = cache_conditions
     self.runs_complete = 0
     self.cache_hit = False
     self.failure_reason = ""
-    self.autotest_args = "%s %s" % (benchmark.autotest_args,
+    self.test_args = "%s %s" % (benchmark.test_args,
                                     self._GetExtraAutotestArgs())
     self._ce = command_executer.GetCommandExecuter(self._logger)
     self.timeline = timeline.Timeline()
     self.timeline.Record(STATUS_PENDING)
     self.share_users = share_users
 
+  def ReadCache(self):
+    # Just use the first machine for running the cached version,
+    # without locking it.
+    self.cache = ResultsCache()
+    self.cache.Init(self.label.chromeos_image,
+                    self.label.chromeos_root,
+                    self.benchmark.test_name,
+                    self.iteration,
+                    self.test_args,
+                    self.machine_manager,
+                    self.label.board,
+                    self.cache_conditions,
+                    self._logger,
+                    self.label,
+                    self.share_users,
+                    self.benchmark.suite
+                   )
+
+    self.result = self.cache.ReadResult()
+    self.cache_hit = (self.result is not None)
+
   def run(self):
     try:
-      # Just use the first machine for running the cached version,
-      # without locking it.
-      self.cache.Init(self.label.chromeos_image,
-                      self.label.chromeos_root,
-                      self.benchmark.autotest_name,
-                      self.iteration,
-                      self.autotest_args,
-                      self.machine_manager,
-                      self.label.board,
-                      self.cache_conditions,
-                      self._logger,
-                      self.label,
-                      self.share_users
-                     )
-
-      self.result = self.cache.ReadResult()
-      self.cache_hit = (self.result is not None)
+      self.ReadCache()
 
       if self.result:
         self._logger.LogOutput("%s: Cache hit." % self.name)
@@ -86,8 +93,9 @@
         self.timeline.Record(STATUS_WAITING)
         # Try to acquire a machine now.
         self.machine = self.AcquireMachine()
-        self.cache.remote = self.machine.name
         self.result = self.RunTest(self.machine)
+
+        self.cache.remote = self.machine.name
         self.cache.StoreResult(self.result)
 
       if self.terminated:
@@ -97,7 +105,7 @@
         self.timeline.Record(STATUS_SUCCEEDED)
       else:
         if self.timeline.GetLastEvent() != STATUS_FAILED:
-          self.failure_reason = "Return value of autotest was non-zero."
+          self.failure_reason = "Return value of test suite was non-zero."
           self.timeline.Record(STATUS_FAILED)
 
     except Exception, e:
@@ -114,7 +122,7 @@
 
   def Terminate(self):
     self.terminated = True
-    self.autotest_runner.Terminate()
+    self.suite_runner.Terminate()
     if self.timeline.GetLastEvent() != STATUS_FAILED:
       self.timeline.Record(STATUS_FAILED)
       self.failure_reason = "Thread terminated."
@@ -138,16 +146,19 @@
     return machine
 
   def _GetExtraAutotestArgs(self):
+    if self.benchmark.perf_args and self.benchmark.suite == "telemetry":
+      self._logger.LogError("Telemetry benchmark does not support profiler.")
+
     if self.benchmark.perf_args:
       perf_args_list = self.benchmark.perf_args.split(" ")
       perf_args_list = [perf_args_list[0]] + ["-a"] + perf_args_list[1:]
       perf_args = " ".join(perf_args_list)
       if not perf_args_list[0] in ["record", "stat"]:
         raise Exception("perf_args must start with either record or stat")
-      extra_autotest_args = ["--profiler=custom_perf",
+      extra_test_args = ["--profiler=custom_perf",
                              ("--profiler_args='perf_options=\"%s\"'" %
                               perf_args)]
-      return " ".join(extra_autotest_args)
+      return " ".join(extra_test_args)
     else:
       return ""
 
@@ -156,20 +167,17 @@
     self.machine_manager.ImageMachine(machine,
                                       self.label)
     self.timeline.Record(STATUS_RUNNING)
-    [retval, out, err] = self.autotest_runner.Run(machine.name,
-                                                  self.label.chromeos_root,
-                                                  self.label.board,
-                                                  self.benchmark.autotest_name,
-                                                  self.autotest_args)
+    [retval, out, err] = self.suite_runner.Run(machine.name,
+                                                  self.label,
+                                                  self.benchmark,
+                                                  self.test_args)
     self.run_completed = True
-
     return Result.CreateFromRun(self._logger,
-                                self.label.chromeos_root,
-                                self.label.board,
-                                self.label.name,
+                                self.label,
                                 out,
                                 err,
-                                retval)
+                                retval,
+                                self.benchmark.suite)
 
   def SetCacheConditions(self, cache_conditions):
     self.cache_conditions = cache_conditions
@@ -184,11 +192,11 @@
     self.machine_manager.ImageMachine(machine,
                                       self.label)
     self.timeline.Record(STATUS_RUNNING)
-    [retval, out, err] = self.autotest_runner.Run(machine.name,
+    [retval, out, err] = self.suite_runner.Run(machine.name,
                                                   self.label.chromeos_root,
                                                   self.label.board,
-                                                  self.benchmark.autotest_name,
-                                                  self.autotest_args)
+                                                  self.benchmark.test_name,
+                                                  self.test_args)
     self.run_completed = True
     rr = Result("Results placed in /tmp/test", "", 0)
     rr.out = out
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 47e027f..c08d189 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
 
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 
 """Testing of benchmark_run."""
 
@@ -8,7 +10,7 @@
 
 from utils import logger
 
-from autotest_runner import MockAutotestRunner
+from suite_runner import MockSuiteRunner
 from benchmark_run import MockBenchmarkRun
 from label import MockLabel
 from benchmark import Benchmark
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 811de26..1c5d271 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -39,6 +39,7 @@
     config.AddConfig("no_email", global_settings.GetField("no_email"))
     share_users = global_settings.GetField("share_users")
     results_dir = global_settings.GetField("results_dir")
+    chrome_src = global_settings.GetField("chrome_src")
     # Default cache hit conditions. The image checksum in the cache and the
     # computed checksum of the image must match. Also a cache file must exist.
     cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
@@ -57,20 +58,21 @@
     all_benchmark_settings = experiment_file.GetSettings("benchmark")
     for benchmark_settings in all_benchmark_settings:
       benchmark_name = benchmark_settings.name
-      autotest_name = benchmark_settings.GetField("autotest_name")
-      if not autotest_name:
-        autotest_name = benchmark_name
-      autotest_args = benchmark_settings.GetField("autotest_args")
+      test_name = benchmark_settings.GetField("test_name")
+      if not test_name:
+        test_name = benchmark_name
+      test_args = benchmark_settings.GetField("test_args")
       iterations = benchmark_settings.GetField("iterations")
       outlier_range = benchmark_settings.GetField("outlier_range")
       perf_args = benchmark_settings.GetField("perf_args")
       rm_chroot_tmp = benchmark_settings.GetField("rm_chroot_tmp")
       key_results_only = benchmark_settings.GetField("key_results_only")
+      suite = benchmark_settings.GetField("suite")
 
-      benchmark = Benchmark(benchmark_name, autotest_name, autotest_args,
+      benchmark = Benchmark(benchmark_name, test_name, test_args,
                             iterations, outlier_range,
                             key_results_only, rm_chroot_tmp,
-                            perf_args)
+                            perf_args, suite)
       benchmarks.append(benchmark)
 
     # Construct labels.
@@ -85,6 +87,8 @@
       my_remote = label_settings.GetField("remote")
       image_md5sum = label_settings.GetField("md5sum")
       cache_dir = label_settings.GetField("cache_dir")
+      chrome_src = label_settings.GetField("chrome_src")
+
     # TODO(yunlian): We should consolidate code in machine_manager.py
     # to derermine whether we are running from within google or not
       if ("corp.google.com" in socket.gethostname() and
@@ -99,10 +103,10 @@
       image_args = label_settings.GetField("image_args")
       if test_flag.GetTestMode():
         label = MockLabel(label_name, image, chromeos_root, board, my_remote,
-                          image_args, image_md5sum, cache_dir)
+                          image_args, image_md5sum, cache_dir, chrome_src)
       else:
         label = Label(label_name, image, chromeos_root, board, my_remote,
-                      image_args, image_md5sum, cache_dir)
+                      image_args, image_md5sum, cache_dir, chrome_src)
       labels.append(label)
 
     email = global_settings.GetField("email")
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 6cee6b7..4ff89f8 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
 
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 
 import StringIO
 import unittest
@@ -39,7 +41,7 @@
 
     self.assertEqual(len(experiment.benchmarks), 1)
     self.assertEqual(experiment.benchmarks[0].name, "PageCycler")
-    self.assertEqual(experiment.benchmarks[0].autotest_name, "PageCycler")
+    self.assertEqual(experiment.benchmarks[0].test_name, "PageCycler")
     self.assertEqual(experiment.benchmarks[0].iterations, 3)
 
     self.assertEqual(len(experiment.labels), 2)
diff --git a/crosperf/label.py b/crosperf/label.py
index be7a868..e819352 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
 
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 
 """The label of benchamrks."""
 
@@ -10,7 +12,7 @@
 
 class Label(object):
   def __init__(self, name, chromeos_image, chromeos_root, board, remote,
-               image_args, image_md5sum, cache_dir):
+               image_args, image_md5sum, cache_dir, chrome_src=None):
     # Expand ~
     chromeos_root = os.path.expanduser(chromeos_root)
     chromeos_image = os.path.expanduser(chromeos_image)
@@ -36,6 +38,11 @@
                         % (name, chromeos_root))
 
     self.chromeos_root = chromeos_root
+    if not chrome_src:
+      self.chrome_src = os.path.join(self.chromeos_root,
+          "chroot/var/cache/chromeos-chrome/chrome-src-internal/src")
+    else:
+      self.chrome_src = FileUtils().CanonicalizeChromeOSRoot(chrome_src)
 
 
 class MockLabel(object):
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index 11b9f72..b0a43bf 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -30,7 +30,7 @@
     self.checksum = None
     self.locked = False
     self.released_time = time.time()
-    self.autotest_run = None
+    self.test_run = None
     self.chromeos_root = chromeos_root
     self._GetMemoryInfo()
     self._GetCPUInfo()
@@ -42,7 +42,7 @@
   def _ParseMemoryInfo(self):
     line = self.meminfo.splitlines()[0]
     usable_kbytes = int(line.split()[1])
-    # This code is from src/third_party/autotest/files/client/bin/base_utils.py
+    # This code is from src/third_party/autotest/files/client/bin/base_utils.py
     # usable_kbytes is system's usable DRAM in kbytes,
     #   as reported by memtotal() from device /proc/meminfo memtotal
     #   after Linux deducts 1.5% to 9.5% for system table overhead
@@ -271,13 +271,13 @@
                 if not machine.locked]:
         if m.checksum == image_checksum:
           m.locked = True
-          m.autotest_run = threading.current_thread()
+          m.test_run = threading.current_thread()
           return m
       for m in [machine for machine in self.GetAvailableMachines(label)
                 if not machine.locked]:
         if not m.checksum:
           m.locked = True
-          m.autotest_run = threading.current_thread()
+          m.test_run = threading.current_thread()
           return m
       # This logic ensures that threads waiting on a machine will get a machine
       # with a checksum equal to their image over other threads. This saves time
@@ -289,7 +289,7 @@
                 if not machine.locked]:
         if time.time() - m.released_time > 20:
           m.locked = True
-          m.autotest_run = threading.current_thread()
+          m.test_run = threading.current_thread()
           return m
     return None
 
@@ -337,18 +337,18 @@
                                 "Checksum")
       table = [header]
       for m in self._machines:
-        if m.autotest_run:
-          autotest_name = m.autotest_run.name
-          autotest_status = m.autotest_run.timeline.GetLastEvent()
+        if m.test_run:
+          test_name = m.test_run.name
+          test_status = m.test_run.timeline.GetLastEvent()
         else:
-          autotest_name = ""
-          autotest_status = ""
+          test_name = ""
+          test_status = ""
 
         try:
           machine_string = stringify_fmt % (m.name,
-                                            autotest_name,
+                                            test_name,
                                             m.locked,
-                                            autotest_status,
+                                            test_status,
                                             m.checksum)
         except Exception:
           machine_string = ""
@@ -382,7 +382,7 @@
     self.checksum = None
     self.locked = False
     self.released_time = time.time()
-    self.autotest_run = None
+    self.test_run = None
     self.chromeos_root = chromeos_root
     self.checksum_string = re.sub("\d", "", name)
     #In test, we assume "lumpy1", "lumpy2" are the same machine.
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index f3b3183..4178b46 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -31,12 +31,15 @@
   perf.report, etc. The key generation is handled by the ResultsCache class.
   """
 
-  def __init__(self, chromeos_root, logger, label_name):
-    self._chromeos_root = chromeos_root
+  def __init__(self, logger, label):
+    self._chromeos_root = label.chromeos_root
     self._logger = logger
     self._ce = command_executer.GetCommandExecuter(self._logger)
     self._temp_dir = None
-    self.label_name = label_name
+    self.label = label
+    self.results_dir = None
+    self.perf_data_files = []
+    self.perf_report_files = []
 
   def _CopyFilesTo(self, dest_dir, files_to_copy):
     file_index = 0
@@ -91,6 +94,8 @@
     raise Exception("Could not find results directory.")
 
   def _FindFilesInResultsDir(self, find_args):
+    if not self.results_dir:
+      return None
     command = "find %s %s" % (self.results_dir,
                               find_args)
     ret, out, _ = self._ce.RunCommand(command, return_output=True)
@@ -150,8 +155,8 @@
           value = str(misc.UnitToNumber(num_events))
           self.keyvals[key] = value
 
-  def _PopulateFromRun(self, board, out, err, retval):
-    self._board = board
+  def _PopulateFromRun(self, out, err, retval):
+    self._board = self.label.board
     self.out = out
     self.err = err
     self.retval = retval
@@ -199,7 +204,7 @@
     self._ProcessResults()
 
   def CleanUp(self, rm_chroot_tmp):
-    if rm_chroot_tmp:
+    if rm_chroot_tmp and self.results_dir:
       command = "rm -rf %s" % self.results_dir
       self._ce.RunCommand(command)
     if self._temp_dir:
@@ -216,20 +221,21 @@
       pickle.dump(self.err, f)
       pickle.dump(self.retval, f)
 
-    tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
-    command = ("cd %s && "
-               "tar "
-               "--exclude=var/spool "
-               "--exclude=var/log "
-               "-cjf %s ." % (self.results_dir, tarball))
-    ret = self._ce.RunCommand(command)
-    if ret:
-      raise Exception("Couldn't store autotest output directory.")
+    if self.results_dir:
+      tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
+      command = ("cd %s && "
+                 "tar "
+                 "--exclude=var/spool "
+                 "--exclude=var/log "
+                 "-cjf %s ." % (self.results_dir, tarball))
+      ret = self._ce.RunCommand(command)
+      if ret:
+        raise Exception("Couldn't store autotest output directory.")
     # Store machine info.
     # TODO(asharif): Make machine_manager a singleton, and don't pass it into
     # this function.
     with open(os.path.join(temp_dir, MACHINE_FILE), "w") as f:
-      f.write(machine_manager.machine_checksum_string[self.label_name])
+      f.write(machine_manager.machine_checksum_string[self.label.name])
 
     if os.path.exists(cache_dir):
       command = "rm -rf {0}".format(cache_dir)
@@ -246,22 +252,72 @@
                       (temp_dir, cache_dir))
 
   @classmethod
-  def CreateFromRun(cls, logger, chromeos_root, board, label_name,
-                    out, err, retval):
-    result = cls(chromeos_root, logger, label_name)
-    result._PopulateFromRun(board, out, err, retval)
+  def CreateFromRun(cls, logger, label, out, err, retval, suite="pyauto"):
+    if suite == "telemetry":
+      result = TelemetryResult(logger, label)
+    else:
+      result = cls(logger, label)
+    result._PopulateFromRun(out, err, retval)
     return result
 
   @classmethod
-  def CreateFromCacheHit(cls, chromeos_root, logger, cache_dir, label_name):
-    result = cls(chromeos_root, logger, label_name)
+  def CreateFromCacheHit(cls, logger, label, cache_dir,
+                         suite="pyauto"):
+    if suite == "telemetry":
+      result = TelemetryResult(logger, label)
+    else:
+      result = cls(logger, label)
     try:
       result._PopulateFromCacheDir(cache_dir)
+
     except Exception as e:
       logger.LogError("Exception while using cache: %s" % e)
       return None
     return result
 
+class TelemetryResult(Result):
+
+  def __init__(self, logger, label):
+    super(TelemetryResult, self).__init__(logger, label)
+
+  def _PopulateFromRun(self, out, err, retval):
+    self.out = out
+    self.err = err
+    self.retval = retval
+
+    self._ProcessResults()
+
+  def _ProcessResults(self):
+    # The output is:
+    # url,average_commit_time (ms),...
+    # www.google.com,33.4,21.2,...
+    # We need to convert to this format:
+    # {"www.google.com:average_commit_time (ms)": "33.4",
+    #  "www.google.com:...": "21.2"}
+
+    lines = self.out.splitlines()
+    self.keyvals = {}
+
+    if not lines:
+      return
+    labels = lines[0].split(",")
+    for line in lines[1:]:
+      fields = line.split(",")
+      if (len(fields) != len(labels)):
+        continue
+      for i in range(1, len(labels)):
+        key = "%s %s" % (fields[0], labels[i])
+        value = fields[i]
+        self.keyvals[key] = value
+    self.keyvals["retval"] = self.retval
+
+  def _PopulateFromCacheDir(self, cache_dir):
+    with open(os.path.join(cache_dir, RESULTS_FILE), "r") as f:
+      self.out = pickle.load(f)
+      self.err = pickle.load(f)
+      self.retval = pickle.load(f)
+    self._ProcessResults()
+
 
 class CacheConditions(object):
   # Cache hit only if the result file exists.
@@ -295,14 +351,14 @@
   """
   CACHE_VERSION = 6
 
-  def Init(self, chromeos_image, chromeos_root, autotest_name, iteration,
-           autotest_args, machine_manager, board, cache_conditions,
-           logger_to_use, label, share_users):
+  def Init(self, chromeos_image, chromeos_root, test_name, iteration,
+           test_args, machine_manager, board, cache_conditions,
+           logger_to_use, label, share_users, suite):
     self.chromeos_image = chromeos_image
     self.chromeos_root = chromeos_root
-    self.autotest_name = autotest_name
+    self.test_name = test_name
     self.iteration = iteration
-    self.autotest_args = autotest_args,
+    self.test_args = test_args,
     self.board = board
     self.cache_conditions = cache_conditions
     self.machine_manager = machine_manager
@@ -310,6 +366,7 @@
     self._ce = command_executer.GetCommandExecuter(self._logger)
     self.label = label
     self.share_users = share_users
+    self.suite = suite
 
   def _GetCacheDirForRead(self):
     matching_dirs = []
@@ -363,11 +420,11 @@
           machine_id_checksum = machine.machine_id_checksum
           break
 
-    autotest_args_checksum = hashlib.md5(
-        "".join(self.autotest_args)).hexdigest()
+    test_args_checksum = hashlib.md5(
+        "".join(self.test_args)).hexdigest()
     return (image_path_checksum,
-            self.autotest_name, str(self.iteration),
-            autotest_args_checksum,
+            self.test_name, str(self.iteration),
+            test_args_checksum,
             checksum,
             machine_checksum,
             machine_id_checksum,
@@ -385,10 +442,10 @@
       return None
 
     self._logger.LogOutput("Trying to read from cache dir: %s" % cache_dir)
-
-    result = Result.CreateFromCacheHit(self.chromeos_root,
-                                       self._logger, cache_dir, self.label.name)
-
+    result = Result.CreateFromCacheHit(self._logger,
+                                       self.label,
+                                       cache_dir,
+                                       self.suite)
     if not result:
       return None
 
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 2e5c929..6274a48 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -1,6 +1,9 @@
 #!/usr/bin/python
 
-# Copyright 2012 Google Inc. All Rights Reserved.
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
 """Parse data from benchmark_runs for tabulator."""
 import re
 
@@ -50,15 +53,15 @@
         continue
       benchmark = benchmark_run.benchmark
       key_filter_on = (benchmark.key_results_only and
-                       "PyAutoPerfTest" in benchmark.name + benchmark.autotest_name and
-                       "perf." not in benchmark.autotest_args)
-      for autotest_key in benchmark_run.result.keyvals:
+                       "PyAutoPerfTest" in benchmark.name + benchmark.test_name
+                       and "perf." not in benchmark.test_args)
+      for test_key in benchmark_run.result.keyvals:
         if (key_filter_on and
-            not any([key for key in self.key_filter if key in autotest_key])
+            not any([key for key in self.key_filter if key in test_key])
            ):
           continue
-        result_value = benchmark_run.result.keyvals[autotest_key]
-        cur_dict[autotest_key] = result_value
+        result_value = benchmark_run.result.keyvals[test_key]
+        cur_dict[test_key] = result_value
     self._DuplicatePass()
 
   def _DuplicatePass(self):
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 61c67d5..c56e25e 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
 
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 
 from utils.tabulator import *
 
@@ -479,8 +481,8 @@
 
       for i in range(2, len(data_table)):
         cur_row_data = data_table[i]
-        autotest_key = cur_row_data[0].string_value
-        title = "{0}: {1}".format(item, autotest_key.replace("/", ""))
+        test_key = cur_row_data[0].string_value
+        title = "{0}: {1}".format(item, test_key.replace("/", ""))
         chart = ColumnChart(title, 300, 200)
         chart.AddColumn("Label", "string")
         chart.AddColumn("Average", "number")
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 5cbae5d..365b8cb 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -17,15 +17,15 @@
 class BenchmarkSettings(Settings):
   def __init__(self, name):
     super(BenchmarkSettings, self).__init__(name, "benchmark")
-    self.AddField(TextField("autotest_name",
-                            description="The name of the autotest to run."
+    self.AddField(TextField("test_name",
+                            description="The name of the test to run."
                             "Defaults to the name of the benchmark."))
-    self.AddField(TextField("autotest_args",
+    self.AddField(TextField("test_args",
                             description="Arguments to be passed to the "
-                            "autotest."))
+                            "test."))
     self.AddField(IntegerField("iterations", default=1,
                                description="Number of iterations to run the "
-                               "autotest."))
+                               "test."))
     self.AddField(FloatField("outlier_range", default=0.2,
                              description="The percentage of highest/lowest "
                              "values to omit when computing the average."))
@@ -40,6 +40,8 @@
                             "enables perf commands to record perforamance "
                             "related counters. It  must start with perf "
                             "command record or stat followed by arguments."))
+    self.AddField(TextField("suite", default="pyauto",
+                            description="The type of the benchmark."))
 
 
 class LabelSettings(Settings):
@@ -66,6 +68,11 @@
                             "image_chromeos.py."))
     self.AddField(TextField("cache_dir", default="",
                             description="The cache dir for this image."))
+    self.AddField(TextField("chrome_src",
+                            description="The path to the source of chrome. "
+                            "This is used to run telemetry benchmarks. "
+                            "The default one is the src inside chroot.",
+                            required=False, default=""))
 
 
 class GlobalSettings(Settings):
@@ -80,7 +87,7 @@
                             description="A comma-separated list of ip's of "
                             "chromeos devices to run experiments on."))
     self.AddField(BooleanField("rerun_if_failed", description="Whether to "
-                               "re-run failed autotest runs or not.",
+                               "re-run failed test runs or not.",
                                default=False))
     self.AddField(BooleanField("rm_chroot_tmp", default=False,
                                description="Whether remove the run_remote_test"
@@ -88,7 +95,7 @@
     self.AddField(ListField("email", description="Space-seperated"
                             "list of email addresses to send email to."))
     self.AddField(BooleanField("rerun", description="Whether to ignore the "
-                               "cache and for autotests to be re-run.",
+                               "cache and for tests to be re-run.",
                                default=False))
     self.AddField(BooleanField("same_specs", default=True,
                                description="Ensure cached runs are run on the "
@@ -99,7 +106,7 @@
                                "exact the same remote"))
     self.AddField(IntegerField("iterations", default=1,
                                description="Number of iterations to run all "
-                               "autotests."))
+                               "tests."))
     self.AddField(TextField("chromeos_root",
                             description="The path to a chromeos checkout which "
                             "contains a src/scripts directory. Defaults to "
@@ -128,6 +135,11 @@
                             "use. It accepts multiple users seperated by \",\""))
     self.AddField(TextField("results_dir", default="",
                             description="The results dir"))
+    self.AddField(TextField("chrome_src",
+                            description="The path to the source of chrome. "
+                            "This is used to run telemetry benchmarks. "
+                            "The default one is the src inside chroot.",
+                            required=False, default=""))
 
 
 class SettingsFactory(object):
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
new file mode 100644
index 0000000..06d020e
--- /dev/null
+++ b/crosperf/suite_runner.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from utils import command_executer
+
+
+class SuiteRunner(object):
+  """ This defines the interface from crosperf to the test script.
+  """
+  def __init__(self, logger_to_use=None):
+    self._logger = logger_to_use
+    self._ce = command_executer.GetCommandExecuter(self._logger)
+    self._ct = command_executer.CommandTerminator()
+
+  def Run(self, machine, label, benchmark, test_args):
+    if benchmark.suite == "telemetry":
+      return self.Telemetry_Run(machine, label, benchmark)
+    else:
+      return self.Pyauto_Run(machine, label, benchmark, test_args)
+
+  def Pyauto_Run(self, machine, label, benchmark, test_args):
+    """Run the run_remote_test."""
+
+    options = ""
+    if label.board:
+      options += " --board=%s" % label.board
+    if test_args:
+      options += " %s" % test_args
+    command = "rm -rf /usr/local/autotest/results/*"
+    self._ce.CrosRunCommand(command, machine=machine, username="root",
+                            chromeos_root=label.chromeos_root)
+    command = ("./run_remote_tests.sh --remote=%s %s %s" %
+               (machine, options, benchmark.test_name))
+    return self._ce.ChrootRunCommand(label.chromeos_root,
+                                     command,
+                                     True,
+                                     self._ct)
+
+  def Telemetry_Run(self, machine, label, benchmark):
+    if not os.path.isdir(label.chrome_src):
+      self._logger.GetLogger().LogFatal("Cannot find chrome src dir to "
+                                        "run telemetry.")
+    rsa_key = os.path.join(label.chromeos_root,
+        "src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa")
+    cmd = ("cd {0} && "
+           "./tools/perf/run_multipage_benchmarks "
+           "--browser=cros-chrome "
+           "--output-format=csv "
+           "--remote={1} "
+           "--identity {2} "
+           "{3} {4}".format(label.chrome_src, machine,
+                            rsa_key,
+                            benchmark.test_name,
+                            benchmark.test_args))
+    return self._ce.RunCommand(cmd, return_output=True,
+                               print_to_console=False)
+
+  def Terminate(self):
+    self._ct.Terminate()
+
+
+class MockSuiteRunner(object):
+  def __init__(self):
+    pass
+
+  def Run(self, *args):
+    return ["", "", 0]
diff --git a/utils/tabulator.py b/utils/tabulator.py
index f75419a..eddaf70 100644
--- a/utils/tabulator.py
+++ b/utils/tabulator.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
 
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 
 """Table generating, analyzing and printing functions.
 
@@ -407,9 +409,10 @@
 
 class KeyAwareComparisonResult(ComparisonResult):
   def _IsLowerBetter(self, key):
-    lower_is_better_keys = ["milliseconds", "ms", "seconds", "KB",
-                            "rdbytes", "wrbytes"]
-    return any([key.startswith(l + "_") for l in lower_is_better_keys])
+    lower_is_better_keys = ["milliseconds", "ms_", "seconds_", "KB",
+                            "rdbytes", "wrbytes", "dropped_percent",
+                            "(ms)", "(seconds)"]
+    return any([l in key for l in lower_is_better_keys])
 
   def _InvertIfLowerIsBetter(self, cell):
     if self._IsLowerBetter(cell.name):