Add a 'run_local' option to run the test harness locally.

The option 'run_local: True|False' is passed through to autotest.
For example, test_that will be invoked as:
    test_that --args="run_local=True ..." ...

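For reference, 'run_local' is set per benchmark in the crosperf
experiment file. A minimal sketch (benchmark name, iteration count and
other values here are illustrative):
    benchmark: sunspider {
      suite: telemetry_Crosperf
      iterations: 2
      run_local: True
    }
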
BUG=None
TEST=Tested with the following combinations.
     benchmark: sunspider, smoothness.top_25_smooth
     perf_args: record -e cycles,instructions / (none)

Change-Id: I7315027a7c9433d17a9f3fe54d7e8c3f480ea4f4
Reviewed-on: https://chrome-internal-review.googlesource.com/217370
Reviewed-by: Caroline Tice <cmtice@google.com>
Tested-by: Ting-Yuan Huang <laszio@google.com>
Commit-Queue: Ting-Yuan Huang <laszio@google.com>
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 506a825..7fabf0b 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -17,7 +17,7 @@
 
   def __init__(self, name, test_name, test_args, iterations,
                rm_chroot_tmp, perf_args, suite="",
-               show_all_results=False, retries=0):
+               show_all_results=False, retries=0, run_local=False):
     self.name = name
     #For telemetry, this is the benchmark name.
     self.test_name = test_name
@@ -32,3 +32,6 @@
     self.retries = retries
     if self.suite == "telemetry":
       self.show_all_results = True
+    if run_local and self.suite != 'telemetry_Crosperf':
+      raise Exception("run_local is only supported by telemetry_Crosperf.")
+    self.run_local = run_local
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index 0632dde..cee41da 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -82,7 +82,8 @@
                     self.label,
                     self.share_cache,
                     self.benchmark.suite,
-                    self.benchmark.show_all_results
+                    self.benchmark.show_all_results,
+                    self.benchmark.run_local
                    )
 
     self.result = self.cache.ReadResult()
@@ -247,7 +248,8 @@
                     self.label,
                     self.share_cache,
                     self.benchmark.suite,
-                    self.benchmark.show_all_results
+                    self.benchmark.show_all_results,
+                    self.benchmark.run_local
                    )
 
     self.result = self.cache.ReadResult()
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 5b84aea..65d7ce8 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -92,12 +92,12 @@
 
   def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                           iterations, rm_chroot_tmp, perf_args, suite,
-                          show_all_results, retries):
+                          show_all_results, retries, run_local):
     """Add all the tests in a set to the benchmarks list."""
     for test_name in benchmark_list:
       telemetry_benchmark = Benchmark (test_name, test_name, test_args,
                                        iterations, rm_chroot_tmp, perf_args,
-                                       suite, show_all_results, retries)
+                                       suite, show_all_results, retries, run_local)
       benchmarks.append(telemetry_benchmark)
 
 
@@ -163,40 +163,46 @@
       iterations = benchmark_settings.GetField("iterations")
       suite = benchmark_settings.GetField("suite")
       retries = benchmark_settings.GetField("retries")
+      run_local = benchmark_settings.GetField("run_local")
 
       if suite == 'telemetry_Crosperf':
         if test_name == 'all_perfv2':
           self._AppendBenchmarkSet (benchmarks, telemetry_perfv2_tests,
                                     test_args, iterations, rm_chroot_tmp,
-                                    perf_args, suite, show_all_results, retries)
+                                    perf_args, suite, show_all_results, retries,
+                                    run_local)
         elif test_name == 'all_pagecyclers':
           self._AppendBenchmarkSet (benchmarks, telemetry_pagecycler_tests,
                                     test_args, iterations, rm_chroot_tmp,
-                                    perf_args, suite, show_all_results, retries)
+                                    perf_args, suite, show_all_results, retries,
+                                    run_local)
         elif test_name == 'all_toolchain_perf':
           self._AppendBenchmarkSet (benchmarks, telemetry_toolchain_perf_tests,
                                     test_args, iterations, rm_chroot_tmp,
-                                    perf_args, suite, show_all_results, retries)
+                                    perf_args, suite, show_all_results, retries,
+                                    run_local)
           # Add non-telemetry toolchain-perf benchmarks:
           benchmarks.append(Benchmark('graphics_WebGLAquarium',
                                       'graphics_WebGLAquarium', '', iterations,
                                       rm_chroot_tmp, perf_args, '',
-                                      show_all_results, retries))
+                                      show_all_results, retries,
+                                      run_local=False))
         elif test_name == 'all_toolchain_perf_old':
           self._AppendBenchmarkSet (benchmarks,
                                     telemetry_toolchain_old_perf_tests,
                                     test_args, iterations, rm_chroot_tmp,
-                                    perf_args, suite, show_all_results, retries)
+                                    perf_args, suite, show_all_results, retries,
+                                    run_local)
         else:
           benchmark = Benchmark(test_name, test_name, test_args,
                                 iterations, rm_chroot_tmp, perf_args, suite,
-                                show_all_results, retries)
+                                show_all_results, retries, run_local)
           benchmarks.append(benchmark)
       else:
         # Add the single benchmark.
         benchmark = Benchmark(benchmark_name, test_name, test_args,
                               iterations, rm_chroot_tmp, perf_args, suite,
-                              show_all_results)
+                              show_all_results, run_local=run_local)
         benchmarks.append(benchmark)
 
     # Construct labels.
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index de6e572..232f13b 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -471,7 +471,7 @@
   def Init(self, chromeos_image, chromeos_root, test_name, iteration,
            test_args, profiler_args, machine_manager, board, cache_conditions,
            logger_to_use, log_level, label, share_cache, suite,
-           show_all_results):
+           show_all_results, run_local):
     self.chromeos_image = chromeos_image
     self.chromeos_root = chromeos_root
     self.test_name = test_name
@@ -489,6 +489,7 @@
     self.suite = suite
     self.log_level = log_level
     self.show_all = show_all_results
+    self.run_local = run_local
 
   def _GetCacheDirForRead(self):
     matching_dirs = []
@@ -550,7 +551,7 @@
           machine_id_checksum = machine.machine_id_checksum
           break
 
-    temp_test_args = "%s %s" % (self.test_args, self.profiler_args)
+    temp_test_args = "%s %s %s" % (self.test_args, self.profiler_args, self.run_local)
     test_args_checksum = hashlib.md5(
         "".join(temp_test_args)).hexdigest()
     return (image_path_checksum,
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 04e2975..2ca62ad 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -794,7 +794,8 @@
                             self.mock_label,
                             '',         # benchmark_run.share_cache
                             'telemetry_Crosperf',
-                            True)       # benchmark_run.show_all_results
+                            True,       # benchmark_run.show_all_results
+                            False)      # benchmark_run.run_local
 
 
   @mock.patch.object (image_checksummer.ImageChecksummer, 'Checksum')
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index ba21a64..bf15719 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -31,6 +31,11 @@
     self.AddField(IntegerField("retries", default=0,
                                 description="Number of times to retry a "
                                 "benchmark run."))
+    self.AddField(BooleanField("run_local",
+                               description="Run benchmark harness locally. "
+                               "Currently only compatible with the suite: "
+                               "telemetry_Crosperf.",
+                               required=False, default=False))
 
 
 class LabelSettings(Settings):
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index a133227..6a2a129 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -184,12 +184,13 @@
         test_args = test_args[1:-1]
       args_string = "test_args='%s'" % test_args
 
-    cmd = ('{} {} {} --board={} --args="{} test={} '
+    cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
            '{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH,
                                               autotest_dir_arg,
                                               fast_arg,
                                               label.board,
                                               args_string,
+                                              benchmark.run_local,
                                               benchmark.test_name,
                                               profiler_args,
                                               machine))