cheets_CTS: handle packages with no tests.

Sometimes tradefed returns 0 tests for a package. Currently there are
several packages on ARC for which this is correct, but sometimes this
can also be caused by unrelated failures.
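
To mark such a package, list its name in a per-ABI file under the
test's new notest_packages/ directory; _get_expected_failures() globs
for '*.<abi>' there. Path, file name and package name below are only
illustrative:

    server/site_tests/cheets_CTS/notest_packages/notest_packages.arm:
        android.example.notests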

BUG=b:32488317
TEST=Ran locally 3 new cases:
     a) Package never reveals any tests (eventual failure).
     b) Package reveals no tests as expected (pass).
     c) Package reveals tests even though none were expected (failure).
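
These three cases map onto the new classification in cheets_CTS. As a
rough standalone sketch (function and parameter names here are made up
for illustration, and RuntimeError stands in for autotest's
error.TestFail):

    def handle_package(run_tradefed, target_package, notest_packages,
                       max_retry):
        # First iteration is not counted, matching the real loop.
        steps, total_tests = -1, 0
        while steps < max_retry and total_tests == 0:
            tests, passed, failed, notexecuted = run_tradefed(target_package)
            if tests == 0 and target_package in notest_packages:
                return 'pass'  # case (b): no tests, as expected
            if tests > 0 and target_package in notest_packages:
                # case (c): tests appeared for a package listed as test-less
                raise RuntimeError('remove %s from notest_packages' %
                                   target_package)
            # case (a): tests expected but none found; reboot and retry
            total_tests = tests
            steps += 1
        if total_tests == 0:
            # case (a): still no tests after max_retry attempts
            raise RuntimeError('could not find any tests in %s' %
                               target_package)
        return 'continue with the usual retry and waiver handling'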

Change-Id: Ie8f94c615032247b5e0e1d2922ace3fcf4ea267c
Reviewed-on: https://chromium-review.googlesource.com/406524
Reviewed-by: Ilja H. Friedel <ihf@chromium.org>
Tested-by: Ilja H. Friedel <ihf@chromium.org>
diff --git a/server/cros/tradefed_test.py b/server/cros/tradefed_test.py
index e39e43d..e818ab6 100644
--- a/server/cros/tradefed_test.py
+++ b/server/cros/tradefed_test.py
@@ -630,14 +630,15 @@
                 os.path.join(repository_logs, datetime),
                 destination_logs_datetime)
 
-    def _get_failure_expectations(self):
-        """Return a list of waivers and manual tests.
+    def _get_expected_failures(self, directory):
+        """Return a list of expected failures.
 
-        @return: a list of expected failing tests with unchecked status.
+        @return: a list of expected failures.
         """
-        expected_fail_dir = os.path.join(self.bindir, 'expectations')
+        logging.info('Loading expected failures from %s.', directory)
+        expected_fail_dir = os.path.join(self.bindir, directory)
         expected_fail_files = glob.glob(expected_fail_dir + '/*.' + self._abi)
-        expected_fail_tests = set()
+        expected_failures = set()
         for expected_fail_file in expected_fail_files:
             try:
                 file_path = os.path.join(expected_fail_dir, expected_fail_file)
@@ -645,8 +646,8 @@
                     lines = set(f.read().splitlines())
                     logging.info('Loaded %d expected failures from %s',
                                  len(lines), expected_fail_file)
-                    expected_fail_tests = expected_fail_tests | lines
+                    expected_failures |= lines
             except IOError as e:
                 logging.error('Error loading %s (%s).', file_path, e.strerror)
-        logging.info('Finished loading test waivers: %s', expected_fail_tests)
-        return expected_fail_tests
\ No newline at end of file
+        logging.info('Finished loading expected failures: %s', expected_failures)
+        return expected_failures
diff --git a/server/site_tests/cheets_CTS/cheets_CTS.py b/server/site_tests/cheets_CTS/cheets_CTS.py
index d3bd506..ebe5b0e 100644
--- a/server/site_tests/cheets_CTS/cheets_CTS.py
+++ b/server/site_tests/cheets_CTS/cheets_CTS.py
@@ -76,6 +76,12 @@
         logging.info('CTS-tradefed path: %s', self._cts_tradefed)
         self._needs_push_media = False
 
+        # Load waivers and manual tests so TF doesn't re-run them.
+        self.waivers_and_manual_tests = self._get_expected_failures(
+                                                'expectations')
+        # Load packages with no tests.
+        self.notest_packages = self._get_expected_failures('notest_packages')
+
     def _clean_repository(self):
         """Ensures all old logs, results and plans are deleted.
 
@@ -283,33 +289,51 @@
         # Don't download media for tests that don't need it. b/29371037
         if target_package.startswith('android.mediastress'):
             self._needs_push_media = True
-        # Load waivers and manual tests so TF doesn't re-run them.
-        self.waivers_and_manual_tests = self._get_failure_expectations()
-        # Unconditionally run CTS package.
-        with self._login_chrome():
-            self._ready_arc()
-            # Start each iteration with a clean repository. This allows us to
-            # track session_id blindly.
-            self._clean_repository()
-            logging.info('Running %s:', target_package)
-            tests, passed, failed, notexecuted = self._tradefed_run(
-                    target_package)
-            logging.info('RESULT: tests=%d, passed=%d, failed=%d, notexecuted='
-                    '%d', tests, passed, failed, notexecuted)
-            self.summary = ('run(t=%d, p=%d, f=%d, ne=%d)' %
-                    (tests, passed, failed, notexecuted))
-            # An internal self-check. We really should never hit this.
-            if tests != passed + failed + notexecuted:
-                raise error.TestFail('Error: Test count inconsistent. %s' %
-                                     self.summary)
-            # Keep track of global counts as each step works on local failures.
-            total_tests = tests
-            total_passed = passed
-        # The DUT has rebooted at this point and is in a clean state.
+
+        steps = -1  # For historic reasons the first iteration is not counted.
+        total_tests = 0
+        self.summary = ''
+        # Unconditionally run CTS package until we see some tests executed.
+        while steps < self._max_retry and total_tests == 0:
+            with self._login_chrome():
+                self._ready_arc()
+                # Start each valid iteration with a clean repository. This
+                # allows us to track session_id blindly.
+                self._clean_repository()
+                logging.info('Running %s:', target_package)
+                tests, passed, failed, notexecuted = self._tradefed_run(
+                        target_package)
+                logging.info('RESULT: tests=%d, passed=%d, failed=%d, '
+                        'notexecuted=%d', tests, passed, failed, notexecuted)
+                self.summary += ('run(t=%d, p=%d, f=%d, ne=%d)' %
+                        (tests, passed, failed, notexecuted))
+                if tests == 0 and target_package in self.notest_packages:
+                    logging.info('Package has no tests as expected.')
+                    return
+                if tests > 0 and target_package in self.notest_packages:
+                    # We expected no tests, but the new bundle drop must have
+                    # added some for us. Alert us to the situation.
+                    raise error.TestFail('Failed: Remove package %s from '
+                                         'notest_packages directory!' %
+                                         target_package)
+                if tests == 0 and target_package not in self.notest_packages:
+                    logging.error('Did not find any tests in package. Hoping '
+                                  'this is transient. Retry after reboot.')
+                # An internal self-check. We really should never hit this.
+                if tests != passed + failed + notexecuted:
+                    raise error.TestFail('Error: Test count inconsistent. %s' %
+                                         self.summary)
+                # Keep track of global counts as each continue/retry step below
+                # works on local failures.
+                total_tests = tests
+                total_passed = passed
+                steps += 1
+            # The DUT has rebooted at this point and is in a clean state.
+        if total_tests == 0:
+            raise error.TestFail('Error: Could not find any tests in package.')
 
         # If the results were not completed or were failing then continue or
         # retry them iteratively MAX_RETRY times.
-        steps = 0
         while steps < self._max_retry and (notexecuted > 0 or failed > 0):
             # First retry until there is no test is left that was not executed.
             while notexecuted > 0 and steps < self._max_retry:
@@ -397,7 +421,7 @@
                 # The DUT has rebooted at this point and is in a clean state.
 
         # Final classification of test results.
-        if notexecuted > 0 or failed > 0:
+        if total_passed == 0 or notexecuted > 0 or failed > 0:
             raise error.TestFail(
                 'Failed: after %d retries giving up. '
                 'total_passed=%d, failed=%d, notexecuted=%d. %s' %
diff --git a/server/site_tests/cheets_GTS/cheets_GTS.py b/server/site_tests/cheets_GTS/cheets_GTS.py
index 343e870..42e47de 100644
--- a/server/site_tests/cheets_GTS/cheets_GTS.py
+++ b/server/site_tests/cheets_GTS/cheets_GTS.py
@@ -37,6 +37,8 @@
         else:
             self._android_gts = self._install_bundle(_PARTNER_GTS_LOCATION)
 
+        self.waivers = self._get_expected_failures('expectations')
+
     def _run_gts_tradefed(self, target_package):
         """This tests runs the GTS(XTS) tradefed binary and collects results.
 
@@ -83,10 +85,8 @@
 
     def run_once(self, target_package=None):
         """Runs GTS target package exactly once."""
-        self.waivers = self._get_failure_expectations()
-
         with self._login_chrome():
             self._connect_adb()
             self._disable_adb_install_dialog()
             self._wait_for_arc_boot()
-            self._run_gts_tradefed(target_package)
\ No newline at end of file
+            self._run_gts_tradefed(target_package)