Deprecate SimpleTestUpdateAndVerify

These update tests are no longer needed; deprecate them and remove them from the VM tests, along with the payload-generation tooling and image_to_live.sh they relied on.

BUG=chromium:1005282
TEST=pfq-tryjob, CQ passes

Cq-Depend: chromium:1811790
Change-Id: If25399b613c36a6547e2903379397e1d345d4bf9
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crostestutils/+/1812340
Tested-by: Amin Hassani <ahassani@chromium.org>
Reviewed-by: Achuith Bhandarkar <achuith@chromium.org>
Reviewed-by: Mike Frysinger <vapier@chromium.org>
Commit-Queue: Amin Hassani <ahassani@chromium.org>
diff --git a/au_test_harness/au_test.py b/au_test_harness/au_test.py
index c60f30b..418ccc6 100644
--- a/au_test_harness/au_test.py
+++ b/au_test_harness/au_test.py
@@ -8,21 +8,15 @@
 from __future__ import print_function
 
 import os
-import re
 import signal
 import sys
-import time
 import unittest
-import urllib
 
 from functools import partial
 
 from chromite.lib import cros_logging as logging
-from chromite.lib import dev_server_wrapper
 from chromite.lib import signals
-from crostestutils.au_test_harness import cros_test_proxy
 from crostestutils.au_test_harness import gce_au_worker
-from crostestutils.au_test_harness import update_exception
 from crostestutils.au_test_harness import vm_au_worker
 
 
@@ -54,7 +48,6 @@
       options: options class to be parsed from main class.
     """
     cls.base_image_path = options.base_image
-    cls.payload_signing_key = options.payload_signing_key
     cls.target_image_path = options.target_image
     cls.test_results_root = options.test_results_root
     if options.type == 'vm':
@@ -67,38 +60,6 @@
     # Cache away options to instantiate workers later.
     cls.options = options
 
-  def AttemptUpdateWithPayloadExpectedFailure(self, payload, expected_msg):
-    """Attempt a payload update, expect it to fail with expected log."""
-    try:
-      self.worker.UpdateUsingPayload(payload)
-    except update_exception.UpdateException as err:
-      # Will raise ValueError if expected is not found.
-      if re.search(re.escape(expected_msg), err.output, re.MULTILINE):
-        return
-      logging.warning("Didn't find %r in:\n%s", expected_msg, err.output)
-
-    self.fail('We managed to update when failure was expected')
-
-  def AttemptUpdateWithFilter(self, update_filter, proxy_port=8081):
-    """Update through a proxy, with a specified filter, and expect success."""
-    target_image_path = self.worker.PrepareBase(self.target_image_path)
-
-    # We assume that devserver starts at the default port (8080), and start
-    # our proxy at a different one. We then tell our update tools to
-    # have the client connect to our proxy_port instead of the 8080.
-    proxy = cros_test_proxy.CrosTestProxy(
-        port_in=proxy_port,
-        address_out='127.0.0.1',
-        port_out=dev_server_wrapper.DEFAULT_PORT,
-        data_filter=update_filter
-    )
-    proxy.serve_forever_in_thread()
-    try:
-      self.worker.PerformUpdate(target_image_path, target_image_path,
-                                proxy_port=proxy_port)
-    finally:
-      proxy.shutdown()
-
   # --- UNITTEST SPECIFIC METHODS ---
   def setUp(self):
     """Overrides unittest.TestCase.setUp and called before every test.
@@ -125,102 +86,6 @@
     if self._use_signals:
       self._RestoreHandlers()
 
-  def testUpdateKeepStateful(self):
-    """Tests if we can update normally.
-
-    This test checks that we can update by updating the stateful partition
-    rather than wiping it.
-    """
-    self.worker.Initialize(self.options.ssh_port or 9222)
-    # Just make sure some tests pass on original image.  Some old images
-    # don't pass many tests.
-    base_image_path = self.worker.PrepareBase(self.base_image_path)
-
-    # Update to
-    self.worker.PerformUpdate(self.target_image_path, base_image_path)
-    self.assertTrue(self.worker.VerifyImage(self))
-
-    # Update from
-    self.worker.PerformUpdate(self.target_image_path, self.target_image_path)
-    self.assertTrue(self.worker.VerifyImage(self))
-
-  def testUpdateWipeStateful(self):
-    """Tests if we can update after cleaning the stateful partition.
-
-    This test checks that we can update successfully after wiping the
-    stateful partition.
-    """
-    self.worker.Initialize(self.options.ssh_port or 9223)
-    # Just make sure some tests pass on original image.  Some old images
-    # don't pass many tests.
-    base_image_path = self.worker.PrepareBase(self.base_image_path)
-
-    # Update to
-    self.worker.PerformUpdate(self.target_image_path, base_image_path,
-                              'clean')
-    self.assertTrue(self.worker.VerifyImage(self))
-
-    # Update from
-    self.worker.PerformUpdate(self.target_image_path, self.target_image_path,
-                              'clean')
-    self.assertTrue(self.worker.VerifyImage(self))
-
-  def testInterruptedUpdate(self):
-    """Tests what happens if we interrupt payload delivery 3 times."""
-
-    class InterruptionFilter(cros_test_proxy.Filter):
-      """This filter causes the proxy to interrupt the download 3 times.
-
-      It does this by closing the first three connections after they transfer
-      2M total in the outbound direction.
-      """
-
-      def __init__(self):
-        """Defines variable shared across all connections."""
-        self.close_count = 0
-        self.data_size = 0
-
-      def setup(self):
-        """Called once at the start of each connection."""
-        self.data_size = 0
-
-      # Overriden method.  The first three connections transferring more than 2M
-      # outbound will be closed.
-      def OutBound(self, data):
-        if self.close_count < 3:
-          if self.data_size > (2 * 1024 * 1024):
-            self.close_count += 1
-            return None
-
-        self.data_size += len(data)
-        return data
-
-    self.worker.Initialize(self.options.ssh_port or 9224)
-    self.AttemptUpdateWithFilter(InterruptionFilter(), proxy_port=8082)
-
-  def testSimpleSignedUpdate(self):
-    """Test that updates to itself with a signed payload."""
-    self.worker.Initialize(self.options.ssh_port or 9226)
-    signed_target_image_path = self.worker.PrepareBase(self.target_image_path,
-                                                       signed_base=True)
-    if self.payload_signing_key:
-      self.worker.PerformUpdate(
-          self.target_image_path, signed_target_image_path,
-          payload_signing_key=self.payload_signing_key)
-    else:
-      logging.info('No key found to use for signed testing.')
-
-  def SimpleTestUpdateAndVerify(self):
-    """Test that updates to itself.
-
-    We explicitly don't use test prefix so that isn't run by default.  Can be
-    run using test_prefix option.
-    """
-    self.worker.Initialize(self.options.ssh_port or 9227)
-    target_image_path = self.worker.PrepareBase(self.target_image_path)
-    self.worker.PerformUpdate(target_image_path, target_image_path)
-    self.assertTrue(self.worker.VerifyImage(self))
-
   def SimpleTestVerify(self):
     """Test that only verifies the target image.
 
@@ -231,86 +96,6 @@
     self.worker.PrepareBase(self.target_image_path)
     self.assertTrue(self.worker.VerifyImage(self))
 
-  # --- DISABLED TESTS ---
-
-  def NoTestDelayedUpdate(self):
-    """Tests what happens if some data is delayed during update delivery."""
-
-    class DelayedFilter(cros_test_proxy.Filter):
-      """Causes intermittent delays in data transmission.
-
-      It does this by inserting 3 20 second delays when transmitting
-      data after 2M has been sent.
-      """
-      def __init__(self):
-        """Defines variable shared across all connections."""
-        self.data_size = 0
-        self.delay_count = 0
-
-      def setup(self):
-        """Called once at the start of each connection."""
-        self.data_size = 0
-        self.delay_count = 0
-
-      # The first three packets after we reach 2M transferred
-      # are delayed by 20 seconds.
-      def OutBound(self, data):
-        if self.delay_count < 3:
-          if self.data_size > (2 * 1024 * 1024):
-            self.delay_count += 1
-            time.sleep(20)
-
-        self.data_size += len(data)
-        return data
-
-    self.worker.Initialize(self.options.ssh_port or 9225)
-    self.AttemptUpdateWithFilter(DelayedFilter(), proxy_port=8083)
-
-  def NotestPlatformToolchainOptions(self):
-    """Tests the hardened toolchain options."""
-    self.worker.Initialize(self.options.ssh_port or 9229)
-    self.worker.PrepareBase(self.base_image_path)
-    self.assertTrue(self.worker.VerifyImage('platform_ToolchainOptions'))
-
-  # TODO(sosa): Get test to work with verbose.
-  def NotestPartialUpdate(self):
-    """Tests what happens if we attempt to update with a truncated payload."""
-    self.worker.Initialize(self.options.ssh_port or 9230)
-    # Preload with the version we are trying to test.
-    self.worker.PrepareBase(self.target_image_path)
-
-    # Image can be updated at:
-    # ~chrome-eng/chromeos/localmirror/autest-images
-    url = ('http://gsdview.appspot.com/chromeos-localmirror/'
-           'autest-images/truncated_image.gz')
-    payload = os.path.join(self.download_folder, 'truncated_image.gz')
-
-    # Read from the URL and write to the local file
-    urllib.urlretrieve(url, payload)
-
-    expected_msg = 'download_hash_data == update_check_response_hash failed'
-    self.AttemptUpdateWithPayloadExpectedFailure(payload, expected_msg)
-
-  # TODO(sosa): Get test to work with verbose.
-  def NotestCorruptedUpdate(self):
-    """Tests what happens if we attempt to update with a corrupted payload."""
-    self.worker.Initialize(self.options.ssh_port or 9231)
-    # Preload with the version we are trying to test.
-    self.worker.PrepareBase(self.target_image_path)
-
-    # Image can be updated at:
-    # ~chrome-eng/chromeos/localmirror/autest-images
-    url = ('http://gsdview.appspot.com/chromeos-localmirror/'
-           'autest-images/corrupted_image.gz')
-    payload = os.path.join(self.download_folder, 'corrupted.gz')
-
-    # Read from the URL and write to the local file
-    urllib.urlretrieve(url, payload)
-
-    # This update is expected to fail...
-    expected_msg = 'zlib inflate() error:-3'
-    self.AttemptUpdateWithPayloadExpectedFailure(payload, expected_msg)
-
   # --- PRIVATE HELPER FUNCTIONS ---
   def _InstallHandlers(self):
     """Installs signal handlers for SIGINT and SIGTERM."""
diff --git a/au_test_harness/au_worker.py b/au_test_harness/au_worker.py
index 90356a8..9f304ea 100644
--- a/au_test_harness/au_worker.py
+++ b/au_test_harness/au_worker.py
@@ -15,18 +15,11 @@
 import inspect
 import os
 
-from chromite.lib import cros_build_lib
 from chromite.lib import cros_logging as logging
-from chromite.lib import dev_server_wrapper
-from crostestutils.au_test_harness import update_exception
 
 
 class AUWorker(object):
   """Interface for a worker that updates and verifies images."""
-  # Mapping between cached payloads to directory locations.
-  update_cache = None
-
-  # --- INTERFACE ---
 
   def __init__(self, options, test_results_root):
     """Processes options for the specific-type of worker."""
@@ -84,26 +77,11 @@
     """
 
   def UpdateImage(self, image_path, src_image_path='', stateful_change='old',
-                  proxy_port=None, payload_signing_key=None):
+                  proxy_port=None):
     """Implementation of an actual update.
 
     Subclasses must override this method with the correct update procedure for
-    the class. See PerformUpdate for args.
-    """
-
-  def UpdateUsingPayload(self, update_path, stateful_change='old',
-                         proxy_port=None):
-    """Updates target with the pre-generated update stored in update_path.
-
-    Subclasses must override this method with the correct update procedure for
     the class.
-
-    Args:
-      update_path: Path to the image to update with. This directory should
-          contain both update.gz, and stateful.image.gz
-      stateful_change: How to perform the stateful update.
-      proxy_port: Port to have the client connect to. For use with
-          CrosTestProxy.
     """
 
   def VerifyImage(self, unittest, percent_required_to_pass=100, test=''):
@@ -123,43 +101,6 @@
       Returns the percent that passed.
     """
 
-  # --- INTERFACE TO AU_TEST ---
-
-  def PerformUpdate(self, image_path, src_image_path='', stateful_change='old',
-                    proxy_port=None, payload_signing_key=None):
-    """Performs an update using  _UpdateImage and reports any error.
-
-    Subclasses should not override this method but override _UpdateImage
-    instead.
-
-    Args:
-      image_path: Path to the image to update with.  This image must be a test
-          image.
-      src_image_path: Optional.  If set, perform a delta update using the
-          image specified by the path as the source image.
-      stateful_change: How to modify the stateful partition.  Values are:
-          'old': Don't modify stateful partition.  Just update normally.
-          'clean': Uses clobber-state to wipe the stateful partition with the
-              exception of code needed for ssh.
-      proxy_port: Port to have the client connect to. For use with
-          CrosTestProxy.
-      payload_signing_key: Path to the private key to use to sign payloads.
-    Raises an update_exception.UpdateException if _UpdateImage returns an error.
-    """
-    if not self.use_delta_updates:
-      src_image_path = ''
-    key_to_use = payload_signing_key
-
-    self.UpdateImage(image_path, src_image_path, stateful_change, proxy_port,
-                     key_to_use)
-
-  @classmethod
-  def SetUpdateCache(cls, update_cache):
-    """Sets the global update cache for getting paths to devserver payloads."""
-    cls.update_cache = update_cache
-
-  # --- METHODS FOR SUB CLASS USE ---
-
   def PrepareRealBase(self, image_path, signed_base):
     """Prepares a remote device for worker test by updating it to the image."""
     real_image_path = image_path
@@ -181,63 +122,6 @@
 
     return self.vm_image_path
 
-  def GetStatefulChangeFlag(self, stateful_change):
-    """Returns the flag to pass to image_to_vm for the stateful change."""
-    stateful_change_flag = ''
-    if stateful_change:
-      stateful_change_flag = '--stateful_update_flag=%s' % stateful_change
-
-    return stateful_change_flag
-
-  def AppendUpdateFlags(self, cmd, image_path, src_image_path, proxy_port,
-                        payload_signing_key, for_vm=False):
-    """Appends common args to an update cmd defined by an array.
-
-    Modifies cmd in places by appending appropriate items given args.
-
-    Args:
-      cmd: See PerformUpdate.
-      image_path: See PerformUpdate.
-      src_image_path: See PerformUpdate.
-      proxy_port: See PerformUpdate.
-      payload_signing_key: See PerformUpdate.
-      for_vm: Additional optional argument to say that the payload is intended
-          for vm usage (so we don't patch the kernel).
-    """
-    if proxy_port:
-      cmd.append('--proxy_port=%s' % proxy_port)
-    update_id = dev_server_wrapper.GenerateUpdateId(
-        image_path, src_image_path, payload_signing_key,
-        for_vm=for_vm)
-    cache_path = self.update_cache.get(update_id)
-    if cache_path:
-      update_url = dev_server_wrapper.DevServerWrapper.GetDevServerURL(
-          port=proxy_port, sub_dir=cache_path)
-      cmd.append('--update_url=%s' % update_url)
-    else:
-      raise update_exception.UpdateException(
-          1, 'No payload found for %s' % update_id)
-
-  def RunUpdateCmd(self, cmd, log_directory=None):
-    """Runs the given update cmd given verbose options.
-
-    Args:
-      cmd: The shell cmd to run.
-      log_directory: Where to store the logs for this cmd.
-
-    Raises:
-      update_exception.UpdateException: If the update fails.
-    """
-    kwds = dict(print_cmd=False, combine_stdout_stderr=True, error_code_ok=True)
-    if not self.verbose:
-      kwds['redirect_stdout'] = kwds['redirect_stderr'] = True
-    if log_directory:
-      kwds['log_stdout_to_file'] = os.path.join(log_directory, 'update.log')
-    result = cros_build_lib.RunCommand(cmd, **kwds)
-    if result.returncode != 0:
-      logging.warning(result.output)
-      raise update_exception.UpdateException(result.returncode, 'Update failed')
-
   def AssertEnoughTestsPassed(self, unittest, output, percent_required_to_pass):
     """Helper function that asserts a sufficient number of tests passed.
 
diff --git a/au_test_harness/cros_au_test_harness.py b/au_test_harness/cros_au_test_harness.py
index 454fd3b..0bc03e7 100755
--- a/au_test_harness/cros_au_test_harness.py
+++ b/au_test_harness/cros_au_test_harness.py
@@ -17,7 +17,6 @@
 import argparse
 import functools
 import os
-import pickle
 import sys
 import tempfile
 import unittest
@@ -27,14 +26,15 @@
 sys.path.append(constants.CROS_PLATFORM_ROOT)
 sys.path.append(constants.SOURCE_ROOT)
 
+# pylint: disable=wrong-import-position
 from chromite.lib import cros_build_lib
 from chromite.lib import cros_logging as logging
-from chromite.lib import dev_server_wrapper
 from chromite.lib import parallel
 from chromite.lib import sudo
 from chromite.lib import timeout_util
+
 from crostestutils.au_test_harness import au_test
-from crostestutils.au_test_harness import au_worker
+
 from crostestutils.lib import test_helper
 
 # File location for update cache in given folder.
@@ -77,22 +77,6 @@
         raise parallel.BackgroundFailure(msg)
 
 
-def _ReadUpdateCache(dut_type, target_image):
-  """Reads update cache from generate_test_payloads call."""
-  # TODO(wonderfly): Figure out how to use update cache for GCE images.
-  if dut_type == 'gce':
-    return None
-  path_to_dump = os.path.dirname(target_image)
-  cache_file = os.path.join(path_to_dump, CACHE_FILE)
-
-  if os.path.exists(cache_file):
-    logging.info('Loading update cache from ' + cache_file)
-    with open(cache_file) as file_handle:
-      return pickle.load(file_handle)
-
-  return None
-
-
 def _PrepareTestSuite(opts):
   """Returns a prepared test suite given by the opts and test class."""
   au_test.AUTest.ProcessOptions(opts)
@@ -146,10 +130,6 @@
                  'Given: type=%s, base_image=%s.' %
                  (opts.type, opts.base_image))
 
-  if (opts.payload_signing_key and not
-      os.path.isfile(opts.payload_signing_key)):
-    parser.error('Testing requires a valid path to the private key.')
-
   if opts.ssh_private_key and not os.path.isfile(opts.ssh_private_key):
     parser.error('Testing requires a valid path to the ssh private key.')
 
@@ -187,9 +167,6 @@
   parser.add_argument('-j', '--jobs',
                       default=test_helper.CalculateDefaultJobs(), type=int,
                       help='Number of simultaneous jobs')
-  parser.add_argument('--payload_signing_key', default=None,
-                      help='Path to the private key used to sign payloads '
-                      'with.')
   parser.add_argument('-q', '--quick_test', default=False, action='store_true',
                       help='Use a basic test to verify image.')
   parser.add_argument('-m', '--remote',
@@ -226,13 +203,6 @@
 
   CheckOpts(parser, opts)
 
-  # Generate cache of updates to use during test harness.
-  update_cache = _ReadUpdateCache(opts.type, opts.target_image)
-  if not update_cache:
-    msg = ('No update cache found. Update testing will not work.  Run '
-           ' cros_generate_update_payloads if this was not intended.')
-    logging.info(msg)
-
   # Create download folder for payloads for testing.
   download_folder = os.path.join(os.path.realpath(os.path.curdir),
                                  'latest_download')
@@ -243,25 +213,11 @@
       raise
 
   with sudo.SudoKeepAlive():
-    au_worker.AUWorker.SetUpdateCache(update_cache)
-    my_server = None
-    try:
-      # Only start a devserver if we'll need it.
-      if update_cache:
-        my_server = dev_server_wrapper.DevServerWrapper(
-            port=dev_server_wrapper.DEFAULT_PORT,
-            log_dir=opts.test_results_root)
-        my_server.Start()
-
-      if (opts.type == 'vm' or opts.type == 'gce') and opts.parallel:
-        _RunTestsInParallel(opts)
-      else:
-        cros_build_lib.Die('Test harness failed. unsupported test type %s' %
-                           opts.type)
-
-    finally:
-      if my_server:
-        my_server.Stop()
+    if (opts.type == 'vm' or opts.type == 'gce') and opts.parallel:
+      _RunTestsInParallel(opts)
+    else:
+      cros_build_lib.Die('Test harness failed. unsupported test type %s' %
+                         opts.type)
 
 
 if __name__ == '__main__':
diff --git a/au_test_harness/gce_au_worker.py b/au_test_harness/gce_au_worker.py
index a33dd69..2e3db37 100644
--- a/au_test_harness/gce_au_worker.py
+++ b/au_test_harness/gce_au_worker.py
@@ -121,7 +121,7 @@
     return self.PrepareRealBase(image_path, signed_base)
 
   def UpdateImage(self, image_path, src_image_path='', stateful_change='old',
-                  proxy_port=None, payload_signing_key=None):
+                  proxy_port=None):
     """Updates the image on the GCE instance.
 
     Unlike vm_au_worker, UpdateImage always creates a new image and a new
diff --git a/au_test_harness/update_exception.py b/au_test_harness/update_exception.py
deleted file mode 100644
index 4cbf831..0000000
--- a/au_test_harness/update_exception.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Module containing update exceptions."""
-
-
-class UpdateException(Exception):
-  """Exception thrown when _UpdateImage or _UpdateUsingPayload fail."""
-  def __init__(self, code, output):
-    super(UpdateException, self).__init__(output)
-    self.code = code
-    self.output = output
diff --git a/au_test_harness/vm_au_worker.py b/au_test_harness/vm_au_worker.py
index 2953484..c986ffe 100644
--- a/au_test_harness/vm_au_worker.py
+++ b/au_test_harness/vm_au_worker.py
@@ -15,9 +15,7 @@
 from chromite.lib import constants as chromite_constants
 from chromite.lib import cros_build_lib
 from chromite.lib import cros_logging as logging
-from crostestutils.lib import constants
 from crostestutils.au_test_harness import au_worker
-from crostestutils.au_test_harness import update_exception
 
 
 class VMAUWorker(au_worker.AUWorker):
@@ -50,7 +48,7 @@
     # well as the archive stage of cbuildbot. Make a private copy of
     # the VM image, to avoid any conflict.
     _, private_image_path = tempfile.mkstemp(
-        prefix="%s." % buildbot_constants.VM_DISK_PREFIX)
+        prefix='%s.' % buildbot_constants.VM_DISK_PREFIX)
     shutil.copy(self.vm_image_path, private_image_path)
     self.TestInfo('Copied shared disk image %s to %s.' %
                   (self.vm_image_path, private_image_path))
@@ -80,67 +78,6 @@
     except shutil.Error as e:
       logging.warning('Ignoring errors while copying VM files: %s', e)
 
-  def _UpdateCmd(self, log_directory, stateful_change):
-    """Update command to use."""
-    cmd = ['%s/cros_run_test' % chromite_constants.CHROMITE_BIN_DIR,
-           '--board=%s' % self.board,
-           '--image-path=%s' % self.vm_image_path,
-           '--ssh-port=%s' % self._ssh_port,
-           self.graphics_flag,
-           '--host-cmd', '--',
-           os.path.join(constants.CROSTESTUTILS_DIR, 'image_to_live.sh'),
-           '--ssh_port=%s' % self._ssh_port,
-           '--noupdate_hostkey',
-           '--update_log=%s' % os.path.join(log_directory, 'update_engine.log'),
-           '--verify',
-           '--remote=127.0.0.1',
-           self.GetStatefulChangeFlag(stateful_change)]
-    if self.ssh_private_key:
-      cmd.append('--private_key=%s' % self.ssh_private_key)
-    return cmd
-
-  def UpdateImage(self, image_path, src_image_path='', stateful_change='old',
-                  proxy_port='', payload_signing_key=None):
-    """Updates VM image with image_path."""
-    log_directory, fail_directory = self.GetNextResultsPath('update')
-    cmd = self._UpdateCmd(log_directory, stateful_change)
-    self.AppendUpdateFlags(cmd, image_path, src_image_path, proxy_port,
-                           payload_signing_key)
-    self.TestInfo(self.GetUpdateMessage(image_path, src_image_path, True,
-                                        proxy_port))
-    try:
-      self.RunUpdateCmd(cmd, log_directory)
-    except update_exception.UpdateException:
-      self._HandleFail(log_directory, fail_directory)
-      raise
-
-  def UpdateUsingPayload(self, update_path, stateful_change='old',
-                         proxy_port=None):
-    """Updates a vm image using image_to_live.sh."""
-    log_directory, fail_directory = self.GetNextResultsPath('update')
-    cmd = self._UpdateCmd(log_directory, stateful_change) + [
-        '--payload=%s' % update_path
-    ]
-    if proxy_port:
-      cmd.append('--proxy_port=%s' % proxy_port)
-    self.TestInfo(self.GetUpdateMessage(update_path, None, True, proxy_port))
-    try:
-      self.RunUpdateCmd(cmd, log_directory)
-    except update_exception.UpdateException:
-      self._HandleFail(log_directory, fail_directory)
-      raise
-
-  def AppendUpdateFlags(self, cmd, image_path, src_image_path, proxy_port,
-                        payload_signing_key, for_vm=False):
-    """Appends common args to an update cmd defined by an array.
-
-    Calls super function with for_vm set to True. See base class for
-    descriptions for arguments.
-    """
-    super(VMAUWorker, self).AppendUpdateFlags(
-        cmd, image_path, src_image_path, proxy_port, payload_signing_key,
-        for_vm=True)
-
   def VerifyImage(self, unittest, percent_required_to_pass=100, test=''):
     # VMAUWorker disregards |unittest| and |percent_required_to_pass|.
     return self._VerifyImage(test)
diff --git a/ctest/ctest.py b/ctest/ctest.py
index ded2b0a..79e53e4 100755
--- a/ctest/ctest.py
+++ b/ctest/ctest.py
@@ -17,14 +17,16 @@
 sys.path.append(constants.SOURCE_ROOT)
 sys.path.append(constants.CROS_PLATFORM_ROOT)
 
+# pylint: disable=wrong-import-position
 from chromite.lib import cros_build_lib
 from chromite.lib import cros_logging as logging
+
 from crostestutils.lib import image_extractor
 from crostestutils.lib import test_helper
 
 
 class TestException(Exception):
-  """Thrown by RunAUTestHarness if there's a test failure."""
+  """Thrown by RunTestHarness if there's a test failure."""
 
 
 class CTest(object):
@@ -92,48 +94,14 @@
                    'Using target instead.')
       self.base = self.target
 
-  def GenerateUpdatePayloads(self, full):
-    """Generates payloads for the test harness.
+  def RunTestHarness(self, only_verify, suite):
+    """Runs the test harness (suite:smoke).
 
-    Args:
-      full: Build payloads for full test suite.
-    """
-    generator = ('../platform/crostestutils/'
-                 'generate_test_payloads/cros_generate_test_payloads.py')
-
-    cmd = [generator]
-    cmd.append('--target=%s' % self.target)
-    cmd.append('--base=%s' % self.base)
-    cmd.append('--board=%s' % self.board)
-    cmd.append('--jobs=%d' % self.jobs)
-
-    if full:
-      cmd.append('--full_suite')
-    else:
-      cmd.append('--basic_suite')
-
-    if self.type != 'vm':
-      cmd.append('--novm')
-    try:
-      cros_build_lib.RunCommand(cmd, cwd=self.crosutils_root)
-    except cros_build_lib.RunCommandError:
-      logging.error('We failed to generate all the update payloads required '
-                    'for testing. Please see the logs for more info. We print '
-                    'out the log from a failing call to '
-                    'cros_generate_update_payload for error handling.')
-      sys.exit(1)
-
-  def RunAUTestHarness(self, only_verify, quick_update, suite):
-    """Runs the auto update test harness.
-
-    The auto update test harness encapsulates testing the auto-update mechanism
-    for the latest image against the latest official image from the channel.
-    This also tests images with suite:smoke (built-in as part of its
-    verification process).
+    This tests images with suite:smoke (built-in as part of its verification
+    process).
 
     Args:
       only_verify: Only verify the target image.
-      quick_update: Do a quick update test.
       suite: The suite of tests to run.
 
     Raises:
@@ -160,8 +128,6 @@
 
     if only_verify:
       cmd.append('--test_prefix=SimpleTestVerify')
-    elif quick_update:
-      cmd.append('--test_prefix=SimpleTestUpdateAndVerify')
 
     if self.test_results_root:
       cmd.append('--test_results_root=%s' % self.test_results_root)
@@ -195,10 +161,6 @@
                       help='Disable graphics for the vm test.')
   parser.add_argument('--only_verify', action='store_true', default=False,
                       help='Only run basic verification suite.')
-  parser.add_argument('--quick_update', action='store_true',
-                      help='Run a quick update test. This will run a subset of '
-                      'test suite after running autoupdate from target '
-                      'image to itself.')
   parser.add_argument('--remote', default='0.0.0.0',
                       help='For real tests, ip address of the target machine.')
   parser.add_argument('--target_image', default=None,
@@ -219,19 +181,14 @@
                       'image as the root user')
   parser.add_argument('--ssh_port', default=None, type=int,
                       help='ssh port used to ssh into image. (Should only be'
-                      ' used with either --quick_update or --only_verify)')
+                      ' used with --only_verify)')
 
   opts = parser.parse_args()
 
   if not opts.board:
     parser.error('Need board for image to compare against.')
-  if opts.only_verify and opts.quick_update:
-    parser.error(
-        'Only one of --only_verify or --quick_update should be specified.')
-  if opts.ssh_port and not (opts.only_verify or opts.quick_update):
-    parser.error(
-        'ssh_port should be specified with either --only_verify or '
-        '--quick_update')
+  if opts.ssh_port and not opts.only_verify:
+    parser.error('ssh_port should only be specified with --only_verify.')
 
   # force absolute path for these opts, since a chdir occurs deeper in the
   # codebase.
@@ -245,11 +202,8 @@
 
   ctest = CTest(opts)
   ctest.FindTargetAndBaseImages()
-  if not opts.only_verify:
-    ctest.GenerateUpdatePayloads(not opts.quick_update)
   try:
-    ctest.RunAUTestHarness(opts.only_verify, opts.quick_update,
-                           opts.suite)
+    ctest.RunTestHarness(opts.only_verify, opts.suite)
   except TestException as e:
     if opts.verbose:
       cros_build_lib.Die(str(e))
diff --git a/generate_test_payloads/__init__.py b/generate_test_payloads/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/generate_test_payloads/__init__.py
+++ /dev/null
diff --git a/generate_test_payloads/constants.py b/generate_test_payloads/constants.py
deleted file mode 120000
index 8d73346..0000000
--- a/generate_test_payloads/constants.py
+++ /dev/null
@@ -1 +0,0 @@
-../lib/constants.py
\ No newline at end of file
diff --git a/generate_test_payloads/cros_generate_test_payloads.py b/generate_test_payloads/cros_generate_test_payloads.py
deleted file mode 100755
index f4b723b..0000000
--- a/generate_test_payloads/cros_generate_test_payloads.py
+++ /dev/null
@@ -1,296 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This module generates update payloads for testing in parallel.
-
-This module generates update payloads in parallel using the devserver. After
-running this module, test payloads are generated and left in the devserver
-cache. In addition, this module produces a serialized dictionary stored
-with the target image that contains a mapping from the update payload name
-to the path it is stored in the devserver cache.  This dictionary can then be
-used by other testing scripts i.e. au_test_harness, to locate and use these
-payloads for testing in virtual machines.
-
-FOR USE OUTSIDE CHROOT ONLY.
-"""
-
-from __future__ import print_function
-
-import hashlib
-import optparse
-import os
-import pickle
-import sys
-
-import constants
-sys.path.append(constants.CROS_PLATFORM_ROOT)
-sys.path.append(constants.SOURCE_ROOT)
-
-from chromite.lib import cros_logging as logging
-from chromite.lib import dev_server_wrapper
-from chromite.lib import locking
-from chromite.lib import sudo
-
-from chromite.lib.paygen import paygen_payload_lib
-from chromite.lib.paygen import paygen_stateful_payload_lib
-
-from crostestutils.au_test_harness import cros_au_test_harness
-
-from crostestutils.lib import test_helper
-
-
-def _GetFileMd5(path):
-  """Calculates the Md5 hash of a file."""
-  hash_md5 = hashlib.md5()
-  with open(path, "rb") as f:
-    for chunk in iter(lambda: f.read(4096 * 10), b""):
-      hash_md5.update(chunk)
-  return hash_md5.hexdigest()
-
-
-class UpdatePayload(object):
-  """Wrapper around an update payload.
-
-  This class defines an update payload that should be generated.  The only
-  required variable to be set is |target|.  If the base image is set to None,
-  this defines a full update payload to the target image.
-
-  Variables:
-    target: Create payload for this image.
-    base: If not None, a delta payload with this image as the source.
-    for_vm: Whether we want the payload for a VM image.
-  """
-  NAME_SPLITTER = '_'
-
-  def __init__(self, target, base, for_vm=False):
-    self.base = base
-    self.target = target
-    self.for_vm = for_vm
-
-    self.payload_dir = None
-
-    self._CalculateUpdateCacheLabel()
-
-  def UpdateId(self):
-    """Generates a unique update id the test harness can understand."""
-    return dev_server_wrapper.GenerateUpdateId(self.target, self.base, None,
-                                               self.for_vm)
-
-  def _CalculateUpdateCacheLabel(self):
-    """Calculates the label associated with this payload.
-
-    It is exactly what devserver does.
-    """
-    self.label = ''
-    if self.base:
-      self.label += _GetFileMd5(self.base) + '_'
-    self.label += _GetFileMd5(self.target)
-
-  def __str__(self):
-    my_repr = self.target
-    if self.base:
-      my_repr = self.base + '->' + my_repr
-
-    if self.for_vm:
-      my_repr = my_repr + '+' + 'for_vm'
-
-    return my_repr
-
-  def __eq__(self, other):
-    return str(self) == str(other)
-
-  def __hash__(self):
-    return hash(str(self))
-
-
-class UpdatePayloadGenerator(object):
-  """Class responsible for generating update payloads."""
-
-  PATH_TO_CACHE_DIR = os.path.join(
-      constants.SOURCE_ROOT, 'chroot/var/lib/devserver/static/cache')
-
-  def __init__(self, options):
-    """Initializes a generator object from parsed options.
-
-    Args:
-      options: Parsed options from main().
-    """
-    self.target = options.target
-    self.base = options.base
-
-    # For vm tests we use the _qemu name for the images.  Regardless of vm or
-    # non vm, these no_vm names are guaranteed to be non-qemu base/target names.
-    self.base_no_vm = self.base
-    self.target_no_vm = self.target
-
-    # Affect what payloads we create.
-    self.board = options.board
-    self.basic_suite = options.basic_suite
-    self.full_suite = options.full_suite
-    self.payloads = set([])
-
-    self.jobs = options.jobs
-
-    self.vm = _ShouldGenerateVM(options)
-
-
-  def _AddUpdatePayload(self, target, base, for_vm=False):
-    """Adds a new required update payload.  If base is None, a full payload."""
-    payload = UpdatePayload(target, base, for_vm)
-
-    payload.payload_dir = os.path.join(self.PATH_TO_CACHE_DIR, payload.label)
-
-    self.payloads.add(payload)
-
-  def GenerateImagesForTesting(self):
-    # All vm testing requires a VM'ized target.
-    if self.vm:
-      self.target = test_helper.CreateVMImage(self.target, self.board)
-
-    if self.full_suite:
-      # The full suite may not have a VM image produced for the test image yet.
-      # Ensure this is created.
-      self.base = test_helper.CreateVMImage(self.base, self.board)
-
-  def GeneratePayloadRequirements(self):
-    """Generate Payload Requirements for AUTestHarness and NPlus1 Testing."""
-    if self.full_suite:
-      if self.target != self.base:
-        # N-1->N. But only add it if the base is different than the target.
-        self._AddUpdatePayload(self.target, self.base, for_vm=self.vm)
-
-      # N->N after N-1->N.
-      self._AddUpdatePayload(self.target, self.target, for_vm=self.vm)
-
-    if self.basic_suite:
-      # Update image to itself from VM base.
-      self._AddUpdatePayload(self.target, self.target, for_vm=self.vm)
-
-  def GenerateUpdatePayloads(self):
-    """Iterates through payload requirements and generates them.
-
-    This is the main method of this class.  It iterates through payloads
-    it needs, generates them, and builds a Cache that can be used by the
-    test harness to reference these payloads.
-
-    Returns:
-      The cache as a Python dict.
-    """
-
-    def GenerateUpdatePayload(payload):
-      """Generates an update payload and writes it into payload file.
-
-      Args:
-        payload: An object of UpdatePayload that defines the parameters of the
-                 payload.
-      """
-      payload_file = os.path.join(payload.payload_dir, 'update.gz')
-
-      logging.info('Generating a %s payload %s to %s %s',
-                   'delta' if payload.base else 'full',
-                   ('from %s' % payload.base) if payload.base else '',
-                   payload.target,
-                   'and not patching the kernel.' if payload.for_vm else '')
-
-      paygen_payload_lib.GenerateUpdatePayload(payload.target, payload_file,
-                                               src_image=payload.base)
-
-      # Generating the stateful update as devserver would've done.
-      paygen_stateful_payload_lib.GenerateStatefulPayload(payload.target,
-                                                          payload.payload_dir)
-
-    for p in self.payloads:
-      GenerateUpdatePayload(p)
-
-    # Build the dictionary from our id's and returned cache paths.
-    return {p.UpdateId(): os.path.join('update', 'cache', p.label)
-            for p in self.payloads}
-
-  def DumpCacheToDisk(self, cache):
-    """Dumps the cache to the same folder as the images."""
-    if not self.basic_suite and not self.full_suite:
-      logging.info('Not dumping payload cache to disk as payloads for the '
-                   'test harness were not requested.')
-    else:
-      cache_file = os.path.join(os.path.dirname(self.target),
-                                cros_au_test_harness.CACHE_FILE)
-
-      logging.info('Dumping %s', cache_file)
-      with open(cache_file, 'w') as file_handle:
-        pickle.dump(cache, file_handle)
-
-
-def _ShouldGenerateVM(options):
-  """Returns true if we will need a VM version of our images."""
-  # This is a combination of options.vm and whether or not we are generating
-  # payloads for vm testing.
-  return options.vm and (options.basic_suite or options.full_suite)
-
-
-def CheckOptions(parser, options):
-  """Checks that given options are valid.
-
-  Args:
-    parser: Parser used to parse options.
-    options: Parse options from OptionParser.
-  """
-  if not options.target or not os.path.isfile(options.target):
-    parser.error('Target image must exist.')
-
-  if not options.base:
-    logging.info('Using target image as base image.')
-    options.base = options.target
-
-  if not os.path.isfile(options.base):
-    parser.error('Base image must exist.')
-
-  if _ShouldGenerateVM(options):
-    if not options.board:
-      parser.error('Board must be set to generate update '
-                   'payloads for vm.')
-
-
-def main():
-  test_helper.SetupCommonLoggingFormat()
-  parser = optparse.OptionParser()
-
-  # Options related to which payloads to generate.
-  parser.add_option('--basic_suite', default=False, action='store_true',
-                    help='Prepare to run the basic au test suite.')
-  parser.add_option('--full_suite', default=False, action='store_true',
-                    help='Prepare to run the full au test suite.')
-
-  # Options related to how to generate test payloads for the test harness.
-  parser.add_option('--novm', default=True, action='store_false', dest='vm',
-                    help='Test Harness payloads will not be tested in a VM.')
-
-  # Options related to the images to test.
-  parser.add_option('--board', help='Board used for the images.')
-  parser.add_option('--base', help='Image we want to test updates from.')
-  parser.add_option('--target', help='Image we want to test updates to.')
-
-  # Miscellaneous options.
-  parser.add_option('--jobs', default=test_helper.CalculateDefaultJobs(),
-                    type=int,
-                    help='Number of payloads to generate in parallel.')
-
-  options = parser.parse_args()[0]
-  CheckOptions(parser, options)
-
-  # Don't allow this code to be run more than once at a time.
-  lock_path = os.path.join(os.path.dirname(__file__), '.lock_file')
-  with locking.FileLock(lock_path, 'generate payloads lock') as lock:
-    lock.write_lock()
-    with sudo.SudoKeepAlive():
-      generator = UpdatePayloadGenerator(options)
-      generator.GenerateImagesForTesting()
-      generator.GeneratePayloadRequirements()
-      cache = generator.GenerateUpdatePayloads()
-      generator.DumpCacheToDisk(cache)
-
-
-if __name__ == '__main__':
-  main()
diff --git a/generate_test_payloads/payload_generation_exception.py b/generate_test_payloads/payload_generation_exception.py
deleted file mode 100644
index d4bf537..0000000
--- a/generate_test_payloads/payload_generation_exception.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Module containing payload generation exception."""
-
-
-class PayloadGenerationException(Exception):
-  """Exception thrown when we fail to create an update payload."""
diff --git a/image_to_live.sh b/image_to_live.sh
deleted file mode 100755
index 78339b4..0000000
--- a/image_to_live.sh
+++ /dev/null
@@ -1,565 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Script to update an image onto a live running ChromiumOS instance.
-. $(dirname "$(readlink -f "$0")")/outside_chroot_common.sh || exit 1
-
-. "${SCRIPT_ROOT}/common.sh" ||
-  (echo "Unable to load common.sh" && false) ||
-  exit 1
-
-. "${SCRIPT_ROOT}/remote_access.sh" || die "Unable to load remote_access.sh"
-
-# Flags to control image_to_live.
-DEFINE_boolean ignore_hostname ${FLAGS_TRUE} \
-  "Ignore existing AU hostname on running instance use this hostname."
-DEFINE_boolean ignore_version ${FLAGS_TRUE} \
-  "Ignore existing version on running instance and always update."
-DEFINE_string netdev "eth0" \
-  "The network device to use for figuring out hostname. \
-   This is useful on hosts with multiple NICs."
-DEFINE_string server_log "dev_server.log" \
-  "Path to log for the devserver."
-DEFINE_boolean update "${FLAGS_TRUE}" \
-  "Perform update of root partition."
-DEFINE_boolean update_hostkey ${FLAGS_TRUE} \
-  "Update your known_hosts with the new remote instance's key."
-DEFINE_string update_log "update_engine.log" \
-  "Path to log for the update_engine."
-DEFINE_string update_url "" "Full url of an update image."
-DEFINE_boolean verify ${FLAGS_TRUE} "Verify image on device after update."
-DEFINE_integer repeat 1 "Number of times to run image_to_live."
-
-# Flags for devserver.
-DEFINE_string archive_dir "" \
-  "Deprecated."
-DEFINE_string board "" "Override the board reported by the target"
-DEFINE_integer devserver_port 8080 \
-  "Port to use for devserver."
-DEFINE_string image "" \
-  "Path to the image file to update with, xbuddy paths accepted." i
-DEFINE_string payload "" \
-  "Update with this update payload, ignoring specified images."
-DEFINE_string proxy_port "" \
-  "Have the client request from this proxy instead of devserver."
-DEFINE_string src_image "" \
-  "Create a delta update by passing in the image on the remote machine."
-DEFINE_boolean update_stateful ${FLAGS_TRUE} \
-  "Perform update of stateful partition e.g. /var /usr/local."
-DEFINE_boolean reboot_after_update ${FLAGS_TRUE} \
-  "Reboot after update applied for the update to take effect."
-
-# Flags for stateful update.
-DEFINE_string stateful_update_flag "" \
-  "Flag to pass to stateful update e.g. old, clean, etc." s
-
-DEPRECATION_WARNING="
-!!! You are using a deprecated script !!!
-
-Please use 'cros flash' in the future. See 'cros flash -h' for the details.
-More information available in the link below.
-https://sites.google.com/a/chromium.org/dev/chromium-os/build/cros-flash
-"
-
-FLAGS_HELP="
-Usage: $0 --remote=[target_ip] [--image=[...]] ...
-The remote flag is required to specify a ChromeOS machine to reimage.
-The image flag can be a path to a local image or an XBuddy path.
-For example:
-  $0 --remote=172.0.0.0 --image=./some/path/to/chromium_test_image.bin
-  Would reimage device at 172.0.0.0 with that locally available image.
-  $0 --remote=172.0.0.0 --image='xbuddy:remote/parrot/latest/dev'
-  Uses the latest developer parrot image available on Google Storage.
-  $0 --remote=172.0.0.0 --image='xbuddy:release'
-  Uses the latest test image available on Google Storage.
-  $0 --remote=172.0.0.0 --image='xbuddy:'
-  Uses the latest locally built image for the device board.
-Please see http://goo.gl/6WdLrD for XBuddy documentation."
-
-UPDATER_BIN="/usr/bin/update_engine_client"
-UPDATER_IDLE="UPDATE_STATUS_IDLE"
-UPDATER_NEED_REBOOT="UPDATE_STATUS_UPDATED_NEED_REBOOT"
-UPDATER_UPDATE_CHECK="UPDATE_STATUS_CHECKING_FOR_UPDATE"
-UPDATER_DOWNLOADING="UPDATE_STATUS_DOWNLOADING"
-
-IMAGE_PATH=""
-UPDATE_PATH=""
-ROOTFS_MOUNTPT=""
-STATEFUL_MOUNTPT=""
-
-kill_all_devservers() {
-  # Using ! here to avoid exiting with set -e is insufficient, so use
-  # || true instead.
-  sudo pkill -f devserver\.py || true
-}
-
-cleanup() {
-  if [ -z "${FLAGS_update_url}" ]; then
-    kill_all_devservers
-  fi
-  cleanup_remote_access
-  sudo rm -rf "${TMP}" || true
-  if [ ! -z "${ROOTFS_MOUNTPT}" ]; then
-    rm -rf "${ROOTFS_MOUNTPT}"
-  fi
-  if [ ! -z "${STATEFUL_MOUNTPT}" ]; then
-    rm -rf "${STATEFUL_MOUNTPT}"
-  fi
-}
-
-remote_reboot_sh() {
-  rm -f "${TMP_KNOWN_HOSTS}"
-  remote_sh "$@"
-}
-
-# Returns the hostname of this machine.
-# It tries to find the ipaddress using ifconfig, however, it will
-# default to $HOSTNAME on failure.  We try to use the ip address first as
-# some targets may have dns resolution issues trying to contact back
-# to us.
-get_hostname() {
-  local hostname
-  # Try to parse ifconfig for ip address. Use sudo, because not all distros
-  # allow a common user to call ifconfig.
-  # TODO(zbehan): What if target is not connected via eth0? Update over wifi?
-  # Dedicated usb NIC? Perhaps this detection should be done in the target,
-  # which will get the return address in one way or another. Or maybe we should
-  # just open a ssh tunnel and use localhost.
-  hostname=$(/sbin/ifconfig ${FLAGS_netdev} |
-      grep 'inet addr' |
-      cut -f2 -d':' |
-      cut -f1 -d' ')
-
-  # Fallback to $HOSTNAME if that failed
-  [ -z "${hostname}" ] && hostname=${HOSTNAME}
-
-  echo ${hostname}
-}
-
-is_xbuddy_path() {
-  [[ "${FLAGS_image}" == xbuddy:* ]]
-}
-
-# Starts the devserver and returns the update url to use to get the update.
-start_dev_server() {
-  kill_all_devservers
-  local devserver_flags="--board=${FLAGS_board} --port=${FLAGS_devserver_port}"
-  # Parse devserver flags.
-  if [ -n "${FLAGS_image}" ]; then
-    if is_xbuddy_path; then
-      info "Image flag is an xBuddy path to an image."
-    else
-      info "Forcing the devserver to serve a local image."
-      devserver_flags="${devserver_flags} --pregenerate_update \
-          --image=$(reinterpret_path_for_chroot ${FLAGS_image})"
-      IMAGE_PATH="${FLAGS_image}"
-    fi
-  elif [ -n "${FLAGS_archive_dir}" ]; then
-    echo "archive_dir flag is deprecated. Use --image."
-    exit 1
-  else
-    # IMAGE_PATH should be the newest image and learn the board from
-    # the target.
-    learn_board
-    IMAGE_PATH="${IMAGES_DIR}/${FLAGS_board}/latest"
-    if [[ ! -L ${IMAGE_PATH} ]]; then
-      die "No image found; have you run build_image?"
-    fi
-    IMAGE_PATH="${IMAGE_PATH}/chromiumos_image.bin"
-    devserver_flags="${devserver_flags} \
-        --image=$(reinterpret_path_for_chroot ${IMAGE_PATH})"
-  fi
-
-  if [ -n "${FLAGS_payload}" ]; then
-    devserver_flags="${devserver_flags} \
-        --payload=$(reinterpret_path_for_chroot ${FLAGS_payload})"
-  fi
-
-  if [ -n "${FLAGS_proxy_port}" ]; then
-    devserver_flags="${devserver_flags} \
-        --proxy_port=${FLAGS_proxy_port}"
-  fi
-
-  if [ -n "${FLAGS_src_image}" ]; then
-    devserver_flags="${devserver_flags} \
-        --src_image=\"$(reinterpret_path_for_chroot ${FLAGS_src_image})\""
-  fi
-  # Remove any extra whitespace between words in flags.
-  devserver_flags=$(echo ${devserver_flags} | sed 's/ \+/ /g')
-  info "Starting devserver with flags ${devserver_flags}"
-
-  # Clobber dev_server log in case image_to_live is run with sudo previously.
-  if [ -f "${FLAGS_server_log}" ]; then
-    sudo rm -f "${FLAGS_server_log}"
-  fi
-
-  # Need to inherit environment variables to discover gsutil credentials.
-  if cros_sdk -- cros_workon --host list |
-      grep chromeos-base/cros-devutils &> /dev/null; then
-    info "cros_workon for devserver detected. Running devserver from source."
-    cros_sdk -- sudo -E ../platform/dev/devserver.py ${devserver_flags} \
-       > ${FLAGS_server_log} 2>&1 &
-  else
-    cros_sdk -- sudo -E start_devserver ${devserver_flags} \
-      > ${FLAGS_server_log} 2>&1 &
-  fi
-
-  info "Waiting on devserver to start. " \
-       "Be patient as the server generates the update before starting."
-  until netstat -lnp 2>&1 | grep :${FLAGS_devserver_port} > /dev/null; do
-    sleep 5
-    if ! pgrep -f "devserver" > /dev/null; then
-      tail -n 10 "${FLAGS_server_log}"
-      die "Devserver failed, see dev_server.log -- snippet above"
-    fi
-  done
-  if is_xbuddy_path; then
-    local devserver_url=$(echo $(get_devserver_url) | sed "s#/update##")
-    local xbuddy_path="xbuddy/${FLAGS_image##*xbuddy:}?for_update=true"
-    info "Xbuddy image detected. Using xbuddy RPC: " ${xbuddy_path}
-    UPDATE_PATH="$(curl -f "${devserver_url}/${xbuddy_path}")"
-    if [ -z "${UPDATE_PATH}" ]; then
-      tail -n 10 "${FLAGS_server_log}"
-      die "XBuddy failed to stage the image specified by ${FLAGS_image}."
-    fi
-    info "XBuddy returned uri to use for update: ${UPDATE_PATH}."
-    if [[ "${UPDATE_PATH}" != *"/update"* ]]; then
-      die "XBuddy did not return a valid update uri."
-    fi
-  fi
-}
-
-# Copies stateful update script which fetches the newest stateful update
-# from the dev server and prepares the update. chromeos_startup finishes
-# the update on next boot.
-run_stateful_update() {
-  local dev_url="${UPDATE_PATH}"
-  local stateful_url=""
-  local stateful_update_args=""
-
-  # Parse stateful update flag.
-  if [ -n "${FLAGS_stateful_update_flag}" ]; then
-    stateful_update_args="${stateful_update_args} \
-        --stateful_change ${FLAGS_stateful_update_flag}"
-  fi
-
-  # Assume users providing an update url are using an archive_dir path.
-  stateful_url=$(echo ${dev_url} | sed -e "s/update/static/")
-
-  info "Starting stateful update using URL ${stateful_url}"
-
-  # Copy over update script and run update.
-  local chroot_path="${SCRIPTS_DIR}/../../chroot"
-  local stateful_update_script="/usr/bin/stateful_update"
-
-  remote_cp_to "${chroot_path}/${stateful_update_script}" "/tmp"
-  remote_sh "mount -o remount,exec /tmp"
-  remote_sh "/tmp/stateful_update ${stateful_update_args} ${stateful_url}"
-}
-
-get_update_args() {
-  if [ -z "${1}" ]; then
-    die "No url provided for update."
-  fi
-
-  local update_args="--omaha_url=${1}"
-
-  info "Omaha URL: " ${update_args}
-
-  if [[ ${FLAGS_ignore_version} -eq ${FLAGS_TRUE} ]]; then
-    info "Forcing update independent of the current version"
-    update_args="--update ${update_args}"
-  fi
-
-  echo "${update_args}"
-}
-
-get_devserver_url() {
-  local devserver_url=""
-  local port=${FLAGS_devserver_port}
-
-  if [[ -n ${FLAGS_proxy_port} ]]; then
-    port=${FLAGS_proxy_port}
-  fi
-
-  if [ ${FLAGS_ignore_hostname} -eq ${FLAGS_TRUE} ]; then
-    if [ -z "${FLAGS_update_url}" ]; then
-      devserver_url="http://$(get_hostname):${port}/update"
-    else
-      devserver_url="${FLAGS_update_url}"
-    fi
-  fi
-
-  echo "${devserver_url}"
-}
-
-truncate_update_log() {
-  remote_sh "> /var/log/update_engine.log"
-}
-
-get_update_log() {
-  remote_sh "cat /var/log/update_engine.log"
-  echo "${REMOTE_OUT}" > "${FLAGS_update_log}"
-}
-
-# Used to store the current update status of the remote update engine.
-REMOTE_UPDATE_STATUS=
-
-# Returns ${1} reported by the update client e.g. PROGRESS, CURRENT_OP.
-get_var_from_remote_status() {
-  echo "${REMOTE_UPDATE_STATUS}" |
-      grep ${1} |
-      cut -f 2 -d =
-}
-
-# Updates the remote status variable for the update engine.
-update_remote_status() {
-  remote_sh "${UPDATER_BIN} --status 2> /dev/null"
-  REMOTE_UPDATE_STATUS="${REMOTE_OUT}"
-}
-
-# Both updates the remote status and gets the given variables.
-get_update_var() {
-  update_remote_status
-  get_var_from_remote_status "${1}"
-}
-
-# Returns the current status / progress of the update engine.
-# This is expected to run in its own thread.
-status_thread() {
-  local timeout=5
-
-  info "Devserver handling ping.  Check ${FLAGS_server_log} for more info."
-  sleep ${timeout}
-
-  update_remote_status
-  local current_state=""
-  local next_state="$(get_var_from_remote_status CURRENT_OP)"
-
-  # For current status, only print out status changes.
-  # For download, show progress.
-  # Finally if no status change print out .'s to keep dev informed.
-  while [ "${current_state}" != "${UPDATER_NEED_REBOOT}" ] && \
-      [ "${current_state}" != "${UPDATER_IDLE}" ]; do
-    if [ "${current_state}" != "${next_state}" ]; then
-      info "State of updater has changed to: ${next_state}"
-    elif [ "${next_state}" = "${UPDATER_DOWNLOADING}" ]; then
-      echo "Download progress $(get_var_from_remote_status PROGRESS)"
-    else
-      echo -n "."
-    fi
-
-    sleep ${timeout}
-    current_state="${next_state}"
-    update_remote_status
-    next_state="$(get_var_from_remote_status CURRENT_OP)"
-  done
-}
-
-# Pings the update_engine to see if it responds or a max timeout is reached.
-# Returns 1 if max timeout is reached.
-wait_until_update_engine_is_ready() {
-  local wait_timeout=1
-  local max_timeout=60
-  local time_elapsed=0
-  while ! get_update_var CURRENT_OP > /dev/null; do
-    sleep ${wait_timeout}
-    time_elapsed=$(( time_elapsed + wait_timeout ))
-    echo -n "."
-    if [ ${time_elapsed} -gt ${max_timeout} ]; then
-      return 1
-    fi
-  done
-}
-
-# Runs the autoupdate.
-run_auto_update() {
-  # Truncate the update log so our log file is clean.
-  truncate_update_log
-
-  local update_args="$(get_update_args "${UPDATE_PATH}")"
-  info "Waiting to initiate contact with the update_engine."
-  wait_until_update_engine_is_ready || die "Could not contact update engine."
-
-  info "Starting update using args ${update_args}"
-
-  # Sets up secondary threads to track the update progress and logs
-  status_thread &
-  local status_thread_pid=$!
-  trap "kill ${status_thread_pid}; cleanup" EXIT INT TERM
-
-  # Actually run the update.  This is a blocking call.
-  remote_sh "${UPDATER_BIN} ${update_args}"
-
-  # Clean up secondary threads.
-  kill ${status_thread_pid} 2> /dev/null || warn "Failed to kill status thread"
-  trap cleanup EXIT INT TERM
-
-  local update_status="$(get_update_var CURRENT_OP)"
-  if [ "${update_status}" = ${UPDATER_NEED_REBOOT} ]; then
-    info "Autoupdate was successful."
-    return 0
-  else
-    warn "Autoupdate was unsuccessful.  Status returned was ${update_status}."
-    return 1
-  fi
-}
-
-verify_image() {
-  info "Verifying image."
-  if [ -n "${FLAGS_update_url}" ]; then
-    warn "Verify is not compatible with setting an update url."
-    return
-  fi
-
-  if is_xbuddy_path; then
-    warn "Verify is not currently compatible with xbuddy."
-    return
-  fi
-
-  ROOTFS_MOUNTPT=$(mktemp -d)
-  STATEFUL_MOUNTPT=$(mktemp -d)
-  "${SCRIPTS_DIR}/mount_gpt_image.sh" --from "$(dirname "${IMAGE_PATH}")" \
-                     --image "$(basename ${IMAGE_PATH})" \
-                     --rootfs_mountpt="${ROOTFS_MOUNTPT}" \
-                     --stateful_mountpt="${STATEFUL_MOUNTPT}" \
-                     --read_only
-
-  local lsb_release=$(cat ${ROOTFS_MOUNTPT}/etc/lsb-release)
-  info "Verifying image with release:"
-  echo ${lsb_release}
-
-  "${SCRIPTS_DIR}/mount_gpt_image.sh" --unmount \
-                     --rootfs_mountpt="${ROOTFS_MOUNTPT}" \
-                     --stateful_mountpt="${STATEFUL_MOUNTPT}"
-
-  remote_sh "cat /etc/lsb-release"
-  info "Remote image reports:"
-  echo ${REMOTE_OUT}
-
-  if [ "${lsb_release}" = "${REMOTE_OUT}" ]; then
-    info "Update was successful and image verified as ${lsb_release}."
-    return 0
-  else
-    warn "Image verification failed."
-    return 1
-  fi
-}
-
-find_root_dev() {
-  remote_sh "rootdev -s"
-  echo ${REMOTE_OUT}
-}
-
-run_once() {
-  if [ "$(get_update_var CURRENT_OP)" != "${UPDATER_IDLE}" ]; then
-    warn "Machine is in a bad state.  Resetting the update_engine."
-    remote_sh "${UPDATER_BIN} --reset_status 2> /dev/null"
-  fi
-
-  local initial_root_dev=$(find_root_dev)
-  UPDATE_PATH="$(get_devserver_url)"
-  if [ -z "${FLAGS_update_url}" ]; then
-    # Start local devserver if no update url specified.
-    start_dev_server
-  fi
-
-  local update_pid
-  if [ ${FLAGS_update} -eq ${FLAGS_TRUE} ]; then
-    run_auto_update &
-    update_pid=$!
-  fi
-
-  local stateful_pid
-  local stateful_tmp_file
-  if [ ${FLAGS_update_stateful} -eq ${FLAGS_TRUE} ]; then
-    stateful_tmp_file=$(mktemp)
-    run_stateful_update &> "${stateful_tmp_file}" &
-    stateful_pid=$!
-  fi
-
-  if [ -n "${update_pid}" ] && ! wait ${update_pid}; then
-    warn "Update failed. " \
-       "Dumping update_engine.log for debugging and/or bug reporting."
-    get_update_log
-    tail -n 200 "${FLAGS_update_log}" >&2
-    die "Update was not successful."
-  fi
-
-  if [ -n "${stateful_pid}" ]; then
-    local stateful_success=0
-    if ! wait ${stateful_pid}; then
-      stateful_success=1
-    fi
-    cat "${stateful_tmp_file}"
-    rm "${stateful_tmp_file}"
-    if [ ${stateful_success} -ne 0 ]; then
-      die "Stateful update was not successful."
-    fi
-  fi
-
-  if [ ${FLAGS_reboot_after_update} -eq ${FLAGS_FALSE} ]; then
-    echo "Not rebooting because of --noreboot_after_update"
-    return 0
-  fi
-
-  remote_reboot
-
-  if [[ ${FLAGS_update_hostkey} -eq ${FLAGS_TRUE} ]]; then
-    local known_hosts="${HOME}/.ssh/known_hosts"
-    cp "${known_hosts}" "${known_hosts}~"
-    grep -v "^${FLAGS_remote} " "${known_hosts}" > "${TMP}/new_known_hosts"
-    cat "${TMP}/new_known_hosts" "${TMP_KNOWN_HOSTS}" > "${known_hosts}"
-    chmod 0640 "${known_hosts}"
-    info "New updated in ${known_hosts}, backup made."
-  fi
-
-  remote_sh "grep ^CHROMEOS_RELEASE_DESCRIPTION= /etc/lsb-release"
-  if [ ${FLAGS_verify} -eq ${FLAGS_TRUE} ]; then
-    verify_image
-
-    if [ "${initial_root_dev}" == "$(find_root_dev)" ]; then
-      # At this point, the software version didn't change, but we didn't
-      # switch partitions either. Means it was an update to the same version
-      # that failed.
-      die "The root partition did NOT change. The update failed."
-    fi
-  else
-    local release_description=$(echo ${REMOTE_OUT} | cut -d '=' -f 2)
-    info "Update was successful and rebooted to $release_description"
-  fi
-}
-
-main() {
-  echo "$DEPRECATION_WARNING"
-
-  cd "${SCRIPTS_DIR}"
-
-  FLAGS "$@" || exit 1
-  eval set -- "${FLAGS_ARGV}"
-
-  set -e
-
-  trap cleanup EXIT INT TERM
-
-  TMP=$(mktemp -d /tmp/image_to_live.XXXX)
-
-  remote_access_init
-
-  for i in $(seq 1 ${FLAGS_repeat}); do
-    echo "Iteration: " $i of ${FLAGS_repeat}
-    run_once
-    if [ ${FLAGS_repeat} -gt 1 ]; then
-      print_time_elapsed
-      remote_sh "${UPDATER_BIN} --reset_status 2> /dev/null"
-    fi
-  done
-
-  command_completed
-  exit 0
-}
-
-main $@