blob: 67d3d93b478344aa8226535d80528b38e77d5e7c [file] [log] [blame]
"""This class defines the Remote host class."""
import os, logging, urllib, time
from autotest_lib.client.common_lib import error
from autotest_lib.server import utils
from autotest_lib.server.hosts import base_classes
class RemoteHost(base_classes.Host):
    """
    This class represents a remote machine on which you can run
    programs.

    It may be accessed through a network, a serial line, ...
    It is not the machine autoserv is running on.

    Implementation details:
    This is an abstract class, leaf subclasses must implement the methods
    listed here and in parent classes which have no implementation. They
    may reimplement methods which already have an implementation. You
    must not instantiate this class but should instantiate one of those
    leaf subclasses.
    """

    # NOTE(review): these two timeout constants are referenced by the
    # halt()/reboot() signatures below but their definitions were lost from
    # this copy of the file; values reconstructed from upstream autotest.
    # Seconds to wait for the machine to go down after /sbin/halt.
    DEFAULT_HALT_TIMEOUT = 2 * 60
    # Reuse the generic reboot timeout from the base Host class so the
    # default stays consistent across host types.
    DEFAULT_REBOOT_TIMEOUT = base_classes.Host.DEFAULT_REBOOT_TIMEOUT

    # Sentinel meaning "boot whichever kernel was .boot'ed last in this job".
    LAST_BOOT_TAG = object()

    # Snapshot location for /var/log/messages taken at job start.
    VAR_LOG_MESSAGES_COPY_PATH = "/var/tmp/messages.autotest_start"


    def _initialize(self, hostname, autodir=None, *args, **dargs):
        """
        Set up the remote host state.

        @param hostname: network name used to reach the remote machine.
        @param autodir: path where autotest is installed on the remote
                machine, or None if not (yet) known.
        """
        super(RemoteHost, self)._initialize(*args, **dargs)

        self.hostname = hostname
        self.autodir = autodir
        # Remote temp dirs handed out by get_tmp_dir(); removed in close().
        self.tmp_dirs = []


    def __repr__(self):
        return "<remote host: %s>" % self.hostname


    def close(self):
        """Close the host and best-effort delete any temp dirs we created."""
        super(RemoteHost, self).close()

        # hasattr guard: close() may run on a partially-initialized object.
        if hasattr(self, 'tmp_dirs'):
            for dir in self.tmp_dirs:
                try:
                    self.run('rm -rf "%s"' % (utils.sh_escape(dir)))
                except error.AutoservRunError:
                    # Best-effort cleanup; the host may already be down.
                    pass


    def job_start(self):
        """
        Abstract method, called the first time a remote host object
        is created for a specific host after a job starts.

        This method depends on the create_host factory being used to
        construct your host object. If you directly construct host objects
        you will need to call this method yourself (and enforce the
        single-call rule).
        """
        # Snapshot /var/log/messages so the job can later diff against it.
        try:
            self.run('rm -f %s' % self.VAR_LOG_MESSAGES_COPY_PATH)
            self.run('cp /var/log/messages %s' %
                     self.VAR_LOG_MESSAGES_COPY_PATH)
        except Exception as e:
            # Non-fatal error
            logging.info('Failed to copy /var/log/messages at startup: %s', e)


    def get_autodir(self):
        return self.autodir


    def set_autodir(self, autodir):
        """
        This method is called to make the host object aware of the
        where autotest is installed. Called in server/autotest.py
        after a successful install
        """
        self.autodir = autodir


    def sysrq_reboot(self):
        # Magic SysRq 'b': immediate reboot without syncing or unmounting.
        self.run('echo b > /proc/sysrq-trigger &')


    def halt(self, timeout=DEFAULT_HALT_TIMEOUT, wait=True):
        """
        Halt the remote machine.

        @param timeout: seconds to wait for the machine to go down.
        @param wait: if True, block until the machine stops responding.
        """
        self.run('/sbin/halt')
        if wait:
            self.wait_down(timeout=timeout)


    def reboot(self, timeout=DEFAULT_REBOOT_TIMEOUT, label=LAST_BOOT_TAG,
               kernel_args=None, wait=True, fastsync=False,
               reboot_cmd=None, **dargs):
        """
        Reboot the remote host.

        Args:
                timeout - How long to wait for the reboot.
                label - The label we should boot into.  If None, we will
                        boot into the default kernel. If it's LAST_BOOT_TAG,
                        we'll boot into whichever kernel was .boot'ed last
                        (or the default kernel if we haven't .boot'ed in this
                        job). If it's None, we'll boot into the default kernel.
                        If it's something else, we'll boot into that.
                wait - Should we wait to see if the machine comes back up.
                fastsync - Don't wait for the sync to complete, just start one
                        and move on. This is for cases where rebooting prompty
                        is more important than data integrity and/or the
                        machine may have disks that cause sync to never return.
                reboot_cmd - Reboot command to execute.
        """
        if self.job:
            if label == self.LAST_BOOT_TAG:
                label = self.job.last_boot_tag
            else:
                self.job.last_boot_tag = label

        self.reboot_setup(label=label, kernel_args=kernel_args, **dargs)

        if label or kernel_args:
            if not label:
                label = self.bootloader.get_default_title()
            # NOTE(review): a bootloader "boot once" call likely sat here in
            # the original (the label computed above is otherwise unused when
            # kernel_args is None) -- confirm against project history.
            self.bootloader.boot_once(label)
            if kernel_args:
                self.bootloader.add_args(label, kernel_args)

        # define a function for the reboot and run it in a group
        def reboot():
            self.record("GOOD", None, "reboot.start")
            try:
                current_boot_id = self.get_boot_id()

                # sync before starting the reboot, so that a long sync during
                # shutdown isn't timed out by wait_down's short timeout
                if not fastsync:
                    self.run('sync; sync', timeout=timeout, ignore_status=True)

                if reboot_cmd:
                    self.run(reboot_cmd)
                else:
                    # Try several methods of rebooting in increasing harshness.
                    self.run('(('
                             ' sync &'
                             ' sleep 5; reboot &'
                             ' sleep 60; reboot -f &'
                             ' sleep 10; reboot -nf &'
                             ' sleep 10; telinit 6 &'
                             ') </dev/null >/dev/null 2>&1 &)')
            except error.AutoservRunError:
                self.record("ABORT", None, "reboot.start",
                            "reboot command failed")
                raise
            if wait:
                self.wait_for_restart(timeout, old_boot_id=current_boot_id,
                                      **dargs)

        # if this is a full reboot-and-wait, run the reboot inside a group
        if wait:
            self.log_op(self.OP_REBOOT, reboot)
        else:
            reboot()


    def suspend(self, timeout, suspend_cmd, **dargs):
        """
        Suspend the remote host.

        Args:
                timeout - How long to wait for the suspend, in seconds.
                suspend_cmd - suspend command to execute.

        Raises:
                error.AutoservSuspendError if the suspend command fails, the
                machine never stops responding, or it resumes too early.
        """
        # define a function for the supend and run it in a group
        def suspend():
            self.record("GOOD", None,
                        "suspend.start for %d seconds" % (timeout))
            try:
                self.run(suspend_cmd)
            except error.AutoservRunError:
                self.record("ABORT", None, "suspend.start",
                            "suspend command failed")
                raise error.AutoservSuspendError("suspend command failed")

            # Wait for some time, to ensure the machine is going to sleep.
            # Not too long to check if the machine really suspended.
            time_slice = min(timeout / 2, 300)
            time.sleep(time_slice)
            time_counter = time_slice
            while time_counter < timeout + 60:
                # Check if the machine is back. We check regularely to
                # ensure the machine was suspended long enough.
                if utils.ping(self.hostname, tries=1, deadline=1) == 0:
                    break
                else:
                    # Shorten the poll interval near the expected wake time
                    # so the resume is noticed promptly.
                    if time_counter > timeout - 10:
                        time_slice = 5
                    else:
                        time_slice = 60
                    time.sleep(time_slice)
                    time_counter += time_slice

            if utils.ping(self.hostname, tries=1, deadline=1) != 0:
                raise error.AutoservSuspendError(
                    "DUT is not responding after %d seconds" % (time_counter))

        start_time = time.time()
        self.log_op(self.OP_SUSPEND, suspend)
        lasted = time.time() - start_time
        if (lasted < timeout):
            raise error.AutoservSuspendError(
                "Suspend did not last long enough: %d instead of %d" % (
                    lasted, timeout))


    def reboot_followup(self, *args, **dargs):
        super(RemoteHost, self).reboot_followup(*args, **dargs)
        if self.job:
            # NOTE(review): reconstructed -- the statement under this guard
            # was lost; upstream notifies the job's profilers of the reboot.
            self.job.profilers.handle_reboot(self)


    def wait_for_restart(self, timeout=DEFAULT_REBOOT_TIMEOUT, **dargs):
        """
        Wait for the host to come back from a reboot. This wraps the
        generic wait_for_restart implementation in a reboot group.
        """
        def op_func():
            super(RemoteHost, self).wait_for_restart(timeout=timeout, **dargs)
        self.log_op(self.OP_REBOOT, op_func)


    def cleanup(self):
        # NOTE(review): only the super call is visible in this copy; some
        # autotest versions also reboot the host here -- confirm against
        # project history before relying on cleanup side effects.
        super(RemoteHost, self).cleanup()


    def get_tmp_dir(self, parent='/tmp'):
        """
        Return the pathname of a directory on the host suitable
        for temporary file storage.

        The directory and its content will be deleted automatically
        on the destruction of the Host object that was used to obtain
        it.
        """
        self.run("mkdir -p %s" % parent)
        template = os.path.join(parent, 'autoserv-XXXXXX')
        dir_name = self.run("mktemp -d %s" % template).stdout.rstrip()
        # Track the dir so close() can delete it later.
        self.tmp_dirs.append(dir_name)
        return dir_name


    def get_platform_label(self):
        """
        Return the platform label, or None if platform label is not set.
        """
        if self.job:
            keyval_path = os.path.join(self.job.resultdir, 'host_keyvals',
                                       self.hostname)
            keyvals = utils.read_keyval(keyval_path)
            return keyvals.get('platform', None)
        else:
            return None


    def get_all_labels(self):
        """
        Return all labels, or empty list if label is not set.
        """
        if self.job:
            keyval_path = os.path.join(self.job.resultdir, 'host_keyvals',
                                       self.hostname)
            keyvals = utils.read_keyval(keyval_path)
            all_labels = keyvals.get('labels', '')
            if all_labels:
                all_labels = all_labels.split(',')
                # Labels are stored URL-quoted; decode before returning.
                return [urllib.unquote(label) for label in all_labels]
        return []


    def delete_tmp_dir(self, tmpdir):
        """
        Delete the given temporary directory on the remote machine.
        """
        self.run('rm -rf "%s"' % utils.sh_escape(tmpdir), ignore_status=True)


    def check_uptime(self):
        """
        Check that uptime is available and monotonically increasing.
        """
        if not self.is_up():
            raise error.AutoservHostError('Client does not appear to be up')
        result = self.run("/bin/cat /proc/uptime", 30)
        # First field of /proc/uptime is seconds since boot.
        return result.stdout.strip().split()[0]


    def check_for_lkdtm(self):
        """
        Check for kernel dump test module. return True if exist.
        """
        cmd = 'ls /sys/kernel/debug/provoke-crash/DIRECT'
        return self.run(cmd, ignore_status=True).exit_status == 0


    def are_wait_up_processes_up(self):
        """
        Checks if any HOSTS waitup processes are running yet on the
        remote host.

        Returns True if any the waitup processes are running, False
        otherwise.
        """
        processes = self.get_wait_up_processes()
        if len(processes) == 0:
            return True # wait up processes aren't being used
        for procname in processes:
            # 'ps -e' where available, bare 'ps' as a busybox fallback.
            exit_status = self.run("{ ps -e || ps; } | grep '%s'" % procname,
                                   ignore_status=True).exit_status
            if exit_status == 0:
                return True
        return False