blob: b8dc0db3e06017c88f3e54293d8c51c34bba10c6 [file] [log] [blame]
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module provides some tools to interact with LXC containers, for example:
1. Download base container from given GS location, setup the base container.
2. Create a snapshot as test container from base container.
3. Mount a directory in drone to the test container.
4. Run a command in the container and return the output.
5. Cleanup, e.g., destroy the container.
This tool can also be used to set up a base container for test. For example,
python lxc.py -s -p /tmp/container
This command will download and setup base container in directory /tmp/container.
After that command finishes, you can run lxc command to work with the base
container, e.g.,
lxc-start -P /tmp/container -n base -d
lxc-attach -P /tmp/container -n base
import argparse
import logging
import os
import re
import socket
import sys
import time
import common
from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib.cros import retry
from autotest_lib.client.common_lib.cros.graphite import autotest_es
from autotest_lib.client.common_lib.cros.graphite import autotest_stats
from autotest_lib.server import utils as server_utils
from autotest_lib.site_utils import lxc_config
from autotest_lib.site_utils import lxc_utils
config = global_config.global_config
# Name of the base container.
BASE = config.get_config_value('AUTOSERV', 'container_base_name')
# Naming convention of test container, e.g., test_300_1422862512_2424, where:
# 300: The test job ID.
# 1422862512: The tick when container is created.
# 2424: The PID of autoserv that starts the container.
TEST_CONTAINER_NAME_FMT = 'test_%s_%d_%d'
# Naming convention of the result directory in test container.
# NOTE(review): the call below is missing its final argument and closing
# paren in this copy of the file -- this looks like a truncated export
# (upstream appends a '%s' placeholder); confirm against the original source.
RESULT_DIR_FMT = os.path.join(lxc_config.CONTAINER_AUTOTEST_DIR, 'results',
# Attributes to retrieve about containers.
ATTRIBUTES = ['name', 'state']
# Format for mount entry to share a directory in host with container.
# source is the directory in host, destination is the directory in container.
# readonly is a binding flag for readonly mount, its value should be `,ro`.
MOUNT_FMT = ('lxc.mount.entry = %(source)s %(destination)s none '
             'bind%(readonly)s 0 0')
# True when server-side packaging (running tests inside LXC) is enabled.
SSP_ENABLED = config.get_config_value('AUTOSERV', 'enable_ssp_container',
                                      type=bool, default=True)
# url to the folder stores base container.
# NOTE(review): the second argument (the config key name) is missing below --
# truncated export; confirm against the original source.
CONTAINER_BASE_FOLDER_URL = config.get_config_value('AUTOSERV',
# Default directory used to store LXC containers.
DEFAULT_CONTAINER_PATH = config.get_config_value('AUTOSERV', 'container_path')
# Path to drone_temp folder in the container, which stores the control file for
# test job to run.
CONTROL_TEMP_PATH = os.path.join(lxc_config.CONTAINER_AUTOTEST_DIR, 'drone_tmp')
# Bash command to return the file count in a directory. Test the existence first
# so the command can return an error code if the directory doesn't exist.
COUNT_FILE_CMD = '[ -d %(dir)s ] && ls %(dir)s | wc -l'
# Command line to append content to a file
APPEND_CMD_FMT = ('echo \'%(content)s\' | sudo tee --append %(file)s'
                  '> /dev/null')
# Path to site-packages in Moblab
MOBLAB_SITE_PACKAGES = '/usr/lib64/python2.7/site-packages'
MOBLAB_SITE_PACKAGES_CONTAINER = '/usr/local/lib/python2.7/dist-packages/'
# Flag to indicate it's running in a Moblab. Due to, lxc-ls has
# different behavior in Moblab.
IS_MOBLAB = utils.is_moblab()
# TODO(dshi): If we are adding more logic in how lxc should interact with
# different systems, we should consider code refactoring to use a setting-style
# object to store following flags mapping to different systems.
# TODO( Snapshot clone is disabled until Moblab can
# support overlayfs or aufs, which requires a newer kernel.
# NOTE(review): the SUPPORT_SNAPSHOT_CLONE assignment that the comment above
# describes is missing from this copy, yet create_from_base references it --
# confirm against the original source.
# Number of seconds to wait for network to be up in a container.
# Network bring up is slower in Moblab.
# NOTE(review): the network-timeout constants described by the two comments
# above are also missing from this copy.
# Type string for container related metadata.
CONTAINER_CREATE_RETRY_METADB_TYPE = 'container_create_retry'
CONTAINER_RUN_TEST_METADB_TYPE = 'container_run_test'
# The container's hostname MUST start with `test_`. DHCP server in MobLab uses
# that prefix to determine the lease time.
# NOTE(review): the CONTAINER_UTSNAME_FORMAT constant described above is
# missing from this copy, yet setup_test references it -- confirm upstream.
# Stats key is scoped by drone hostname so per-host timers don't collide.
STATS_KEY = 'lxc.%s' % socket.gethostname().replace('.', '_')
timer = autotest_stats.Timer(STATS_KEY)
# Timer used inside container should not include the hostname, as that will
# create individual timer for each container.
container_timer = autotest_stats.Timer('lxc')
def _get_container_info_moblab(container_path, **filters):
    """Get a collection of container information in the given container path
    in a Moblab.

    TODO: remove this method once python 3 can be installed in Moblab and
    lxc-ls command can use python 3 code.

    When running in Moblab, lxc-ls behaves differently from a server with
    python 3 installed:
    1. lxc-ls returns a list of containers installed under /etc/lxc, the
       default lxc container directory.
    2. lxc-ls --active lists all active containers, regardless where the
       container is located.
    For such differences, we have to special case Moblab to make the behavior
    close to a server with python 3 installed. That is,
    1. List only containers in a given folder.
    2. Assume all active containers have state of RUNNING.

    @param container_path: Path to look for containers.
    @param filters: Key value to filter the containers, e.g., name='base'

    @return: A list of dictionaries that each dictionary has the information
             of a container. The keys are defined in ATTRIBUTES.
    """
    info_collection = []
    active_containers = utils.run('sudo lxc-ls --active').stdout.split()
    name_filter = filters.get('name', None)
    state_filter = filters.get('state', None)
    # Only `name` and `state` can be filtered in Moblab; reject anything else
    # explicitly rather than silently ignoring it.
    if filters and set(filters.keys()) - set(['name', 'state']):
        raise error.ContainerError('When running in Moblab, container list '
                                   'filter only supports name and state.')

    for name in os.listdir(container_path):
        # Skip all files and folders without rootfs subfolder.
        if (os.path.isfile(os.path.join(container_path, name)) or
                not lxc_utils.path_exists(os.path.join(container_path, name,
                                                       'rootfs'))):
            continue
        # `lxc-ls --active` only tells us which containers run; assume any
        # active container is RUNNING (see docstring).
        info = {'name': name,
                'state': 'RUNNING' if name in active_containers else 'STOPPED'
               }
        if ((name_filter and name_filter != info['name']) or
                (state_filter and state_filter != info['state'])):
            continue

        info_collection.append(info)
    return info_collection
def get_container_info(container_path, **filters):
    """Get a collection of container information in the given container path.

    This method parse the output of lxc-ls to get a list of container
    information. The lxc-ls command output looks like:
    base      STOPPED  -  -  NO  -     -      -      -
    test_123  RUNNING  -  -  NO  8359  6.28MB 6.28MB 0.0MB

    @param container_path: Path to look for containers.
    @param filters: Key value to filter the containers, e.g., name='base'

    @return: A list of dictionaries that each dictionary has the information of
             a container. The keys are defined in ATTRIBUTES.
    """
    # Moblab's lxc-ls lacks python 3 support; use the special-cased helper.
    if IS_MOBLAB:
        return _get_container_info_moblab(container_path, **filters)

    cmd = 'sudo lxc-ls -P %s -f -F %s' % (os.path.realpath(container_path),
                                          ','.join(ATTRIBUTES))
    output = utils.run(cmd).stdout
    info_collection = []

    # The first line of output is the column header; skip it.
    for line in output.splitlines()[1:]:
        # Only LXC 1.x has the second line of '-' as a separator.
        if line.startswith('------'):
            continue
        info_collection.append(dict(zip(ATTRIBUTES, line.split())))

    if filters:
        filtered_collection = []
        for key, value in filters.iteritems():
            for info in info_collection:
                if key in info and info[key] == value:
                    filtered_collection.append(info)
        info_collection = filtered_collection
    return info_collection
def cleanup_if_fail():
    """Decorator to do cleanup if container fails to be set up.
    def deco_cleanup_if_fail(func):
        """Wrapper for the decorator.
        @param func: Function to be called.
        def func_cleanup_if_fail(*args, **kwargs):
            """Decorator to do cleanup if container fails to be set up.
            The first argument must be a ContainerBucket object, which can be
            used to retrieve the container object by name.
            @param func: function to be called.
            @param args: arguments for function to be called.
            @param kwargs: keyword arguments for function to be called.
            # NOTE(review): several scaffolding lines (try:/except:/else:,
            # docstring terminators, the container.destroy() call and the
            # metadata-posting call) are missing from this copy of the file;
            # the remaining statements are kept verbatim. Confirm against the
            # original source before modifying.
            bucket = args[0]
            name = utils.get_function_arg_value(func, 'name', args, kwargs)
            # skip_cleanup lets callers keep a failed container around for
            # debugging; when the wrapped function has no such argument,
            # default to False (i.e. clean up).
            skip_cleanup = utils.get_function_arg_value(
                    func, 'skip_cleanup', args, kwargs)
            except (KeyError, ValueError):
                skip_cleanup = False
            return func(*args, **kwargs)
            # On failure: cache the exception info, attempt cleanup, then
            # re-raise with the original traceback (Python 2 three-expression
            # raise at the bottom).
            exc_info = sys.exc_info()
            container = bucket.get(name)
            if container and not skip_cleanup:
            except error.CmdError as e:
                job_id = utils.get_function_arg_value(
                        func, 'job_id', args, kwargs)
                except (KeyError, ValueError):
                    job_id = ''
                metadata={'drone': socket.gethostname(),
                          'job_id': job_id,
                          'success': False}
                # Record all args if job_id is not available.
                if not job_id:
                    metadata['args'] = str(args)
                    if kwargs:
                # Raise the cached exception with original backtrace.
                raise exc_info[0], exc_info[1], exc_info[2]
        return func_cleanup_if_fail
    return deco_cleanup_if_fail
@retry.retry(error.CmdError, timeout_min=5)
def download_extract(url, target, extract_dir):
    """Download the file from given url and save it to the target, then extract.

    @param url: Url to download the file.
    @param target: Path of the file to save to.
    @param extract_dir: Directory to extract the content of the file to.

    @raise error.CmdError: If download or extraction fails (retried by the
                           decorator for up to 5 minutes).
    """
    # sudo is needed because target/extract_dir may be root-owned; stderr is
    # tee'd to the logs so wget progress/errors are captured.
    utils.run('sudo wget --timeout=300 -nv %s -O %s' % (url, target),
              stderr_tee=utils.TEE_TO_LOGS)
    utils.run('sudo tar -xvf %s -C %s' % (target, extract_dir))
def install_package_precheck(packages):
    """If SSP is not enabled or the test is running in chroot (using test_that),
    packages installation should be skipped.

    The check does not raise exception so tests started by test_that or running
    in an Autotest setup with SSP disabled can continue. That assume the running
    environment, chroot or a machine, has the desired packages installed
    already.

    @param packages: A list of names of the packages to install.

    @return: True if package installation can continue. False if it should be
             skipped.
    """
    if not SSP_ENABLED and not utils.is_in_container():
        logging.info('Server-side packaging is not enabled. Install package %s '
                     'is skipped.', packages)
        return False

    if server_utils.is_inside_chroot():
        logging.info('Test is running inside chroot. Install package %s is '
                     'skipped.', packages)
        return False

    return True
@retry.retry(error.CmdError, timeout_min=30)
def install_packages(packages=None, python_packages=None):
    """Install the given packages inside container.

    @param packages: A list of names of the packages to install. Default is
                     None (no system packages).
    @param python_packages: A list of names of the python packages to install
                            using pip. Default is None.

    @raise error.ContainerError: If package is attempted to be installed outside
                                 a container.
    @raise error.CmdError: If the package doesn't exist or failed to install.
    """
    # Copy into fresh lists: avoids the mutable-default-argument pitfall and
    # prevents the extend() below from mutating a list the caller passed in.
    packages = list(packages) if packages else []
    python_packages = list(python_packages) if python_packages else []

    if not install_package_precheck(packages or python_packages):
        return

    if not utils.is_in_container():
        raise error.ContainerError('Package installation is only supported '
                                   'when test is running inside container.')
    # Always run apt-get update before installing any container. The base
    # container may have outdated cache.
    utils.run('sudo apt-get update')
    # pip and the python headers are prerequisites for installing python
    # packages below.
    if python_packages:
        packages.extend(['python-pip', 'python-dev'])
    if packages:
        utils.run('sudo apt-get install %s -y --force-yes' %
                  ' '.join(packages))
        logging.debug('Packages are installed: %s.', packages)

    target_setting = ''
    # For containers running in Moblab, /usr/local/lib/python2.7/dist-packages/
    # is a readonly mount from the host. Therefore, new python modules have to
    # be installed in /usr/lib/python2.7/dist-packages/
    # Containers created in Moblab does not have autotest/site-packages folder.
    if not os.path.exists('/usr/local/autotest/site-packages'):
        target_setting = '--target="/usr/lib/python2.7/dist-packages/"'
    if python_packages:
        utils.run('sudo pip install %s %s' % (target_setting,
                                              ' '.join(python_packages)))
        logging.debug('Python packages are installed: %s.', python_packages)
@retry.retry(error.CmdError, timeout_min=20)
def install_package(package):
    """Install the given package inside container.

    This function is kept for backwards compatibility reason. New code should
    use function install_packages for better performance.

    @param package: Name of the package to install.

    @raise error.ContainerError: If package is attempted to be installed outside
                                 a container.
    @raise error.CmdError: If the package doesn't exist or failed to install.
    """
    logging.warn('This function is obsoleted, please use install_packages '
                 'instead.')
    # Delegate to the batch installer with a single-element list.
    install_packages(packages=[package])
@retry.retry(error.CmdError, timeout_min=20)
def install_python_package(package):
    """Install the given python package inside container using pip.

    This function is kept for backwards compatibility reason. New code should
    use function install_packages for better performance.

    @param package: Name of the python package to install.

    @raise error.CmdError: If the package doesn't exist or failed to install.
    """
    logging.warn('This function is obsoleted, please use install_packages '
                 'instead.')
    # Delegate to the batch installer with a single-element list.
    install_packages(python_packages=[package])
class Container(object):
    """A wrapper class of an LXC container.
    The wrapper class provides methods to interact with a container, e.g.,
    start, stop, destroy, run a command. It also has attributes of the
    container, including:
    name: Name of the container.
    state: State of the container, e.g., ABORTING, RUNNING, STARTING, STOPPED,
    lxc-ls can also collect other attributes of a container including:
    ipv4: IP address for IPv4.
    ipv6: IP address for IPv6.
    autostart: If the container will autostart at system boot.
    pid: Process ID of the container.
    memory: Memory used by the container, as a string, e.g., "6.2MB"
    ram: Physical ram used by the container, as a string, e.g., "6.2MB"
    swap: swap used by the container, as a string, e.g., "1.0MB"
    For performance reason, such info is not collected for now.
    The attributes available are defined in ATTRIBUTES constant.

    # NOTE(review): throughout this class, docstring terminators and various
    # statement fragments (utils.run(...) calls, try:/else: lines, `self.name`
    # references, decorators) are missing from this copy of the file --
    # apparently a truncated export. Code tokens below are kept verbatim;
    # confirm every flagged spot against the original source before editing.

    def __init__(self, container_path, attribute_values):
        """Initialize an object of LXC container with given attribute values.
        @param container_path: Directory that stores the container.
        @param attribute_values: A dictionary of attribute values for the
        # Resolve symlinks so later lxc -P arguments always use the real path.
        self.container_path = os.path.realpath(container_path)
        # Path to the rootfs of the container. This will be initialized when
        # property rootfs is retrieved.
        self._rootfs = None
        for attribute, value in attribute_values.iteritems():
            setattr(self, attribute, value)

    def refresh_status(self):
        """Refresh the status information of the container.
        # Re-query lxc for this container's current attributes and copy them
        # onto this object.
        containers = get_container_info(self.container_path,
        if not containers:
            raise error.ContainerError(
                    'No container found in directory %s with name of %s.' %
        attribute_values = containers[0]
        for attribute, value in attribute_values.iteritems():
            setattr(self, attribute, value)

    # NOTE(review): presumably decorated with @property upstream -- the
    # decorator line is missing in this copy; confirm before relying on
    # attribute-style access.
    def rootfs(self):
        """Path to the rootfs of the container.
        This property returns the path to the rootfs of the container, that is,
        the folder where the container stores its local files. It reads the
        attribute lxc.rootfs from the config file of the container, e.g.,
        lxc.rootfs = /usr/local/autotest/containers/t4/rootfs
        If the container is created with snapshot, the rootfs is a chain of
        folders, separated by `:` and ordered by how the snapshot is created,
        lxc.rootfs = overlayfs:/usr/local/autotest/containers/base/rootfs:
        This function returns the last folder in the chain, in above example,
        that is `/usr/local/autotest/containers/t4_s/delta0`
        Files in the rootfs will be accessible directly within container. For
        example, a folder in host "[rootfs]/usr/local/file1", can be accessed
        inside container by path "/usr/local/file1". Note that symlink in the
        host can not across host/container boundary, instead, directory mount
        should be used, refer to function mount_dir.
        @return: Path to the rootfs of the container.
        # Computed lazily and cached in self._rootfs.
        if not self._rootfs:
            cmd = ('sudo lxc-info -P %s -n %s -c lxc.rootfs' %
            lxc_rootfs_config =
            match = re.match('lxc.rootfs = (.*)', lxc_rootfs_config)
            if not match:
                raise error.ContainerError(
                        'Failed to locate rootfs for container %s. lxc.rootfs '
                        'in the container config file is %s' %
                        (, lxc_rootfs_config))
            lxc_rootfs =
            # A ':' in the value means snapshot clone: the value is a
            # backend:dir1:dir2 chain; the last element is the writable layer.
            self.clone_from_snapshot = ':' in lxc_rootfs
            if self.clone_from_snapshot:
                self._rootfs = lxc_rootfs.split(':')[-1]
            # NOTE(review): the `else:` line before this assignment is missing
            # in this copy.
            self._rootfs = lxc_rootfs
        return self._rootfs

    def attach_run(self, command, bash=True):
        """Attach to a given container and run the given command.
        @param command: Command to run in the container.
        @param bash: Run the command through bash -c "command". This allows
                     pipes to be used in command. Default is set to True.
        @return: The output of the command.
        @raise error.CmdError: If container does not exist, or not running.
        cmd = 'sudo lxc-attach -P %s -n %s' % (self.container_path,
        if bash and not command.startswith('bash -c'):
            # Escape through sh_escape so embedded quotes/pipes survive the
            # extra shell layer.
            command = 'bash -c "%s"' % utils.sh_escape(command)
        cmd += ' -- %s' % command
        # TODO(dshi): Set sudo to default to False when test
        # container can be unprivileged container.

    def is_network_up(self):
        """Check if network is up in the container by curl base container url.
        @return: True if the network is up, otherwise False.
        # NOTE(review): the `try:` opening this block is missing in this copy.
        self.attach_run('curl --head %s' % CONTAINER_BASE_URL)
        return True
        except error.CmdError as e:
            return False

    def start(self, wait_for_network=True):
        """Start the container.
        @param wait_for_network: True to wait for network to be up. Default is
                                 set to True.
        @raise ContainerError: If container does not exist, or fails to start.
        cmd = 'sudo lxc-start -P %s -n %s -d' % (self.container_path,
        output =
        if self.state != 'RUNNING':
            raise error.ContainerError(
                    'Container %s failed to start. lxc command output:\n%s' %
        if wait_for_network:
            logging.debug('Wait for network to be up.')
            start_time = time.time()
            # NOTE(review): the polling loop that waits for is_network_up()
            # appears to be missing from this copy.
            logging.debug('Network is up after %.2f seconds.',
                          time.time() - start_time)

    def stop(self):
        """Stop the container.
        @raise ContainerError: If container does not exist, or fails to start.
        cmd = 'sudo lxc-stop -P %s -n %s' % (self.container_path,
        output =
        if self.state != 'STOPPED':
            raise error.ContainerError(
                    'Container %s failed to be stopped. lxc command output:\n'
                    '%s' % (os.path.join(self.container_path,,

    def destroy(self, force=True):
        """Destroy the container.
        @param force: Set to True to force to destroy the container even if it's
                      running. This is faster than stop a container first then
                      try to destroy it. Default is set to True.
        @raise ContainerError: If container does not exist or failed to destroy
                               the container.
        cmd = 'sudo lxc-destroy -P %s -n %s' % (self.container_path,
        if force:
            cmd += ' -f'

    def mount_dir(self, source, destination, readonly=False):
        """Mount a directory in host to a directory in the container.
        @param source: Directory in host to be mounted.
        @param destination: Directory in container to mount the source directory
        @param readonly: Set to True to make a readonly mount, default is False.
        # Destination path in container must be relative.
        destination = destination.lstrip('/')
        # Create directory in container for mount.'sudo mkdir -p %s' % os.path.join(self.rootfs, destination))
        config_file = os.path.join(self.container_path,, 'config')
        mount = MOUNT_FMT % {'source': source,
                             'destination': destination,
                             'readonly': ',ro' if readonly else ''} % {'content': mount, 'file': config_file})

    def verify_autotest_setup(self, job_folder):
        """Verify autotest code is set up properly in the container.
        @param job_folder: Name of the job result folder.
        @raise ContainerError: If autotest code is not set up properly.
        # Test autotest code is setup by verifying a list of
        # (directory, minimum file count)
        site_packages_path = os.path.join(lxc_config.CONTAINER_AUTOTEST_DIR,
        directories_to_check = [
                (RESULT_DIR_FMT % job_folder, 0),
                (site_packages_path, 3)]
        for directory, count in directories_to_check:
            result = self.attach_run(command=(COUNT_FILE_CMD %
                                              {'dir': directory})).stdout
            logging.debug('%s entries in %s.', int(result), directory)
            if int(result) < count:
                raise error.ContainerError('%s is not properly set up.' %
        # lxc-attach and run command does not run in shell, thus .bashrc is not
        # loaded. Following command creates a symlink in /usr/bin/ for gsutil
        # if it's installed.
        # TODO(dshi): Remove this code after lab container is updated with
        # gsutil installed in /usr/bin/
        self.attach_run('test -f /root/gsutil/gsutil && '
                        'ln -s /root/gsutil/gsutil /usr/bin/gsutil || true')

    def modify_import_order(self):
        """Swap the python import order of lib and local/lib.
        In Moblab, the host's python modules located in
        /usr/lib64/python2.7/site-packages is mounted to following folder inside
        container: /usr/local/lib/python2.7/dist-packages/. The modules include
        an old version of requests module, which is used in autotest
        site-packages. For test, the module is only used in
        dev_server/symbolicate_dump for and
        When pip is installed inside the container, it installs requests module
        with version of 2.2.1 in /usr/lib/python2.7/dist-packages/. The version
        is newer than the one used in autotest site-packages, but not the latest
        According to /usr/lib/python2.7/, modules in /usr/local/lib are
        imported before the ones in /usr/lib. That leads to pip to use the older
        version of requests (0.11.2), and it will fail. On the other hand,
        requests module 2.2.1 can't be installed in CrOS (refer to CL:265759),
        and higher version of requests module can't work with pip.
        The only fix to resolve this is to switch the import order, so modules
        in /usr/lib can be imported before /usr/local/lib.
        # NOTE(review): the path basename (presumably 'site.py') looks
        # truncated in this copy -- confirm.
        site_module = '/usr/lib/python2.7/'
        # Three sed passes: rename "local/lib" to a placeholder, rename "lib"
        # to "local/lib", then rename the placeholder to "lib".
        self.attach_run("sed -i ':a;N;$!ba;s/\"local\/lib\",\\n/"
                        "\"lib_placeholder\",\\n/g' %s" % site_module)
        self.attach_run("sed -i ':a;N;$!ba;s/\"lib\",\\n/"
                        "\"local\/lib\",\\n/g' %s" % site_module)
        self.attach_run('sed -i "s/lib_placeholder/lib/g" %s' %
class ContainerBucket(object):
    """A wrapper class to interact with containers in a specific container path.

    # NOTE(review): throughout this class, docstring terminators, decorators
    # and various statement fragments (utils.run / logging / autotest_es /
    # destroy calls, try:/else: lines) are missing from this copy of the file
    # -- apparently a truncated export. Code tokens below are kept verbatim;
    # confirm every flagged spot against the original source before editing.

    def __init__(self, container_path=DEFAULT_CONTAINER_PATH):
        """Initialize a ContainerBucket.
        @param container_path: Path to the directory used to store containers.
                               Default is set to AUTOSERV/container_path in
                               global config.
        # Resolve symlinks so later lxc -P arguments always use the real path.
        self.container_path = os.path.realpath(container_path)

    def get_all(self):
        """Get details of all containers.
        @return: A dictionary of all containers with detailed attributes,
                 indexed by container name.
        info_collection = get_container_info(self.container_path)
        containers = {}
        for info in info_collection:
            container = Container(self.container_path, info)
            # NOTE(review): the dict key (presumably is missing
            # between the brackets in this copy.
            containers[] = container
        return containers

    def get(self, name):
        """Get a container with matching name.
        @param name: Name of the container.
        @return: A container object with matching name. Returns None if no
                 container matches the given name.
        return self.get_all().get(name, None)

    def exist(self, name):
        """Check if a container exists with the given name.
        @param name: Name of the container.
        @return: True if the container with the given name exists, otherwise
                 returns False.
        return self.get(name) != None

    def destroy_all(self):
        """Destroy all containers, base must be destroyed at the last.
        containers = self.get_all().values()
        # Sort key maps the base container to 1 and everything else to 0, so
        # base sorts last and is destroyed after all test containers.
        for container in sorted(containers,
                                key=lambda n: 1 if == BASE else 0):'Destroy container %s.',

    def create_from_base(self, name, disable_snapshot_clone=False,
        """Create a container from the base container.
        @param name: Name of the container.
        @param disable_snapshot_clone: Set to True to force to clone without
                using snapshot clone even if the host supports that.
        @param force_cleanup: Force to cleanup existing container.
        @return: A Container object for the created container.
        @raise ContainerError: If the container already exist.
        @raise error.CmdError: If lxc-clone call failed for any reason.
        if self.exist(name) and not force_cleanup:
            raise error.ContainerError('Container %s already exists.' % name)
        # Cleanup existing container with the given name.
        container_folder = os.path.join(self.container_path, name)
        if lxc_utils.path_exists(container_folder) and force_cleanup:
            container = Container(self.container_path, {'name': name})
            except error.CmdError as e:
                # The container could be created in a incompleted state. Delete
                # the container folder instead.
                logging.warn('Failed to destroy container %s, error: %s',
                             name, e)'sudo rm -rf "%s"' % container_folder)
        use_snapshot = SUPPORT_SNAPSHOT_CLONE and not disable_snapshot_clone
        snapshot = '-s' if use_snapshot else ''
        # overlayfs is the default clone backend storage. However it is not
        # supported in Ganeti yet. Use aufs as the alternative.
        aufs = '-B aufs' if utils.is_vm() and use_snapshot else ''
        cmd = ('sudo lxc-clone -p %s -P %s %s' %
               (self.container_path, self.container_path,
                ' '.join([BASE, name, snapshot, aufs])))
        return self.get(name)
        except error.CmdError:
            # NOTE(review): the `raise`/`else:` lines that should separate the
            # no-snapshot failure path from the retry path appear to be
            # missing in this copy.
            if not use_snapshot:
                # Snapshot clone failed, retry clone without snapshot. The retry
                # won't hit the code here and cause an infinite loop as
                # disable_snapshot_clone is set to True.
                container = self.create_from_base(
                        name, disable_snapshot_clone=True, force_cleanup=True)
                # Report metadata about retry success.,
                                 metadata={'drone': socket.gethostname(),
                                           'name': name,
                                           'success': True})
                return container

    def setup_base(self, name=BASE, force_delete=False):
        """Setup base container.
        @param name: Name of the base container, default to base.
        @param force_delete: True to force to delete existing base container.
                             This action will destroy all running test
                             containers. Default is set to False.
        if not self.container_path:
            raise error.ContainerError(
                    'You must set a valid directory to store containers in '
                    'global config "AUTOSERV/ container_path".')
        # NOTE(review): the os.makedirs call that should follow this check is
        # missing in this copy.
        if not os.path.exists(self.container_path):
        base_path = os.path.join(self.container_path, name)
        if self.exist(name) and not force_delete:
                'Base container already exists. Set force_delete to True '
                'to force to re-stage base container. Note that this '
                'action will destroy all running test containers')
        # Set proper file permission. base container in moblab may have
        # owner of not being root. Force to update the folder's owner.
        # TODO(dshi): Change root to current user when test container can be
        # unprivileged container.'sudo chown -R root "%s"' % base_path)'sudo chgrp -R root "%s"' % base_path)
        # Destroy existing base container if exists.
        if self.exist(name):
            # TODO: We may need to destroy all snapshots created from this base
            # container, not all container.
        # Download and untar the base container.
        tar_path = os.path.join(self.container_path, '%s.tar.xz' % name)
        path_to_cleanup = [tar_path, base_path]
        for path in path_to_cleanup:
            if os.path.exists(path):'sudo rm -rf "%s"' % path)
        container_url = CONTAINER_BASE_URL_FMT % name
        download_extract(container_url, tar_path, self.container_path)
        # Remove the downloaded container tar file.'sudo rm "%s"' % tar_path)
        # Set proper file permission.
        # TODO(dshi): Change root to current user when test container can be
        # unprivileged container.'sudo chown -R root "%s"' % base_path)'sudo chgrp -R root "%s"' % base_path)
        # Update container config with container_path from global config.
        config_path = os.path.join(base_path, 'config')'sudo sed -i "s|container_dir|%s|g" "%s"' %
                  (self.container_path, config_path))

    # NOTE(review): the signature below is unterminated in this copy -- the
    # final parameter (presumably dut_name=None, per the docstring) and the
    # closing paren are missing.
    def setup_test(self, name, job_id, server_package_url, result_path,
                   control=None, skip_cleanup=False, job_folder=None,
        """Setup test container for the test job to run.
        The setup includes:
        1. Install autotest_server package from given url.
        2. Copy over local shadow_config.ini.
        3. Mount local site-packages.
        4. Mount test result directory.
        TODO(dshi): Setup also needs to include test control file for autoserv
        to run in container.
        @param name: Name of the container.
        @param job_id: Job id for the test job to run in the test container.
        @param server_package_url: Url to download autotest_server package.
        @param result_path: Directory to be mounted to container to store test
        @param control: Path to the control file to run the test job. Default is
                        set to None.
        @param skip_cleanup: Set to True to skip cleanup, used to troubleshoot
                             container failures.
        @param job_folder: Folder name of the job, e.g., 123-debug_user.
        @param dut_name: Name of the dut to run test, used as the hostname of
                         the container. Default is None.
        @return: A Container object for the test container.
        @raise ContainerError: If container does not exist, or not running.
        start_time = time.time()
        if not os.path.exists(result_path):
            raise error.ContainerError('Result directory does not exist: %s',
        result_path = os.path.abspath(result_path)
        # Save control file to result_path temporarily. The reason is that the
        # control file in drone_tmp folder can be deleted during scheduler
        # restart. For test not using SSP, the window between test starts and
        # control file being picked up by the test is very small (< 2 seconds).
        # However, for tests using SSP, it takes around 1 minute before the
        # container is setup. If scheduler is restarted during that period, the
        # control file will be deleted, and the test will fail.
        if control:
            control_file_name = os.path.basename(control)
            safe_control = os.path.join(result_path, control_file_name)'cp %s %s' % (control, safe_control))
        # Create test container from the base container.
        container = self.create_from_base(name)
        # Update the hostname of the test container to be `dut_name`.
        # Some TradeFed tests use hostname in test results, which is used to
        # group test results in dashboard. The default container name is set to
        # be the name of the folder, which is unique (as it is composed of job
        # id and timestamp. For better result view, the container's hostname is
        # set to be a string containing the dut hostname.
        if dut_name:
            config_file = os.path.join(container.container_path, name, 'config')
            lxc_utsname_setting = (
                    'lxc.utsname = ' +
                    CONTAINER_UTSNAME_FORMAT % dut_name.replace('.', '_')) % {'content': lxc_utsname_setting,
                                    'file': config_file})
        # Deploy server side package
        usr_local_path = os.path.join(container.rootfs, 'usr', 'local')
        autotest_pkg_path = os.path.join(usr_local_path,
        autotest_path = os.path.join(usr_local_path, 'autotest')
        # sudo is required so os.makedirs may not work.'sudo mkdir -p %s'% usr_local_path)
        download_extract(server_package_url, autotest_pkg_path, usr_local_path)
        deploy_config_manager = lxc_config.DeployConfigManager(container)
        # Copy over control file to run the test job.
        if control:
            container_drone_temp = os.path.join(autotest_path, 'drone_tmp')'sudo mkdir -p %s'% container_drone_temp)
            container_control_file = os.path.join(
                    container_drone_temp, control_file_name)
            # Move the control file stored in the result folder to container.'sudo mv %s %s' % (safe_control, container_control_file))
        # NOTE(review): the `if IS_MOBLAB:`/`else:` lines that should wrap the
        # two site-packages path assignments below appear to be missing in
        # this copy.
        site_packages_path = MOBLAB_SITE_PACKAGES
        site_packages_container_path = MOBLAB_SITE_PACKAGES_CONTAINER[1:]
        site_packages_path = os.path.join(common.autotest_dir,
        site_packages_container_path = os.path.join(
                lxc_config.CONTAINER_AUTOTEST_DIR, 'site-packages')
        mount_entries = [(site_packages_path, site_packages_container_path,
                         (os.path.join(common.autotest_dir, 'puppylab'),
                         os.path.join(RESULT_DIR_FMT % job_folder),
        for mount_config in deploy_config_manager.mount_configs:
        # Update container config to mount directories.
        for source, destination, readonly in mount_entries:
            container.mount_dir(source, destination, readonly)
        # Update file permissions.
        # TODO(dshi): Skip following action when test container
        # can be unprivileged container.'sudo chown -R root "%s"' % autotest_path)'sudo chgrp -R root "%s"' % autotest_path)
        # NOTE(review): the container.start / metadata-posting call that this
        # metadata dict belongs to is missing in this copy.
        metadata={'drone': socket.gethostname(),
                  'job_id': job_id,
                  'time_used': time.time() - start_time,
                  'success': True})
        logging.debug('Test container %s is set up.', name)
        return container
def parse_options():
    """Parse command line inputs.

    @return: Parsed options (argparse.Namespace).

    @raise argparse.ArgumentError: If command line arguments are invalid.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--setup', action='store_true',
                        default=False,
                        help='Set up base container.')
    parser.add_argument('-p', '--path', type=str,
                        help='Directory to store the container.',
                        default=DEFAULT_CONTAINER_PATH)
    parser.add_argument('-f', '--force_delete', action='store_true',
                        default=False,
                        help=('Force to delete existing containers and rebuild '
                              'base containers.'))
    parser.add_argument('-n', '--name', type=str,
                        help='Name of the base container.',
                        default=BASE)
    options = parser.parse_args()

    # Running without either action flag is a usage error.
    if not options.setup and not options.force_delete:
        raise argparse.ArgumentError(
                'Use --setup to setup a base container, or --force_delete to '
                'delete all containers in given path.')
    return options
def main():
    """main script."""
    # Force to run the setup as superuser.
    # TODO(dshi): Remove this enforcement when test container can be
    # unprivileged container.
    if utils.sudo_require_password():
        logging.warn('SSP requires root privilege to run commands, please '
                     'grant root access to this process.')
        # Prime the sudo credential cache so later sudo calls don't prompt.
        utils.run('sudo true')

    options = parse_options()
    bucket = ContainerBucket(container_path=options.path)
    if options.setup:
        bucket.setup_base(name=options.name, force_delete=options.force_delete)
    elif options.force_delete:
        bucket.destroy_all()
if __name__ == '__main__':