# blob: 9577aabfde3bef29661267c9135ebff0823ca62d [file] [log] [blame]
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# All scripts should die on error unless commands are specifically excepted
# by prefixing with '!' or surrounded by 'set +e' / 'set -e'.
# The number of jobs to pass to tools that can run in parallel (such as make
# and dpkg-buildpackage).
if [[ -z ${NUM_JOBS} ]]; then
  # Default to one job per processor listed in /proc/cpuinfo.
  NUM_JOBS=$(grep -c "^processor" /proc/cpuinfo)
fi
# Ensure that any sub scripts we invoke get the max proc count.
export NUM_JOBS
# Returns the pv command if it's available, otherwise plain-old cat. Note that
# this function echoes the command, rather than running it, so it can be used
# as an argument to other commands (like sudo).
pv_cat_cmd() {
  if type -P pv >&/dev/null; then
    # Limit pv's output to 80 columns, for readability.
    local term_cols=$(stty size 2>/dev/null | cut -d' ' -f2)
    if [[ ${term_cols:-0} -gt 80 ]]; then
      echo pv -w 80 -B 4m
    else
      echo pv -B 4m
    fi
  else
    echo cat
  fi
}
# Make sure we have the location and name of the calling script, using
# the current value if it is already set.
# SCRIPT_LOCATION: absolute, symlink-resolved directory containing the caller.
# SCRIPT_NAME: bare filename of the calling script.
: ${SCRIPT_LOCATION:=$(dirname "$(readlink -f "$0")")}
: ${SCRIPT_NAME:=$(basename "$0")}
# Detect whether we're inside a chroot or not. The Chromium OS SDK chroot
# is marked with /etc/debian_chroot.
if [[ -e /etc/debian_chroot ]]; then
  INSIDE_CHROOT=1
else
  INSIDE_CHROOT=0
fi
# Determine and set up variables needed for fancy color output (if supported).
# Default every variable to empty so code below can expand them unconditionally
# even on dumb terminals.
V_BOLD_RED=
V_BOLD_GREEN=
V_BOLD_YELLOW=
V_REVERSE=
V_VIDOFF=

if tput colors >&/dev/null; then
  # order matters: we want VIDOFF last so that when we trace with `set -x`,
  # our terminal doesn't bleed colors as bash dumps the values of vars.
  V_BOLD_RED=$(tput bold; tput setaf 1)
  V_BOLD_GREEN=$(tput bold; tput setaf 2)
  V_BOLD_YELLOW=$(tput bold; tput setaf 3)
  V_REVERSE=$(tput rev)
  V_VIDOFF=$(tput sgr0)
fi

# Turn on bash debug support if available for backtraces.
shopt -s extdebug 2>/dev/null
# Output a backtrace all the way back to the raw invocation, suppressing
# only the _dump_trace frame itself. Relies on extdebug being enabled so
# BASH_ARGC/BASH_ARGV carry per-frame argument info.
_dump_trace() {
  local j n p func src line args
  # p walks backwards through the flat BASH_ARGV stack as we pop frames.
  p=${#BASH_ARGV[@]}
  for (( n = ${#FUNCNAME[@]}; n > 1; --n )); do
    func=${FUNCNAME[${n} - 1]}
    src=${BASH_SOURCE[${n}]##*/}
    line=${BASH_LINENO[${n} - 1]}
    args=
    if [[ -z ${BASH_ARGC[${n} -1]} ]]; then
      args='(args unknown, no debug available)'
    else
      for (( j = 0; j < ${BASH_ARGC[${n} -1]}; ++j )); do
        args="${args:+${args} }'${BASH_ARGV[$(( p - j - 1 ))]}'"
      done
      # The ! suppresses a nonzero status when the subtraction yields 0.
      ! (( p -= ${BASH_ARGC[${n} - 1]} ))
    fi
    if [[ ${n} == ${#FUNCNAME[@]} ]]; then
      # Top-most frame: show the original invocation (basename of $0).
      error "script called: ${0##*/} ${args}"
      error "Backtrace: (most recent call is last)"
    else
      error "$(printf '  file %s, line %s, called: %s %s' \
               "${src}" "${line}" "${func}" "${args}")"
    fi
  done
}
# Declare these asap so that code below can safely assume they exist.
# Emit a message to stderr with the given color/severity prefix; splits the
# message on embedded newlines so every chunk gets the prefix.
_message() {
  local prefix=$1
  shift
  if [[ $# -eq 0 ]]; then
    echo -e "${prefix}${CROS_LOG_PREFIX:-""}:${V_VIDOFF}" >&2
    return
  fi
  (
    # Handle newlines in the message, prefixing each chunk correctly.
    # Do this in a subshell to avoid having to track IFS/set -f state.
    set +f
    IFS=$'\n'
    set -- $*
    IFS=' '
    if [[ $# -eq 0 ]]; then
      # Empty line was requested.
      set -- ''
    fi
    local line
    for line in "$@"; do
      echo -e "${prefix}${CROS_LOG_PREFIX:-}: ${line}${V_VIDOFF}" >&2
    done
  )
}
# Standard logging helpers; all write to stderr via _message.
info() {
  _message "${V_BOLD_GREEN}INFO " "$*"
}

warn() {
  _message "${V_BOLD_YELLOW}WARNING " "$*"
}

error() {
  _message "${V_BOLD_RED}ERROR " "$*"
}
# For all die functions, they must explicitly force set +eu;
# no reason to have them cause their own crash if we're in the middle
# of reporting an error condition then exiting.
# ERR-trap handler: reports the failing command plus a backtrace, then exits.
die_err_trap() {
  local command=$1 result=$2
  set +e +u

  # Per the message, bash misreports 127 as 1 during err trap sometimes.
  # Note this fact to ensure users don't place too much faith in the
  # exit code in that case.
  set -- "Command '${command}' exited with nonzero code: ${result}"
  if [[ ${result} -eq 1 ]] && [[ -z $(type -t ${command}) ]]; then
    set -- "$@" \
       '(Note bash sometimes misreports "command not found" as exit code 1 '\
       'instead of 127)'
  fi
  _dump_trace
  error
  error "Command failed:"
  DIE_PREFIX='  '
  die_notrace "$@"
}
# Exit this script due to a failure, outputting a backtrace in the process.
die() {
  set +e +u
  _dump_trace
  error
  error "Error was:"
  DIE_PREFIX='  '
  die_notrace "$@"
}
# Exit this script w/out a backtrace.
die_notrace() {
  set +e +u
  : ${DIE_PREFIX=}
  if [[ $# -eq 0 ]]; then
    set -- '(no error message given)'
  fi
  local line
  for line in "$@"; do
    error "${DIE_PREFIX}${line}"
  done
  exit 1
}
# Check for a single string in a list of space-separated strings.
# e.g. has "foo" "foo bar baz" is true, but has "f" "foo bar baz" is not.
has() {
  case " ${*:2} " in
    *" $1 "*) return 0 ;;
    *) return 1 ;;
  esac
}
# Directory locations inside the dev chroot; try the new default,
# falling back to user specific paths if the upgrade has yet to
# happen.
# Default to the invoking user; under sudo/root, use the original user's home.
_user="${USER}"
[[ ${USER} == "root" ]] && _user="${SUDO_USER}"
_CHROOT_TRUNK_DIRS=( "/home/${_user}/trunk" /mnt/host/source )
_DEPOT_TOOLS_DIRS=( "/home/${_user}/depot_tools" /mnt/host/depot_tools )
unset _user
_process_mount_pt() {
  # Given 4 arguments; the root path, the variable to set,
  # the old location, and the new; finally, forcing the upgrade is doable
  # via if a 5th arg is provided.
  # This will then try to migrate the old to new if we can do so right now
  # (else leaving symlinks in place w/in the new), and will set $1 to the
  # new location.
  local base=${1:-/} var=$2 old=$3 new=$4 force=${5:-false}
  local _sudo=$([[ ${USER} != "root" ]] && echo sudo)
  local val=${new}

  if [[ -L ${base}/${new} ]] || [[ ! -e ${base}/${new} ]]; then
    # Ok, it's either a symlink or this is the first run. Upgrade if we can-
    # specifically, if we're outside the chroot and we can rmdir the old.
    # If we cannot rmdir the old, that's due to a mount being bound to that
    # point (even if we can't see it, it's there)- thus fallback to adding
    # compat links.
    if ${force} || ( [[ ${INSIDE_CHROOT} -eq 0 ]] && \
        ${_sudo} rmdir "${base}/${old}" 2>/dev/null ); then
      ${_sudo} rm -f "${base}/${new}" || :
      ${_sudo} mkdir -p "${base}/${new}" "$(dirname "${base}/${old}" )"
      ${_sudo} ln -s "${new}" "${base}/${old}"
    else
      if [[ ! -L ${base}/${new} ]]; then
        # We can't do the upgrade right now; install compatibility links.
        ${_sudo} mkdir -p "$(dirname "${base}/${new}")" "${base}/${old}"
        ${_sudo} ln -s "${old}" "${base}/${new}"
      fi
      val=${old}
    fi
  fi
  eval "${var}=\"${val}\""
}
set_chroot_trunk_dir() {
  # This takes two optional arguments; the first being the path to the chroot
  # base; this is only used by enter_chroot. The second argument is whether
  # or not to force the new pathways; this is only used by make_chroot. Passing
  # a non-null value for $2 forces the new paths.
  if [[ ${INSIDE_CHROOT} -eq 0 ]] && [[ -z ${1-} ]]; then
    # Can't do the upgrade, thus skip trying to do so.
    CHROOT_TRUNK_DIR="/mnt/host/source"
    DEPOT_TOOLS_DIR="/mnt/host/depot_tools"
    return
  fi
  _process_mount_pt "$1" CHROOT_TRUNK_DIR "${_CHROOT_TRUNK_DIRS[@]}" ${2:+true}
  _process_mount_pt "$1" DEPOT_TOOLS_DIR "${_DEPOT_TOOLS_DIRS[@]}" ${2:+true}
}
# Construct a list of possible locations for the source tree. This list is
# based on various environment variables and globals that may have been set
# by the calling script.
get_gclient_root_list() {
  if [[ ${INSIDE_CHROOT} -eq 1 ]]; then
    echo "/mnt/host/source"
  fi

  if [[ -n ${COMMON_SH} ]]; then echo "$(dirname "${COMMON_SH}")/../.."; fi
  if [[ -n ${BASH_SOURCE} ]]; then echo "$(dirname "${BASH_SOURCE}")/../.."; fi
}
# Based on the list of possible source locations we set GCLIENT_ROOT if it is
# not already defined by looking for a src directory in each search path
# location. If we do not find a valid looking root we error out.
get_gclient_root() {
  if [[ -n ${GCLIENT_ROOT} ]]; then
    return
  fi

  for path in $(get_gclient_root_list); do
    if [[ -d ${path}/src ]]; then
      GCLIENT_ROOT=${path}
      break
    fi
  done

  if [[ -z ${GCLIENT_ROOT} ]]; then
    # Using dash or sh, we don't know where we are. $0 refers to the calling
    # script, not ourselves, so that doesn't help us.
    echo "Unable to determine location for common.sh. If you are sourcing"
    echo "common.sh from a script run via dash or sh, you must do it in the"
    echo "following way:"
    echo ' COMMON_SH="$(dirname "$0")/../../scripts/common.sh"'
    echo ' . "${COMMON_SH}"'
    echo "where the first line is the relative path from your script to"
    echo "common.sh."
    exit 1
  fi
}
# Populate the ENVIRONMENT_WHITELIST array.
load_environment_whitelist() {
  # Disable globbing while splitting the helper's output into array words.
  set -f
  ENVIRONMENT_WHITELIST=(
    $("${GCLIENT_ROOT}/chromite/scripts/cros_env_whitelist")
  )
  set +f
}
# Find root of source tree
# NOTE(review): upstream versions of this file invoke get_gclient_root here
# before canonicalizing; this extract does not show that call — verify against
# the full file.
# Canonicalize the directories for the root dir and the calling script.
# readlink is part of coreutils and should be present even in a bare chroot.
# This is better than just using
# FOO="$(cd ${FOO} ; pwd)"
# since that leaves symbolic links intact.
# Note that 'realpath' is equivalent to 'readlink -f'.
GCLIENT_ROOT=$(readlink -f "${GCLIENT_ROOT}")
# Other directories should always be pathed down from GCLIENT_ROOT.
# Load developer's custom settings. Default location is in scripts dir,
# since that's available both inside and outside the chroot. By convention,
# settings from this file are variables starting with 'CHROMEOS_'
if [[ -f ${CHROMEOS_DEV_SETTINGS} ]]; then
  # Turn on exit-on-error during custom settings processing
  SAVE_OPTS=$(set +o)
  switch_to_strict_mode

  # Read settings
  . "${CHROMEOS_DEV_SETTINGS}"

  # Restore previous state of exit-on-error
  eval "${SAVE_OPTS}"
fi
# Load shflags
# NOTE: This code snippet is in particular used by the au-generator (which
# stores shflags in ./lib/shflags/) and should not be touched.
if [[ -f ${SCRIPTS_DIR}/lib/shflags/shflags ]]; then
  . "${SCRIPTS_DIR}/lib/shflags/shflags" || die "Couldn't find shflags"
else
  . ./lib/shflags/shflags || die "Couldn't find shflags"
fi
# Our local mirror
# Upstream mirrors and build suites come in 2 flavors
# DEV - development chroot, used to build the chromeos image
# IMG - bootable image, to run on actual hardware
# Default location for chroot
# All output files from build should go under ${DEFAULT_BUILD_ROOT}, so that
# they don't pollute the source directory.
# Set up a global ALL_BOARDS value
if [[ -d ${SRC_ROOT}/overlays ]]; then
  ALL_BOARDS=$(cd "${SRC_ROOT}/overlays"; \
    ls -1d overlay-* 2>&- | sed 's,overlay-,,g')
fi
# Normalize whitespace (collapse the newline-separated list onto one line).
ALL_BOARDS=$(echo ${ALL_BOARDS})
# Sets the default board variable for calling script.
if [[ -f ${GCLIENT_ROOT}/src/scripts/.default_board ]]; then
  DEFAULT_BOARD=$(<"${GCLIENT_ROOT}/src/scripts/.default_board")
  # Check for user typos like whitespace.
  if [[ -n ${DEFAULT_BOARD//[a-zA-Z0-9-_]} ]]; then
    die ".default_board: invalid name detected; please fix:" \
        "'${DEFAULT_BOARD}'"
  fi
fi
# Stub to get people to upgrade.
get_default_board() {
  warn "please upgrade your script, and make sure to run build_packages"
}
# Enable --fast by default.
# Directory to store built images. Should be set by sourcing script when used.
# Standard filenames
# Install make for portage ebuilds. Used by build_image and gmergefs.
# TODO: Is /usr/local/autotest-chrome still used by anyone?
# Mask for base, dev, and test images (build_image, build_image --test)
# Mask for factory test image (build_image --factory)
# Mask for factory install shim (build_image factory_install)
# -----------------------------------------------------------------------------
# Functions

# Print a loud warning telling the user that no default board is configured.
setup_board_warning() {
  echo
  echo "${V_REVERSE}================ WARNING =====================${V_VIDOFF}"
  echo
  echo "*** No default board detected in " \
    "${GCLIENT_ROOT}/src/scripts/.default_board"
  echo "*** Either run setup_board with default flag set"
  echo "*** or echo |board_name| > ${GCLIENT_ROOT}/src/scripts/.default_board"
  echo
}
# Return success if the given path resides on an NFS filesystem.
# $1 - Path to check.
is_nfs() {
  [[ $(stat -f -L -c %T "$1") == "nfs" ]]
}

# Warn the user when a path is on NFS, which is an untested configuration.
# $1 - Path to check.
warn_if_nfs() {
  if is_nfs "$1"; then
    warn "$1 is on NFS. This is untested. You can send patches if it's broken."
  fi
}
# Enter a chroot and restart the current script if needed
restart_in_chroot_if_needed() {
  # NB: Pass in ARGV: restart_in_chroot_if_needed "$@"
  if [[ ${INSIDE_CHROOT} -ne 1 ]]; then
    # Get inside_chroot path for script.
    local chroot_path="$(reinterpret_path_for_chroot "$0")"
    exec ${GCLIENT_ROOT}/chromite/bin/cros_sdk -- "${chroot_path}" "$@"
  fi
}
# Fail unless we're inside the chroot. This guards against messing up your
# workstation.
assert_inside_chroot() {
  if [[ ${INSIDE_CHROOT} -ne 1 ]]; then
    echo "This script must be run inside the chroot. Run this first:"
    echo " cros_sdk"
    exit 1
  fi
}

# Fail if we're inside the chroot. This guards against creating or entering
# nested chroots, among other potential problems.
assert_outside_chroot() {
  if [[ ${INSIDE_CHROOT} -ne 0 ]]; then
    echo "This script must be run outside the chroot."
    exit 1
  fi
}
# Fail when running as root; many build steps must run as a normal user.
assert_not_root_user() {
  if [[ ${UID:-$(id -u)} == 0 ]]; then
    echo "This script must be run as a non-root user."
    exit 1
  fi
}

# Fail unless we are root AND were invoked via sudo from a non-root user.
assert_root_user() {
  if [[ ${UID:-$(id -u)} != 0 ]] || [[ ${SUDO_USER:-root} == "root" ]]; then
    die_notrace "This script must be run using sudo from a non-root user."
  fi
}
# Check that all arguments are flags; that is, there are no remaining arguments
# after parsing from shflags. Allow (with a warning) a single empty-string
# argument.
# TODO: fix buildbot so that it doesn't pass the empty-string parameter,
# then change this function.
# Usage: check_flags_only_and_allow_null_arg "$@" && set --
check_flags_only_and_allow_null_arg() {
  local do_shift=1
  if [[ $# -eq 1 ]] && [[ -z $1 ]]; then
    echo "$0: warning: ignoring null argument" >&2
    shift
    do_shift=0
  fi
  if [[ $# -gt 0 ]]; then
    echo "error: invalid arguments: \"$*\"" >&2
    flags_help
    exit 1
  fi
  return ${do_shift}
}
# Removes single quotes around parameter
# Arguments:
# $1 - string which optionally has surrounding quotes
# Returns:
# None, but prints the string without quotes.
remove_quotes() {
  echo "$1" | sed -e "s/^'//; s/'$//"
}
# Writes stdin to the given file name as root using sudo in overwrite mode.
# $1 - The output file name.
sudo_clobber() {
  sudo tee "$1" >/dev/null
}

# Writes stdin to the given file name as root using sudo in append mode.
# $1 - The output file name.
sudo_append() {
  sudo tee -a "$1" >/dev/null
}
# Execute multiple commands in a single sudo. Generally will speed things
# up by avoiding multiple calls to `sudo`. If any commands fail, we will
# call die with the failing command. We can handle a max of ~100 commands,
# but hopefully no one will ever try that many at once.
# $@ - The commands to execute, one per arg.
sudo_multi() {
  local i cmds

  # Construct the shell code to execute. It'll be of the form:
  # ... && ( ( command ) || exit <command index> ) && ...
  # This way we know which command exited. The exit status of
  # the underlying command is lost, but we never cared about it
  # in the first place (other than it is non zero), so oh well.
  for (( i = 1; i <= $#; ++i )); do
    cmds+=" && ( ( ${!i} ) || exit $(( i + 10 )) )"
  done

  # Execute our constructed shell code.
  sudo -- sh -c ":${cmds[*]}" && i=0 || i=$?

  # See if this failed, and if so, print out the failing command.
  if [[ $i -gt 10 ]]; then
    : $(( i -= 10 ))
    die "sudo_multi failed: ${!i}"
  elif [[ $i -ne 0 ]]; then
    die "sudo_multi failed for unknown reason $i"
  fi
}
# Writes stdin to the given file name as the sudo user in overwrite mode.
# $@ - The output file names.
user_clobber() {
  install -m644 -o ${SUDO_UID} -g ${SUDO_GID} /dev/stdin "$@"
}

# Copies the specified file owned by the user to the specified location.
# If the copy fails as root (e.g. due to root_squash and NFS), retry the copy
# with the user's account before failing.
user_cp() {
  cp -p "$@" 2>/dev/null || sudo -u ${SUDO_USER} -- cp -p "$@"
}

# Appends stdin to the given file name as the sudo user.
# $1 - The output file name.
user_append() {
  cat >> "$1"
  chown ${SUDO_UID}:${SUDO_GID} "$1"
}

# Create the specified directory, along with parents, as the sudo user.
# $@ - The directories to create.
user_mkdir() {
  install -o ${SUDO_UID} -g ${SUDO_GID} -d "$@"
}

# Create the specified symlink as the sudo user.
# $1 - Link target
# $2 - Link name
user_symlink() {
  ln -sfT "$1" "$2"
  chown -h ${SUDO_UID}:${SUDO_GID} "$2"
}
# Locate all mounts below a specified directory.
# $1 - The root tree.
sub_mounts() {
  # Assume that `mount` outputs a list of mount points in the order
  # that things were mounted (since it always has and hopefully always
  # will). As such, we have to unmount in reverse order to cleanly
  # unmount submounts (think /dev/pts and /dev).
  awk -v path="$1" -v len="${#1}" \
    '(substr($2, 1, len) == path) { print $2 }' /proc/mounts | \
    tac | \
    sed -e 's/\\040(deleted)$//'
  # Hack(zbehan): If a bind mount's source is mysteriously removed,
  # we'd end up with an orphaned mount with the above string in its name.
  # It can only be seen through /proc/mounts and will stick around even
  # when it should be gone already.
}
# Unmounts a directory, if the unmount fails, warn, and then lazily unmount.
# $1 - The path to unmount.
safe_umount_tree() {
  local mounts=$(sub_mounts "$1")

  # Hmm, this shouldn't normally happen, but anything is possible.
  if [[ -z ${mounts} ]]; then
    return 0
  fi

  # First try to unmount in one shot to speed things up.
  # Hide output since we may have devices mounted within a mount point.
  if safe_umount -d ${mounts} 2> /dev/null; then
    return 0
  fi

  # Check whether our mounts were successfully unmounted.
  mounts=$(sub_mounts "$1")
  if [[ -z ${mounts} ]]; then
    warn "umount failed, but devices were unmounted anyway"
    return 0
  fi

  # Well that didn't work, so lazy unmount remaining ones.
  warn "Failed to unmount ${mounts}"
  warn "Doing a lazy unmount"
  if ! safe_umount -d -l ${mounts}; then
    mounts=$(sub_mounts "$1")
    die "Failed to lazily unmount ${mounts}"
  fi
}
# Run umount as root.
safe_umount() {
  $([[ ${UID:-$(id -u)} != 0 ]] && echo sudo) umount "$@"
}

# Print the committer email extracted from git's GIT_COMMITTER_IDENT
# (the text between angle brackets).
get_git_id() {
  git var GIT_COMMITTER_IDENT | sed -e 's/^.*<\(\S\+\)>.*$/\1/'
}
# Fixes symlinks that are incorrectly prefixed with the build root $1
# rather than the real running root '/'.
# TODO(sosa) - Merge setup - cleanup below with this method.
fix_broken_symlinks() {
  local build_root=$1
  local symlinks=$(find "${build_root}/usr/local" -lname "${build_root}/*")
  local symlink
  for symlink in ${symlinks}; do
    echo "Fixing ${symlink}"
    local target=$(ls -l "${symlink}" | cut -f 2 -d '>')
    # Trim spaces from target (bashism).
    target=${target/ /}
    # Make new target (removes rootfs prefix).
    new_target=$(echo ${target} | sed "s#${build_root}##")
    echo "Fixing symlink ${symlink}"
    sudo unlink "${symlink}"
    sudo ln -sf "${new_target}" "${symlink}"
  done
}
# Sets up symlinks for the developer root. It is necessary to symlink
# usr and local since the developer root is mounted at /usr/local and
# applications expect to be installed under /usr/local/bin, etc.
# This avoids packages installing into /usr/local/usr/local/bin.
# $1 specifies the symlink target for the developer root.
# $2 specifies the symlink target for the var directory.
# $3 specifies the location of the stateful partition.
setup_symlinks_on_root() {
  # Give args better names.
  local dev_image_target=$1
  local var_target=$2
  local dev_image_root="$3/dev_image"

  # If our var target is actually the standard var, we are cleaning up the
  # symlinks (could also check for /usr/local for the dev_image_target).
  if [[ ${var_target} == "/var" ]]; then
    echo "Cleaning up /usr/local symlinks for ${dev_image_root}"
  else
    echo "Setting up symlinks for /usr/local for ${dev_image_root}"
  fi

  # Set up symlinks that should point to ${dev_image_target}.
  local path
  for path in usr local; do
    if [[ -h ${dev_image_root}/${path} ]]; then
      sudo unlink "${dev_image_root}/${path}"
    elif [[ -e ${dev_image_root}/${path} ]]; then
      die "${dev_image_root}/${path} should be a symlink if exists"
    fi
    sudo ln -s "${dev_image_target}" "${dev_image_root}/${path}"
  done

  # Setup var symlink.
  if [[ -h ${dev_image_root}/var ]]; then
    sudo unlink "${dev_image_root}/var"
  elif [[ -e ${dev_image_root}/var ]]; then
    die "${dev_image_root}/var should be a symlink if it exists"
  fi
  sudo ln -s "${var_target}" "${dev_image_root}/var"
}
# These two helpers clobber the ro compat value in our root filesystem.
# When the system is built with --enable_rootfs_verification, bit-precise
# integrity checking is performed. That precision poses a usability issue on
# systems that automount partitions with recognizable filesystems, such as
# ext2/3/4. When the filesystem is mounted 'rw', ext2 metadata will be
# automatically updated even if no other writes are performed to the
# filesystem. In addition, ext2+ does not support a "read-only" flag for a
# given filesystem. That said, forward and backward compatibility of
# filesystem features are supported by tracking if a new feature breaks r/w or
# just write compatibility. We abuse the read-only compatibility flag[1] in
# the filesystem header by setting the high order byte (le) to FF. This tells
# the kernel that features R24-R31 are all enabled. Since those features are
# undefined on all ext-based filesystem, all standard kernels will refuse to
# mount the filesystem as read-write -- only read-only[2].
# [1] 32-bit flag we are modifying:
# [2] Mount behavior is enforced here:
# N.B., if the high order feature bits are used in the future, we will need to
# revisit this technique.
# Clobber the ext2+ read-only-compat flags (see the comment block above) so
# standard kernels will only mount the filesystem read-only.
# $1 - Path to the rootfs image.
# $2 - Optional byte offset of the filesystem within the image (default 0).
disable_rw_mount() {
  local rootfs=$1
  local offset="${2-0}" # in bytes
  local ro_compat_offset=$((0x464 + 3)) # Set 'highest' byte
  printf '\377' |
    sudo dd of="${rootfs}" seek=$((offset + ro_compat_offset)) \
      conv=notrunc count=1 bs=1
}
# Reverse disable_rw_mount: clear the abused read-only-compat byte so the
# filesystem can be mounted read-write again.
# $1 - Path to the rootfs image.
# $2 - Optional byte offset of the filesystem within the image (default 0).
enable_rw_mount() {
  local rootfs=$1
  local offset="${2-0}"
  local ro_compat_offset=$((0x464 + 3)) # Set 'highest' byte
  printf '\000' |
    sudo dd of="${rootfs}" seek=$((offset + ro_compat_offset)) \
      conv=notrunc count=1 bs=1
}
# Get current timestamp. Assumes runs at startup.
start_time=$(date +%s)

# Get time elapsed since start_time in seconds.
get_elapsed_seconds() {
  local end_time=$(date +%s)
  local elapsed_seconds=$(( end_time - start_time ))
  echo ${elapsed_seconds}
}
# Print time elapsed since start_time.
print_time_elapsed() {
  # Optional first arg to specify elapsed_seconds. If not given, will
  # recalculate elapsed time to now. Optional second arg to specify
  # command name associated with elapsed time.
  local elapsed_seconds=${1:-$(get_elapsed_seconds)}
  local cmd_base=${2:-}

  local minutes=$(( elapsed_seconds / 60 ))
  local seconds=$(( elapsed_seconds % 60 ))

  if [[ -n ${cmd_base} ]]; then
    info "Elapsed time (${cmd_base}): ${minutes}m${seconds}s"
  else
    info "Elapsed time: ${minutes}m${seconds}s"
  fi
}
# Associative array for filling in extra command-specific stats before
# calling command_completed.
declare -A EXTRA_COMMAND_STATS

# Save original command line.
command_line_arr=( "$0" "$@" )
# Report completion: print elapsed time and upload build-command stats.
command_completed() {
  # Call print_elapsed_time regardless.
  local run_time=$(get_elapsed_seconds)
  local cmd_base=$(basename "${command_line_arr[0]}")
  print_time_elapsed ${run_time} ${cmd_base}

  # Prepare command stats in an associative array. Additional command-specific
  # stats can be added through EXTRA_COMMAND_STATS associative array.
  # NOTE(review): only host/cpu_count/cpu_type survive in this extract; other
  # upstream keys (cmd line, run time, user) may be missing — verify.
  declare -A stats=(
    [cmd_base]=${cmd_base}
    [cmd_args]="${command_line_arr[*]:1}"
    [run_time]=${run_time}
    [host]=$(hostname -f)
    [cpu_count]=$(grep -c processor /proc/cpuinfo)
    [cpu_type]=$(uname -p)
  )
  local attr
  for attr in "${!EXTRA_COMMAND_STATS[@]}"; do
    stats[${attr}]=${EXTRA_COMMAND_STATS[${attr}]}
  done

  # Prepare temporary file for stats.
  local tmpfile=$(mktemp -t tmp.stats.XXXXXX)
  trap "rm -f '${tmpfile}'" EXIT

  # Write stats out to temporary file.
  echo "Chromium OS Build Command Stats - Version 1" > "${tmpfile}"
  for attr in "${!stats[@]}"; do
    echo "${attr} ${stats[${attr}]}"
  done >> "${tmpfile}"

  # Call upload_command_stats on the stats file. If it fails do not stop.
  "${GCLIENT_ROOT}"/chromite/bin/upload_command_stats "${tmpfile}" || true

  rm "${tmpfile}"
  trap - EXIT
}
# The board and variant command line options can be used in a number of ways
# to specify the board and variant. The board can encode both pieces of
# information separated by underscores. Or the variant can be passed using
# the separate variant option. This function extracts the canonical board and
# variant information and provides it in the BOARD, VARIANT and BOARD_VARIANT
# variables.
get_board_and_variant() {
  local flags_board=$1
  local flags_variant=$2

  BOARD=$(echo "${flags_board}" | cut -d '_' -f 1)
  VARIANT=${flags_variant:-$(echo "${flags_board}" | cut -s -d '_' -f 2)}

  BOARD_VARIANT=${BOARD}
  if [[ -n ${VARIANT} ]]; then
    BOARD_VARIANT+="_${VARIANT}"
  fi
}
# Load configuration files that allow board-specific overrides of default
# functionality to be specified in overlays.
# $1 - Board to match overlays to.
# $2 - File to load.
load_board_specific_script() {
  local board=$1 file=$2 overlay
  [[ $# -ne 2 ]] && die "load_board_specific_script requires exactly 2 params"
  for overlay in $(cros_list_overlays --board "${board}"); do
    local setup_sh="${overlay}/scripts/${file}"
    if [[ -e ${setup_sh} ]]; then
      source "${setup_sh}"
    fi
  done
}
# Check that the specified file exists. If the file path is empty or the file
# doesn't exist on the filesystem generate useful error messages. Otherwise
# show the user the name and path of the file that will be used. The padding
# parameter can be used to tabulate multiple name:path pairs. For example:
# check_for_file "really long name" "...:" ""
# check_for_file "short name" ".........:" ""
# Results in the following output:
# Using really long name...:
# Using short name.........:
# If tabulation is not required then passing "" for padding generates the
# output "Using <name> <path>"
check_for_file() {
  local name=$1
  local padding=$2
  local path=$3

  if [[ -z ${path} ]]; then
    die "No ${name} file specified."
  fi

  if [[ ! -e ${path} ]]; then
    die "No ${name} file found at: ${path}"
  fi

  info "Using ${name}${padding} ${path}"
}
# Check that the specified tool exists. If it does not exist in the PATH
# generate a useful error message indicating how to install the ebuild
# that contains the required tool.
check_for_tool() {
  local tool=$1
  local ebuild=$2

  if ! which "${tool}" >/dev/null; then
    error "The ${tool} utility was not found in your path. Run the following"
    error "command in your chroot to install it: sudo -E emerge ${ebuild}"
    exit 1
  fi
}
# Reinterprets path from outside the chroot for use inside.
# Returns "" if "" given.
# $1 - The path to reinterpret.
reinterpret_path_for_chroot() {
  if [[ ${INSIDE_CHROOT} -ne 1 ]]; then
    if [[ -z $1 ]]; then
      echo ""
    else
      local path_abs_path=$(readlink -f "$1")
      local gclient_root_abs_path=$(readlink -f "${GCLIENT_ROOT}")

      # Strip the repository root from the path.
      local relative_path=$(echo ${path_abs_path} \
        | sed "s:${gclient_root_abs_path}/::")

      if [[ ${relative_path} == "${path_abs_path}" ]]; then
        die "Error reinterpreting path. Path $1 is not within source tree."
      fi

      # Prepend the chroot repository path.
      echo "/mnt/host/source/${relative_path}"
    fi
  else
    # Path is already inside the chroot :).
    echo "$1"
  fi
}
# Build and install a custom kernel into the given install root using a
# private PKGDIR so existing prebuilts are not disturbed.
# $1 - Root to install the kernel into.
emerge_custom_kernel() {
  local install_root=$1
  local root=/build/${FLAGS_board}
  local tmp_pkgdir=${root}/custom-packages

  # Clean up any leftover state in custom directories.
  sudo rm -rf "${tmp_pkgdir}"

  # Update chromeos-initramfs to contain the latest binaries from the build
  # tree. This is basically just packaging up already-built binaries from
  # ${root}. We are careful not to muck with the existing prebuilts so that
  # prebuilts can be uploaded in parallel.
  # TODO(davidjames): Implement ABI deps so that chromeos-initramfs will be
  # rebuilt automatically when its dependencies change.
  sudo -E PKGDIR="${tmp_pkgdir}" ${EMERGE_BOARD_CMD} -1 \
    chromeos-base/chromeos-initramfs || die "Cannot emerge chromeos-initramfs"

  # Verify all dependencies of the kernel are installed. This should be a
  # no-op, but it's good to check in case a developer didn't run
  # build_packages. We need the expand_virtual call to workaround a bug
  # in portage where it only installs the virtual pkg.
  local kernel=$(portageq-${FLAGS_board} expand_virtual ${root} \
                 virtual/linux-sources)
  sudo -E PKGDIR="${tmp_pkgdir}" ${EMERGE_BOARD_CMD} --onlydeps \
    ${kernel} || die "Cannot emerge kernel dependencies"

  # Build the kernel. This uses the standard root so that we can pick up the
  # initramfs from there. But we don't actually install the kernel to the
  # standard root, because that'll muck up the kernel debug symbols there,
  # which we want to upload in parallel.
  sudo -E PKGDIR="${tmp_pkgdir}" ${EMERGE_BOARD_CMD} --buildpkgonly \
    ${kernel} || die "Cannot emerge kernel"

  # Install the custom kernel to the provided install root.
  sudo -E PKGDIR="${tmp_pkgdir}" ${EMERGE_BOARD_CMD} --usepkgonly \
    --root=${install_root} ${kernel} || die "Cannot emerge kernel to root"
}
# Require that a sudo keep-alive is already active, then shadow `sudo` with a
# non-interactive (-n) wrapper so scripts never block prompting for a password.
enable_strict_sudo() {
  if [[ -z ${CROS_SUDO_KEEP_ALIVE} ]]; then
    echo "$0 was somehow invoked in a way that the sudo keep alive could"
    echo "not be found. Failing due to this. See"
    exit 126
  fi
  sudo() {
    $(type -P sudo) -n "$@"
  }
}
# Checks that stdin and stderr are both terminals.
# If so, we assume that there is a live user we can interact with.
# This check can be overridden by setting the CROS_NO_PROMPT environment
# variable to a non-empty value.
is_interactive() {
  [[ -z ${CROS_NO_PROMPT} && -t 0 && -t 2 ]]
}

# Die unless we can prompt the user for input.
assert_interactive() {
  if ! is_interactive; then
    die "Script ${0##*/} tried to get user input on a non-interactive terminal."
  fi
}
# Selection menu with a default option: this is similar to bash's select
# built-in, only that in case of an empty selection it'll return the default
# choice. Like select, it uses PS3 as the prompt.
# $1: name of variable to be assigned the selected value; it better not be of
# the form choose_foo to avoid conflict with local variables.
# $2: default value to return in case of an empty user entry.
# $3: value to return in case of an invalid choice.
# $...: options for selection.
# Usage example:
# PS3="Select one [1]: "
# choose reply "foo" "ERROR" "foo" "bar" "foobar"
# This will present the following menu and prompt:
# 1) foo
# 2) bar
# 3) foobar
# Select one [1]:
# The return value will be stored in a variable named 'reply'. If the input is
# 1, 2 or 3, the return value will be "foo", "bar" or "foobar", respectively.
# If it is empty (i.e. the user clicked Enter) it will be "foo". Anything else
# will return "ERROR".
choose() {
  typeset -i choose_i=1

  # Retrieve output variable name and default return value.
  local choose_reply=$1
  local choose_default=$2
  local choose_invalid=$3
  shift 3

  # Select a return value
  unset REPLY
  if [[ $# -gt 0 ]]; then
    # Actual options provided, present a menu and prompt for a choice.
    local choose_opt
    for choose_opt in "$@"; do
      echo "${choose_i}) ${choose_opt}" >&2
      : $(( ++choose_i ))
    done
    read -p "$PS3"
  fi
  # Filter out strings containing non-digits.
  if [[ ${REPLY} != "${REPLY%%[!0-9]*}" ]]; then
    REPLY=0
  fi
  choose_i="${REPLY}"

  if [[ ${choose_i} -ge 1 && ${choose_i} -le $# ]]; then
    # Valid choice, return the corresponding value.
    eval ${choose_reply}=\""${!choose_i}"\"
  elif [[ -z ${REPLY} ]]; then
    # Empty choice, return default value.
    eval ${choose_reply}=\""${choose_default}"\"
  else
    # Invalid choice, return corresponding value.
    eval ${choose_reply}=\""${choose_invalid}\""
  fi
}
# Display --help if requested. This is used to hide options from help
# that are not intended for developer use.
# How to use:
# 1) Declare the options that you want to appear in help.
# 2) Call this function.
# 3) Declare the options that you don't want to appear in help.
# See build_packages for example usage.
show_help_if_requested() {
  local opt
  for opt in "$@"; do
    if [[ ${opt} == "-h" || ${opt} == "--help" ]]; then
      flags_help
      exit 0
    fi
  done
}
switch_to_strict_mode() {
  # Set up strict execution mode; note that the trap
  # must follow switch_to_strict_mode, else it will have no effect.
  set -e
  trap 'die_err_trap "${BASH_COMMAND:-command unknown}" "$?"' ERR
  if [[ $# -ne 0 ]]; then
    set "$@"
  fi
}

# TODO: Re-enable this once shflags is set -e safe.
#switch_to_strict_mode
# Draw a cheerful ASCII boat to signal success.
# NOTE(review): the extract lost most of the heredoc art; this is a
# reconstruction — verify against the full file.
okboat() {
  echo -e "${V_BOLD_GREEN}"
  cat <<'BOAT'
      .  o ..
      o . o o.o
           ...oo
             __[]__
          __|_o_o_o\__
          \"""""""""/
OK         \. ..  . /
      ^^^^^^^^^^^^^^^
BOAT
  echo -e "${V_VIDOFF}"
}
# Draw a sinking ASCII boat and die with the given message.
# NOTE(review): the extract lost parts of the heredoc art; this is a
# reconstruction — verify against the full file.
failboat() {
  echo -e "${V_BOLD_RED}"
  cat <<'BOAT'
             '
        '    )
         ) (
        ( .')  __/\
        (.  /o/` \
         __/o/`   \
FAIL    / /o/`    /
      ^^^^^^^^^^^^^
BOAT
  echo -e "${V_VIDOFF}"
  die "$* failed"
}