blob: 0e450338744b6e9973ff9d048ebb12c28061c4f3 [file] [log] [blame]
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Per-architecture image configuration.
# NOTE(review): the bodies of both branches of this if/elif (and its closing
# "fi") are not visible in this copy of the file — lines appear to have been
# stripped during extraction; confirm against the original source.
if [[ "${ARCH}" == "arm64" ]]; then
elif [[ "${ARCH}" == "amd64" ]]; then
# This will add console=ttyS0 kernel cmdline flag, thus rerouting
# dmesg output to ttyS0 (serial port).
# This will add tty1 console before ttyS0. This way ttyS0
# will be used as a default output for the boot messages from
# userland apps. Kernel messages will go to both outputs (if present).
# Don't install upstart files.
INSTALL_MASK+=" /etc/init"
# Don't install symbol table for kdump kernel.
INSTALL_MASK+=" /boot/kdump/*"
# remove_from_install_mask: removes the given item ("target") from the
# INSTALL_MASK string by rebuilding the mask without that entry.
# NOTE(review): this copy appears to be missing lines — the function
# definition line (likely "remove_from_install_mask() {"), the assignment of
# "target" and INSTALL_MASK_ARRAY, the loop's "do"/"done", the "fi", and the
# closing "}" are not visible; confirm against the original source.
# Disable wildcard expansion and create array from the INSTALL_MASK string,
# so masked glob patterns such as "/boot/kdump/*" are not expanded against
# the current directory while splitting.
set -f
set +f
for value in "${INSTALL_MASK_ARRAY[@]}"
# Keep every entry except the target being removed from INSTALL_MASK.
if [[ ${value} != "${target}" ]]; then
INSTALL_MASK_NEW+=" ${value}"
# /boot/config-* is masked through INSTALL_MASK, which prevents installing
# the already-generated kernel config. This call removes that pattern from
# the INSTALL_MASK so the kernel config is installed into the image.
remove_from_install_mask "/boot/config-*"
# Temporary directory containing the package list
# files used for creating cos-package-info.json.
# cleanup_temp_package_dir: removes TEMP_PACKAGE_DIR if it exists. Installed
# as an EXIT trap by create_package_info() so the temp dir is removed on any
# exit path.
# NOTE(review): the "fi" and closing "}" are not visible in this copy.
cleanup_temp_package_dir() {
if [[ -d "${TEMP_PACKAGE_DIR}" ]]; then
rm -fr "${TEMP_PACKAGE_DIR}"
# board_make_image_bootable() is invoked as part of build_image after the
# kernel partitions are built and the rootfs is locked. This runs very
# late in the image build process.
# Arguments:
#   $1 - path to the image file to make bootable.
# Returns non-zero if bootloader installation fails.
# NOTE(review): the "fi" and closing "}" are not visible in this copy.
board_make_image_bootable() {
local -r image="$1"
local -r script_root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
# NOTE(review): the sourced filename after "${script_root}/" appears to be
# missing (truncated line); confirm against the original source.
. "${script_root}/" || exit 1
if ! bootloader_install "${image}"; then
error "Could not install bootloaders on ${image}"
return 1
# create_runtime_package_info: writes the list of packages installed into the
# image (the runtime dependencies of virtual/target-os) to a file under the
# directory given as $1, and echoes that file's path to stdout so callers can
# capture it via $(...).
# NOTE(review): the closing "}" is not visible in this copy of the file.
create_runtime_package_info() {
local -r runtime_pkg_file="$1/runtime_pkg_info"
# The "emerge" command below generates the list of packages that
# virtual/target-os depends on. These are the packages that are
# installed into the image. Its results look like
# ...
# [binary R ] app-arch/gzip-1.9 to /build/lakitu/
# [binary R ] dev-libs/popt-1.16-r2 to /build/lakitu/
# [binary R ] app-emulation/docker-credential-helpers-0.6.3-r1 to /build/lakitu/
# ...
# This command line is similar to what the ListInstalledPackage function (in
# chromite/licensing/) does. NOTE(review): the module filename in this
# comment appears truncated in this copy; confirm against the original.
# The "sed" command extracts the category name, the package name, and the
# version from each line. With that, the example above is converted to
# ...
# app-arch/gzip-1.9
# dev-libs/popt-1.16-r2
# app-emulation/docker-credential-helpers-0.6.3-r1
# ...
"emerge-${BOARD}" \
--quiet --with-bdeps=n --with-bdeps-auto=n \
--usepkgonly --emptytree --pretend \
--color=n virtual/target-os | \
sed -E 's/\[[^]]+\] (.+) to \/build\/.*/\1/' \
> "${runtime_pkg_file}"
# Print the output file path for the caller.
echo "${runtime_pkg_file}"
# create_buildtime_package_info: writes the list of buildtime-only
# dependencies of virtual/target-os (all dependencies minus the installed
# packages listed in the file given as $2) to a file under the directory
# given as $1, and echoes that file's path to stdout.
# NOTE(review): the "do" of the while loop and the function's closing "}" are
# not visible in this copy of the file; confirm against the original source.
create_buildtime_package_info() {
local -r all_pkg_file="$1/all_pkg_info"
# The "emerge" command below generates the list of packages that
# virtual/target-os depends on. It includes both buildtime dependent
# packages and installed packages. Its results look like
# ...
# [ebuild R ] virtual/pkgconfig-0-r1 to /build/lakitu/
# [ebuild R ] dev-go/protobuf-1.3.2-r1 to /build/lakitu/
# [binary R ] app-arch/gzip-1.9 to /build/lakitu/
# ...
# The "sed" command extracts the category name, the package name, and the
# version from each line. With that, the example above is converted to
# ...
# virtual/pkgconfig-0-r1
# dev-go/protobuf-1.3.2-r1
# app-arch/gzip-1.9
# ...
# The "emerge-${BOARD}" command lists packages that are a dependency due to
# DEPEND or BDEPEND or both. In the "both" case multiple entries are
# present, so `sort` and `uniq` are used to de-duplicate them.
"emerge-${BOARD}" \
--quiet --with-bdeps=y --with-bdeps-auto=y \
--emptytree --pretend --color=n \
virtual/target-os | \
sed -E 's/\[[^]]+\] ([[:alnum:]_.//-]*).*/\1/' | \
sort | uniq \
> "${all_pkg_file}"
# The package list above does not include some of the SDK related
# dependencies such as glibc and gcc. To add those packages,
# use the package list present at /etc/portage/profile/package.provided
# under the sysroot. It is the same location from which cros_extract_deps
# gets those packages for generation of the CPE file.
local -r pkg_list_file="/build/${BOARD}/etc/portage/profile/package.provided"
while IFS= read -r line
# Skip empty lines and comment lines.
[[ -z "${line}" || "${line}" =~ ^#.* ]] && continue
echo "${line}" >> "${all_pkg_file}"
done < "${pkg_list_file}"
# Find the buildtime dependent packages by removing the installed packages
# (file given as argument $2) from the full dependency list.
# All the ebuild files should have a GPL license and therefore
# should be distributed in the image. So there is no need
# to check whether the package belongs to a public overlay or not.
# NOTE(review): "fgrep" is deprecated; "grep -F" is the modern spelling.
local -r buildtime_pkg_file="$1/buildtime_pkg_info"
fgrep -v -f "$2" "${all_pkg_file}" > "${buildtime_pkg_file}"
echo "${buildtime_pkg_file}"
# create_package_info: generates /etc/cos-package-info.json in the rootfs
# from the runtime and buildtime package lists. TEMP_PACKAGE_DIR is a global
# so the EXIT trap (cleanup_temp_package_dir) can remove it.
# NOTE(review): this copy appears to be missing lines — the argument
# continuation of the create_buildtime_package_info call, the name of the
# script invoked via sudo after "${script_root}/", any further arguments of
# that command, and the closing "}" are not visible; confirm against the
# original source before editing.
create_package_info() {
# Ensure the temp dir is cleaned up on any exit path.
trap cleanup_temp_package_dir EXIT
TEMP_PACKAGE_DIR="$(mktemp -d -p /tmp package-list.XXXXXXXXXX)"
local -r runtime_pkg_file=$(create_runtime_package_info "${TEMP_PACKAGE_DIR}")
local -r buildtime_pkg_file=$(create_buildtime_package_info \
local -r script_root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
sudo "${script_root}/" \
--input="installedPackages:${runtime_pkg_file},buildTimePackages:${buildtime_pkg_file}" \
--output="${root_fs_dir}"/etc/cos-package-info.json \
# Restore default EXIT handling once generation is done.
trap - EXIT
# write_toolchain_path: records the SDK toolchain path — derived from the
# SDK version config and the board's CHOST — both inside the image and in
# BUILD_DIR so it can be exported as an artifact.
# Globals read: BOARD, root_fs_dir, BUILD_DIR.
# NOTE(review): the closing "}" is not visible in this copy of the file.
write_toolchain_path() {
local -r cros_overlay="/mnt/host/source/src/third_party/chromiumos-overlay"
local -r sdk_ver_file="${cros_overlay}/chromeos/binhost/host/sdk_version.conf"
local -r ctarget="$(portageq-"${BOARD}" envvar CHOST)"
# Sourcing sdk_version.conf brings TC_PATH (used below) into scope; it is
# a template containing the literal "%(target)s" placeholder.
. "${sdk_ver_file}"
# Substitute the board's CHOST for the "%(target)s" placeholder.
local -r toolchain_path="${TC_PATH/\%\(target\)s/${ctarget}}"
# Write toolchain path to image.
echo "${toolchain_path}" | \
sudo tee "${root_fs_dir}/etc/toolchain-path" > /dev/null
# Write toolchain path to build dir so it can be exported as an artifact.
echo "${toolchain_path}" | \
sudo tee "${BUILD_DIR}/toolchain_path" > /dev/null
# Moves the given rootfs file (a relative path to "root_fs_dir") to the given
# artifact location (a relative path to "BUILD_DIR"). The directory containing
# the rootfs file is deleted if it becomes empty after this move. If the
# rootfs file doesn't exist, this function puts an empty file at the given
# artifact location.
# NOTE(review): this copy appears to be missing lines — there is likely a
# "return" (or an "else") after the touch, plus the closing "fi"s and the
# function's closing "}"; confirm against the original source.
move_for_artifact() {
local rootfs_file="${root_fs_dir}/$1"
local artifact="${BUILD_DIR}/$2"
# Missing rootfs file: export an empty placeholder artifact instead.
if [[ ! -f "${rootfs_file}" ]]; then
touch "${artifact}"
cp "${rootfs_file}" "${artifact}"
sudo rm "${rootfs_file}"
# Remove the containing directory if the move left it empty.
if [[ -z "$(ls -A "$(dirname "${rootfs_file}")")" ]]; then
sudo rmdir "$(dirname "${rootfs_file}")"
# Creates toolchain_env file in BUILD_DIR so that it can be exported as an
# artifact.
# NOTE(review): the closing "}" is not visible in this copy of the file.
export_toolchain_env() {
# File that has kernel compiler information.
move_for_artifact "etc/toolchain_env" "toolchain_env"
# Creates kernel_info file in BUILD_DIR so that it can be exported as an
# artifact.
# NOTE(review): the closing "}" is not visible in this copy of the file.
export_kernel_info() {
# File with kernel information.
move_for_artifact "etc/kernel_info" "kernel_info"
# Creates kernel_commit file in BUILD_DIR so that it can be exported as an
# artifact.
# NOTE(review): the closing "}" is not visible in this copy of the file.
export_kernel_commit() {
# File with kernel commit ID.
move_for_artifact "etc/kernel_commit" "kernel_commit"
# Export kernel config file as a BUILD artifact.
# NOTE(review): the enclosing function definition line (likely something
# like "export_kernel_config() {") and its closing "}" are not visible in
# this copy of the file; confirm against the original source.
# Copy config file to BUILD_DIR.
# For example: config-5.10.53
cp "${root_fs_dir}"/boot/config-* "${BUILD_DIR}/kernel_config"
# Exports default GPU driver version file as an artifact.
# Arguments:
#   $1 - directory containing the board scripts (script_root).
# NOTE(review): the closing "}" is not visible in this copy of the file.
export_gpu_default_version() {
local -r script_root="$1"
local -r default_driver_file="${script_root}/gpu_default_version"
local -r default_driver_artifact="${BUILD_DIR}/gpu_default_version"
# Copy scripts/gpu_default_version to BUILD artifact.
cp "${default_driver_file}" "${default_driver_artifact}"
# Exports latest GPU driver version file as an artifact.
# Arguments:
#   $1 - directory containing the board scripts (script_root).
# NOTE(review): the closing "}" is not visible in this copy of the file.
export_gpu_latest_version() {
local -r script_root="$1"
local -r latest_driver_file="${script_root}/gpu_latest_version"
local -r latest_driver_artifact="${BUILD_DIR}/gpu_latest_version"
# Copy scripts/gpu_latest_version to BUILD artifact.
cp "${latest_driver_file}" "${latest_driver_artifact}"
# Exports R470 GPU driver version file as an artifact.
# This is needed for supporting K80 GPU which is not
# compatible with NVIDIA R510 or newer drivers.
# Arguments:
#   $1 - directory containing the board scripts (script_root).
# NOTE(review): the closing "}" is not visible in this copy of the file.
export_gpu_R470_version() {
local -r script_root="$1"
local -r R470_driver_file="${script_root}/gpu_R470_version"
local -r R470_driver_artifact="${BUILD_DIR}/gpu_R470_version"
# Copy scripts/gpu_R470_version to BUILD artifact.
cp "${R470_driver_file}" "${R470_driver_artifact}"
# Apply recommended file permissions by CIS Benchmark.
# NOTE(review): the "fi"s closing the two conditionals and the function's
# closing "}" are not visible in this copy of the file.
change_file_permissions_for_cis() {
# Set permissions of systemd timer files as recommended by CIS.
# NOTE(review): "-name *.timer" is unquoted, so the shell may glob-expand
# the pattern against the current directory before find sees it (ShellCheck
# SC2061) — it should be quoted as -name '*.timer'. Also, plain xargs
# breaks on paths containing whitespace; find -print0 | xargs -0 (or
# -exec ... +) would be safer.
sudo find "${root_fs_dir}"/usr/lib/systemd/user \
"${root_fs_dir}"/usr/lib/systemd/system \
"${root_fs_dir}"/lib/systemd/system \
-type f -name *.timer | xargs sudo chmod go-wrx
# Set grub.cfg file permissions to 400 as recommended by CIS.
if [[ -f "${root_fs_dir}"/boot/efi/boot/grub.cfg ]]; then
sudo chmod 400 "${root_fs_dir}"/boot/efi/boot/grub.cfg
# Set /root dir permissions to 750 as recommended by CIS.
if [[ -d "${root_fs_dir}"/root ]]; then
sudo chmod 750 "${root_fs_dir}"/root
# board_finalize_base_image() is invoked as part of build_image before the
# rootfs is locked. Some rootfs changes have been made before this, and more
# rootfs changes are made after this.
# NOTE(review): several lines appear to be missing from this copy — the
# destination argument of the cos-package-info.json "cp", the artifact-name
# arguments of the two move_for_artifact calls, the bodies of the
# amd64/arm64 branches (which presumably set shim_arch — TODO confirm), the
# "else" before the unsupported-architecture error, the destination of the
# shim "cp", the closing "fi"s, and the function's closing "}". Confirm
# against the original source before editing.
board_finalize_base_image() {
local -r script_root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
info "Generating cos-package-info.json."
cp "${root_fs_dir}"/etc/cos-package-info.json \
move_for_artifact "opt/google/src/kernel-src.tar.gz" \
move_for_artifact "opt/google/src/kernel-headers.tgz" \
# Export the unstripped kernel image for debugging/symbolization.
cp "${BOARD_ROOT}/usr/lib/debug/boot/vmlinux" "${BUILD_DIR}/vmlinux"
export_gpu_default_version "${script_root}"
export_gpu_latest_version "${script_root}"
export_gpu_R470_version "${script_root}"
# /etc/machine-id gets installed by sys-apps/dbus and is a symlink.
# This conflicts with systemd's machine-id generation mechanism,
# so we remove the symlink and recreate it as an empty file.
sudo rm "${root_fs_dir}"/etc/machine-id
sudo touch "${root_fs_dir}"/etc/machine-id
local shim_arch=""
if [[ "${ARCH}" == "amd64" ]]; then
elif [[ "${ARCH}" == "arm64" ]]; then
error "Unsupported architecture. No shim. ARCH=${ARCH}"
return 1
info "Copying shim to boot${shim_arch}.efi"
sudo cp \
"${root_fs_dir}/boot/efi/boot/shim${shim_arch}.efi" \
info "Successfully copied shim to boot${shim_arch}.efi"
info "Deleting legacy EFI bootloaders"
# Don't delete bootx64.efi here, since it can be the shim now.
sudo rm -f "${root_fs_dir}"/boot/efi/boot/bootia32.efi
info "Successfully deleted legacy EFI bootloaders"
info "Creating a bootloader config template"
# Align images to 1MB page size.
# Azure image requires 1MB page size alignment, while
# MTest and VIT images require 4K page size alignment.
# We use 1MB as the image alignment and apply it to all
# images since nowadays most storage is aligned to 4096 instead of 512.
# Also the COS image is used as a virtual image in hypervisor/cloud
# environments.
# Arguments:
#   $1 - path to the base image file to pad.
# NOTE(review): the "fi" and closing "}" are not visible in this copy.
align_image_to_1m() {
local base_image="$1"
local size
local mod
local new_size
echo "Aligning to 1MiB to meet Azure/Mtest/VIT requirements"
size="$(stat --printf "%s" "${base_image}")"
mod="$(( size % (1 << 20) ))"
# Pad only if the current size is not already a 1 MiB multiple.
if [[ "${mod}" != 0 ]]; then
# Round the size up to the next 1 MiB boundary.
new_size="$(( (1 << 20) - mod + size ))"
qemu-img resize "${base_image}" "${new_size}"
# Might need to relocate the secondary GPT headers after growing the image.
cgpt repair "${base_image}"
# board_setup() is invoked as part of build_image when the base image is
# completely finished.
# Arguments:
#   $1 - path to the finished base image.
# NOTE(review): the closing "}" is not visible in this copy of the file.
board_setup() {
local base_image="$1"
align_image_to_1m "${base_image}"