blob: 9e47a82c94702074aa6509fe0dfe48280fa5a066 [file] [log] [blame]
#!/bin/bash
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Board-specific build hooks and settings. The board_*() functions below are
# invoked by the Chromium OS build_image flow (see the comments on each one);
# the assignments here tweak variables that build flow reads.
# This will add console=ttyS0 kernel cmdline flag, thus rerouting
# dmesg output to ttyS0 (serial port).
FLAGS_enable_serial=ttyS0
# Don't install upstart files.
INSTALL_MASK+=" /etc/init"
# Don't install symbol table for kdump kernel.
INSTALL_MASK+=" /boot/kdump/System.map-*"
# Temporary directory containing the package list
# files used for creating cos-package-info.json.
# Set by create_package_info(); removed by cleanup_temp_package_dir().
TEMP_PACKAGE_DIR=""
# Removes the temporary package-list directory, if one was created.
# Globals:
#   TEMP_PACKAGE_DIR (read) - directory to delete; may be empty/unset-like.
cleanup_temp_package_dir() {
  [[ -d "${TEMP_PACKAGE_DIR}" ]] || return 0
  rm -fr "${TEMP_PACKAGE_DIR}"
}
# board_make_image_bootable() is invoked as part of build_image after the
# kernel partitions are built and the rootfs is locked. This runs very
# late in the image build process.
# Arguments:
#   $1 - path to the disk image to make bootable.
# Returns 0 on success, 1 if bootloader installation fails. NOTE(review):
# failing to source bootloader_install.sh does "exit 1" (terminating the
# calling shell) rather than "return 1" -- callers should be aware.
board_make_image_bootable() {
local -r image="$1"
local -r script_root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
# bootloader_install.sh lives next to this script and is expected to
# define bootloader_install().
. "${script_root}/bootloader_install.sh" || exit 1
if ! bootloader_install "${image}"; then
error "Could not install bootloaders on ${image}"
return 1
fi
}
# Writes the list of packages installed into the image (the runtime
# dependencies of virtual/target-os) to "$1/runtime_pkg_info", one
# "category/name-version" string per line, and prints that file's path on
# stdout so the caller can capture it with $(...).
# Arguments:
#   $1 - directory in which to create the list file.
# Globals:
#   BOARD (read) - selects the emerge-${BOARD} wrapper to query.
create_runtime_package_info() {
local -r runtime_pkg_file="$1/runtime_pkg_info"
# The "emerge" command below generates the list of packages that
# virtual/target-os depends on. These are the packages that are
# installed into the image. Its results look like
#
# ...
# [binary R ] app-arch/gzip-1.9 to /build/lakitu/
# [binary R ] dev-libs/popt-1.16-r2 to /build/lakitu/
# [binary R ] app-emulation/docker-credential-helpers-0.6.3-r1 to /build/lakitu/
# ...
#
# This command line is similar to what ListInstalledPackage function (in
# chromite/licensing/licenses_lib.py) does.
#
# The "sed" command extracts the category name, the package name, and the
# version from each line. With that, the example above is converted to
#
# ...
# app-arch/gzip-1.9
# dev-libs/popt-1.16-r2
# app-emulation/docker-credential-helpers-0.6.3-r1
# ...
"emerge-${BOARD}" \
--quiet --with-bdeps=n --with-bdeps-auto=n \
--usepkgonly --emptytree --pretend \
--color=n virtual/target-os | \
sed -E 's/\[[^]]+\] (.+) to \/build\/.*/\1/' \
> "${runtime_pkg_file}"
# Print the path (the function's "return value") for the caller.
echo "${runtime_pkg_file}"
}
# Writes the full buildtime dependency list of virtual/target-os, minus the
# installed (runtime) packages, to "$1/buildtime_pkg_info" and prints that
# file's path on stdout so the caller can capture it with $(...).
# Arguments:
#   $1 - directory in which to create the list files.
#   $2 - file listing the installed packages (the output of
#        create_runtime_package_info); these are filtered out.
# Globals:
#   BOARD (read) - selects the emerge-${BOARD} wrapper and the sysroot.
create_buildtime_package_info() {
  local -r all_pkg_file="$1/all_pkg_info"
  # The "emerge" command below generates the list of packages that
  # virtual/target-os depends on. It includes both buildtime dependent
  # packages and installed packages. Its results look like
  #
  # ...
  # [ebuild R ] virtual/pkgconfig-0-r1 to /build/lakitu/
  # [ebuild R ] dev-go/protobuf-1.3.2-r1 to /build/lakitu/
  # [binary R ] app-arch/gzip-1.9 to /build/lakitu/
  # ...
  #
  # The "sed" command extracts the category name, the package name, and the
  # version from each line. With that, the example above is converted to
  #
  # ...
  # virtual/pkgconfig-0-r1
  # dev-go/protobuf-1.3.2-r1
  # app-arch/gzip-1.9
  # ...
  #
  # A package that is both a DEPEND and a BDEPEND dependency is listed
  # more than once, so "sort -u" collapses duplicates.
  "emerge-${BOARD}" \
    --quiet --with-bdeps=y --with-bdeps-auto=y \
    --emptytree --pretend --color=n \
    virtual/target-os | \
    sed -E 's/\[[^]]+\] ([[:alnum:]_.//-]*).*/\1/' | \
    sort -u \
    > "${all_pkg_file}"
  # The list above does not include some of the SDK related dependencies
  # such as glibc and gcc. To add those packages, use the package list at
  # /etc/portage/profile/package.provided under the sysroot. It is the same
  # location from where cros_extract_deps gets those packages for
  # generation of the CPE file.
  local -r pkg_list_file="/build/${BOARD}/etc/portage/profile/package.provided"
  # "|| [[ -n ... ]]" also keeps a final line that lacks a trailing newline.
  while IFS= read -r line || [[ -n "${line}" ]]
  do
    # Check for empty line and comment.
    [[ -z "${line}" || "${line}" =~ ^#.* ]] && continue
    echo "${line}" >> "${all_pkg_file}"
  done < "${pkg_list_file}"
  # The buildtime-only packages are all packages minus the installed ones
  # listed in $2 ("grep -F" replaces the deprecated "fgrep").
  # All the ebuild files should have a GPL license and therefore,
  # should be distributed in the image. So, there is no need
  # to check whether the package belongs to a public overlay or not.
  local -r buildtime_pkg_file="$1/buildtime_pkg_info"
  grep -F -v -f "$2" "${all_pkg_file}" > "${buildtime_pkg_file}"
  echo "${buildtime_pkg_file}"
}
# Generates /etc/cos-package-info.json in the rootfs from the runtime and
# buildtime package lists, using a temporary work directory that is cleaned
# up on every exit path via an EXIT trap.
# Globals:
#   TEMP_PACKAGE_DIR (written) - temp dir consumed by cleanup_temp_package_dir.
#   root_fs_dir, CHROMEOS_VERSION_STRING (read).
create_package_info() {
  # Ensure the temp directory is removed even if we error out below.
  trap cleanup_temp_package_dir EXIT
  TEMP_PACKAGE_DIR="$(mktemp -d -p /tmp package-list.XXXXXXXXXX)" || return 1
  # Declaration is split from assignment so a helper's non-zero exit status
  # is not masked by "local".
  local runtime_pkg_file buildtime_pkg_file
  runtime_pkg_file="$(create_runtime_package_info "${TEMP_PACKAGE_DIR}")"
  buildtime_pkg_file="$(create_buildtime_package_info \
    "${TEMP_PACKAGE_DIR}" \
    "${runtime_pkg_file}")"
  local -r script_root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
  sudo "${script_root}/create_pkg_info.py" \
    --input="installedPackages:${runtime_pkg_file},buildTimePackages:${buildtime_pkg_file}" \
    --output="${root_fs_dir}"/etc/cos-package-info.json \
    --build-id="${CHROMEOS_VERSION_STRING}"
  # Normal path: clean up eagerly and drop the trap.
  cleanup_temp_package_dir
  trap - EXIT
}
# Records the board's toolchain tarball path both inside the image
# (/etc/toolchain-path) and in BUILD_DIR (toolchain_path) so it can be
# exported as a build artifact.
# Globals:
#   BOARD, root_fs_dir, BUILD_DIR (read).
write_toolchain_path() {
local -r cros_overlay="/mnt/host/source/src/third_party/chromiumos-overlay"
local -r sdk_ver_file="${cros_overlay}/chromeos/binhost/host/sdk_version.conf"
local -r ctarget="$(portageq-"${BOARD}" envvar CHOST)"
# Sourcing sdk_version.conf defines TC_PATH, a template containing a
# "%(target)s" placeholder that is substituted with the board's CHOST.
. "${sdk_ver_file}"
local -r toolchain_path="${TC_PATH/\%\(target\)s/${ctarget}}"
# Write toolchain path to image.
echo "${toolchain_path}" | \
sudo tee "${root_fs_dir}/etc/toolchain-path" > /dev/null
# Write toolchain path to build dir so it can be exported as an artifact.
echo "${toolchain_path}" | \
sudo tee "${BUILD_DIR}/toolchain_path" > /dev/null
}
# Moves a rootfs file (path relative to "root_fs_dir") to an artifact
# location (path relative to "BUILD_DIR"). If the move empties the file's
# parent directory, that directory is removed as well. When the rootfs
# file does not exist, an empty placeholder artifact is created instead.
# Arguments:
#   $1 - rootfs-relative path of the file to move.
#   $2 - BUILD_DIR-relative path of the artifact to create.
# Globals:
#   root_fs_dir, BUILD_DIR (read).
move_for_artifact() {
  local src="${root_fs_dir}/$1"
  local dst="${BUILD_DIR}/$2"
  if [[ ! -f "${src}" ]]; then
    # Nothing to export; leave an empty placeholder artifact.
    touch "${dst}"
    return
  fi
  cp "${src}" "${dst}"
  sudo rm "${src}"
  local src_dir
  src_dir="$(dirname "${src}")"
  if [[ -z "$(ls -A "${src_dir}")" ]]; then
    sudo rmdir "${src_dir}"
  fi
}
# Creates toolchain_env file in BUILD_DIR so that it can be exported as an
# artifact. An empty artifact is produced if the file is absent from the
# rootfs (see move_for_artifact).
export_toolchain_env() {
# File that has kernel compiler information.
move_for_artifact "etc/toolchain_env" "toolchain_env"
}
# Creates kernel_info file in BUILD_DIR so that it can be exported as an
# artifact. An empty artifact is produced if the file is absent from the
# rootfs (see move_for_artifact).
export_kernel_info() {
# File with kernel information.
move_for_artifact "etc/kernel_info" "kernel_info"
}
# Creates kernel_commit file in BUILD_DIR so that it can be exported as an
# artifact. An empty artifact is produced if the file is absent from the
# rootfs (see move_for_artifact).
export_kernel_commit() {
# File with kernel commit ID.
move_for_artifact "etc/kernel_commit" "kernel_commit"
}
# Exports the default GPU driver version file as a build artifact by
# copying it from the scripts directory into BUILD_DIR.
# Arguments:
#   $1 - directory containing the gpu_default_version file.
# Globals:
#   BUILD_DIR (read).
export_gpu_default_version() {
  local -r src_dir="$1"
  # Copy scripts/gpu_default_version into the build artifacts.
  cp "${src_dir}/gpu_default_version" "${BUILD_DIR}/gpu_default_version"
}
# board_finalize_base_image() is invoked as part of build_image before the
# rootfs is locked. Some rootfs changes have been made before this, and more
# rootfs changes are made after this.
# Globals:
#   root_fs_dir, BUILD_DIR, BOARD_ROOT, ARCH, esp_fs_dir (read);
#   TEMP_PACKAGE_DIR (written indirectly via create_package_info).
# Returns 1 for an unsupported ARCH; 0 otherwise.
board_finalize_base_image() {
  local -r script_root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
  # Generate and export the package-info JSON.
  create_package_info
  cp "${root_fs_dir}"/etc/cos-package-info.json \
    "${BUILD_DIR}"/cos-package-info.json
  write_toolchain_path
  # Export kernel sources/headers and build metadata as artifacts.
  move_for_artifact "opt/google/src/kernel-src.tar.gz" \
    "kernel-src.tar.gz"
  move_for_artifact "opt/google/src/kernel-headers.tgz" \
    "kernel-headers.tgz"
  export_toolchain_env
  export_kernel_info
  export_kernel_commit
  cp "${BOARD_ROOT}/usr/lib/debug/boot/vmlinux" "${BUILD_DIR}/vmlinux"
  export_gpu_default_version "${script_root}"
  # /etc/machine-id gets installed by sys-apps/dbus and is a symlink.
  # This conflicts with systemd's machine-id generation mechanism,
  # so we remove the symlink and recreate it as an empty file.
  sudo rm "${root_fs_dir}"/etc/machine-id
  sudo touch "${root_fs_dir}"/etc/machine-id
  # Set permissions of systemd timer files as recommended by CIS.
  # The pattern is quoted so the shell cannot glob-expand it against the
  # current directory, and "-exec ... +" (instead of an unquoted pipe to
  # xargs) is whitespace-safe and runs nothing when there are no matches.
  sudo find "${root_fs_dir}"/usr/lib/systemd/user \
    "${root_fs_dir}"/usr/lib/systemd/system \
    "${root_fs_dir}"/lib/systemd/system \
    -type f -name '*.timer' -exec chmod go-wrx {} +
  # Set grub.cfg file permissions to 400 as recommended by CIS.
  if [[ -f "${root_fs_dir}"/boot/efi/boot/grub.cfg ]]; then
    sudo chmod 400 "${root_fs_dir}"/boot/efi/boot/grub.cfg
  fi
  # Install the shim as the default EFI boot binary for this architecture.
  local shim_arch=""
  if [[ "${ARCH}" == "amd64" ]]; then
    shim_arch="x64"
  elif [[ "${ARCH}" == "arm64" ]]; then
    shim_arch="aa64"
  else
    error "Unsupported architecture. No shim. ARCH=${ARCH}"
    return 1
  fi
  info "Copying shim to boot${shim_arch}.efi"
  sudo cp \
    "${root_fs_dir}/boot/efi/boot/shim${shim_arch}.efi" \
    "${root_fs_dir}/boot/efi/boot/boot${shim_arch}.efi"
  info "Successfully copied shim to boot${shim_arch}.efi"
  info "Deleting legacy EFI bootloaders"
  # Don't delete bootx64.efi here, since it can be the shim now.
  sudo rm -f "${root_fs_dir}"/boot/efi/boot/bootia32.efi
  info "Successfully deleted legacy EFI bootloaders"
  # Populate the dbx (UEFI forbidden-signature) files on the EFI partition.
  info "Populating dbx"
  sudo mkdir -p "${esp_fs_dir}"/efi/Google/GSetup/dbx
  sudo cp "${script_root}"/dbx/* "${esp_fs_dir}"/efi/Google/GSetup/dbx
  sudo chmod -R 755 "${esp_fs_dir}"/efi/Google/GSetup/dbx
  info "Successfully populated dbx"
}
# Pads an image file up to the next 1 MiB boundary, since Azure requires
# disk images whose size is a whole number of MiB. Already-aligned images
# are left untouched.
# Arguments:
#   $1 - path to the image file to align.
align_azure_image() {
  local -r image="$1"
  local -r mib=$(( 1 << 20 ))
  echo "Aligning to 1MiB to meet Azure requirements"
  local bytes
  bytes="$(stat --printf "%s" "${image}")"
  local -r remainder=$(( bytes % mib ))
  if (( remainder != 0 )); then
    # Round the size up to the next whole MiB.
    qemu-img resize "${image}" "$(( bytes + mib - remainder ))"
    # Resizing may strand the secondary GPT header before the new end of
    # the disk; repair relocates it.
    cgpt repair "${image}"
  fi
}
# board_setup() is invoked as part of build_image when the base image is
# completely finished.
# Arguments:
#   $1 - path to the finished base image.
# Globals:
#   FLAGS_board (read). NOTE(review): this uses FLAGS_board while other
#   functions in this file use BOARD -- presumably both are set by
#   build_image; verify against the calling scripts.
board_setup() {
local base_image="$1"
# Azure requires 1MiB-aligned images; only pad when the board is built
# with the platform_azure USE flag.
if has "platform_azure" "$("portageq-${FLAGS_board}" envvar USE)"; then
align_azure_image "${base_image}"
fi
}