| #!/bin/bash |
| # Copyright 2015 The Chromium OS Authors. All rights reserved. |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| |
| if [[ "${ARCH}" == "arm64" ]]; then |
| FLAGS_enable_serial=ttyAMA0 |
| elif [[ "${ARCH}" == "amd64" ]]; then |
| # This will add console=ttyS0 kernel cmdline flag, thus rerouting |
| # dmesg output to ttyS0 (serial port). |
| FLAGS_enable_serial="ttyS0,115200" |
| # This will add tty1 console before ttyS0. This way ttyS0 |
| # will be used as a default output for the boot messages from |
| # userland apps. Kernel messages will go to both outputs (if present) |
| FLAGS_boot_args="console=tty1" |
| fi |
| |
| # Don't install upstart files. |
| INSTALL_MASK+=" /etc/init" |
| |
| # Don't install symbol table for kdump kernel. |
| INSTALL_MASK+=" /boot/kdump/System.map-*" |
| |
| # Removes given item from the INSTALL_MASK string |
| remove_from_install_mask(){ |
| target="$1" |
| #Disable wildcard expansion and create array from the INSTALL_MASK string. |
| set -f |
| INSTALL_MASK_ARRAY=(${INSTALL_MASK}) |
| set +f |
| INSTALL_MASK_NEW="" |
| for value in "${INSTALL_MASK_ARRAY[@]}" |
| do |
| # Remove target from INSTALL_MASK |
| if [[ ${value} != "${target}" ]]; then |
| INSTALL_MASK_NEW+=" ${value}" |
| fi |
| done |
| INSTALL_MASK="${INSTALL_MASK_NEW}" |
| unset INSTALL_MASK_NEW |
| } |
| |
| # /boot/config-* is masked in common.sh through INSTALL_MASK which |
| # prevents installing already generated kernel config. This function |
| # removes it from the INSTALL_MASK |
| remove_from_install_mask "/boot/config-*" |
| |
| # Temporary directory containing the package list |
| # files used for creating cos-package-info.json. |
| TEMP_PACKAGE_DIR="" |
| |
| cleanup_temp_package_dir() { |
| if [[ -d "${TEMP_PACKAGE_DIR}" ]]; then |
| rm -fr "${TEMP_PACKAGE_DIR}" |
| fi |
| } |
| |
| # board_make_image_bootable() is invoked as part of build_image after the |
| # kernel partitions are built and the rootfs is locked. This runs very |
| # late in the image build process. |
| board_make_image_bootable() { |
| local -r image="$1" |
| local -r script_root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")" |
| . "${script_root}/bootloader_install.sh" || exit 1 |
| if ! bootloader_install "${image}"; then |
| error "Could not install bootloaders on ${image}" |
| return 1 |
| fi |
| } |
| |
| create_runtime_package_info() { |
| local -r runtime_pkg_file="$1/runtime_pkg_info" |
| |
| # The "emerge" command below generates the list of packages that |
| # virtual/target-os depends on. These are the packages that are |
| # installed into the image. Its results look like |
| # |
| # ... |
| # [binary R ] app-arch/gzip-1.9 to /build/lakitu/ |
| # [binary R ] dev-libs/popt-1.16-r2 to /build/lakitu/ |
| # [binary R ] app-emulation/docker-credential-helpers-0.6.3-r1 to /build/lakitu/ |
| # ... |
| # |
| # This command line is similar to what ListInstalledPackage function (in |
| # chromite/licensing/licenses_lib.py) does. |
| # |
| # The "sed" command extracts the category name, the package name, and the |
| # version from each line. With that, the example above is converted to |
| # |
| # ... |
| # app-arch/gzip-1.9 |
| # dev-libs/popt-1.16-r2 |
| # app-emulation/docker-credential-helpers-0.6.3-r1 |
| # ... |
| "emerge-${BOARD}" \ |
| --quiet --with-bdeps=n --with-bdeps-auto=n \ |
| --usepkgonly --emptytree --pretend \ |
| --color=n virtual/target-os | \ |
| sed -E 's/\[[^]]+\] (.+) to \/build\/.*/\1/' \ |
| > "${runtime_pkg_file}" |
| |
| echo "${runtime_pkg_file}" |
| } |
| |
| create_buildtime_package_info() { |
| local -r all_pkg_file="$1/all_pkg_info" |
| |
| # The "emerge" command below generates the list of packages that |
| # virtual/target-os depends on. It includes both buildtime dependent |
| # packages and installed packages. Its results look like |
| # |
| # ... |
| # [ebuild R ] virtual/pkgconfig-0-r1 to /build/lakitu/ |
| # [ebuild R ] dev-go/protobuf-1.3.2-r1 to /build/lakitu/ |
| # [binary R ] app-arch/gzip-1.9 to /build/lakitu/ |
| # ... |
| # |
| # The "sed" command extracts the category name, the package name, and the |
| # version from each line. With that, the example above is converted to |
| # |
| # ... |
| # virtual/pkgconfig-0-r1 |
| # dev-go/protobuf-1.3.2-r1 |
| # app-arch/gzip-1.9 |
| # ... |
| # |
| # The "emerge-${BOARD}" command list packages that are either a dependency |
| # due to DEPEND or BDEPEND or both. In case of both, multiple entries are |
| # present and `sort` and `uniq` are used to avoid that. |
| # |
| "emerge-${BOARD}" \ |
| --quiet --with-bdeps=y --with-bdeps-auto=y \ |
| --emptytree --pretend --color=n \ |
| virtual/target-os | \ |
| sed -E 's/\[[^]]+\] ([[:alnum:]_.//-]*).*/\1/' | \ |
| sort | uniq \ |
| > "${all_pkg_file}" |
| |
| # Package listed above does not include some of the SDK related |
| # dependencies such as glibc and gcc. To add those packages, |
| # use the package list present at /etc/portage/profile/package.provided |
| # under sysroot. It is the same location from where cros_extract_deps |
| # gets those package for generation of CPE file. |
| local -r pkg_list_file="/build/${BOARD}/etc/portage/profile/package.provided" |
| # The below condition will help in |
| # reading lines where \n is not present. |
| while read -r line || [ -n "${line}" ] |
| do |
| # skip comment. |
| [[ "${line}" =~ ^#.* ]] && continue |
| echo "${line}" >> "${all_pkg_file}" |
| done < "${pkg_list_file}" |
| |
| # Find the buildtime dependent packages. |
| # Argument $2 represent the file containing the installed packages. |
| # All the ebuild files should have a GPL license and therefore, |
| # should be distributed in the image. So, there is no need |
| # to check whether the package belongs to a public overlay or not. |
| local -r buildtime_pkg_file="$1/buildtime_pkg_info" |
| fgrep -v -f "$2" "${all_pkg_file}" > "${buildtime_pkg_file}" |
| |
| echo "${buildtime_pkg_file}" |
| } |
| |
| create_package_info() { |
| trap cleanup_temp_package_dir EXIT |
| TEMP_PACKAGE_DIR="$(mktemp -d -p /tmp package-list.XXXXXXXXXX)" |
| |
| local -r runtime_pkg_file=$(create_runtime_package_info "${TEMP_PACKAGE_DIR}") |
| local -r buildtime_pkg_file=$(create_buildtime_package_info \ |
| "${TEMP_PACKAGE_DIR}" \ |
| "${runtime_pkg_file}") |
| |
| local -r script_root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")" |
| sudo "${script_root}/create_pkg_info.py" \ |
| --input="installedPackages:${runtime_pkg_file},buildTimePackages:${buildtime_pkg_file}" \ |
| --output="${root_fs_dir}"/etc/cos-package-info.json \ |
| --build-id="${CHROMEOS_VERSION_STRING}" |
| |
| cleanup_temp_package_dir |
| trap - EXIT |
| } |
| |
| write_toolchain_path() { |
| local -r cros_overlay="/mnt/host/source/src/third_party/chromiumos-overlay" |
| local -r sdk_ver_file="${cros_overlay}/chromeos/binhost/host/sdk_version.conf" |
| local -r ctarget="$(portageq-"${BOARD}" envvar CHOST)" |
| . "${sdk_ver_file}" |
| local -r toolchain_path="${TC_PATH/\%\(target\)s/${ctarget}}" |
| # Write toolchain path to image. |
| echo "${toolchain_path}" | \ |
| sudo tee "${root_fs_dir}/etc/toolchain-path" > /dev/null |
| # Write toolchain path to build dir so it can be exported as an artifact. |
| echo "${toolchain_path}" | \ |
| sudo tee "${BUILD_DIR}/toolchain_path" > /dev/null |
| } |
| |
| # Moves the given rootfs file (a relative path to "root_fs_dir") to the given |
| # artifact location (a relative path to "BUILD_DIR"). The directory containing |
| # the rootfs file is deleted if it becomes empty after this move. If the |
| # rootfs file doesn't exist, this function puts an empty file at the given |
| # artifact location. |
| move_for_artifact() { |
| local rootfs_file="${root_fs_dir}/$1" |
| local artifact="${BUILD_DIR}/$2" |
| if [[ ! -f "${rootfs_file}" ]]; then |
| touch "${artifact}" |
| return |
| fi |
| cp "${rootfs_file}" "${artifact}" |
| sudo rm "${rootfs_file}" |
| if [[ -z "$(ls -A "$(dirname "${rootfs_file}")")" ]]; then |
| sudo rmdir "$(dirname "${rootfs_file}")" |
| fi |
| } |
| |
| # Creates toolchain_env file in BUILD_DIR so that it can be exported as an |
| # artifact. |
| export_toolchain_env() { |
| # File that has kernel compiler information. |
| move_for_artifact "etc/toolchain_env" "toolchain_env" |
| } |
| |
| # Creates kernel_info file in BUILD_DIR so that it can be exported as an |
| # artifact. |
| export_kernel_info() { |
| # File with kernel information. |
| move_for_artifact "etc/kernel_info" "kernel_info" |
| } |
| |
| # Creates kernel_commit file in BUILD_DIR so that it can be exported as an |
| # artifact. |
| export_kernel_commit() { |
| # File with kernel commit ID. |
| move_for_artifact "etc/kernel_commit" "kernel_commit" |
| } |
| |
| # Export kernel config file as a BUILD artifact. |
| export_kernel_config(){ |
| # Copy config file to BUILD_DIR. |
| # For example: config-5.10.53 |
| cp "${root_fs_dir}"/boot/config-* "${BUILD_DIR}/kernel_config" |
| } |
| |
| # Exports GPU driver versions textproto file as an artifact. |
| export_gpu_driver_versions_textproto() { |
| local -r script_root="$1" |
| local -r gpu_driver_versions_file="${script_root}/gpu_driver_versions.textproto" |
| local -r gpu_driver_versions_artifact="${BUILD_DIR}/gpu_driver_versions.textproto" |
| |
| # Copy scripts/gpu_driver_versions.textproto to BUILD artifact |
| cp "${gpu_driver_versions_file}" "${gpu_driver_versions_artifact}" |
| } |
| |
| # Exports default GPU driver version file as an artifact. |
| export_gpu_default_version() { |
| local -r script_root="$1" |
| local -r default_driver_file="${script_root}/gpu_default_version" |
| local -r default_driver_artifact="${BUILD_DIR}/gpu_default_version" |
| |
| # Copy scripts/gpu_default_version to BUILD artifact |
| cp "${default_driver_file}" "${default_driver_artifact}" |
| } |
| |
| # Exports latest GPU driver version file as an artifact. |
| export_gpu_latest_version() { |
| local -r script_root="$1" |
| local -r latest_driver_file="${script_root}/gpu_latest_version" |
| local -r latest_driver_artifact="${BUILD_DIR}/gpu_latest_version" |
| |
| # Copy scripts/gpu_latest_version to BUILD artifact |
| cp "${latest_driver_file}" "${latest_driver_artifact}" |
| } |
| |
| # Exports R535 GPU driver version file as an artifact. |
| export_gpu_R535_version() { |
| local -r script_root="$1" |
| local -r R535_driver_file="${script_root}/gpu_R535_version" |
| local -r R535_driver_artifact="${BUILD_DIR}/gpu_R535_version" |
| |
| # Copy scripts/gpu_R535_version to BUILD artifact |
| cp "${R535_driver_file}" "${R535_driver_artifact}" |
| } |
| |
| # Exports R550 GPU driver version file as an artifact. |
| export_gpu_R550_version() { |
| local -r script_root="$1" |
| local -r R550_driver_file="${script_root}/gpu_R550_version" |
| local -r R550_driver_artifact="${BUILD_DIR}/gpu_R550_version" |
| |
| # Copy scripts/gpu_R550_version to BUILD artifact |
| cp "${R550_driver_file}" "${R550_driver_artifact}" |
| } |
| |
| # Apply recommended file permissions by CIS Benchmark |
| change_file_permissions_for_cis() { |
| # set permissions of systemd timer files as recommended by CIS |
| sudo find "${root_fs_dir}"/usr/lib/systemd/user \ |
| "${root_fs_dir}"/usr/lib/systemd/system \ |
| "${root_fs_dir}"/lib/systemd/system \ |
| -type f -name *.timer | xargs sudo chmod go-wrx |
| |
| # Set grub.cfg file permissions to 400 as recommended by CIS |
| if [[ -f "${root_fs_dir}"/boot/efi/boot/grub.cfg ]]; then |
| sudo chmod 400 "${root_fs_dir}"/boot/efi/boot/grub.cfg |
| fi |
| |
| # Set /root dir permissions to 750 as recommended by CIS |
| if [[ -d "${root_fs_dir}"/root ]]; then |
| sudo chmod 750 "${root_fs_dir}"/root |
| fi |
| } |
| |
| # Creates the contents of known_modules.json that node_problem_detector uses |
| # to validate that new modules are not added after the image is built. |
| create_known_modules_json() { |
| local -r modules_root="${root_fs_dir}/lib/modules/" |
| # For every file ending in .ko, get the module name by replacing hyphens |
| # with underscores and removing the extension. Then add all of the |
| # module names as {"moduleName": "..."} objects in a JSON array. |
| find "${modules_root}" -name '*.ko' -printf '%f\n' | |
| sed 's/-/_/g' | |
| sed 's/\.ko$//' | |
| sort -u | |
| xargs printf '{"moduleName": "%s"},' | |
| sed -r 's/(.*),/[\1]/' | |
| jq |
| } |
| |
| # board_finalize_base_image() is invoked as part of build_image before the |
| # rootfs is locked. Some rootfs changes have been made before this, and more |
| # rootfs changes are made after this. |
| board_finalize_base_image() { |
| local -r script_root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")" |
| info "Generating cos-package-info.json." |
| (create_package_info) |
| cp "${root_fs_dir}"/etc/cos-package-info.json \ |
| "${BUILD_DIR}"/cos-package-info.json |
| write_toolchain_path |
| move_for_artifact "opt/google/src/kernel-src.tar.gz" \ |
| "kernel-src.tar.gz" |
| move_for_artifact "opt/google/src/kernel-headers.tgz" \ |
| "kernel-headers.tgz" |
| out_of_tree_drivers_dir="/build/${BOARD}/opt/google/drivers" |
| if [[ -d "${out_of_tree_drivers_dir}" ]] |
| then |
| mkdir -p "${BUILD_DIR}/extensions/gpu" |
| for driver in $(ls "${out_of_tree_drivers_dir}") |
| do |
| if [[ $driver == *.run ]] || [[ $driver == *signature.tar.gz ]] |
| then |
| info "Exporting NVIDIA precompiled driver ${driver}" |
| cp "${out_of_tree_drivers_dir}/${driver}" \ |
| "${BUILD_DIR}/extensions/gpu/${driver}" |
| else |
| info "Exporting NVIDIA prebuilt OSS kernel modules ${driver}" |
| cp "${out_of_tree_drivers_dir}/${driver}" \ |
| "${BUILD_DIR}/${driver}" |
| fi |
| done |
| fi |
| export_toolchain_env |
| export_kernel_info |
| export_kernel_commit |
| export_kernel_config |
| cp "${BOARD_ROOT}/usr/lib/debug/boot/vmlinux" "${BUILD_DIR}/vmlinux" |
| # EdgeOS boards do not use these GPU versions |
| if [[ "${BOARD}" != gmec-* ]] |
| then |
| export_gpu_driver_versions_textproto "${script_root}" |
| export_gpu_default_version "${script_root}" |
| export_gpu_latest_version "${script_root}" |
| export_gpu_R535_version "${script_root}" |
| export_gpu_R550_version "${script_root}" |
| fi |
| |
| # Generate known_modules.json if node_problem_detector is installed. |
| local -r node_problem_detector_dir="${root_fs_dir}/etc/node_problem_detector" |
| if [[ -e "${node_problem_detector_dir}" ]] |
| then |
| info "Generating known_modules.json" |
| known_modules_json=$(create_known_modules_json) |
| |
| local -r \ |
| known_modules_path="${node_problem_detector_dir}/known_modules.json" |
| # Write output to known_modules.json |
| echo "${known_modules_json}" | sudo tee "${known_modules_path}" > /dev/null |
| # Make sure the file can never be modified by non-root users after this |
| sudo chmod 644 "${known_modules_path}" |
| fi |
| |
| # /etc/machine-id gets installed by sys-apps/dbus and is a symlink. |
| # This conflicts with systemd's machine-id generation mechanism, |
| # so we remove the symlink and recreate it as an empty file. |
| sudo rm "${root_fs_dir}"/etc/machine-id |
| sudo touch "${root_fs_dir}"/etc/machine-id |
| |
| change_file_permissions_for_cis |
| # EdgeOS boards have a version of Cilium that can't support nftables yet |
| if [[ "${BOARD}" != gmec-* ]] |
| then |
| # Make nf_tables backend default |
| sudo env ROOT=${root_fs_dir} eselect iptables set xtables-nft-multi |
| fi |
| |
| local shim_arch="" |
| if [[ "${ARCH}" == "amd64" ]]; then |
| shim_arch="x64" |
| elif [[ "${ARCH}" == "arm64" ]]; then |
| shim_arch="aa64" |
| else |
| error "Unsupported architecture. No shim. ARCH=${ARCH}" |
| return 1 |
| fi |
| info "Copying shim to boot${shim_arch}.efi" |
| sudo cp \ |
| "${root_fs_dir}/boot/efi/boot/shim${shim_arch}.efi" \ |
| "${root_fs_dir}/boot/efi/boot/boot${shim_arch}.efi" |
| info "Successfully copied shim to boot${shim_arch}.efi" |
| |
| info "Deleting legacy EFI bootloaders" |
| # Don't delete bootx64.efi here, since it can be the shim now. |
| sudo rm -f "${root_fs_dir}"/boot/efi/boot/bootia32.efi |
| info "Successfully deleted legacy EFI bootloaders" |
| info "Creating a bootloader config template" |
| } |
| |
| # Align images to 1MB page size. |
| # Azure image requires 1MB page size alignment; while |
| # MTest and VIT image requires 4K page size alignment. |
| # We use 1MB as the image alignment and apply to all |
| # images since nowadays, most storage is aligned to 4096 instead of 512. |
| # Also COS image is used as virtual image in hypervisor/cloud |
| # environment. |
| align_image_to_1m() { |
| local base_image="$1" |
| local size |
| local mod |
| local new_size |
| echo "Aligning to 1MiB to meet Azure/Mtest/VIT requirements" |
| size="$(stat --printf "%s" "${base_image}")" |
| mod="$(( size % (1 << 20) ))" |
| if [[ "${mod}" != 0 ]]; then |
| new_size="$(( (1 << 20) - mod + size ))" |
| qemu-img resize "${base_image}" "${new_size}" |
| # Might need to relocate the secondary GPT headers |
| cgpt repair "${base_image}" |
| fi |
| } |
| |
| # board_setup() is invoked as part of build_image when the base image is |
| # completely finished. |
| board_setup() { |
| local base_image="$1" |
| align_image_to_1m "${base_image}" |
| } |