Merge commit '347a30e8c76afe64ecf2f4a3e2933a7ae6d90364' into sdk-update

Update to CrOS 15457.0.0

BUG=b/277779682
TEST=presubmit
RELEASE_NOTE=None

Change-Id: I0b34030ad7ae3870e1692c1a90b00e69b18999f1
diff --git a/DIR_METADATA b/DIR_METADATA
new file mode 100644
index 0000000..a6fc2cb
--- /dev/null
+++ b/DIR_METADATA
@@ -0,0 +1,17 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+#   https://source.chromium.org/chromium/infra/infra/+/HEAD:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+#   https://source.chromium.org/chromium/infra/infra/+/HEAD:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+buganizer {
+  component_id: 1027774  # ChromeOS > Infra > Build
+}
+
+buganizer_public {
+  component_id: 1037860  # Chrome OS Public Tracker > Services > Infra > Build
+}
+
+team_email: "chromeos-build-discuss@google.com"
diff --git a/OWNERS b/OWNERS
index be4b643..1f774dd 100644
--- a/OWNERS
+++ b/OWNERS
@@ -4,4 +4,4 @@
 per-file remote_access.sh = file:/OWNERS.kernel
 per-file create_remote_test_driver = file:chromiumos/chromite:/OWNERS.testplatform
 
-include chromiumos/chromite:/OWNERS.build
+include chromiumos/owners:v1:/infra/OWNERS.build
diff --git a/OWNERS.kernel b/OWNERS.kernel
index 5cf9ce5..1e02bb0 100644
--- a/OWNERS.kernel
+++ b/OWNERS.kernel
@@ -1,4 +1,4 @@
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg
index fafdbe2..5093233 100644
--- a/PRESUBMIT.cfg
+++ b/PRESUBMIT.cfg
@@ -1,3 +1,2 @@
 [Hook Scripts]
 cros lint = cros lint ${PRESUBMIT_FILES}
-cgpt_unittest = ./build_library/cgpt_unittest.py
diff --git a/bin/cros_make_image_bootable b/bin/cros_make_image_bootable
index 542229f..45d6dd3 100755
--- a/bin/cros_make_image_bootable
+++ b/bin/cros_make_image_bootable
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Copyright 2010 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
@@ -25,6 +25,7 @@
 }
 
 find_common_sh
+# shellcheck source=../common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
 # --- END COMMON.SH BOILERPLATE ---
 
@@ -32,10 +33,15 @@
 assert_inside_chroot
 
 # Load functions and constants for chromeos-install
+# shellcheck source=../../platform2/chromeos-common-script/share/chromeos-common.sh
 . /usr/share/misc/chromeos-common.sh || exit 1
+# shellcheck source=../build_library/build_image_util.sh
 . "${BUILD_LIBRARY_DIR}/build_image_util.sh" || exit 1
+# shellcheck source=../build_library/disk_layout_util.sh
 . "${BUILD_LIBRARY_DIR}/disk_layout_util.sh" || exit 1
+# shellcheck source=../build_library/mount_gpt_util.sh
 . "${BUILD_LIBRARY_DIR}/mount_gpt_util.sh" || exit 1
+# shellcheck source=../build_library/ext2_sb_util.sh
 . "${BUILD_LIBRARY_DIR}/ext2_sb_util.sh" || exit 1
 
 switch_to_strict_mode
@@ -148,6 +154,7 @@
 
 # board_options.sh relies on ${SRC_IMAGE} environment variable.
 SRC_IMAGE="${IMAGE}"
+# shellcheck source=../build_library/board_options.sh
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
 load_board_specific_script "board_specific_setup.sh"
 
@@ -261,6 +268,13 @@
     info "Disabling r/w mount of the root filesystem"
     local rootfs_offset="$(partoffset ${image} 3)"
     disable_rw_mount "${image}" "$(( rootfs_offset * 512 ))"
+
+    # For factory_install images, override FLAGS_enable_rootfs_verification
+    # here, so the following build_img calls won't make the kernel set up
+    # the device mapper on initialization.
+    if [[ "${FLAGS_image_type}" == "factory_install" ]]; then
+      FLAGS_enable_rootfs_verification=${FLAGS_FALSE}
+    fi
   fi
 
   trap "unmount_image ; die 'cros_make_image_bootable failed.'" EXIT
@@ -301,32 +315,56 @@
     "${FLAGS_image_type}" ROOT-A)"
   local rootfs_fs_size=$(get_filesystem_size "${FLAGS_image_type}" \
     "${partition_num_root_a}")
-  build_img "vmlinuz.image" "${root_dev}" "${rootfs_fs_size}" "${keyblock}" \
-            "recovery_kernel_data_key.vbprivk" "recovery_key.vbpubk"
-  build_img "hd_vmlinuz.image" "${root_dev}" "${rootfs_fs_size}" \
-            "kernel.keyblock" "kernel_data_key.vbprivk" "kernel_subkey.vbpubk" \
-            "vmlinuz_hd.vblock"
+
+  # Usually we need to ensure that there will always be a regular kernel on
+  # recovery and non-recovery images. But for the factory shim, we need to
+  # support 2 kernels signed with different recovery keys. b/269192903
+  local kern_a_image="vmlinuz.image"
+  build_img "${kern_a_image}" "${root_dev}" "${rootfs_fs_size}" \
+      "${keyblock}" "recovery_kernel_data_key.vbprivk" "recovery_key.vbpubk"
+  local kern_b_image="vmlinuz_b.image"
+  if [[ "${FLAGS_image_type}" == "factory_install" ]]; then
+    kern_b_image="vmlinuz.image"
+  else
+    build_img "${kern_b_image}" "${root_dev}" "${rootfs_fs_size}" \
+              "kernel.keyblock" "kernel_data_key.vbprivk" \
+              "kernel_subkey.vbpubk" "vmlinuz_hd.vblock"
+  fi
 
   # Check the size of kernel image and issue warning when image size is
   # near the limit.
-  local kernel_image_size_A=$(stat -c '%s' ${FLAGS_output_dir}/vmlinuz.image)
+  local kernel_image_size_A=$(stat -c '%s' ${FLAGS_output_dir}/${kern_a_image})
   info "Kernel image A   size is ${kernel_image_size_A} bytes."
-  local kernel_image_size_B=$(stat -c '%s' ${FLAGS_output_dir}/hd_vmlinuz.image)
+  local kernel_image_size_B=$(stat -c '%s' ${FLAGS_output_dir}/${kern_b_image})
   info "Kernel image B   size is ${kernel_image_size_B} bytes."
   local partition_num_kern_a="$(get_layout_partition_number \
     "${FLAGS_image_type}" KERN-A)"
   check_kernel_size ${kernel_image_size_A} ${partition_num_kern_a} A
   local partition_num_kern_b="$(get_layout_partition_number \
     "${FLAGS_image_type}" KERN-B)"
-  check_kernel_size ${kernel_image_size_B} ${partition_num_kern_b} B
+
+  # Since kernel-b is optional for the factory shim, ignore kernel-b if the
+  # size of its partition is less than or equal to the block size.
+  local need_kern_b=${FLAGS_TRUE}
+  if [[ "${FLAGS_image_type}" == "factory_install" ]]; then
+    local kernel_partition_size=$(get_partition_size ${FLAGS_image_type} \
+                                                   ${partition_num_kern_b})
+    local block_size="$(get_block_size)"
+    if [[ "${kernel_partition_size}" -le "${block_size}" ]]; then
+      need_kern_b=${FLAGS_FALSE}
+      warn "Kernel partition B is skipped!"
+    fi
+  fi
+  if [[ ${need_kern_b} -eq ${FLAGS_TRUE} ]]; then
+    check_kernel_size ${kernel_image_size_B} ${partition_num_kern_b} B
+  fi
 
   local rootfs_hash_size=$(stat -c '%s' ${FLAGS_rootfs_hash})
   local rootfs_partition_size=$(get_partition_size ${FLAGS_image_type} \
       ${partition_num_root_a})
   local rootfs_hash_pad=$(( rootfs_partition_size - rootfs_fs_size ))
   info "Appending rootfs.hash (${rootfs_hash_size} bytes) to the root fs"
-  if [[ ${rootfs_hash_size} -gt ${rootfs_hash_pad} ]]
-  then
+  if [[ ${rootfs_hash_size} -gt ${rootfs_hash_pad} ]]; then
     die "rootfs_partition_size - rootfs_fs_size is less than the needed " \
         "rootfs_hash_size (${rootfs_hash_size}), update your disk layout " \
         "configuration"
@@ -355,14 +393,15 @@
             "${FLAGS_statefulfs_mountpoint}"
   fi
 
-  # Install the kernel to both slots A and B so there will always be a regular
-  # kernel in slot B on recovery and non-recovery images.
+  # Install the kernel to both slots A and B.
   local koffset="$(partoffset ${image} ${partition_num_kern_a})"
-  sudo dd if="${FLAGS_output_dir}/vmlinuz.image" of="${image}" \
+  sudo dd if="${FLAGS_output_dir}/${kern_a_image}" of="${image}" \
     conv=notrunc bs=512 seek=${koffset} status=none
-  koffset="$(partoffset ${image} ${partition_num_kern_b})"
-  sudo dd if="${FLAGS_output_dir}/hd_vmlinuz.image" of="${image}" \
-    conv=notrunc bs=512 seek=${koffset} status=none
+  if [[ ${need_kern_b} -eq ${FLAGS_TRUE} ]]; then
+    koffset="$(partoffset ${image} ${partition_num_kern_b})"
+    sudo dd if="${FLAGS_output_dir}/${kern_b_image}" of="${image}" \
+      conv=notrunc bs=512 seek=${koffset} status=none
+  fi
 
   # Update the bootloaders.  The EFI system partition will be updated.
   local kernel_part=
@@ -381,7 +420,7 @@
 
   if [[ "${FLAGS_arch}" = "x86" || "${FLAGS_arch}" = "amd64" ]]; then
     # Use the kernel partition to acquire configuration flags.
-    kernel_part="--kernel_partition='${FLAGS_output_dir}/vmlinuz.image'"
+    kernel_part="--kernel_partition='${FLAGS_output_dir}/${kern_a_image}'"
     # Install syslinux on the EFI System Partition.
     kernel_part="${kernel_part} --install_syslinux"
   elif [[ "${FLAGS_arch}" = "arm64" ]]; then
@@ -410,12 +449,21 @@
   fi
 
   # We don't need to keep these files around anymore.
-  sudo rm -f "${FLAGS_rootfs_hash}" "${FLAGS_output_dir}/vmlinuz.image" \
-             "${FLAGS_output_dir}/hd_vmlinuz.image" \
+  sudo rm -f "${FLAGS_rootfs_hash}" "${FLAGS_output_dir}/${kern_a_image}" \
+             "${FLAGS_output_dir}/${kern_b_image}" \
              "${FLAGS_output_dir}/vmlinuz_hd.vblock"
 
   sudo losetup -d ${image_dev}
   unmount_image
+
+  # Since kern-b will be signed with another recovery key, we need to
+  # make kern-b bootable.
+  # TODO(b/270262345) Remove it after updating GPT flags.
+  if [[ "${FLAGS_image_type}" == "factory_install" \
+    && ${need_kern_b} -eq ${FLAGS_TRUE} ]]
+  then
+    sudo cgpt add -i "${partition_num_kern_b}" -P 15 -S 1 -T 15 "${IMAGE}"
+  fi
   trap - EXIT
 }
 
diff --git a/bin/proxy-gw b/bin/proxy-gw
index 19fe3da..ebc4277 100755
--- a/bin/proxy-gw
+++ b/bin/proxy-gw
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/build_image b/build_image
index 100ed27..8c0258f 100755
--- a/build_image
+++ b/build_image
@@ -1,9 +1,10 @@
 #!/bin/bash
 
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
+# Copyright 2022 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# shellcheck source=common.sh
 . "$(dirname "$0")/common.sh" || exit 1
 
 new_script="build_image"
diff --git a/build_image.sh b/build_image.sh
index c966a79..ac706aa 100755
--- a/build_image.sh
+++ b/build_image.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
+# Copyright 2022 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -18,14 +18,6 @@
     'Please run `build_images` from $PATH (in chromite/bin/) instead.'
 fi
 
-# Make sure we run with network disabled to prevent leakage.
-if [[ -z ${UNSHARE} ]]; then
-  if [[ $(id -u) -ne 0 ]]; then
-    exec sudo -E env PATH="${PATH}" "$0" "$@"
-  fi
-  exec unshare -n -- sudo -E UNSHARE=true -u "${SUDO_USER}" -- "$0" "$@"
-fi
-
 # Discard the 'script-is-run-only-by-chromite-and-not-users' flag.
 shift
 
@@ -115,8 +107,6 @@
 
 load_board_specific_script "board_specific_setup.sh"
 
-sudo_clear_shadow_locks "/build/${FLAGS_board}"
-
 # TODO: <prebuild hook>
 
 # Create the base image.
diff --git a/build_kernel_image.sh b/build_kernel_image.sh
index dcc4034..9519d51 100755
--- a/build_kernel_image.sh
+++ b/build_kernel_image.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Copyright 2010 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -12,7 +12,8 @@
 # reflected in ensure_secure_kernelparams.config and deployed to production
 # signing before landed here.
 
-SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
+# shellcheck source=common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
 
 # Flags.
@@ -28,7 +29,7 @@
   "The path to the kernel (Default: vmlinuz)"
 DEFINE_string working_dir "/tmp/vmlinuz.working" \
   "Working directory for in-progress files. (Default: /tmp/vmlinuz.working)"
-DEFINE_boolean keep_work ${FLAGS_FALSE} \
+DEFINE_boolean keep_work "${FLAGS_FALSE}" \
   "Keep temporary files (*.keyblock, *.vbpubk). (Default: false)"
 DEFINE_string keys_dir "${VBOOT_TESTKEYS_DIR}" \
   "Directory with the RSA signing keys. (Defaults to test keys)"
@@ -60,9 +61,9 @@
   "Cryptographic hash algorithm used for dm-verity. (Default: sha256)"
 DEFINE_string verity_salt "" \
   "Salt to use for rootfs hash (Default: \"\")"
-DEFINE_boolean enable_rootfs_verification ${FLAGS_TRUE} \
+DEFINE_boolean enable_rootfs_verification "${FLAGS_TRUE}" \
   "Enable kernel-based root fs integrity checking. (Default: true)"
-DEFINE_boolean enable_bootcache ${FLAGS_FALSE} \
+DEFINE_boolean enable_bootcache "${FLAGS_FALSE}" \
   "Enable boot cache to accelerate booting. (Default: false)"
 DEFINE_string enable_serial "" \
   "Enable serial port for printks. Example values: ttyS0"
@@ -78,23 +79,25 @@
 
 # N.B.  Ordering matters for some of the libraries below, because
 # some of the files contain initialization used by later files.
+# shellcheck source=build_library/board_options.sh
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
+# shellcheck source=build_library/disk_layout_util.sh
 . "${BUILD_LIBRARY_DIR}/disk_layout_util.sh" || exit 1
 
 
 rootdigest() {
   local digest=${table#*root_hexdigest=}
-  echo ${digest% salt*}
+  echo "${digest% salt*}"
 }
 
 salt() {
   local salt=${table#*salt=}
-  echo ${salt%}
+  echo "${salt%}"
 }
 
 hashstart() {
   local hash=${table#*hashstart=}
-  echo ${hash% alg*}
+  echo "${hash% alg*}"
 }
 
 # Estimate of sectors used by verity
@@ -134,7 +137,7 @@
   else
     # We try to autodetect the rootfs_image filesystem size.
     if [[ -f "${FLAGS_rootfs_image}" ]]; then
-      root_fs_size=$(stat -c '%s' ${FLAGS_rootfs_image})
+      root_fs_size=$(stat -c '%s' "${FLAGS_rootfs_image}")
     elif [[ -b "${FLAGS_rootfs_image}" ]]; then
       root_fs_type="$(awk -v rootdev="${FLAGS_rootfs_image}" \
                      '$1 == rootdev { print $3 }' /proc/mounts | head -n 1)"
@@ -301,18 +304,18 @@
   # Legacy BIOS will use the kernel in the rootfs (via syslinux), as will
   # standard EFI BIOS (via grub, from the EFI System Partition). Chrome OS
   # BIOS will use a separate signed kernel partition, which we'll create now.
-  cat <<EOF >> "${FLAGS_working_dir}/config.txt"
+  cat <<EOF >> "${config}"
 add_efi_memmap
 boot=local
 noresume
 noswap
 i915.modeset=1
 EOF
-  WORK="${WORK} ${FLAGS_working_dir}/config.txt"
+  WORK="${WORK} ${config}"
 
   bootloader_path="/lib64/bootstub/bootstub.efi"
 elif [[ "${FLAGS_arch}" == "arm" || "${FLAGS_arch}" == "mips"  || "${FLAGS_arch}" == "arm64" ]]; then
-  WORK="${WORK} ${FLAGS_working_dir}/config.txt"
+  WORK="${WORK} ${config}"
 
   # arm does not need/have a bootloader in kernel partition
   dd if="/dev/zero" of="${FLAGS_working_dir}/bootloader.bin" bs=512 count=1
@@ -328,14 +331,14 @@
 # an artifact by cbuildbot.  Non .bin's need to be explicitly specified
 # and would require the entire set of artifacts to be specified.
 info "Saving kernel as ${FLAGS_working_dir}/vmlinuz.bin"
-cp ${kernel_image} ${FLAGS_working_dir}/vmlinuz.bin
+cp "${kernel_image}" "${FLAGS_working_dir}/vmlinuz.bin"
 
 for image_type in $(get_image_types); do
   already_seen_rootfs=0
-  for partition in $(get_partitions ${image_type}); do
-    format=$(get_format ${image_type} "${partition}")
+  for partition in $(get_partitions "${image_type}"); do
+    format=$(get_format "${image_type}" "${partition}")
     if [[ "${format}" == "ubi" ]]; then
-      type=$(get_type ${image_type} "${partition}")
+      type=$(get_type "${image_type}" "${partition}")
       # cgpt.py ensures that the rootfs partitions are compatible, in that if
       # one is ubi then both are, and they have the same number of reserved
       # blocks. We only want to attach one of them in boot to save time, so
@@ -349,26 +352,25 @@
       else
         partname="${partition}"
       fi
-      reserved=$(get_reserved_erase_blocks ${image_type} "${partition}")
-      echo "ubi.mtd=${partname},0,${reserved},${partname}" \
-          >> "${FLAGS_working_dir}/config.txt"
-      fs_format=$(get_filesystem_format ${image_type} "${partition}")
+      reserved=$(get_reserved_erase_blocks "${image_type}" "${partition}")
+      echo "ubi.mtd=${partname},0,${reserved},${partname}" >> "${config}"
+      fs_format=$(get_filesystem_format "${image_type}" "${partition}")
       if [[ "${fs_format}" != "ubifs" ]]; then
-        echo "ubi.block=${partname},0" >> "${FLAGS_working_dir}/config.txt"
+        echo "ubi.block=${partname},0" >> "${config}"
       fi
     fi
   done
 done
 
-config_file="${FLAGS_working_dir}/config.txt"
-modify_kernel_command_line "${config_file}"
+modify_kernel_command_line "${config}"
+
 # Create and sign the kernel blob
 vbutil_kernel \
   --pack "${FLAGS_to}" \
   --keyblock "${FLAGS_keys_dir}/${FLAGS_keyblock}" \
   --signprivate "${FLAGS_keys_dir}/${FLAGS_private}" \
   --version 1 \
-  --config "${config_file}" \
+  --config "${config}" \
   --bootloader "${bootloader_path}" \
   --vmlinuz "${kernel_image}" \
   --arch "${FLAGS_arch}"
@@ -387,7 +389,7 @@
 if [[ ${FLAGS_keep_work} -eq ${FLAGS_FALSE} ]]; then
   info "Cleaning up temporary files: ${WORK}"
   rm ${WORK}
-  rmdir ${FLAGS_working_dir}
+  rmdir "${FLAGS_working_dir}"
 fi
 
 info "Kernel partition image emitted: ${FLAGS_to}"
diff --git a/build_library/README.disk_layout b/build_library/README.disk_layout
index 2ca2944..0b12f05 100644
--- a/build_library/README.disk_layout
+++ b/build_library/README.disk_layout
@@ -1,4 +1,4 @@
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -14,14 +14,17 @@
     modern images, or legacy_disk_layout.json for old boards.
 
 A layout file consists of layouts. Common types of layouts would be "base",
-"usb", "vm" and "common". Each of these layouts is made up of one or more
-partitions.
+"usb", "usb-updatable", "vm" and "common". Each of these layouts is made up of
+one or more partitions.
 
 The "common" layout is special, since it's the layout on which all other layouts
-get overlaid upon. The base layout represents the "installed" layout, the way
+get overlaid upon. "base" layout represents the "installed" layout, the way
 things should be once the image has been installed onto a device. The "usb"
 layout represents how things should look on a USB image (typically, this means
-that ROOT-B is 1 block long).
+that ROOT-B is 1 block long). "usb-updatable" tracks "usb", but its ROOT-B is
+the same size as ROOT-A, so it is updatable.
+"base" can differ from "usb-updatable" if the device uses specialized storage
+(direct raw flash instead of the usual block-device storage).
 
 
 Here is an example disk layout with comments.
@@ -348,6 +351,5 @@
 how do I do this now?
 
 Most commonly, create an image using one of the development layouts:
-    ./build_image --disk_layout 2gb-rootfs
-    ./build_image --disk_layout 2gb-rootfs-updatable
+    ./build_image --disk_layout usb-updatable
     ./build_image --disk_layout 4gb-rootfs
diff --git a/build_library/base_image_util.sh b/build_library/base_image_util.sh
index 849a3e3..92e49e6 100755
--- a/build_library/base_image_util.sh
+++ b/build_library/base_image_util.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -147,6 +147,18 @@
   sudo mkdir -p "${BOARD_ROOT}/build/dev-install"
   sudo mv "${pkgs_out}/package.installable" "${BOARD_ROOT}/build/dev-install/"
 
+  # installable_commands is the list of all programs installed into ${PATH} from
+  # package.installable.
+  local installable_packages=()
+  mapfile -t installable_packages \
+    < "${BOARD_ROOT}/build/dev-install/package.installable"
+  qlist-"${BOARD}" -e "${installable_packages[@]}" | \
+    grep -E '^(/usr)?/s?bin/' | \
+    awk -F/ '{print $NF}' | \
+    sort -u > "${pkgs_out}/installable_commands"
+  sudo cp "${pkgs_out}/installable_commands" \
+    "${root_fs_dir}/usr/share/dev-install/installable_commands"
+
   local package_provided_dir="${root_fs_dir}/usr/share/dev-install/"
   package_provided_dir+="portage/make.profile/package.provided"
 
@@ -280,6 +292,45 @@
   install_libc_for_abi "${root_fs_dir}" "${CHOST}"
 }
 
+inject_system_wide_scudo() {
+  local root_fs_dir="$1"
+  info "'system_wide_scudo' enabled; Injecting scudo..."
+  local arch
+  local libdir
+  case "${ARCH}" in
+    x86)
+      arch='i386'
+      libdir='lib'
+      ;;
+    amd64)
+      arch='x86_64'
+      libdir='lib64'
+      ;;
+    arm)
+      arch='armhf'
+      libdir="lib"
+      ;;
+    arm64)
+      arch='aarch64'
+      libdir="lib64"
+      ;;
+    *) die "Inject scudo: unknown ARCH '${ARCH}'";;
+  esac
+  local libname="libclang_rt.scudo_standalone-${arch}.so"
+  local rootfs_scudo_loc="/usr/${libdir}/${libname}"
+  local preload_file="${root_fs_dir}/etc/ld.so.preload"
+  if [[ -f "${preload_file}" ]]; then
+    die "Inject scudo: preload '${preload_file}' already exists"
+  fi
+  local absolute_scudo_loc="${root_fs_dir}${rootfs_scudo_loc}"
+  if [[ -f "${absolute_scudo_loc}" ]]; then
+    echo "${rootfs_scudo_loc}" | sudo tee "${preload_file}" > /dev/null
+    info "Wrote ${rootfs_scudo_loc} to ${preload_file}"
+  else
+    die "Inject scudo: lib '${absolute_scudo_loc}' does NOT exist"
+  fi
+}
+
 create_base_image() {
   local image_name=$1
   local rootfs_verification_enabled=$2
@@ -339,19 +390,31 @@
   # trim the image size as much as possible.
   emerge_to_image --root="${root_fs_dir}" ${BASE_PACKAGE}
 
+  # Inject scudo system wide if system_wide_scudo is enabled.
+  if has "system_wide_scudo" in "$(portageq-"${BOARD}" envvar USE)"; then
+    # We do inject at this stage (and not during build_packages) because
+    # some unit tests fail when tested with Scudo. This isn't an inherent
+    # problem with Scudo, but instead is a problem with shared memory regions
+    # between allocators. If the host is using GNU Allocator and the VM is
+    # using Scudo, we're going to end up with a lot of memory errors.
+    inject_system_wide_scudo "${root_fs_dir}"
+  fi
+
   # Run depmod to recalculate the kernel module dependencies.
   run_depmod "${BOARD_ROOT}" "${root_fs_dir}"
 
   # Generate the license credits page for the packages installed on this
   # image in a location that will be used by Chrome.
   info "Generating license credits page. Time:"
-  sudo mkdir -p "${root_fs_dir}/opt/google/chrome/resources"
-  local license_path="${root_fs_dir}/opt/google/chrome/resources/about_os_credits.html"
+  local dir_name="${root_fs_dir}/opt/google/chrome/resources"
+  sudo mkdir -p "${dir_name}"
+  local license_path="${dir_name}/about_os_credits.html.gz"
   time info_run sudo "${GCLIENT_ROOT}/chromite/licensing/licenses" \
     --board="${BOARD}" \
     --log-level error \
     --generate-licenses \
-    --output "${license_path}"
+    --output "${license_path}" \
+    --compress-output
   # Copy the license credits file to ${BUILD_DIR} so that is will be uploaded
   # as artifact later in ArchiveStage.
   if [[ -r "${license_path}" ]]; then
@@ -503,9 +566,6 @@
   # use those templates to update the legacy boot partition (12/ESP)
   # on update.
   # (This script does not populate vmlinuz.A and .B needed by syslinux.)
-  # Factory install shims may be booted from USB by legacy EFI BIOS, which does
-  # not support verified boot yet (see create_legacy_bootloader_templates.sh)
-  # so rootfs verification is disabled if we are building with --factory_install
   local enable_rootfs_verification=
   if [[ ${rootfs_verification_enabled} -eq ${FLAGS_TRUE} ]]; then
     enable_rootfs_verification="--enable_rootfs_verification"
diff --git a/build_library/board_options.sh b/build_library/board_options.sh
index 50f6243..41a5ee9 100644
--- a/build_library/board_options.sh
+++ b/build_library/board_options.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/build_library/build_common.sh b/build_library/build_common.sh
index 87ef6b2..8fefe8b 100644
--- a/build_library/build_common.sh
+++ b/build_library/build_common.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -9,11 +9,13 @@
 # common to all the scripts.
 
 # SCRIPT_ROOT must be set prior to sourcing this file
+# shellcheck source=../common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
 
 # All scripts using this file must be run inside the chroot.
 restart_in_chroot_if_needed "$@"
 
+# shellcheck source=../../platform2/chromeos-common-script/share/chromeos-common.sh
 . /usr/share/misc/chromeos-common.sh || exit 1
 
 locate_gpt
diff --git a/build_library/build_image_util.sh b/build_library/build_image_util.sh
index 53c735d..2b79116 100755
--- a/build_library/build_image_util.sh
+++ b/build_library/build_image_util.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/build_library/cgpt.py b/build_library/cgpt.py
deleted file mode 100755
index 984d3ec..0000000
--- a/build_library/cgpt.py
+++ /dev/null
@@ -1,1660 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Parse and operate based on disk layout files.
-
-For information on the JSON format, see:
-  http://dev.chromium.org/chromium-os/developer-guide/disk-layout-format
-
-The --adjust_part flag takes arguments like:
-  <label>:<op><size>
-Where:
-  <label> is a label name as found in the disk layout file
-  <op> is one of the three: + - =
-  <size> is a number followed by an optional size qualifier:
-         B, KiB, MiB, GiB, TiB: bytes, kibi-, mebi-, gibi-, tebi- (base 1024)
-         B,   K,   M,   G,   T: short hand for above
-         B,  KB,  MB,  GB,  TB: bytes, kilo-, mega-, giga-, tera- (base 1000)
-
-This will set the ROOT-A partition size to 1 gibibytes (1024 * 1024 * 1024 * 1):
-  --adjust_part ROOT-A:=1GiB
-This will grow the ROOT-A partition size by 500 mebibytes (1024 * 1024 * 500):
-  --adjust_part ROOT-A:+500MiB
-This will shrink the ROOT-A partition size by 10 mebibytes (1024 * 1024 * 10):
-  --adjust_part ROOT-A:-20MiB
-"""
-
-from __future__ import division
-from __future__ import print_function
-
-import argparse
-import copy
-import inspect
-import json
-import math
-import os
-import re
-import sys
-
-
-class ConfigNotFound(Exception):
-  """Config Not Found"""
-
-class PartitionNotFound(Exception):
-  """Partition Not Found"""
-
-class InvalidLayout(Exception):
-  """Invalid Layout"""
-
-class InvalidAdjustment(Exception):
-  """Invalid Adjustment"""
-
-class InvalidSize(Exception):
-  """Invalid Size"""
-
-class ConflictingOptions(Exception):
-  """Conflicting Options"""
-
-class ConflictingPartitionOrder(Exception):
-  """The partition order in the parent and child layout don't match."""
-
-class MismatchedRootfsFormat(Exception):
-  """Rootfs partitions in different formats"""
-
-class MismatchedRootfsBlocks(Exception):
-  """Rootfs partitions have different numbers of reserved erase blocks"""
-
-class MissingEraseBlockField(Exception):
-  """Partition has reserved erase blocks but not other fields needed"""
-
-class ExcessFailureProbability(Exception):
-  """Chances are high that the partition will have too many bad blocks"""
-
-class UnalignedPartition(Exception):
-  """Partition size does not divide erase block size"""
-
-class ExpandNandImpossible(Exception):
-  """Partition is raw NAND and marked with the incompatible expand feature"""
-
-class ExcessPartitionSize(Exception):
-  """Partitions sum to more than the size of the whole device"""
-
-COMMON_LAYOUT = 'common'
-BASE_LAYOUT = 'base'
-# Blocks of the partition entry array.
-SIZE_OF_PARTITION_ENTRY_ARRAY_BYTES = 16 * 1024
-SIZE_OF_PMBR = 1
-SIZE_OF_GPT_HEADER = 1
-DEFAULT_SECTOR_SIZE = 512
-MAX_SECTOR_SIZE = 8 * 1024
-START_SECTOR = 4 * MAX_SECTOR_SIZE
-SECONDARY_GPT_BYTES = SIZE_OF_PARTITION_ENTRY_ARRAY_BYTES + \
-  SIZE_OF_GPT_HEADER * MAX_SECTOR_SIZE
-
-def ParseHumanNumber(operand):
-  """Parse a human friendly number
-
-  This handles things like 4GiB and 4MB and such.  See the usage string for
-  full details on all the formats supported.
-
-  Args:
-    operand: The number to parse (may be an int or string)
-
-  Returns:
-    An integer
-  """
-  operand = str(operand)
-  negative = -1 if operand.startswith('-') else 1
-  if negative == -1:
-    operand = operand[1:]
-  operand_digits = re.sub(r'\D', r'', operand)
-
-  size_factor = block_factor = 1
-  suffix = operand[len(operand_digits):].strip()
-  if suffix:
-    size_factors = {'B': 0, 'K': 1, 'M': 2, 'G': 3, 'T': 4,}
-    try:
-      size_factor = size_factors[suffix[0].upper()]
-    except KeyError:
-      raise InvalidAdjustment('Unknown size type %s' % suffix)
-    if size_factor == 0 and len(suffix) > 1:
-      raise InvalidAdjustment('Unknown size type %s' % suffix)
-    block_factors = {'': 1024, 'B': 1000, 'IB': 1024,}
-    try:
-      block_factor = block_factors[suffix[1:].upper()]
-    except KeyError:
-      raise InvalidAdjustment('Unknown size type %s' % suffix)
-
-  return int(operand_digits) * pow(block_factor, size_factor) * negative
-
-
-def ProduceHumanNumber(number):
-  """A simple reverse of ParseHumanNumber, converting a number to human form.
-
-  Args:
-    number: A number (int) to be converted to human form.
-
-  Returns:
-    A string, such as "1 KiB", that satisfies the condition
-      ParseHumanNumber(ProduceHumanNumber(i)) == i.
-  """
-  scales = [
-      (2**40, 'Ti'),
-      (10**12, 'T'),
-      (2**30, 'Gi'),
-      (10**9, 'G'),
-      (2**20, 'Mi'),
-      (10**6, 'M'),
-      (2**10, 'Ki'),
-      (10**3, 'K')
-  ]
-  for denom, suffix in scales:
-    if (number % denom) == 0:
-      return '%d %sB' % (number // denom, suffix)
-  return str(number)
-
-
-def ParseRelativeNumber(max_number, number):
-  """Return the number that is relative to |max_number| by |number|
-
-  We support three forms:
-   90% - |number| is a percentage of |max_number|
-   100 - |number| is the answer already (and |max_number| is ignored)
-   -90 - |number| is subtracted from |max_number|
-
-  Args:
-    max_number: The limit to use when |number| is negative or a percent
-    number: The (possibly relative) number to parse (may be an int or string)
-  """
-  max_number = int(max_number)
-  number = str(number)
-  if number.endswith('%'):
-    percent = number[:-1] / 100
-    return int(max_number * percent)
-  else:
-    number = ParseHumanNumber(number)
-    if number < 0:
-      return max_number + number
-    else:
-      return number
-
-
-def _ApplyLayoutOverrides(layout_to_override, layout):
-  """Applies |layout| overrides on to |layout_to_override|.
-
-  First add missing partition from layout to layout_to_override.
-  Then, update partitions in layout_to_override with layout information.
-  """
-  # First check that all the partitions defined in both layouts are defined in
-  # the same order in each layout. Otherwise, the order in which they end up
-  # in the merged layout doesn't match what the user sees in the child layout.
-  common_nums = set.intersection(
-      {part['num'] for part in layout_to_override if 'num' in part},
-      {part['num'] for part in layout if 'num' in part})
-  layout_to_override_order = [part['num'] for part in layout_to_override
-                              if part.get('num') in common_nums]
-  layout_order = [part['num'] for part in layout
-                  if part.get('num') in common_nums]
-  if layout_order != layout_to_override_order:
-    raise ConflictingPartitionOrder(
-        'Layouts share partitions %s but they are in different order: '
-        'layout_to_override: %s, layout: %s' % (
-            sorted(common_nums),
-            [part.get('num') for part in layout_to_override],
-            [part.get('num') for part in layout]))
-
-  # Merge layouts with the partitions in the same order they are in both
-  # layouts.
-  part_index = 0
-  for part_to_apply in layout:
-    num = part_to_apply.get('num')
-
-    if part_index == len(layout_to_override):
-      # The part_to_apply is past the list of partitions to override, this
-      # means that is a new partition added at the end.
-      # Need of deepcopy, in case we change layout later.
-      layout_to_override.append(copy.deepcopy(part_to_apply))
-    elif layout_to_override[part_index].get('num') is None and num is None:
-      # Allow modifying gaps after a partition.
-      # TODO(deymo): Drop support for "gap" partitions and use alignment
-      # instead.
-      layout_to_override[part_index].update(part_to_apply)
-    elif num in common_nums:
-      while layout_to_override[part_index].get('num') != num:
-        part_index += 1
-      layout_to_override[part_index].update(part_to_apply)
-    else:
-      # Need of deepcopy, in case we change layout later.
-      layout_to_override.insert(part_index, copy.deepcopy(part_to_apply))
-    part_index += 1
-
-
-def LoadJSONWithComments(filename):
-  """Loads a JSON file ignoring lines with comments.
-
-  RFC 7159 doesn't allow comments on the file JSON format. This functions loads
-  a JSON file removing all the comment lines. A comment line is any line
-  starting with # and optionally indented with whitespaces. Note that inline
-  comments are not supported.
-
-  Args:
-    filename: The input filename.
-
-  Returns:
-    The parsed JSON object.
-  """
-  regex = re.compile(r'^\s*#.*')
-  with open(filename) as f:
-    source = ''.join(regex.sub('', line) for line in f)
-  return json.loads(source)
-
-
-def _LoadStackedPartitionConfig(filename):
-  """Loads a partition table and its possible parent tables.
-
-  This does very little validation.  It's just enough to walk all of the parent
-  files and merges them with the current config.  Overall validation is left to
-  the caller.
-
-  Args:
-    filename: Filename to load into object.
-
-  Returns:
-    Object containing disk layout configuration
-  """
-  if not os.path.exists(filename):
-    raise ConfigNotFound('Partition config %s was not found!' % filename)
-  config = LoadJSONWithComments(filename)
-
-  # Let's first apply our new configs onto base.
-  common_layout = config['layouts'].setdefault(COMMON_LAYOUT, [])
-  for layout_name, layout in config['layouts'].items():
-    # Don't apply on yourself.
-    if layout_name == COMMON_LAYOUT or layout_name == '_comment':
-      continue
-
-    # Need to copy a list of dicts so make a deep copy.
-    working_layout = copy.deepcopy(common_layout)
-    _ApplyLayoutOverrides(working_layout, layout)
-    config['layouts'][layout_name] = working_layout
-
-  dirname = os.path.dirname(filename)
-  # Now let's inherit the values from all our parents.
-  for parent in config.get('parent', '').split():
-    parent_filename = os.path.join(dirname, parent)
-    if not os.path.exists(parent_filename):
-      # Try loading the parent file from the cgpt.py directory (global config).
-      parent_filename = os.path.join(os.path.join(os.path.dirname(__file__),
-                                                  parent))
-    parent_config = _LoadStackedPartitionConfig(parent_filename)
-
-    # First if the parent is missing any fields the new config has, fill them
-    # in.
-    for key in config.keys():
-      if key == 'parent':
-        continue
-      elif key == 'metadata':
-        # We handle this especially to allow for inner metadata fields to be
-        # added / modified.
-        parent_config.setdefault(key, {})
-        parent_config[key].update(config[key])
-      else:
-        parent_config.setdefault(key, config[key])
-
-    # The overrides work by taking the parent_config, apply the new config
-    # layout info, and return the resulting config which is stored in the parent
-    # config.
-
-    # So there's an issue where an inheriting layout file may contain new
-    # layouts not previously defined in the parent layout. Since we are
-    # building these layout files based on the parent configs and overriding
-    # new values, we first add the new layouts not previously defined in the
-    # parent config using a copy of the base layout from that parent config.
-    parent_layouts = set(parent_config['layouts'])
-    config_layouts = set(config['layouts'])
-    new_layouts = config_layouts - parent_layouts
-
-    # Actually add the copy. Use a copy such that each is unique.
-    parent_cmn_layout = parent_config['layouts'].setdefault(COMMON_LAYOUT, [])
-    for layout_name in new_layouts:
-      parent_config['layouts'][layout_name] = copy.deepcopy(parent_cmn_layout)
-
-    # Iterate through each layout in the parent config and apply the new layout.
-    common_layout = config['layouts'].setdefault(COMMON_LAYOUT, [])
-    for layout_name, parent_layout in parent_config['layouts'].items():
-      if layout_name == '_comment':
-        continue
-
-      layout_override = config['layouts'].setdefault(layout_name, [])
-      if layout_name != COMMON_LAYOUT:
-        _ApplyLayoutOverrides(parent_layout, common_layout)
-
-      _ApplyLayoutOverrides(parent_layout, layout_override)
-
-    config = parent_config
-
-  config.pop('parent', None)
-  return config
-
-
-def LoadPartitionConfig(filename):
-  """Loads a partition tables configuration file into a Python object.
-
-  Args:
-    filename: Filename to load into object
-
-  Returns:
-    Object containing disk layout configuration
-  """
-
-  valid_keys = set(('_comment', 'metadata', 'layouts', 'parent'))
-  valid_layout_keys = set((
-      '_comment', 'num', 'fs_blocks', 'fs_block_size', 'fs_align', 'bytes',
-      'uuid', 'label', 'format', 'fs_format', 'type', 'features',
-      'size', 'fs_size', 'fs_options', 'erase_block_size', 'hybrid_mbr',
-      'reserved_erase_blocks', 'max_bad_erase_blocks', 'external_gpt',
-      'page_size', 'size_min', 'fs_size_min'))
-  valid_features = set(('expand', 'last_partition'))
-
-  config = _LoadStackedPartitionConfig(filename)
-  try:
-    metadata = config['metadata']
-    metadata['fs_block_size'] = ParseHumanNumber(metadata['fs_block_size'])
-    if metadata.get('fs_align') is None:
-      metadata['fs_align'] = metadata['fs_block_size']
-    else:
-      metadata['fs_align'] = ParseHumanNumber(metadata['fs_align'])
-
-    if (metadata['fs_align'] < metadata['fs_block_size']) or \
-       (metadata['fs_align'] % metadata['fs_block_size']):
-      raise InvalidLayout('fs_align must be a multiple of fs_block_size')
-
-    unknown_keys = set(config.keys()) - valid_keys
-    if unknown_keys:
-      raise InvalidLayout('Unknown items: %r' % unknown_keys)
-
-    if len(config['layouts']) <= 0:
-      raise InvalidLayout('Missing "layouts" entries')
-
-    if not BASE_LAYOUT in config['layouts'].keys():
-      raise InvalidLayout('Missing "base" config in "layouts"')
-
-    for layout_name, layout in config['layouts'].items():
-      if layout_name == '_comment':
-        continue
-
-      for part in layout:
-        unknown_keys = set(part.keys()) - valid_layout_keys
-        if unknown_keys:
-          raise InvalidLayout('Unknown items in layout %s: %r' %
-                              (layout_name, unknown_keys))
-
-        if part.get('num') == 'metadata' and 'type' not in part:
-          part['type'] = 'blank'
-
-        if part['type'] != 'blank':
-          for s in ('num', 'label'):
-            if not s in part:
-              raise InvalidLayout('Layout "%s" missing "%s"' % (layout_name, s))
-
-        if 'size' in part:
-          part['bytes'] = ParseHumanNumber(part['size'])
-          if 'size_min' in part:
-            size_min = ParseHumanNumber(part['size_min'])
-            if part['bytes'] < size_min:
-              part['bytes'] = size_min
-        elif part.get('num') != 'metadata':
-          part['bytes'] = 1
-
-        if 'fs_size' in part:
-          part['fs_bytes'] = ParseHumanNumber(part['fs_size'])
-          if 'fs_size_min' in part:
-            fs_size_min = ParseHumanNumber(part['fs_size_min'])
-            if part['fs_bytes'] < fs_size_min:
-              part['fs_bytes'] = fs_size_min
-          if part['fs_bytes'] <= 0:
-            raise InvalidSize(
-                'File system size "%s" must be positive' %
-                part['fs_size'])
-          if part['fs_bytes'] > part['bytes']:
-            raise InvalidSize(
-                'Filesystem may not be larger than partition: %s %s: %d > %d' %
-                (layout_name, part['label'], part['fs_bytes'], part['bytes']))
-          if part['fs_bytes'] % metadata['fs_align'] != 0:
-            raise InvalidSize(
-                'File system size: "%s" (%s bytes) is not an even multiple of '
-                'fs_align: %s' %
-                (part['fs_size'], part['fs_bytes'], metadata['fs_align']))
-          if part.get('format') == 'ubi':
-            part_meta = GetMetadataPartition(layout)
-            page_size = ParseHumanNumber(part_meta['page_size'])
-            eb_size = ParseHumanNumber(part_meta['erase_block_size'])
-            ubi_eb_size = eb_size - 2 * page_size
-            if (part['fs_bytes'] % ubi_eb_size) != 0:
-              # Trim fs_bytes to multiple of UBI eraseblock size.
-              fs_bytes = part['fs_bytes'] - (part['fs_bytes'] % ubi_eb_size)
-              raise InvalidSize(
-                  'File system size: "%s" (%d bytes) is not a multiple of UBI '
-                  'erase block size (%d). Please set "fs_size" to "%s" in the '
-                  '"common" layout instead.' %
-                  (part['fs_size'], part['fs_bytes'], ubi_eb_size,
-                   ProduceHumanNumber(fs_bytes)))
-
-        if 'fs_blocks' in part:
-          max_fs_blocks = part['bytes'] // metadata['fs_block_size']
-          part['fs_blocks'] = ParseRelativeNumber(max_fs_blocks,
-                                                  part['fs_blocks'])
-          part['fs_bytes'] = part['fs_blocks'] * metadata['fs_block_size']
-          if part['fs_bytes'] % metadata['fs_align'] != 0:
-            raise InvalidSize(
-                'File system size: "%s" (%s bytes) is not an even multiple of '
-                'fs_align: %s' %
-                (part['fs_blocks'], part['fs_bytes'], metadata['fs_align']))
-
-          if part['fs_bytes'] > part['bytes']:
-            raise InvalidLayout(
-                'Filesystem may not be larger than partition: %s %s: %d > %d' %
-                (layout_name, part['label'], part['fs_bytes'], part['bytes']))
-        if 'erase_block_size' in part:
-          part['erase_block_size'] = ParseHumanNumber(part['erase_block_size'])
-        if 'page_size' in part:
-          part['page_size'] = ParseHumanNumber(part['page_size'])
-
-        part.setdefault('features', [])
-        unknown_features = set(part['features']) - valid_features
-        if unknown_features:
-          raise InvalidLayout('%s: Unknown features: %s' %
-                              (part['label'], unknown_features))
-  except KeyError as e:
-    raise InvalidLayout('Layout is missing required entries: %s' % e)
-
-  return config
-
-
-def _GetPrimaryEntryArrayPaddingBytes(config):
-  """Return the start LBA of the primary partition entry array.
-
-  Normally this comes after the primary GPT header but can be adjusted by
-  setting the "primary_entry_array_padding_bytes" key under "metadata" in
-  the config.
-
-  Args:
-    config: The config dictionary.
-
-  Returns:
-    The position of the primary partition entry array.
-  """
-
-  return  config['metadata'].get('primary_entry_array_padding_bytes', 0)
-
-
-def _HasBadEraseBlocks(partitions):
-  return 'max_bad_erase_blocks' in GetMetadataPartition(partitions)
-
-
-def _HasExternalGpt(partitions):
-  return GetMetadataPartition(partitions).get('external_gpt', False)
-
-
-def _GetPartitionStartByteOffset(config, partitions):
-  """Return the first usable location (LBA) for partitions.
-
-  This value is the byte offset after the PMBR, the primary GPT header, and
-  partition entry array.
-
-  We round it up to 32K bytes to maintain the same layout as before in the
-  normal (no padding between the primary GPT header and its partition entry
-  array) case.
-
-  Args:
-    config: The config dictionary.
-    partitions: List of partitions to process
-
-  Returns:
-    A suitable byte offset for partitions.
-  """
-
-  if _HasExternalGpt(partitions):
-    # If the GPT is external, then the offset of the partitions' actual data
-    # will be 0, and we don't need to make space at the beginning for the GPT.
-    return 0
-  else:
-    return START_SECTOR + _GetPrimaryEntryArrayPaddingBytes(config)
-
-
-def GetTableTotals(config, partitions):
-  """Calculates total sizes/counts for a partition table.
-
-  Args:
-    config: The config dictionary.
-    partitions: List of partitions to process
-
-  Returns:
-    Dict containing totals data
-  """
-
-  fs_block_align_losses = 0
-  start_sector = _GetPartitionStartByteOffset(config, partitions)
-  ret = {
-      'expand_count': 0,
-      'expand_min': 0,
-      'last_partition_count': 0,
-      'byte_count': start_sector,
-  }
-
-  # Total up the size of all non-expanding partitions to get the minimum
-  # required disk size.
-  for partition in partitions:
-    if partition.get('num') == 'metadata':
-      continue
-
-    if partition.get('type') in ('data', 'rootfs') and partition['bytes'] > 1:
-      fs_block_align_losses += config['metadata']['fs_align']
-    else:
-      fs_block_align_losses += config['metadata']['fs_block_size']
-    if 'expand' in partition['features']:
-      ret['expand_count'] += 1
-      ret['expand_min'] += partition['bytes']
-    else:
-      ret['byte_count'] += partition['bytes']
-    if 'last_partition' in partition['features']:
-      ret['last_partition_count'] += 1
-
-  # Account for the secondary GPT header and table.
-  ret['byte_count'] += SECONDARY_GPT_BYTES
-
-  # At present, only one expanding partition is permitted.
-  # Whilst it'd be possible to have two, we don't need this yet
-  # and it complicates things, so it's been left out for now.
-  if ret['expand_count'] > 1:
-    raise InvalidLayout('1 expand partition allowed, %d requested'
-                        % ret['expand_count'])
-
-  # Only one partition can be last on the disk.
-  if ret['last_partition_count'] > 1:
-    raise InvalidLayout('Only one last partition allowed, %d requested'
-                        % ret['last_partition_count'])
-
-  # We lose some extra bytes from the alignment which are now not considered in
-  # min_disk_size because partitions are aligned on the fly. Adding
-  # fs_block_align_losses corrects for the loss.
-  ret['min_disk_size'] = ret['byte_count'] + ret['expand_min'] + \
-    fs_block_align_losses
-
-  return ret
-
-
-def GetPartitionTable(options, config, image_type):
-  """Generates requested image_type layout from a layout configuration.
-
-  This loads the base table and then overlays the requested layout over
-  the base layout.
-
-  Args:
-    options: Flags passed to the script
-    config: Partition configuration file object
-    image_type: Type of image eg base/test/dev/factory_install
-
-  Returns:
-    Object representing a selected partition table
-  """
-
-  # We make a deep copy so that changes to the dictionaries in this list do not
-  # persist across calls.
-  try:
-    partitions = copy.deepcopy(config['layouts'][image_type])
-  except KeyError:
-    raise InvalidLayout('Unknown layout: %s' % image_type)
-  metadata = config['metadata']
-
-  # Convert fs_options to a string.
-  for partition in partitions:
-    fs_options = partition.get('fs_options', '')
-    if isinstance(fs_options, dict):
-      fs_format = partition.get('fs_format')
-      fs_options = fs_options.get(fs_format, '')
-    elif not isinstance(fs_options, str):
-      raise InvalidLayout('Partition number %s: fs_format must be a string or '
-                          'dict, not %s' % (partition.get('num'),
-                                            type(fs_options)))
-    if '"' in fs_options or "'" in fs_options:
-      raise InvalidLayout('Partition number %s: fs_format cannot have quotes' %
-                          partition.get('num'))
-    partition['fs_options'] = fs_options
-
-  for adjustment_str in options.adjust_part.split():
-    adjustment = adjustment_str.split(':')
-    if len(adjustment) < 2:
-      raise InvalidAdjustment('Adjustment "%s" is incomplete' % adjustment_str)
-
-    label = adjustment[0]
-    operator = adjustment[1][0]
-    operand = adjustment[1][1:]
-    ApplyPartitionAdjustment(partitions, metadata, label, operator, operand)
-
-  return partitions
-
-
-def ApplyPartitionAdjustment(partitions, metadata, label, operator, operand):
-  """Applies an adjustment to a partition specified by label
-
-  Args:
-    partitions: Partition table to modify
-    metadata: Partition table metadata
-    label: The label of the partition to adjust
-    operator: Type of adjustment (+/-/=)
-    operand: How much to adjust by
-  """
-
-  partition = GetPartitionByLabel(partitions, label)
-
-  operand_bytes = ParseHumanNumber(operand)
-
-  if operator == '+':
-    partition['bytes'] += operand_bytes
-  elif operator == '-':
-    partition['bytes'] -= operand_bytes
-  elif operator == '=':
-    partition['bytes'] = operand_bytes
-  else:
-    raise ValueError('unknown operator %s' % operator)
-
-  if partition['type'] == 'rootfs':
-    # If we're adjusting a rootFS partition, we assume the full partition size
-    # specified is being used for the filesytem, minus the space reserved for
-    # the hashpad.
-    partition['fs_bytes'] = partition['bytes']
-    partition['fs_blocks'] = partition['fs_bytes'] // metadata['fs_block_size']
-    partition['bytes'] = int(partition['bytes'] * 1.15)
-
-def GetPartitionTableFromConfig(options, layout_filename, image_type):
-  """Loads a partition table and returns a given partition table type
-
-  Args:
-    options: Flags passed to the script
-    layout_filename: The filename to load tables from
-    image_type: The type of partition table to return
-  """
-
-  config = LoadPartitionConfig(layout_filename)
-  partitions = GetPartitionTable(options, config, image_type)
-
-  return partitions
-
-
-def GetScriptShell():
-  """Loads and returns the skeleton script for our output script.
-
-  Returns:
-    A string containing the skeleton script
-  """
-
-  script_shell_path = os.path.join(os.path.dirname(__file__), 'cgpt_shell.sh')
-  with open(script_shell_path, 'r') as f:
-    script_shell = ''.join(f.readlines())
-
-  # Before we return, insert the path to this tool so somebody reading the
-  # script later can tell where it was generated.
-  script_shell = script_shell.replace('@SCRIPT_GENERATOR@', script_shell_path)
-
-  return script_shell
-
-
-def GetFullPartitionSize(partition, metadata):
-  """Get the size of the partition including metadata/reserved space in bytes.
-
-  The partition only has to be bigger for raw NAND devices. Formula:
-  - Add UBI per-block metadata (2 pages) if partition is UBI
-  - Round up to erase block size
-  - Add UBI per-partition metadata (4 blocks) if partition is UBI
-  - Add reserved erase blocks
-  """
-
-  erase_block_size = metadata.get('erase_block_size', 0)
-  size = partition['bytes']
-
-  if erase_block_size == 0:
-    return size
-
-  # See "Flash space overhead" in
-  # http://www.linux-mtd.infradead.org/doc/ubi.html
-  # for overhead calculations.
-  is_ubi = partition.get('format') == 'ubi'
-  reserved_erase_blocks = partition.get('reserved_erase_blocks', 0)
-  page_size = metadata.get('page_size', 0)
-
-  if is_ubi:
-    ubi_block_size = erase_block_size - 2 * page_size
-    erase_blocks = (size + ubi_block_size - 1) // ubi_block_size
-    size += erase_blocks * 2 * page_size
-
-  erase_blocks = (size + erase_block_size - 1) // erase_block_size
-  size = erase_blocks * erase_block_size
-
-  if is_ubi:
-    size += erase_block_size * 4
-
-  size += reserved_erase_blocks * erase_block_size
-  return size
-
-
-def WriteLayoutFunction(options, slines, func, image_type, config):
-  """Writes a shell script function to write out a given partition table.
-
-  Args:
-    options: Flags passed to the script
-    slines: lines to write to the script
-    func: function of the layout:
-       for removable storage device: 'partition',
-       for the fixed storage device: 'base'
-    image_type: Type of image eg base/test/dev/factory_install
-    config: Partition configuration file object
-  """
-
-  gpt_add = '${GPT} add -i %d -b $(( curr / block_size )) -s ${blocks} -t %s \
-    -l "%s" ${target}'
-  partitions = GetPartitionTable(options, config, image_type)
-  metadata = GetMetadataPartition(partitions)
-  partition_totals = GetTableTotals(config, partitions)
-  fs_align_snippet = [
-      'if [ $(( curr %% %d )) -gt 0 ]; then' % config['metadata']['fs_align'],
-      '  : $(( curr += %d - curr %% %d ))' %
-      ((config['metadata']['fs_align'],) * 2),
-      'fi',
-  ]
-
-  lines = [
-      'write_%s_table() {' % func,
-  ]
-
-  if _HasExternalGpt(partitions):
-    # Read GPT from device to get size, then wipe it out and operate
-    # on GPT in tmpfs. We don't rely on cgpt's ability to deal
-    # directly with the GPT on SPI NOR flash because rewriting the
-    # table so many times would take a long time (>30min).
-    # Also, wiping out the previous GPT with create_image won't work
-    # for NAND and there's no equivalent via cgpt.
-    lines += [
-        'gptfile=$(mktemp)',
-        'flashrom -r -iRW_GPT:${gptfile}',
-        'gptsize=$(stat ${gptfile} --format %s)',
-        'dd if=/dev/zero of=${gptfile} bs=${gptsize} count=1',
-        'target="-D %d ${gptfile}"' % metadata['bytes'],
-    ]
-  else:
-    lines += [
-        'local target="$1"',
-        'create_image "${target}" %d' % partition_totals['min_disk_size'],
-    ]
-
-  lines += [
-      'local blocks',
-      'block_size=$(blocksize "${target}")',
-      'numsecs=$(numsectors "${target}")',
-  ]
-
-  # ${target} is referenced unquoted because it may expand into multiple
-  # arguments in the case of NAND
-  lines += [
-      'local curr=%d' % _GetPartitionStartByteOffset(config, partitions),
-      '# Make sure Padding is block_size aligned.',
-      'if [ $(( %d & (block_size - 1) )) -gt 0 ]; then' %
-      _GetPrimaryEntryArrayPaddingBytes(config),
-      '  echo "Primary Entry Array padding is not block aligned." >&2',
-      '  exit 1',
-      'fi',
-      '# Create the GPT headers and tables. Pad the primary ones.',
-      '${GPT} create -p $(( %d / block_size )) ${target}' %
-      _GetPrimaryEntryArrayPaddingBytes(config),
-  ]
-
-  metadata = GetMetadataPartition(partitions)
-  stateful = None
-  last_part = None
-  # Set up the expanding partition size and write out all the cgpt add
-  # commands.
-  for partition in partitions:
-    if partition.get('num') == 'metadata':
-      continue
-
-    partition['var'] = GetFullPartitionSize(partition, metadata)
-    if 'expand' in partition['features']:
-      stateful = partition
-      continue
-
-    # Save the last partition to place at the end of the disk..
-    if 'last_partition' in partition['features']:
-      last_part = partition
-      continue
-
-    if (partition.get('type') in ['data', 'rootfs'] and partition['bytes'] > 1):
-      lines += fs_align_snippet
-
-    if partition['var'] != 0 and partition.get('num') != 'metadata':
-      lines += [
-          'blocks=$(( %s / block_size ))' % partition['var'],
-          'if [ $(( %s %% block_size )) -gt 0 ]; then' % partition['var'],
-          '   : $(( blocks += 1 ))',
-          'fi',
-      ]
-
-    if partition['type'] != 'blank':
-      lines += [
-          gpt_add % (partition['num'], partition['type'], partition['label']),
-      ]
-
-    # Increment the curr counter ready for the next partition.
-    if partition['var'] != 0 and partition.get('num') != 'metadata':
-      lines += [
-          ': $(( curr += blocks * block_size ))',
-      ]
-
-  if stateful is not None:
-    lines += fs_align_snippet + [
-        'blocks=$(( numsecs - (curr + %d) / block_size ))' %
-        SECONDARY_GPT_BYTES,
-    ]
-    if last_part is not None:
-      lines += [
-          'reserved_blocks=$(( (%s + block_size - 1) / block_size ))'
-          % last_part['var'],
-          ': $(( blocks = blocks - reserved_blocks ))',
-      ]
-    lines += [
-        gpt_add % (stateful['num'], stateful['type'], stateful['label']),
-        ': $(( curr += blocks * block_size ))',
-    ]
-
-  if last_part is not None:
-    lines += [
-        'reserved_blocks=$(( (%s + block_size - 1) / block_size ))'
-        % last_part['var'],
-        'blocks=$((reserved_blocks))',
-        gpt_add % (last_part['num'], last_part['type'], last_part['label']),
-    ]
-
-  # Set default priorities and retry counter on kernel partitions.
-  tries = 15
-  prio = 15
-  # The order of partition numbers in this loop matters.
-  # Make sure partition #2 is the first one, since it will be marked as
-  # default bootable partition.
-  for partition in GetPartitionsByType(partitions, 'kernel'):
-    lines += [
-        '${GPT} add -i %s -S 0 -T %i -P %i ${target}' %
-        (partition['num'], tries, prio)
-    ]
-    prio = 0
-    # When not writing 'base' function, make sure the other partitions are
-    # marked as non-bootable (retry count == 0), since the USB layout
-    # doesn't have any valid data in slots B & C. But with base function,
-    # called by chromeos-install script, the KERNEL A partition is replicated
-    # into both slots A & B, so we should leave both bootable for error
-    # recovery in this case.
-    if func != 'base':
-      tries = 0
-
-  efi_partitions = GetPartitionsByType(partitions, 'efi')
-  if efi_partitions:
-    lines += [
-        '${GPT} boot -p -b $2 -i %d ${target}' % efi_partitions[0]['num'],
-        '${GPT} add -i %s -B 1 ${target}' % efi_partitions[0]['num'],
-    ]
-  else:
-    # Provide a PMBR all the time for boot loaders (like u-boot)
-    # that expect one to always be there.
-    lines += [
-        '${GPT} boot -p -b $2 ${target}',
-    ]
-
-  if metadata.get('hybrid_mbr'):
-    lines += ['install_hybrid_mbr ${target}']
-  lines += ['${GPT} show ${target}']
-
-  if _HasExternalGpt(partitions):
-    lines += ['flashrom -w -iRW_GPT:${gptfile} --noverify-all']
-
-  slines += '%s\n}\n\n' % '\n  '.join(lines)
-
-
-def WritePartitionSizesFunction(options, slines, func, image_type, config,
-                                data):
-  """Writes out the partition size variable that can be extracted by a caller.
-
-  Args:
-    options: Flags passed to the script
-    slines: lines to write to the script file
-    func: function of the layout:
-       for removable storage device: 'partition',
-       for the fixed storage device: 'base'
-    image_type: Type of image eg base/test/dev/factory_install
-    config: Partition configuration file object
-    data: data dict we will write to a json file
-  """
-  func_name = 'load_%s_vars' % func
-  lines = [
-      '%s() {' % func_name,
-      'DEFAULT_ROOTDEV="%s"' % config['metadata'].get('rootdev_%s' % func, ''),
-  ]
-
-  data[func_name] = {}
-  data[func_name]['DEFAULT_ROOTDEV'] = (
-      '%s' % config['metadata'].get('rootdev_%s' % func, '')
-  )
-
-  partitions = GetPartitionTable(options, config, image_type)
-  for partition in partitions:
-    if partition.get('num') == 'metadata':
-      continue
-    for key in ('label', 'num'):
-      if key in partition:
-        shell_label = str(partition[key]).replace('-', '_').upper()
-        part_bytes = partition['bytes']
-        reserved_ebs = partition.get('reserved_erase_blocks', 0)
-        fs_bytes = partition.get('fs_bytes', part_bytes)
-        part_format = partition.get('format', '')
-        fs_format = partition.get('fs_format', '')
-        fs_options = partition.get('fs_options', '')
-        partition_num = partition.get('num', '')
-        args = [('PARTITION_SIZE_', part_bytes),
-                ('RESERVED_EBS_', reserved_ebs),
-                ('DATA_SIZE_', fs_bytes),
-                ('FORMAT_', part_format),
-                ('FS_FORMAT_', fs_format)]
-        sargs = [('FS_OPTIONS_', fs_options),
-                ('PARTITION_NUM_', partition_num)]
-        for arg, value in args:
-          label = arg + shell_label
-          lines += ['%s=%s' % (label, value),]
-          data[func_name][label] = '%s' % value
-        for arg, value in sargs:
-          label = arg + shell_label
-          lines += ['%s="%s"' % (label, value),]
-          data[func_name][label] = '"%s"' % value
-  slines += '%s\n}\n\n' % '\n  '.join(lines)
-
-
-def GetPartitionByNumber(partitions, num):
-  """Given a partition table and number returns the partition object.
-
-  Args:
-    partitions: List of partitions to search in
-    num: Number of partition to find
-
-  Returns:
-    An object for the selected partition
-  """
-  for partition in partitions:
-    if partition.get('num') == int(num):
-      return partition
-
-  raise PartitionNotFound('Partition %s not found' % num)
-
-
-def GetPartitionsByType(partitions, typename):
-  """Given a partition table and type returns the partitions of the type.
-
-  Partitions are sorted in num order.
-
-  Args:
-    partitions: List of partitions to search in
-    typename: The type of partitions to select
-
-  Returns:
-    A list of partitions of the type
-  """
-  out = []
-  for partition in partitions:
-    if partition.get('type') == typename:
-      out.append(partition)
-  return sorted(out, key=lambda partition: partition.get('num'))
-
-
-def GetMetadataPartition(partitions):
-  """Given a partition table returns the metadata partition object.
-
-  Args:
-    partitions: List of partitions to search in
-
-  Returns:
-    An object for the metadata partition
-  """
-  for partition in partitions:
-    if partition.get('num') == 'metadata':
-      return partition
-
-  return {}
-
-
-def GetPartitionByLabel(partitions, label):
-  """Given a partition table and label returns the partition object.
-
-  Args:
-    partitions: List of partitions to search in
-    label: Label of partition to find
-
-  Returns:
-    An object for the selected partition
-  """
-  for partition in partitions:
-    if 'label' not in partition:
-      continue
-    if partition['label'] == label:
-      return partition
-
-  raise PartitionNotFound('Partition "%s" not found' % label)
-
-
-def WritePartitionScript(options, image_type, layout_filename, sfilename,
-                         vfilename):
-  """Writes a shell script with functions for the base and requested layouts.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    sfilename: Filename to write the finished script to
-    vfilename: Filename to write the partition variables json data to
-  """
-  config = LoadPartitionConfig(layout_filename)
-
-  with open(sfilename, 'w') as f, open(vfilename, 'w') as jFile:
-    script_shell = GetScriptShell()
-    f.write(script_shell)
-
-    data = {}
-    slines = []
-    for func, layout in (('base', BASE_LAYOUT), ('partition', image_type)):
-      WriteLayoutFunction(options, slines, func, layout, config)
-      WritePartitionSizesFunction(options, slines, func, layout, config, data)
-
-    f.write(''.join(slines))
-    json.dump(data, jFile)
-
-    # TODO: Backwards compat.  Should be killed off once we update
-    #       cros_generate_update_payload to use the new code.
-    partitions = GetPartitionTable(options, config, BASE_LAYOUT)
-    partition = GetPartitionByLabel(partitions, 'ROOT-A')
-    f.write('ROOTFS_PARTITION_SIZE=%s\n' % (partition['bytes'],))
-
-
-def GetBlockSize(_options, layout_filename):
-  """Returns the partition table block size.
-
-  Args:
-    options: Flags passed to the script
-    layout_filename: Path to partition configuration file
-
-  Returns:
-    Block size of all partitions in the layout
-  """
-
-  config = LoadPartitionConfig(layout_filename)
-  return config['metadata']['block_size']
-
-
-def GetFilesystemBlockSize(_options, layout_filename):
-  """Returns the filesystem block size.
-
-  This is used for all partitions in the table that have filesystems.
-
-  Args:
-    options: Flags passed to the script
-    layout_filename: Path to partition configuration file
-
-  Returns:
-    Block size of all filesystems in the layout
-  """
-
-  config = LoadPartitionConfig(layout_filename)
-  return config['metadata']['fs_block_size']
-
-
-def GetImageTypes(_options, layout_filename):
-  """Returns a list of all the image types in the layout.
-
-  Args:
-    options: Flags passed to the script
-    layout_filename: Path to partition configuration file
-
-  Returns:
-    List of all image types
-  """
-
-  config = LoadPartitionConfig(layout_filename)
-  return ' '.join(config['layouts'].keys())
-
-
-def GetType(options, image_type, layout_filename, num):
-  """Returns the type of a given partition for a given layout.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    num: Number of the partition you want to read from
-
-  Returns:
-    Type of the specified partition.
-  """
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByNumber(partitions, num)
-  return partition.get('type')
-
-
-def GetPartitions(options, image_type, layout_filename):
-  """Returns the partition numbers for the image_type.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-
-  Returns:
-    A space delimited string of partition numbers.
-  """
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  return ' '.join(str(p['num']) for p in partitions
-                  if 'num' in p and p['num'] != 'metadata')
-
-
-def GetUUID(options, image_type, layout_filename, num):
-  """Returns the filesystem UUID of a given partition for a given layout type.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    num: Number of the partition you want to read from
-
-  Returns:
-    UUID of specified partition. Defaults to random if not set.
-  """
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByNumber(partitions, num)
-  return partition.get('uuid', 'random')
-
-
-def GetPartitionSize(options, image_type, layout_filename, num):
-  """Returns the partition size of a given partition for a given layout type.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    num: Number of the partition you want to read from
-
-  Returns:
-    Size of selected partition in bytes
-  """
-
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByNumber(partitions, num)
-
-  return partition['bytes']
-
-
-def GetFilesystemFormat(options, image_type, layout_filename, num):
-  """Returns the filesystem format of a given partition for a given layout type.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    num: Number of the partition you want to read from
-
-  Returns:
-    Format of the selected partition's filesystem
-  """
-
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByNumber(partitions, num)
-
-  return partition.get('fs_format')
-
-
-def GetFormat(options, image_type, layout_filename, num):
-  """Returns the format of a given partition for a given layout type.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    num: Number of the partition you want to read from
-
-  Returns:
-    Format of the selected partition's filesystem
-  """
-
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByNumber(partitions, num)
-
-  return partition.get('format')
-
-
-def GetFilesystemOptions(options, image_type, layout_filename, num):
-  """Returns the filesystem options of a given partition and layout type.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    num: Number of the partition you want to read from
-
-  Returns:
-    The selected partition's filesystem options
-  """
-
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByNumber(partitions, num)
-
-  return partition.get('fs_options')
-
-
-def GetFilesystemSize(options, image_type, layout_filename, num):
-  """Returns the filesystem size of a given partition for a given layout type.
-
-  If no filesystem size is specified, returns the partition size.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    num: Number of the partition you want to read from
-
-  Returns:
-    Size of selected partition filesystem in bytes
-  """
-
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByNumber(partitions, num)
-
-  if 'fs_bytes' in partition:
-    return partition['fs_bytes']
-  else:
-    return partition['bytes']
-
-
-def GetLabel(options, image_type, layout_filename, num):
-  """Returns the label for a given partition.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    num: Number of the partition you want to read from
-
-  Returns:
-    Label of selected partition, or 'UNTITLED' if none specified
-  """
-
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByNumber(partitions, num)
-
-  if 'label' in partition:
-    return partition['label']
-  else:
-    return 'UNTITLED'
-
-
-def GetNumber(options, image_type, layout_filename, label):
-  """Returns the partition number of a given label.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    label: Number of the partition you want to read from
-
-  Returns:
-    The number of the partition corresponding to the label.
-  """
-
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByLabel(partitions, label)
-  return partition['num']
-
-
-def GetReservedEraseBlocks(options, image_type, layout_filename, num):
-  """Returns the number of erase blocks reserved in the partition.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-    num: Number of the partition you want to read from
-
-  Returns:
-    Number of reserved erase blocks
-  """
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  partition = GetPartitionByNumber(partitions, num)
-  if 'reserved_erase_blocks' in partition:
-    return partition['reserved_erase_blocks']
-  else:
-    return 0
-
-
-def _DumpLayout(options, config, image_type):
-  """Prints out a human readable disk layout in on-disk order.
-
-  Args:
-    options: Flags passed to the script.
-    config: Partition configuration file object.
-    image_type: Type of image e.g. base/test/dev/factory_install.
-  """
-  try:
-    partitions = GetPartitionTable(options, config, image_type)
-  except InvalidLayout as e:
-    print(str(e), file=sys.stderr)
-    sys.exit(1)
-
-  label_len = max(len(x['label']) for x in partitions if 'label' in x)
-  type_len = max(len(x['type']) for x in partitions if 'type' in x)
-
-  msg = 'num:%4s label:%-*s type:%-*s size:%-10s fs_size:%-10s features:%s'
-
-  print('\n%s Layout Data' % image_type.upper())
-  for partition in partitions:
-    if partition.get('num') == 'metadata':
-      continue
-
-    size = ProduceHumanNumber(partition['bytes'])
-    if 'fs_bytes' in partition:
-      fs_size = ProduceHumanNumber(partition['fs_bytes'])
-    else:
-      fs_size = 'auto'
-
-    print(msg % (
-        partition.get('num', 'auto'),
-        label_len,
-        partition.get('label', ''),
-        type_len,
-        partition.get('type', ''),
-        size,
-        fs_size,
-        partition.get('features', []),
-    ))
-
-
-def DoDebugOutput(options, layout_filename, image_type):
-  """Prints out a human readable disk layout in on-disk order.
-
-  Args:
-    options: Flags passed to the script
-    layout_filename: Path to partition configuration file
-    image_type: Type of image e.g. ALL/LIST/base/test/dev/factory_install
-  """
-  if image_type == 'LIST':
-    print(GetImageTypes(options, layout_filename))
-    return
-
-  config = LoadPartitionConfig(layout_filename)
-
-  # Print out non-layout options first.
-  print('Config Data')
-  metadata_msg = 'field:%-14s value:%s'
-  for key in config.keys():
-    if key not in ('layouts', '_comment'):
-      print(metadata_msg % (key, config[key]))
-
-  if image_type == 'ALL':
-    for layout in config['layouts']:
-      _DumpLayout(options, config, layout)
-  else:
-    _DumpLayout(options, config, image_type)
-
-
-def CheckRootfsPartitionsMatch(partitions):
-  """Checks that rootfs partitions are substitutable with each other.
-
-  This function asserts that either all rootfs partitions are in the same format
-  or none have a format, and it asserts that have the same number of reserved
-  erase blocks.
-  """
-  partition_format = None
-  reserved_erase_blocks = -1
-  for partition in partitions:
-    if partition.get('type') == 'rootfs':
-      new_format = partition.get('format', '')
-      new_reserved_erase_blocks = partition.get('reserved_erase_blocks', 0)
-
-      if partition_format is None:
-        partition_format = new_format
-        reserved_erase_blocks = new_reserved_erase_blocks
-
-      if new_format != partition_format:
-        raise MismatchedRootfsFormat(
-            'mismatched rootfs formats: "%s" and "%s"' %
-            (partition_format, new_format))
-
-      if reserved_erase_blocks != new_reserved_erase_blocks:
-        raise MismatchedRootfsBlocks(
-            'mismatched rootfs reserved erase block counts: %s and %s' %
-            (reserved_erase_blocks, new_reserved_erase_blocks))
-
-
-def Combinations(n, k):
-  """Calculate the binomial coefficient, i.e., "n choose k"
-
-  This calculates the number of ways that k items can be chosen from
-  a set of size n. For example, if there are n blocks and k of them
-  are bad, then this returns the number of ways that the bad blocks
-  can be distributed over the device.
-  See http://en.wikipedia.org/wiki/Binomial_coefficient
-
-  For convenience to the caller, this function allows impossible cases
-  as input and returns 0 for them.
-  """
-  if k < 0 or n < k:
-    return 0
-  return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
-
-
-def CheckReservedEraseBlocks(partitions):
-  """Checks that the reserved_erase_blocks in each partition is good.
-
-  This function checks that a reasonable value was given for the reserved
-  erase block count. In particular, it checks that there's a less than
-  1 in 100k probability that, if the manufacturer's maximum bad erase
-  block count is met, and assuming bad blocks are uniformly randomly
-  distributed, then more bad blocks will fall in this partition than are
-  reserved. Smaller partitions need a larger reserve percentage.
-
-  We take the number of reserved blocks as a parameter in disk_layout.json
-  rather than just calculating the value so that it can be tweaked
-  explicitly along with others in squeezing the image onto flash. But
-  we check it so that users have an easy method for determining what's
-  acceptable--just try out a new value and do ./build_image.
-  """
-  for partition in partitions:
-    if ('reserved_erase_blocks' in partition or
-        partition.get('format') in ('ubi', 'nand')):
-      if partition.get('bytes', 0) == 0:
-        continue
-      metadata = GetMetadataPartition(partitions)
-      if (not _HasBadEraseBlocks(partitions)
-          or 'reserved_erase_blocks' not in partition
-          or 'bytes' not in metadata
-          or 'erase_block_size' not in metadata
-          or 'page_size' not in metadata):
-        raise MissingEraseBlockField(
-            'unable to check if partition %s will have too many bad blocks due '
-            'to missing metadata field' % partition['label'])
-
-      reserved = partition['reserved_erase_blocks']
-      erase_block_size = metadata['erase_block_size']
-      device_erase_blocks = metadata['bytes'] // erase_block_size
-      device_bad_blocks = metadata['max_bad_erase_blocks']
-      distributions = Combinations(device_erase_blocks, device_bad_blocks)
-      partition_erase_blocks = partition['bytes'] // erase_block_size
-      # The idea is to calculate the number of ways that there could be reserved
-      # or more bad blocks inside the partition, assuming that there are
-      # device_bad_blocks in the device in total (the worst case). To get the
-      # probability, we divide this count by the total number of ways that the
-      # bad blocks can be distributed on the whole device. To find the first
-      # number, we sum over increasing values for the count of bad blocks within
-      # the partition the number of ways that those bad blocks can be inside the
-      # partition, multiplied by the number of ways that the remaining blocks
-      # can be distributed outside of the partition.
-      ways_for_failure = sum(
-          Combinations(partition_erase_blocks, partition_bad_blocks) *
-          Combinations(device_erase_blocks - partition_erase_blocks,
-                       device_bad_blocks - partition_bad_blocks)
-          for partition_bad_blocks
-          in range(reserved + 1, device_bad_blocks + 1))
-      probability = ways_for_failure / distributions
-      if probability > 0.00001:
-        raise ExcessFailureProbability('excessive probability %f of too many '
-                                       'bad blocks in partition %s'
-                                       % (probability, partition['label']))
-
-
-def CheckSimpleNandProperties(partitions):
-  """Checks that NAND partitions are erase-block-aligned and not expand"""
-  if not _HasBadEraseBlocks(partitions):
-    return
-  metadata = GetMetadataPartition(partitions)
-  for partition in partitions:
-    erase_block_size = metadata['erase_block_size']
-    if partition['bytes'] % erase_block_size != 0:
-      raise UnalignedPartition(
-          'partition size %s does not divide erase block size %s' %
-          (partition['bytes'], erase_block_size))
-    if 'expand' in partition['features']:
-      raise ExpandNandImpossible(
-          'expand partitions may not be used with raw NAND')
-
-
-def CheckTotalSize(partitions):
-  """Checks that the sum size of all partitions fits within the device"""
-  metadata = GetMetadataPartition(partitions)
-  if 'bytes' not in metadata:
-    return
-  capacity = metadata['bytes']
-  total = sum(GetFullPartitionSize(partition, metadata)
-              for partition in partitions if partition.get('num') != 'metadata')
-  if total > capacity:
-    raise ExcessPartitionSize('capacity = %d, total=%d' % (capacity, total))
-
-
-def Validate(options, image_type, layout_filename):
-  """Validates a layout file, used before reading sizes to check for errors.
-
-  Args:
-    options: Flags passed to the script
-    image_type: Type of image eg base/test/dev/factory_install
-    layout_filename: Path to partition configuration file
-  """
-  partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
-  CheckRootfsPartitionsMatch(partitions)
-  CheckTotalSize(partitions)
-  CheckSimpleNandProperties(partitions)
-  CheckReservedEraseBlocks(partitions)
-
-
-class ArgsAction(argparse.Action):  # pylint: disable=no-init
-  """Helper to add all arguments to an args array.
-
-  ArgumentParser does not let you specify the same dest for multiple args.
-  We take care of appending to the 'args' array ourselves here.
-  """
-
-  def __call__(self, parser, namespace, values, option_string=None):
-    args = getattr(namespace, 'args', [])
-    args.append(values)
-    setattr(namespace, 'args', args)
-
-
-class HelpAllAction(argparse.Action):
-  """Display all subcommands help in one go."""
-
-  def __init__(self, *args, **kwargs):
-    if 'nargs' in kwargs:
-      raise ValueError('nargs not allowed')
-    kwargs['nargs'] = 0
-    argparse.Action.__init__(self, *args, **kwargs)
-
-  def __call__(self, parser, namespace, values, option_string=None):
-    print('%s\nCommands:' % (parser.description,), end='')
-    subparser = getattr(namespace, 'help_all')
-    for key, subparser in namespace.help_all.choices.items():
-      # Should we include the desc of each arg too ?
-      print('\n  %s %s\n    %s' %
-            (key, subparser.get_default('help_all'), subparser.description))
-    sys.exit(0)
-
-
-def GetParser():
-  """Return a parser for the CLI."""
-  parser = argparse.ArgumentParser(
-      description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
-  parser.add_argument('--adjust_part', metavar='SPEC', default='',
-                      help='adjust partition sizes')
-
-  action_map = {
-      'write': WritePartitionScript,
-      'readblocksize': GetBlockSize,
-      'readfsblocksize': GetFilesystemBlockSize,
-      'readpartsize': GetPartitionSize,
-      'readformat': GetFormat,
-      'readfsformat': GetFilesystemFormat,
-      'readfssize': GetFilesystemSize,
-      'readimagetypes': GetImageTypes,
-      'readfsoptions': GetFilesystemOptions,
-      'readlabel': GetLabel,
-      'readnumber': GetNumber,
-      'readreservederaseblocks': GetReservedEraseBlocks,
-      'readtype': GetType,
-      'readpartitionnums': GetPartitions,
-      'readuuid': GetUUID,
-      'debug': DoDebugOutput,
-      'validate': Validate,
-  }
-
-  # Subparsers are required by default under Python 2.  Python 3 changed to
-  # not required, but didn't include a required option until 3.7.  Setting
-  # the required member works in all versions (and setting dest name).
-  subparsers = parser.add_subparsers(title='Commands', dest='command')
-  subparsers.required = True
-
-  for name, func in sorted(action_map.items()):
-    # Turn the func's docstring into something we can show the user.
-    desc, doc = func.__doc__.split('\n', 1)
-    # Extract the help for each argument.
-    args_help = {}
-    for line in doc.splitlines():
-      if ':' in line:
-        arg, text = line.split(':', 1)
-        args_help[arg.strip()] = text.strip()
-
-    argspec = inspect.getfullargspec(func)
-    # Skip the first argument as that'll be the options field.
-    args = argspec.args[1:]
-
-    subparser = subparsers.add_parser(name, description=desc, help=desc)
-    subparser.set_defaults(callback=func,
-                           help_all=' '.join('<%s>' % x for x in args))
-    for arg in args:
-      subparser.add_argument(arg, action=ArgsAction, help=args_help[arg])
-
-  parser.add_argument('--help-all', action=HelpAllAction, default=subparsers,
-                      help='show all commands and their help in one screen')
-
-  return parser
-
-
-def main(argv):
-  parser = GetParser()
-  opts = parser.parse_args(argv)
-
-  ret = opts.callback(opts, *opts.args)
-  if ret is not None:
-    print(ret)
-
-
-if __name__ == '__main__':
-  sys.exit(main(sys.argv[1:]))
diff --git a/build_library/cgpt_shell.sh b/build_library/cgpt_shell.sh
deleted file mode 100644
index 3d804e7..0000000
--- a/build_library/cgpt_shell.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# This script is automatically generated by @SCRIPT_GENERATOR@.
-# Do not edit!
-
-if ! type numsectors >/dev/null 2>&1; then
-  . "/usr/share/misc/chromeos-common.sh" || exit 1
-fi
-locate_gpt
-
-# Usage: create_image <device> <min_disk_size>
-# If <device> is a block device, wipes out the GPT
-# If it's not, it creates a new file of the requested size
-create_image() {
-  local dev="$1"
-  local min_disk_size="$2"
-
-  if [ -b "${dev}" ]; then
-    # Make sure block size is not greater than 8K. Otherwise the partition
-    # start calculation won't fit.
-    block_size=$(blocksize "${dev}")
-    if [ "${block_size}" -gt 8192 ]; then
-      echo "Destination blocksize too large. Only blocksizes of 8192 bytes and \
-        smaller are supported." >&2
-      exit 1
-    fi
-
-    # Zap any old partitions (otherwise gpt complains).
-    dd if=/dev/zero of="${dev}" conv=notrunc bs=512 count=64
-    dd if=/dev/zero of="${dev}" conv=notrunc bs=512 count=64 \
-      seek=$(( min_disk_size / 512 - 64 ))
-  else
-    if [ ! -e "${dev}" ]; then
-      # Align to 512 bytes
-      min_disk_size=$(( (min_disk_size + 511) & ~511 ))
-      truncate -s "${min_disk_size}" "${dev}"
-    fi
-  fi
-}
-
diff --git a/build_library/cgpt_unittest.py b/build_library/cgpt_unittest.py
deleted file mode 100755
index 6e8aea6..0000000
--- a/build_library/cgpt_unittest.py
+++ /dev/null
@@ -1,577 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Unit tests for cgpt."""
-
-# pylint: disable=W0212
-
-from __future__ import print_function
-
-import os
-import shutil
-import tempfile
-import unittest
-
-import cgpt
-
-
-class JSONLoadingTest(unittest.TestCase):
-  """Test stacked JSON loading functions."""
-
-  def __init__(self, *args, **kwargs):
-    unittest.TestCase.__init__(self, *args, **kwargs)
-    self.tempdir = None
-    self.maxDiff = 1000
-
-  def setUp(self):
-    self.tempdir = tempfile.mkdtemp(prefix='cgpt-test_')
-    self.layout_json = os.path.join(self.tempdir, 'test_layout.json')
-    self.parent_layout_json = os.path.join(self.tempdir,
-                                           'test_layout_parent.json')
-
-  def tearDown(self):
-    if self.tempdir is not None:
-      shutil.rmtree(self.tempdir)
-      self.tempdir = None
-
-  def testJSONComments(self):
-    """Test that we ignore comments in JSON in lines starting with #."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""# This line is a comment.
-{
-    # Here I have another comment starting with some whitespaces on the left.
-    "layouts": {
-        "common": []
-    }
-}
-""")
-    self.assertEqual(cgpt._LoadStackedPartitionConfig(self.layout_json),
-                     {'layouts': {'common': []}})
-
-  def testJSONCommentsLimitations(self):
-    """Test that we can't parse inline comments in JSON.
-
-    If we ever enable this, we need to change the README.disk_layout
-    documentation to mention it.
-    """
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-    "layouts": { # This is an inline comment, but is not supported.
-        "common": []}}""")
-    self.assertRaises(ValueError,
-                      cgpt._LoadStackedPartitionConfig, self.layout_json)
-
-  def testPartitionOrderPreserved(self):
-    """Test that the order of the partitions is the same as in the parent."""
-    with open(self.parent_layout_json, 'w') as f:
-      f.write("""{
-  "layouts": {
-    "common": [
-      {
-        "num": 3,
-        "name": "Part 3"
-      },
-      {
-        "num": 2,
-        "name": "Part 2"
-      },
-      {
-        "num": 1,
-        "name": "Part 1"
-      }
-    ]
-  }
-}""")
-    parent_layout = cgpt._LoadStackedPartitionConfig(self.parent_layout_json)
-
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "parent": "%s",
-  "layouts": {
-    "common": []
-  }
-}""" % self.parent_layout_json)
-    layout = cgpt._LoadStackedPartitionConfig(self.layout_json)
-    self.assertEqual(parent_layout, layout)
-
-    # Test also that even overriding one partition keeps all of them in order.
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "parent": "%s",
-  "layouts": {
-    "common": [
-      {
-        "num": 2,
-        "name": "Part 2"
-      }
-    ]
-  }
-}""" % self.parent_layout_json)
-    layout = cgpt._LoadStackedPartitionConfig(self.layout_json)
-    self.assertEqual(parent_layout, layout)
-
-  def testGetStartByteOffsetIsAccurate(self):
-    """Test that padding_bytes results in a valid start sector."""
-
-    test_params = (
-        # block_size, primary_entry_array_padding_bytes (in blocks)
-        (512, 2),
-        (512, 32768),
-        (1024, 32768),
-    )
-    for i in test_params:
-      with open(self.layout_json, 'w') as f:
-        f.write("""{
-  "metadata": {
-    "block_size": %d,
-    "fs_block_size": 4096,
-    "primary_entry_array_padding_bytes": %d
-  },
-  "layouts": {
-    "base": [
-      {
-        "type": "blank",
-        "size": "32 MiB"
-      }
-    ]
-  }
-}""" % (i[0], i[1] * i[0]))
-
-      config = cgpt.LoadPartitionConfig(self.layout_json)
-      class Options(object):
-        """Fake options"""
-        adjust_part = ''
-      partitions = cgpt.GetPartitionTable(Options(), config, 'base')
-      start_offset = cgpt._GetPartitionStartByteOffset(config, partitions)
-      self.assertEqual(start_offset, cgpt.START_SECTOR + i[1] * i[0])
-
-  def testGetTableTotalsSizeIsAccurate(self):
-    """Test that primary_entry_array_lba results in an accurate block count."""
-    test_params = (
-        # block_size, primary_entry_array_padding_bytes (in blocks),
-        # partition size (MiB)
-        (512, 2, 32),
-        (1024, 2, 32),
-        (512, 2, 64),
-        (512, 32768, 32),
-        (1024, 32768, 32),
-        (1024, 32768, 64),
-    )
-    for i in test_params:
-      with open(self.layout_json, 'w') as f:
-        f.write("""{
-  "metadata": {
-    "block_size": %d,
-    "fs_block_size": 4096,
-    "primary_entry_array_padding_bytes": %d
-  },
-  "layouts": {
-    "base": [
-      {
-        "type": "blank",
-        "size": "%d MiB"
-      }
-    ]
-  }
-}""" % (i[0], i[1] * i[0], i[2]))
-
-      config = cgpt.LoadPartitionConfig(self.layout_json)
-      class Options(object):
-        """Fake options"""
-        adjust_part = ''
-      partitions = cgpt.GetPartitionTable(Options(), config, 'base')
-      totals = cgpt.GetTableTotals(config, partitions)
-
-      # Calculate the expected image block size.
-      total_size = (
-          cgpt._GetPartitionStartByteOffset(config, partitions) +
-          sum([x['bytes'] for x in partitions]) +
-          cgpt.SECONDARY_GPT_BYTES)
-
-      self.assertEqual(totals['byte_count'], total_size)
-
-  def testGapPartitionsAreIncluded(self):
-    """Test that empty partitions (gaps) can be included in the child layout."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "layouts": {
-    # The common layout is empty but is applied to all the other layouts.
-    "common": [],
-    "base": [
-      {
-        "num": 2,
-        "name": "Part 2"
-      },
-      {
-        # Pad out, but not sure why.
-        "type": "blank",
-        "size": "64 MiB"
-      },
-      {
-        "num": 1,
-        "name": "Part 1"
-      }
-    ]
-  }
-}""")
-    self.assertEqual(
-        cgpt._LoadStackedPartitionConfig(self.layout_json),
-        {
-            'layouts': {
-                'common': [],
-                'base': [
-                    {'num': 2, 'name': 'Part 2'},
-                    {'type': 'blank', 'size': '64 MiB'},
-                    {'num': 1, 'name': 'Part 1'}
-                ]
-            }})
-
-  def testPartitionOrderShouldMatch(self):
-    """Test that the partition order in parent and child layouts must match."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "layouts": {
-    "common": [
-      {"num": 1},
-      {"num": 2}
-    ],
-    "base": [
-      {"num": 2},
-      {"num": 1}
-    ]
-  }
-}""")
-    self.assertRaises(cgpt.ConflictingPartitionOrder,
-                      cgpt._LoadStackedPartitionConfig, self.layout_json)
-
-  def testOnlySharedPartitionsOrderMatters(self):
-    """Test that only the order of the partition in both layouts matters."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "layouts": {
-    "common": [
-      {"num": 1},
-      {"num": 2},
-      {"num": 3}
-    ],
-    "base": [
-      {"num": 2},
-      {"num": 12},
-      {"num": 3},
-      {"num": 5}
-    ]
-  }
-}""")
-    self.assertEqual(
-        cgpt._LoadStackedPartitionConfig(self.layout_json),
-        {
-            'layouts': {
-                'common': [
-                    {'num': 1},
-                    {'num': 2},
-                    {'num': 3}
-                ],
-                'base': [
-                    {'num': 1},
-                    {'num': 2},
-                    {'num': 12},
-                    {'num': 3},
-                    {'num': 5}
-                ]
-            }})
-
-  def testFileSystemSizeMustBePositive(self):
-    """Test that zero or negative file system size will raise exception."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "metadata": {
-    "block_size": "512",
-    "fs_block_size": "4 KiB"
-  },
-  "layouts": {
-    "base": [
-      {
-        "num": 1,
-        "type": "rootfs",
-        "label": "ROOT-A",
-        "fs_size": "0 KiB"
-      }
-    ]
-  }
-}""")
-    try:
-      cgpt.LoadPartitionConfig(self.layout_json)
-    except cgpt.InvalidSize as e:
-      self.assertTrue('must be positive' in str(e))
-    else:
-      self.fail('InvalidSize not raised.')
-
-  def testFileSystemSizeLargerThanPartition(self):
-    """Test that file system size must not be greater than partition."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "metadata": {
-    "block_size": "512",
-    "fs_block_size": "4 KiB"
-  },
-  "layouts": {
-    "base": [
-      {
-        "num": 1,
-        "type": "rootfs",
-        "label": "ROOT-A",
-        "size": "4 KiB",
-        "fs_size": "8 KiB"
-      }
-    ]
-  }
-}""")
-    try:
-      cgpt.LoadPartitionConfig(self.layout_json)
-    except cgpt.InvalidSize as e:
-      self.assertTrue('may not be larger than partition' in str(e))
-    else:
-      self.fail('InvalidSize not raised.')
-
-  def testFileSystemSizeNotMultipleBlocks(self):
-    """Test that file system size must be multiples of file system blocks."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "metadata": {
-    "block_size": "512",
-    "fs_block_size": "4 KiB"
-  },
-  "layouts": {
-    "base": [
-      {
-        "num": 1,
-        "type": "rootfs",
-        "label": "ROOT-A",
-        "size": "4 KiB",
-        "fs_size": "3 KiB"
-      }
-    ]
-  }
-}""")
-    try:
-      cgpt.LoadPartitionConfig(self.layout_json)
-    except cgpt.InvalidSize as e:
-      self.assertTrue('not an even multiple of fs_align' in str(e))
-    else:
-      self.fail('InvalidSize not raised.')
-
-  def testFileSystemSizeForUbiWithNoPageSize(self):
-    """Test that "page_size" must be present to calculate UBI fs size."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "metadata": {
-    "block_size": "512",
-    "fs_block_size": "4 KiB"
-  },
-  "layouts": {
-    "base": [
-      {
-        "num": 1,
-        "type": "rootfs",
-        "format": "ubi",
-        "label": "ROOT-A",
-        "size": "4 KiB",
-        "fs_size": "4 KiB"
-      }
-    ]
-  }
-}""")
-    try:
-      cgpt.LoadPartitionConfig(self.layout_json)
-    except cgpt.InvalidLayout as e:
-      self.assertTrue('page_size' in str(e))
-    else:
-      self.fail('InvalidLayout not raised.')
-
-  def testFileSystemSizeForUbiWithNoEraseBlockSize(self):
-    """Test that "erase_block_size" must be present to calculate UBI fs size."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "metadata": {
-    "block_size": "512",
-    "fs_block_size": "4 KiB"
-  },
-  "layouts": {
-    "base": [
-      {
-        "num": "metadata",
-        "page_size": "4 KiB"
-      },
-      {
-        "num": 1,
-        "type": "rootfs",
-        "format": "ubi",
-        "label": "ROOT-A",
-        "size": "4 KiB",
-        "fs_size": "4 KiB"
-      }
-    ]
-  }
-}""")
-    try:
-      cgpt.LoadPartitionConfig(self.layout_json)
-    except cgpt.InvalidLayout as e:
-      self.assertTrue('erase_block_size' in str(e))
-    else:
-      self.fail('InvalidLayout not raised.')
-
-  def testFileSystemSizeForUbiIsNotMultipleOfUbiEraseBlockSize(self):
-    """Test that we raise when fs_size is not multiple of eraseblocks."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "metadata": {
-    "block_size": "512",
-    "fs_block_size": "4 KiB"
-  },
-  "layouts": {
-    "base": [
-      {
-        "num": "metadata",
-        "page_size": "4 KiB",
-        "erase_block_size": "262144"
-      },
-      {
-        "num": 1,
-        "type": "rootfs",
-        "format": "ubi",
-        "label": "ROOT-A",
-        "size": "256 KiB",
-        "fs_size": "256 KiB"
-      }
-    ]
-  }
-}""")
-    try:
-      cgpt.LoadPartitionConfig(self.layout_json)
-    except cgpt.InvalidSize as e:
-      self.assertTrue('to "248 KiB" in the "common" layout' in str(e))
-    else:
-      self.fail('InvalidSize not raised')
-
-  def testFileSystemSizeForUbiIsMultipleOfUbiEraseBlockSize(self):
-    """Test that everything is okay when fs_size is multiple of eraseblocks."""
-    with open(self.layout_json, 'w') as f:
-      f.write("""{
-  "metadata": {
-    "block_size": "512",
-    "fs_block_size": "4 KiB"
-  },
-  "layouts": {
-    "base": [
-      {
-        "num": "metadata",
-        "page_size": "4 KiB",
-        "erase_block_size": "262144"
-      },
-      {
-        "num": 1,
-        "type": "rootfs",
-        "format": "ubi",
-        "label": "ROOT-A",
-        "size": "256 KiB",
-        "fs_size": "253952"
-      }
-    ]
-  }
-}""")
-    self.assertEqual(
-        cgpt.LoadPartitionConfig(self.layout_json),
-        {
-            u'layouts': {
-                u'base': [
-                    {
-                        u'erase_block_size': 262144,
-                        'features': [],
-                        u'num': u'metadata',
-                        u'page_size': 4096,
-                        'type': 'blank'
-                    },
-                    {
-                        'bytes': 262144,
-                        'features': [],
-                        u'format': u'ubi',
-                        'fs_bytes': 253952,
-                        u'fs_size': u'253952',
-                        u'label': u'ROOT-A',
-                        u'num': 1,
-                        u'size': u'256 KiB',
-                        u'type': u'rootfs'
-                    }
-                ],
-                'common': []
-            },
-            u'metadata': {
-                u'block_size': u'512',
-                'fs_align': 4096,
-                u'fs_block_size': 4096
-            }
-        })
-
-
-class UtilityTest(unittest.TestCase):
-  """Test various utility functions in cgpt.py."""
-
-  def testParseHumanNumber(self):
-    """Test that ParseHumanNumber is correct."""
-    test_cases = [
-        ('1', 1),
-        ('2', 2),
-        ('1KB', 1000),
-        ('1KiB', 1024),
-        ('1 K', 1024),
-        ('1 KiB', 1024),
-        ('3 MB', 3000000),
-        ('4 MiB', 4 * 2**20),
-        ('5GB', 5 * 10**9),
-        ('6GiB', 6 * 2**30),
-        ('7TB', 7 * 10**12),
-        ('8TiB', 8 * 2**40),
-    ]
-    for inp, exp in test_cases:
-      self.assertEqual(cgpt.ParseHumanNumber(inp), exp)
-
-  def testProduceHumanNumber(self):
-    """Test that ProduceHumanNumber is correct."""
-    test_cases = [
-        ('1', 1),
-        ('2', 2),
-        ('1 KB', 1000),
-        ('1 KiB', 1024),
-        ('3 MB', 3 * 10**6),
-        ('4 MiB', 4 * 2**20),
-        ('5 GB', 5 * 10**9),
-        ('6 GiB', 6 * 2**30),
-        ('7 TB', 7 * 10**12),
-        ('8 TiB', 8 * 2**40),
-    ]
-    for exp, inp in test_cases:
-      self.assertEqual(cgpt.ProduceHumanNumber(inp), exp)
-
-  def testGetScriptShell(self):
-    """Verify GetScriptShell works."""
-    data = cgpt.GetScriptShell()
-    self.assertIn('#!/bin/sh', data)
-
-  def testParseProduce(self):
-    """Test that ParseHumanNumber(ProduceHumanNumber()) yields same value."""
-    test_cases = [
-        1, 2,
-        1000, 1024,
-        2 * 10**6, 2 * 2**20,
-        3 * 10**9, 3 * 2**30,
-        4 * 10**12, 4 * 2**40
-    ]
-    for n in test_cases:
-      self.assertEqual(cgpt.ParseHumanNumber(cgpt.ProduceHumanNumber(n)), n)
-
-
-if __name__ == '__main__':
-  unittest.main()
diff --git a/build_library/create_legacy_bootloader_templates.sh b/build_library/create_legacy_bootloader_templates.sh
index e3c47c3..330214d 100755
--- a/build_library/create_legacy_bootloader_templates.sh
+++ b/build_library/create_legacy_bootloader_templates.sh
@@ -1,14 +1,16 @@
 #!/bin/bash
 
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 # Helper script to generate GRUB bootloader configuration files for
 # x86 platforms.
 
-SCRIPT_ROOT=$(readlink -f $(dirname "$0")/..)
+SCRIPT_ROOT=$(readlink -f "$(dirname "$0")"/..)
+# shellcheck source=../common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
+# shellcheck source=disk_layout_util.sh
 . "${BUILD_LIBRARY_DIR}/disk_layout_util.sh" || exit 1
 
 # We're invoked only by build_image, which runs in the chroot
@@ -23,9 +25,9 @@
   "Path to populate with bootloader templates (Default: /tmp/boot)"
 DEFINE_string boot_args "" \
   "Additional boot arguments to pass to the commandline (Default: '')"
-DEFINE_boolean enable_bootcache ${FLAGS_FALSE} \
+DEFINE_boolean enable_bootcache "${FLAGS_FALSE}" \
   "Default all bootloaders to NOT use boot cache."
-DEFINE_boolean enable_rootfs_verification ${FLAGS_FALSE} \
+DEFINE_boolean enable_rootfs_verification "${FLAGS_FALSE}" \
   "Controls if verity is used for root filesystem checking (Default: false)"
 DEFINE_string enable_serial "tty2" \
   "Enable serial port for printks. Example values: ttyS0 (Default: tty2)"
@@ -88,6 +90,7 @@
   :
 }
 
+# shellcheck source=board_options.sh
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
 load_board_specific_script "build_kernel_image.sh"
 modify_kernel_command_line "${config_file}"
@@ -117,7 +120,7 @@
 if [[ "${FLAGS_arch}" = "x86" || "${FLAGS_arch}" = "amd64"  ]]; then
   # TODO: For some reason the /dev/disk/by-uuid is not being generated by udev
   # in the initramfs. When we figure that out, switch to root=UUID=${UUID}.
-  sudo mkdir -p ${FLAGS_to}
+  sudo mkdir -p "${FLAGS_to}"
 
   # /boot/syslinux must be installed in partition 12 as /syslinux/.
   SYSLINUX_DIR="${FLAGS_to}/syslinux"
@@ -211,7 +214,7 @@
 
   grub_args=(
     -p /efi/boot
-    part_gpt gptpriority test fat ext2 hfs hfsplus normal boot chain
+    part_gpt gptpriority test fat ext2 normal boot chain
     efi_gop configfile linux
   # For more context on SBAT, see chromiumos-overlay/sys-boot/grub/README.md
     -s "${SRC_ROOT}/third_party/chromiumos-overlay/sys-boot/grub/files/sbat.csv"
diff --git a/build_library/dev_image_util.sh b/build_library/dev_image_util.sh
index 9397fad..c1a3ba4 100755
--- a/build_library/dev_image_util.sh
+++ b/build_library/dev_image_util.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/build_library/disk_layout_util.sh b/build_library/disk_layout_util.sh
index 2f5d464..f44e321 100644
--- a/build_library/disk_layout_util.sh
+++ b/build_library/disk_layout_util.sh
@@ -1,12 +1,15 @@
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 # BUILD_LIBRARY_DIR must be set prior to sourcing this file, since this file
 # is sourced as ${BUILD_LIBRARY_DIR}/disk_layout_util.sh
+# shellcheck source=filesystem_util.sh
 . "${BUILD_LIBRARY_DIR}/filesystem_util.sh" || exit 1
 
-CGPT_PY="${BUILD_LIBRARY_DIR}/cgpt.py"
+# shellcheck disable=SC2154
+CGPT_PY="${GCLIENT_ROOT}/chromite/scripts/disk_layout_tool"
+# shellcheck disable=SC2034
 PARTITION_SCRIPT_PATH="usr/sbin/write_gpt.sh"
 DISK_LAYOUT_PATH=
 
@@ -221,7 +224,7 @@
 
   # Write out the header for the script.
   local gpt_layout=$(${GPT} show "${image}" | sed -e 's/^/# /')
-  for x in "${unpack}" "${pack}" "${mount}" "${umount}"; do
+  for x in "${unpack}" "${pack}" "${mount}"; do
     cat >"${x}" <<\EOF
 #!/bin/bash -eu
 # File automatically generated. Do not edit.
@@ -288,8 +291,19 @@
 
 EOF
 
-    if [[ "${x}" != "${umount}" ]]; then
+    if [[ "${x}" == "${pack}" || "${x}" == "${unpack}" ]]; then
       cat >>"${x}" <<\EOF
+echo
+echo "WARNING: $0 is deprecated and will be removed."
+echo "WARNING: If you rely on this script, please email go/cros-build-help."
+echo "NB: You can use losetup to efficiently access partitions:"
+echo "    losetup --show -f -P ${TARGET}"
+echo
+sleep 1
+EOF
+    fi
+
+    cat >>"${x}" <<\EOF
 # Losetup has support for partitions, and offset= has issues.
 # See crbug.com/954188
 LOOPDEV=''
@@ -304,7 +318,6 @@
 fi
 
 EOF
-    fi
 
     echo "${gpt_layout}" >> "${x}"
   done
@@ -319,7 +332,7 @@
     local size_b=$(( size * 512 ))
     local label=$(${GPT} show "${image}" -i ${part} -l)
 
-    for x in "${unpack}" "${pack}" "${mount}" "${umount}"; do
+    for x in "${unpack}" "${pack}" "${mount}"; do
       cat <<EOF >> "${x}"
 case \${PART:-${part}} in
 ${part}|"${label}")
@@ -356,24 +369,16 @@
 ln -sfT ${dir} "${dir}_${label}"
 ) &
 EOF
-      cat <<-EOF >>"${umount}"
-if [[ -d ${dir} ]]; then
-  (
-  sudo umount ${dir} || :
-  rmdir ${dir}
-  rm -f "${dir}_${label}"
-  ) &
-fi
-EOF
     fi
 
-    for x in "${unpack}" "${pack}" "${mount}" "${umount}"; do
+    for x in "${unpack}" "${pack}" "${mount}"; do
       echo "esac" >> "${x}"
     done
   done < <(${GPT} show -q "${image}")
 
   echo "wait" >> "${mount}"
-  echo "wait" >> "${umount}"
+
+  cp "${BUILD_LIBRARY_DIR}/umount_image_helper.sh" "${umount}"
 
   chmod +x "${unpack}" "${pack}" "${mount}" "${umount}"
 }
diff --git a/build_library/disk_layout_v2.json b/build_library/disk_layout_v2.json
index b5870b0..40f4da1 100644
--- a/build_library/disk_layout_v2.json
+++ b/build_library/disk_layout_v2.json
@@ -79,7 +79,7 @@
         "num": 5,
         "label": "ROOT-B",
         "type": "rootfs",
-        "size": "4096 MiB"
+        "size": "4 GiB"
       },
       {
         # Slot A rootfs. Rootfs + extras must fit.
@@ -102,9 +102,10 @@
         "fs_format": "ext2",
         "fs_options": {
           "squashfs": "-noI -no-exports -comp lzo -Xalgorithm lzo1x_999 -Xcompression-level 9",
+          "ext2": "-i 65536",
           "btrfs": "skinny-metadata"
         },
-        "size": "4096 MiB",
+        "size": "4 GiB",
         "fs_size": "2300 MiB",
         "uuid": "clear"
       },
@@ -134,16 +135,40 @@
         "size": "2400 MiB"
       }
     ],
+    # CAN apply updates when running from USB.
+    "usb-updatable": [
+      {
+        # The partition size matches base, so it's installable.
+        "num": 5,
+        "size": "2400 MiB"
+      },
+      {
+        # The partition size matches base, so it's installable.
+        "num": 3,
+        "size": "2400 MiB"
+      },
+      {
+        "num": 1,
+        "size": "4 GiB"
+      }
+    ],
     # Used for factory install images.
     "factory_install": [
       {
+        "num": 4,
+        "size": "1"
+      },
+      {
         "num": 5,
-        "size": "2 MiB"
+        "size": "1"
       },
       {
         "num": 3,
         "size": "420 MiB",
-        "fs_size": "400 MiB"
+        "fs_size": "400 MiB",
+        "fs_options": {
+          "ext2": "-i 32768"
+        }
       },
       {
         "num": 1,
@@ -180,40 +205,6 @@
         "size": "2 MiB"
       }
     ],
-    # Larger rootfs, suitable for development with symbols, etc.
-    # Cannot apply updates when running from USB (no slot B).
-    "2gb-rootfs": [
-      {
-        # Will be grown to size from base on install.
-        "num": 5,
-        "size": "2 MiB"
-      },
-      {
-        # Will be shrunk to size from base on install.
-        "num": 3,
-        "size": "2048 MiB",
-        "fs_size": "2000 MiB"
-      }
-    ],
-    # Larger rootfs, suitable for development with symbols, etc.
-    # CAN apply updates when running from USB.
-    "2gb-rootfs-updatable": [
-      {
-        # The partition size matches base, so it's installable.
-        "num": 5,
-        "size": "2048 MiB"
-      },
-      {
-        # The partition size matches base, so it's installable.
-        "num": 3,
-        "size": "2048 MiB",
-        "fs_size": "2000 MiB"
-      },
-      {
-        "num": 1,
-        "size": "4 GiB"
-      }
-    ],
     # Very large rootfs, suitable for development with symbols,
     # etc. Cannot apply updates when running from USB (no slot B)
     "4gb-rootfs": [
@@ -225,7 +216,7 @@
         # This partition is larger than the base partition, so the
         # installer will corrupt the disk during installation.
         "num": 3,
-        "size": "4096 MiB",
+        "size": "4 GiB",
         "fs_size": "4000 MiB"
       }
     ],
@@ -238,7 +229,7 @@
       },
       {
         "num": 3,
-        "size": "16384 MiB",
+        "size": "16 GiB",
         "fs_size": "16000 MiB"
       }
     ]
diff --git a/build_library/disk_layout_v3.json b/build_library/disk_layout_v3.json
index 2a88548..afb8953 100644
--- a/build_library/disk_layout_v3.json
+++ b/build_library/disk_layout_v3.json
@@ -1,10 +1,3 @@
-# Changes from disk_layout_v2:
-# * Introduce 128MB MINIOS-A and MINIOS-B on partitions 9 and 10, with
-#   10 being set as last_partition.
-# * Reduce OEM partition 8 from 16MB to 4MB.
-# * Remove -no-exports from ROOTFS-A squashfs options.
-# * Add powerwash data as partition 11.
-# * Delete 2gb-rootfs, 2gb-rootfs-updatable, and 4gb-rootfs.
 {
   # See README.disk_layout.
   "parent": "common_disk_layout.json",
@@ -82,9 +75,10 @@
         "fs_format": "ext2",
         "fs_options": {
           "squashfs": "-noI -comp lzo -Xalgorithm lzo1x_999 -Xcompression-level 9",
+          "ext2": "-i 65536",
           "btrfs": "skinny-metadata"
         },
-        "size": "4096 MiB",
+        "size": "4 GiB",
         "fs_size": "2300 MiB",
         "uuid": "clear"
       },
@@ -102,7 +96,7 @@
         "num": 5,
         "label": "ROOT-B",
         "type": "rootfs",
-        "size": "4096 MiB"
+        "size": "4 GiB"
       },
       {
         # Powerwash data, including rollback data.
@@ -144,12 +138,17 @@
       {
         "num": 5,
         "size": "2 MiB"
+      }
+    ],
+    # Same as USB, but updatable.
+    "usb-updatable": [
+      {
+        "num": 3,
+        "size": "2400 MiB"
       },
       {
-        # MINIOS-B partitions are not filled with anything on USB images.
-        "num": 10,
-        "size": "2 MiB",
-        "type": "reserved"
+        "num": 5,
+        "size": "2400 MiB"
       }
     ],
     # Used for factory install images.
@@ -157,20 +156,22 @@
       {
         "num": 3,
         "size": "420 MiB",
-        "fs_size": "400 MiB"
+        "fs_size": "400 MiB",
+        "fs_options": {
+          "ext2": "-i 32768"
+        }
+      },
+      {
+        "num": 4,
+        "size": "1"
       },
       {
         "num": 5,
-        "size": "2 MiB"
+        "size": "1"
       },
       {
         "num": 1,
         "size": "140 MiB"
-      },
-      {
-        "num": 10,
-        "size": "2 MiB",
-        "type": "reserved"
       }
     ],
     "recovery": [
@@ -200,11 +201,6 @@
         # Stateful on recovery is dynamically resized larger.
         "num": 1,
         "size": "2 MiB"
-      },
-      {
-        "num": 10,
-        "size": "2 MiB",
-        "type": "reserved"
       }
     ],
     # Huge rootfs, suitable for VM only images, should not be used
@@ -212,17 +208,12 @@
     "16gb-rootfs": [
       {
         "num": 3,
-        "size": "16384 MiB",
+        "size": "16 GiB",
         "fs_size": "16000 MiB"
       },
       {
         "num": 5,
         "size": "2 MiB"
-      },
-      {
-        "num": 10,
-        "size": "2 MiB",
-        "type": "reserved"
       }
     ]
   }
diff --git a/build_library/ext2_sb_util.sh b/build_library/ext2_sb_util.sh
index 52b5446..14411d9 100644
--- a/build_library/ext2_sb_util.sh
+++ b/build_library/ext2_sb_util.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/build_library/filesystem_util.sh b/build_library/filesystem_util.sh
index 997cfd1..59e6b84 100644
--- a/build_library/filesystem_util.sh
+++ b/build_library/filesystem_util.sh
@@ -1,7 +1,8 @@
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# shellcheck source=../common.sh
 . "${BUILD_LIBRARY_DIR}/../common.sh" || exit 1
 
 # Usage: fs_parse_option <mount_options> <option_key> [default_value]
diff --git a/build_library/filesystem_util_unittest.sh b/build_library/filesystem_util_unittest.sh
index 9ee394a..3bc347e 100755
--- a/build_library/filesystem_util_unittest.sh
+++ b/build_library/filesystem_util_unittest.sh
@@ -1,9 +1,10 @@
 #!/bin/bash
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 BUILD_LIBRARY_DIR=$(dirname $0)
+# shellcheck source=filesystem_util.sh
 . "${BUILD_LIBRARY_DIR}/filesystem_util.sh" || exit 1
 
 set -e -u
diff --git a/build_library/legacy_disk_layout.json b/build_library/legacy_disk_layout.json
index 71fb304..705ab79 100644
--- a/build_library/legacy_disk_layout.json
+++ b/build_library/legacy_disk_layout.json
@@ -90,7 +90,7 @@
         "num": 5,
         "label": "ROOT-B",
         "type": "rootfs",
-        "size": "2048 MiB"
+        "size": "2 GiB"
       },
       {
         # Slot A rootfs. Rootfs + extras (AKA verity) must fit, AKA:
@@ -123,9 +123,10 @@
         "fs_format": "ext2",
         "fs_options": {
           "squashfs": "-noI -no-exports -comp lzo -Xalgorithm lzo1x_999 -Xcompression-level 9",
+          "ext2": "-i 65536",
           "btrfs": "skinny-metadata"
         },
-        "size": "2048 MiB",
+        "size": "2 GiB",
         "fs_size": "1991 MiB",
         "uuid": "clear"
       },
@@ -153,6 +154,9 @@
         "size": "2 MiB"
       }
     ],
+    # CAN apply updates when running from USB.
+    "usb-updatable": [
+    ],
     # Used for factory install images.
     "factory_install": [
       {
@@ -166,7 +170,10 @@
       {
         "num": 3,
         "size": "420 MiB",
-        "fs_size": "400 MiB"
+        "fs_size": "400 MiB",
+        "fs_options": {
+          "ext2": "-i 32768"
+        }
       },
       {
         "num": 1,
@@ -197,40 +204,6 @@
         "size": "2 MiB"
       }
     ],
-    # Larger rootfs, suitable for development with symbols, etc.
-    # Cannot apply updates when running from USB (no slot B).
-    "2gb-rootfs": [
-      {
-        # Will be grown to size from base on install.
-        "num": 5,
-        "size": "2 MiB"
-      },
-      {
-        # Will be shrunk to size from base on install.
-        "num": 3,
-        "size": "2048 MiB",
-        "fs_size": "2000 MiB"
-      }
-    ],
-    # Larger rootfs, suitable for development with symbols, etc.
-    # CAN apply updates when running from USB.
-    "2gb-rootfs-updatable": [
-      {
-        # The partition size matches base, so it's installable.
-        "num": 5,
-        "size": "2048 MiB"
-      },
-      {
-        # The partition size matches base, so it's installable.
-        "num": 3,
-        "size": "2048 MiB",
-        "fs_size": "2000 MiB"
-      },
-      {
-        "num": 1,
-        "size": "4 GiB"
-      }
-    ],
     # Very large rootfs, suitable for development with symbols,
     # etc. Cannot apply updates when running from USB (no slot B)
     "4gb-rootfs": [
@@ -242,7 +215,7 @@
         # This partition is larger than the base partition, so the
         # installer will corrupt the disk during installation.
         "num": 3,
-        "size": "4096 MiB",
+        "size": "4 GiB",
         "fs_size": "4000 MiB"
       }
     ],
@@ -255,7 +228,7 @@
       },
       {
         "num": 3,
-        "size": "16384 MiB",
+        "size": "16 GiB",
         "fs_size": "16000 MiB"
       }
     ]
diff --git a/build_library/mount_gpt_util.sh b/build_library/mount_gpt_util.sh
index c828c91..7f0772f 100755
--- a/build_library/mount_gpt_util.sh
+++ b/build_library/mount_gpt_util.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/build_library/perl_rebuild.sh b/build_library/perl_rebuild.sh
index 82768b7..e285b5f 100755
--- a/build_library/perl_rebuild.sh
+++ b/build_library/perl_rebuild.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -10,6 +10,7 @@
 # Usage: simply run `perl_rebuild.sh` inside chroot without any argument.
 
 SCRIPT_ROOT="$(readlink -f "$(dirname "$0")/..")"
+# shellcheck source=../common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
 
 perl_rebuild() {
@@ -28,6 +29,7 @@
       sudo qmerge -Uyq "${pkgs[@]}"
     fi
     sudo perl-cleaner --all -- --quiet
+    sudo find /usr/lib*/perl5/vendor_perl -type d -empty -delete
   fi
 }
 
diff --git a/build_library/selinux_util.sh b/build_library/selinux_util.sh
index b2382f8..88fefe6 100644
--- a/build_library/selinux_util.sh
+++ b/build_library/selinux_util.sh
@@ -1,4 +1,4 @@
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/build_library/test_image_util.sh b/build_library/test_image_util.sh
index 38c2c11..ed2deb3 100755
--- a/build_library/test_image_util.sh
+++ b/build_library/test_image_util.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -58,8 +58,4 @@
          ${image_name} --force_developer_mode
     fi
   fi
-
-  if type board_test_setup &>/dev/null; then
-    board_test_setup "${BUILD_DIR}/${image_name}"
-  fi
 }
diff --git a/build_library/umount_image_helper.sh b/build_library/umount_image_helper.sh
new file mode 100755
index 0000000..9bfc1ac
--- /dev/null
+++ b/build_library/umount_image_helper.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2022 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+SCRIPT_ROOT=$(readlink -f -- "$(dirname -- "$0")")
+
+usage() {
+  cat <<EOF
+Usage: $0 [-h|--help]
+
+Unmount the disk image in ${SCRIPT_ROOT} and delete dir_* paths.
+EOF
+}
+
+main() {
+  if [[ $# -ne 0 ]]; then
+    usage
+    exit 1
+  fi
+
+  cd "${SCRIPT_ROOT}" || exit 1
+
+  # See if any paths exist to avoid errors with missing paths.
+  set -- dir_[0-9]*
+  if [[ $# -gt 1 || "$1" != "dir_[0-9]*" ]]; then
+    find dir_[0-9]* -maxdepth 0 -type l -delete
+  fi
+
+  # See if any paths exist to avoid errors with missing paths.
+  set -- dir_[0-9]*
+  if [[ $# -gt 1 || "$1" != "dir_[0-9]*" ]]; then
+    sudo umount -r dir_[0-9]*
+    rmdir dir_[0-9]*
+  fi
+}
+
+main "$@"
diff --git a/build_packages b/build_packages
deleted file mode 100755
index 68dc80e..0000000
--- a/build_packages
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-. "$(dirname "$0")/common.sh" || exit 1
-
-# Script must run inside the chroot
-restart_in_chroot_if_needed "$@"
-
-new_script="build_packages"
-warn "$0: This script is deprecated and will be removed."
-warn "All users must migrate to ${new_script} in chromite/bin."
-warn "You can simply change all references of $0 to \`${new_script}\`" \
-  "from \$PATH (in chromite/bin/)."
-warn "This old script will be removed by July 2022."
-warn "If you have questions or found code that needs updating, please" \
-  "contact chromium-os-dev@, or file a bug at go/cros-build-bug."
-exec "${CHROMITE_BIN}/${new_script}" "$@"
diff --git a/build_sdk_board b/build_sdk_board
index eec815e..fe2af33 100755
--- a/build_sdk_board
+++ b/build_sdk_board
@@ -1,12 +1,13 @@
 #!/bin/bash
 
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 # The host (chroot) specific "setup_board" process. This separates the chroot
 # specific setup from the generic board setup.
 
+# shellcheck source=common.sh
 . "$(dirname "$0")/common.sh" || exit 1
 
 # Script must run inside the chroot
@@ -17,10 +18,10 @@
 # Developer-visible flags.
 DEFINE_string board "amd64-host" \
   "The name of the board to set up."
-DEFINE_boolean force $FLAGS_FALSE \
+DEFINE_boolean force "${FLAGS_FALSE}" \
   "Force re-creating board root."
 
-FLAGS_HELP="usage: $(basename $0) [flags]
+FLAGS_HELP="usage: $(basename "$0") [flags]
 
 setup_host_board builds the chroot for the amd64-host (chroot) board.
 This should not need to be called except by the SDK Builder.
@@ -40,25 +41,19 @@
 BOARD_ROOT="/build/${BOARD}"
 CHROMIUMOS_OVERLAY="${CHROOT_TRUNK_DIR}/src/third_party/chromiumos-overlay"
 CHROMIUMOS_CONFIG="${CHROMIUMOS_OVERLAY}/chromeos/config"
-CHROMIUMOS_PROFILES="${CHROMIUMOS_OVERLAY}/profiles"
 BOARD_ETC="${BOARD_ROOT}/etc"
 BOARD_SETUP="${BOARD_ETC}/make.conf.board_setup"
 BOARD_PROFILE="${BOARD_ETC}/portage/profile"
 
-eval $(portageq envvar -v CHOST PKGDIR)
+eval "$(portageq envvar -v CHOST PKGDIR)"
 
-SYSROOT_EXISTS=false
 if [ -d "${BOARD_ROOT}" ]; then
   if [[ ${FLAGS_force} -eq ${FLAGS_TRUE} ]]; then
     echo "--force set.  Re-creating ${BOARD_ROOT}..."
     # Removal takes long. Make it asynchronous.
-    TEMP_DIR=`mktemp -d`
+    TEMP_DIR=$(mktemp -d)
     sudo mv "${BOARD_ROOT}" "${TEMP_DIR}"
     sudo rm -rf --one-file-system "${TEMP_DIR}" &
-  else
-    # The sysroot exists. Take note so that we can exit early once the
-    # configuration has been updated.
-    SYSROOT_EXISTS=true
   fi
 fi
 
@@ -114,9 +109,8 @@
 fi
 
 EMERGE_CMD="${CHROMITE_BIN}/parallel_emerge"
-TOOLCHAIN_PACKAGES=(
-  $("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host)
-)
+mapfile -t TOOLCHAIN_PACKAGES < \
+  <("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host)
 # Sanity check we got some valid results.
 if [[ ${#TOOLCHAIN_PACKAGES[@]} -eq 0 ]]; then
   die_notrace "cros_setup_toolchains failed"
@@ -124,7 +118,7 @@
 PACKAGES=( system virtual/target-sdk )
 
 run_emerge() {
-  info_run sudo -E ${EMERGE_CMD} "$@"
+  info_run sudo -E "${EMERGE_CMD}" "$@"
 }
 
 # First, rebuild all packages from scratch. This is needed to make sure
@@ -141,16 +135,19 @@
 info_run sudo eclean -d packages
 
 # Next, install our rebuilt packages into our separate root.
-HOST_FLAGS="--root=$BOARD_ROOT --update --verbose --deep --root-deps"
-HOST_FLAGS+=" --newuse --usepkgonly"
-run_emerge $HOST_FLAGS --with-bdeps=y "${PACKAGES[@]}"
+HOST_FLAGS=(
+  "--root=${BOARD_ROOT}" --update --verbose --deep --root-deps
+  --newuse --usepkgonly
+)
+run_emerge "${HOST_FLAGS[@]}" --with-bdeps=y "${PACKAGES[@]}"
 # Install our rebuilt packages from the nobdeps target into our separate root
 # without their build-time deps.  We also avoid adding this target to the
 # world set so that subsequent update_chroot commands won't re-import the
 # build deps.
-run_emerge $HOST_FLAGS --with-bdeps=n --oneshot \
+run_emerge "${HOST_FLAGS[@]}" --with-bdeps=n --oneshot \
   virtual/target-sdk-nobdeps
-info_run sudo cp -a "${PKGDIR}" $BOARD_ROOT/packages
+# shellcheck disable=SC2154  # PKGDIR is defined via eval above
+info_run sudo cp -a "${PKGDIR}" "${BOARD_ROOT}/packages"
 
 # Copy our chroot version into the newly packaged chroot.
 sudo cp -a "${CHROOT_VERSION_FILE}" "${BOARD_ROOT}${CHROOT_VERSION_FILE}"
@@ -159,6 +156,21 @@
 sudo find "${BOARD_ROOT}" -type f -name '*.la' -exec \
   sed -i -e "s|${BOARD_ROOT}/|/|g" {} +
 
+# Remove wrapper scripts left behind in the sysroot. These are not supposed to
+# be part of the final filesystem.
+sudo rm -rf "${BOARD_ROOT}/build"
+
+# Enable locale that some Chrome scripts assume exist.
+sudo sed -i -e '/^#en_US.UTF-8/s:#::' "${BOARD_ROOT}/etc/locale.gen"
+sudo mount --bind /dev "${BOARD_ROOT}/dev"
+sudo chroot "${BOARD_ROOT}" locale-gen -u
+sudo umount "${BOARD_ROOT}/dev"
+
+# b/278101251: /build/amd64-host doesn't include ccache's link tree by default,
+# which makes `FEATURES=ccache` quietly fail for host packages. Ensure it's
+# built here.
+sudo ROOT="${BOARD_ROOT}" "${BOARD_ROOT}/usr/bin/ccache-config" --install-links
+
 command_completed
 echo "Done!"
 echo "The SYSROOT is: ${BOARD_ROOT}"
diff --git a/chroot_version_hooks.d/182_binutils235_upgrade b/chroot_version_hooks.d/182_binutils235_upgrade
deleted file mode 100644
index 9a8b21c..0000000
--- a/chroot_version_hooks.d/182_binutils235_upgrade
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# See comments in the hook.
-. /mnt/host/source/src/scripts/chroot_version_hooks.d/204_zstd
-
-# Upgrade binutils to avoid binutils being pulled into
-# build graph when building target packages, https://crbug.com/1171084
-sudo emerge -ugq sys-devel/binutils
diff --git a/chroot_version_hooks.d/183_sandbox_update b/chroot_version_hooks.d/183_sandbox_update
deleted file mode 100644
index bdaed62..0000000
--- a/chroot_version_hooks.d/183_sandbox_update
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# See comments in the hook.
-. /mnt/host/source/src/scripts/chroot_version_hooks.d/204_zstd
-
-# Upgrade sandbox to avoid renameat failures, https://crbug.com/1176957
-sudo emerge -ugq sys-apps/sandbox
diff --git a/chroot_version_hooks.d/184_binutils235_upgrade b/chroot_version_hooks.d/184_binutils235_upgrade
deleted file mode 120000
index d519755..0000000
--- a/chroot_version_hooks.d/184_binutils235_upgrade
+++ /dev/null
@@ -1 +0,0 @@
-182_binutils235_upgrade
\ No newline at end of file
diff --git a/chroot_version_hooks.d/185_sdk_profile.d b/chroot_version_hooks.d/185_sdk_profile.d
deleted file mode 100644
index 35bd414..0000000
--- a/chroot_version_hooks.d/185_sdk_profile.d
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-sudo rm -f "/etc/profile.d/chromiumos-niceties.sh"
-sudo ln -sfT \
-  "/mnt/host/source/chromite/sdk/etc/profile.d/50-chromiumos-niceties.sh" \
-  "/etc/profile.d/50-chromiumos-niceties.sh"
diff --git a/chroot_version_hooks.d/186_bash_upgrade b/chroot_version_hooks.d/186_bash_upgrade
deleted file mode 100644
index a2c1d77..0000000
--- a/chroot_version_hooks.d/186_bash_upgrade
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# See comments in the hook.
-. /mnt/host/source/src/scripts/chroot_version_hooks.d/204_zstd
-
-# Upgrade bash to the latest version we support.
-# Bash updates change behavior, and build scripts are made to support the
-# latest bash version, and may not work with older versions.
-sudo emerge -ugq app-shells/bash
diff --git a/chroot_version_hooks.d/188_binutils236_upgrade b/chroot_version_hooks.d/188_binutils236_upgrade
deleted file mode 120000
index d519755..0000000
--- a/chroot_version_hooks.d/188_binutils236_upgrade
+++ /dev/null
@@ -1 +0,0 @@
-182_binutils235_upgrade
\ No newline at end of file
diff --git a/chroot_version_hooks.d/189_purge_pypy b/chroot_version_hooks.d/189_purge_pypy
deleted file mode 100644
index 127df1a..0000000
--- a/chroot_version_hooks.d/189_purge_pypy
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Force rebuild of pypy3 to ensure portage picks up changes that cause byte
-# compilation of pypy3 libs.
-sudo emerge --rage-clean pypy3 || :
diff --git a/chroot_version_hooks.d/190_make_conf_board_setup_cleanup b/chroot_version_hooks.d/190_make_conf_board_setup_cleanup
deleted file mode 100644
index c573cc9..0000000
--- a/chroot_version_hooks.d/190_make_conf_board_setup_cleanup
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Remove vars that reference the amd64-host sysroot from
-# /etc/make.conf.board_setup for it to be correctly used in the SDK.
-sudo sed -E -i '/ROOT|PKG_CONFIG/d' /etc/make.conf.board_setup
diff --git a/chroot_version_hooks.d/191_bash_completion_cleanup b/chroot_version_hooks.d/191_bash_completion_cleanup
deleted file mode 100644
index 29cd3d1..0000000
--- a/chroot_version_hooks.d/191_bash_completion_cleanup
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# We moved the file.
-sed -i \
-  -e '/^# Set up bash autocompletion.$/d' \
-  -e '/^\. ~\/trunk\/src\/scripts\/bash_completion/d' \
-  -e '/^\. ~\/chromiumos\/src\/scripts\/bash_completion/d' \
-  -e '/^\. .*\/chromite\/sdk\/etc\/bash_completion.d\/cros/d' \
-  /home/*/.bashrc 2>/dev/null || :
diff --git a/chroot_version_hooks.d/193_package_keywords b/chroot_version_hooks.d/193_package_keywords
deleted file mode 100644
index bc409cf..0000000
--- a/chroot_version_hooks.d/193_package_keywords
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Portage changed the file name.
-
-migrate() {
-  local root="$1"
-  local etc="${root}/etc/portage"
-
-  # If the old dir doesn't exist, nothing to migrate.
-  if [[ ! -d "${etc}/package.keywords" ]]; then
-    return 0
-  fi
-
-  # Clear possibly empty dir.
-  sudo rmdir "${etc}/package.keywords" 2>/dev/null || :
-
-  # Create the new dir.
-  sudo mkdir -p "${etc}/package.accept_keywords"
-
-  # Move the content.
-  sudo mv "${etc}"/package.keywords/* "${etc}"/package.accept_keywords
-
-  # Delete the new empty dir.
-  sudo rmdir "${etc}/package.keywords"
-}
-
-migrate / &
-for board in /build/*/; do
-  migrate "${board}" &
-done
-wait
diff --git a/chroot_version_hooks.d/194_argcomplete_cleanup b/chroot_version_hooks.d/194_argcomplete_cleanup
deleted file mode 100644
index d641f5c..0000000
--- a/chroot_version_hooks.d/194_argcomplete_cleanup
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Remove file generated by the activate-global-python-argcomplete command.
-sudo rm -f \
-  /mnt/host/source/chromite/sdk/etc/bash_completion.d/python-argcomplete.sh
-
-# Cleanup command from .bash_profile.
-sed -i \
-  -e '/^activate-global-python-argcomplete/d' \
-  /home/*/.bash_profile 2>/dev/null || :
diff --git a/chroot_version_hooks.d/195_cros_bash_completion b/chroot_version_hooks.d/195_cros_bash_completion
deleted file mode 100644
index e281e3c..0000000
--- a/chroot_version_hooks.d/195_cros_bash_completion
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# We moved the file.
-if [[ ! -L /etc/bash_completion.d/cros ]]; then
-  sudo ln -sf \
-    /mnt/host/source/chromite/sdk/etc/bash_completion.d/cros \
-    /etc/bash_completion.d/cros
-fi
diff --git a/chroot_version_hooks.d/196_cleanup_dev_rust b/chroot_version_hooks.d/196_cleanup_dev_rust
deleted file mode 120000
index d880e2d..0000000
--- a/chroot_version_hooks.d/196_cleanup_dev_rust
+++ /dev/null
@@ -1 +0,0 @@
-192_cleanup_dev_rust
\ No newline at end of file
diff --git a/chroot_version_hooks.d/197_trunk_to_chromiumos b/chroot_version_hooks.d/197_trunk_to_chromiumos
deleted file mode 100644
index edddd0a..0000000
--- a/chroot_version_hooks.d/197_trunk_to_chromiumos
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# We renamed the symlink.
-for old_link in /home/*/trunk; do
-  new_link="${old_link%/*}/chromiumos"
-  if [[ -L "${old_link}" && ! -L "${new_link}" ]]; then
-    if [[ -f "${new_link}" ]]; then
-      rm -f "${new_link}"
-    elif [[ -d "${new_link}" ]]; then
-      # Some devs have a little .cache state for unknown reason.  Delete the
-      # few known safe paths, but don't try to delete the whole tree.
-      rm -rf "${new_link}/.cache/common"
-      rmdir "${new_link}/.cache" 2>/dev/null
-      if ! rmdir "${new_link}"; then
-        echo "ERROR: ~/chromiumos exists inside your SDK when it should not."
-      fi
-    fi
-    ln -s /mnt/host/source "${new_link}"
-  fi
-done
diff --git a/chroot_version_hooks.d/198_binutils236_upgrade b/chroot_version_hooks.d/198_binutils236_upgrade
deleted file mode 120000
index d519755..0000000
--- a/chroot_version_hooks.d/198_binutils236_upgrade
+++ /dev/null
@@ -1 +0,0 @@
-182_binutils235_upgrade
\ No newline at end of file
diff --git a/chroot_version_hooks.d/199_emerge_libunwind b/chroot_version_hooks.d/199_emerge_libunwind
deleted file mode 100644
index a53bc8f..0000000
--- a/chroot_version_hooks.d/199_emerge_libunwind
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# See comments in the hook.
-. /mnt/host/source/src/scripts/chroot_version_hooks.d/204_zstd
-
-# Force all boards to re-emerge llvm-libunwind
-# b/210927982
-
-for board_root in /build/*; do
-  board_name=${board_root##*/}
-  if [[ -d "${board_root}/var/db/pkg" ]]; then
-    emerge-${board_name} -gq sys-libs/gcc-libs sys-libs/llvm-libunwind --nodeps
-  fi
-done
-
-echo "Completed installation of sys-libs/llvm-libunwind in board sysroots"
diff --git a/chroot_version_hooks.d/200_trunk_chroot_cmd b/chroot_version_hooks.d/200_trunk_chroot_cmd
deleted file mode 100644
index d7c2ce1..0000000
--- a/chroot_version_hooks.d/200_trunk_chroot_cmd
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# We renamed the symlink.
-sed -i '/CHROOT_CWD/s:/trunk/:/chromiumos/:' /home/*/.bash_profile
diff --git a/chroot_version_hooks.d/201_remove_i686-pc-linux-gnu b/chroot_version_hooks.d/201_remove_i686-pc-linux-gnu
deleted file mode 100644
index 62fc983..0000000
--- a/chroot_version_hooks.d/201_remove_i686-pc-linux-gnu
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# The i686-pc-linux-gnu toolchain was renamed to i686-cros-linux-gnu, so delete
-# the old toolchain.
-# https://issuetracker.google.com/issues/187786439
-sudo crossdev --force -C i686-pc-linux-gnu
diff --git a/chroot_version_hooks.d/202_clean_python2 b/chroot_version_hooks.d/202_clean_python2
deleted file mode 100644
index 7cdd10b..0000000
--- a/chroot_version_hooks.d/202_clean_python2
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# These packages have been removed, so make sure they're cleaned up.
-pkgs=(
-  dev-python/enum34
-  dev-python/mox
-  dev-python/backports-functools-lru-cache
-  dev-python/enum34
-  dev-python/funcsigs
-  dev-python/functools32
-  dev-python/futures
-  dev-python/ipaddress
-  dev-python/mox
-  dev-python/pathlib
-  dev-python/subprocess32
-  virtual/python-enum34
-  virtual/python-funcsigs
-  virtual/python-futures
-)
-
-sudo qmerge -Uqy "${pkgs[@]}"
diff --git a/chroot_version_hooks.d/203_emerge_libcxx b/chroot_version_hooks.d/203_emerge_libcxx
deleted file mode 100644
index 9dde085..0000000
--- a/chroot_version_hooks.d/203_emerge_libcxx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# See comments in the hook.
-. /mnt/host/source/src/scripts/chroot_version_hooks.d/204_zstd
-
-# Force all boards to re-emerge sys-libs/libcxxabi sys-libs/libcxx.
-# This is required for switch to runtimes builds (b/204093890).
-
-sudo emerge -gqu --nodeps sys-libs/libcxxabi sys-libs/libcxx
-for board_root in /build/*; do
-  board_name=${board_root##*/}
-  if [[ -d "${board_root}/var/db/pkg" ]]; then
-    emerge-${board_name} -gqu --nodeps sys-libs/libcxxabi sys-libs/libcxx
-  fi
-done
diff --git a/chroot_version_hooks.d/204_zstd b/chroot_version_hooks.d/204_zstd
deleted file mode 100644
index 1cb0298..0000000
--- a/chroot_version_hooks.d/204_zstd
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# If an older SDK doesn't have zstd yet, the SDK produces the zstd binpkg using
-# zstd, so we can't install it.  Force it from source.
-
-if ! type -P zstd >/dev/null; then
-  sudo emerge -1O --buildpkg=n zstd
-fi
diff --git a/chroot_version_hooks.d/205_delete_unused_symlinks b/chroot_version_hooks.d/205_delete_unused_symlinks
deleted file mode 100644
index aec5ca7..0000000
--- a/chroot_version_hooks.d/205_delete_unused_symlinks
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# These shouldn't be used anymore.
-
-sudo ln -sfT \
-  "$(realpath /etc/portage/make.profile)" \
-  /etc/portage/make.profile
-sudo rm -f /usr/local/portage/{chromiumos,eclass-overlay,stable}
diff --git a/chroot_version_hooks.d/206_emerge_glibc_without_libcrypt b/chroot_version_hooks.d/206_emerge_glibc_without_libcrypt
deleted file mode 100644
index 6cedd58..0000000
--- a/chroot_version_hooks.d/206_emerge_glibc_without_libcrypt
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2022 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Remove old virtual/libcrypt-1 and rebuild chroot glibc without libcrypt
-# which is replaced by libxcrypt + virtual/libcrypt-2.
-sudo qmerge -Uqy virtual/libcrypt
-sudo emerge -ugq sys-libs/glibc virtual/libcrypt sys-libs/libxcrypt
diff --git a/chroot_version_hooks.d/207_chromite_cipd_dir b/chroot_version_hooks.d/207_chromite_cipd_dir
deleted file mode 100644
index 8befb57..0000000
--- a/chroot_version_hooks.d/207_chromite_cipd_dir
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright 2022 The ChromiumOS Authors.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-rm -rf ~/chromiumos/chromite/.cipd_bin/
diff --git a/chroot_version_hooks.d/208_proto_cleanup b/chroot_version_hooks.d/208_proto_cleanup
deleted file mode 120000
index c1542c6..0000000
--- a/chroot_version_hooks.d/208_proto_cleanup
+++ /dev/null
@@ -1 +0,0 @@
-./187_cros_camera_cleanup
\ No newline at end of file
diff --git a/chroot_version_hooks.d/209_update_portage b/chroot_version_hooks.d/209_update_portage
deleted file mode 100644
index 5208b3b..0000000
--- a/chroot_version_hooks.d/209_update_portage
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright 2022 The ChromiumOS Authors.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Update portage to pick up recent fixes before updating rest of chroot.
-sudo emerge -gu -j8 sys-apps/portage
diff --git a/chroot_version_hooks.d/187_cros_camera_cleanup b/chroot_version_hooks.d/210_rustc_upgrade
similarity index 66%
rename from chroot_version_hooks.d/187_cros_camera_cleanup
rename to chroot_version_hooks.d/210_rustc_upgrade
index de82cf9..4af5386 100644
--- a/chroot_version_hooks.d/187_cros_camera_cleanup
+++ b/chroot_version_hooks.d/210_rustc_upgrade
@@ -1,4 +1,4 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2023 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -6,4 +6,6 @@
 # package consolidation (b/177958529).
 # Clean local portage caches. We have stale intermediates after the protobuf
 # # upgrade (crbug.com/1346059).
+# Clean caches again because of stale intermediates from dev-lang/rust upgrade
+# prior to droping the slot operator from dev-rust DEPENDs (b/245844306).
 sudo rm -rf /var/cache/portage/* /build/*/var/cache/portage/*
diff --git a/chroot_version_hooks.d/192_cleanup_dev_rust b/chroot_version_hooks.d/211_cleanup_dev_rust
similarity index 94%
rename from chroot_version_hooks.d/192_cleanup_dev_rust
rename to chroot_version_hooks.d/211_cleanup_dev_rust
index be40a67..2871d29 100644
--- a/chroot_version_hooks.d/192_cleanup_dev_rust
+++ b/chroot_version_hooks.d/211_cleanup_dev_rust
@@ -1,4 +1,4 @@
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/chroot_version_hooks.d/212_update_third_party_crates_src b/chroot_version_hooks.d/212_update_third_party_crates_src
new file mode 100644
index 0000000..919ee90
--- /dev/null
+++ b/chroot_version_hooks.d/212_update_third_party_crates_src
@@ -0,0 +1,6 @@
+# Copyright 2022 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Update third-party-crates-src to sidestep b/247596883. See comment 7.
+sudo emerge -ug dev-rust/third-party-crates-src
diff --git a/chroot_version_hooks.d/213_cleanup_dev_rust b/chroot_version_hooks.d/213_cleanup_dev_rust
new file mode 120000
index 0000000..0fb3240
--- /dev/null
+++ b/chroot_version_hooks.d/213_cleanup_dev_rust
@@ -0,0 +1 @@
+211_cleanup_dev_rust
\ No newline at end of file
diff --git a/chroot_version_hooks.d/214_generate_en_us_utf8_locale b/chroot_version_hooks.d/214_generate_en_us_utf8_locale
new file mode 100644
index 0000000..ffd87f6
--- /dev/null
+++ b/chroot_version_hooks.d/214_generate_en_us_utf8_locale
@@ -0,0 +1,12 @@
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Avoid generating en_US.UTF-8 on the fly during enter_chroot. Pregen the
+# locale instead for every sdk so it is available immediately.
+
+# Uncomment the en_US.UTF-8 locale if it isn't already present.
+grep -q '^en_US.UTF-8' /etc/locale.gen || \
+  sudo sed -i -e '/^#en_US.UTF-8/s:#::' /etc/locale.gen
+# Generate locales.
+sudo locale-gen -u
diff --git a/chroot_version_hooks.d/215_rewrite_sudoers b/chroot_version_hooks.d/215_rewrite_sudoers
new file mode 120000
index 0000000..bf2318e
--- /dev/null
+++ b/chroot_version_hooks.d/215_rewrite_sudoers
@@ -0,0 +1 @@
+../sdk_lib/rewrite-sudoers.d.sh
\ No newline at end of file
diff --git a/chroot_version_hooks.d/216_purge_sdk b/chroot_version_hooks.d/216_purge_sdk
new file mode 100644
index 0000000..0682c94
--- /dev/null
+++ b/chroot_version_hooks.d/216_purge_sdk
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Merged into 222_purge_sdk.
diff --git a/chroot_version_hooks.d/217_update_bindgen b/chroot_version_hooks.d/217_update_bindgen
new file mode 100644
index 0000000..e66f14c
--- /dev/null
+++ b/chroot_version_hooks.d/217_update_bindgen
@@ -0,0 +1,11 @@
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Update bindgen to >= 0.63. Older versions will produce wrong results
+# after the next LLVM upgrade (b/264938287), and portage doesn't
+# upgrade bindgen automatically because of the limit on backtracking
+# when resolving versions.
+sudo emerge --noreplace -g                          \
+    '>=dev-rust/third-party-crates-src-0.0.1-r114'  \
+    '>=virtual/bindgen-0.63'
diff --git a/chroot_version_hooks.d/218_purge_incremental_cache b/chroot_version_hooks.d/218_purge_incremental_cache
new file mode 100644
index 0000000..0dc6620
--- /dev/null
+++ b/chroot_version_hooks.d/218_purge_incremental_cache
@@ -0,0 +1,7 @@
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Clean local portage caches. We have stale intermediates after the protobuf
+# upgrade (b/268609029).
+sudo rm -rf /var/cache/portage/* /build/*/var/cache/portage/*
diff --git a/chroot_version_hooks.d/219_update_env_path b/chroot_version_hooks.d/219_update_env_path
new file mode 100644
index 0000000..5e4b8d3
--- /dev/null
+++ b/chroot_version_hooks.d/219_update_env_path
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+sudo sed -i \
+  's#^PATH=.*$#PATH="/mnt/host/source/chromite/sdk/bin:/mnt/host/source/chromite/bin"#' \
+  /etc/env.d/99chromiumos
+sudo env-update
diff --git a/chroot_version_hooks.d/220_remove_unneeded_perl_packages b/chroot_version_hooks.d/220_remove_unneeded_perl_packages
new file mode 100644
index 0000000..ef88156
--- /dev/null
+++ b/chroot_version_hooks.d/220_remove_unneeded_perl_packages
@@ -0,0 +1,17 @@
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Remove some packages that were breaking update_chroot b/272316794
+pkgs=(
+  app-text/po4a
+  dev-perl/File-Slurp
+  dev-perl/Locale-gettext
+  perl-core/File-Path
+  virtual/perl-File-Path
+  virtual/perl-IO
+)
+mapfile -t installed_pkgs < <(qlist -IC "${pkgs[@]}" || :)
+if [[ ${#installed_pkgs[@]} -gt 0 ]]; then
+  sudo emerge --rage -q "${installed_pkgs[@]}"
+fi
diff --git a/chroot_version_hooks.d/221_emerge_tar b/chroot_version_hooks.d/221_emerge_tar
new file mode 100644
index 0000000..a0532c9
--- /dev/null
+++ b/chroot_version_hooks.d/221_emerge_tar
@@ -0,0 +1,7 @@
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Update tar before emerging other things since it moved from app-arch
+# to app-alternatives.
+sudo emerge -uqg 'app-arch/tar'
diff --git a/chroot_version_hooks.d/222_purge_sdk b/chroot_version_hooks.d/222_purge_sdk
new file mode 100644
index 0000000..0ff71bf
--- /dev/null
+++ b/chroot_version_hooks.d/222_purge_sdk
@@ -0,0 +1,82 @@
+#!/bin/bash
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Force clean up some SDK packages that have been removed.
+pkgs=(
+  app-admin/eselect-fontconfig
+  app-admin/python-updater
+  app-emulation/renode
+  app-emulation/virt-what
+  app-eselect/eselect-ctags
+  app-portage/esearch
+  app-text/htmltidy
+  app-text/yelp-tools
+  chromeos-base/android-installer
+  chromeos-base/cypress-tools
+  chromeos-base/devserver-deps
+  dev-cpp/ctemplate
+  dev-cpp/yaml-cpp
+  dev-go/gcp-trace
+  dev-lang/mono
+  dev-libs/dbus-c++
+  dev-python/astroid
+  dev-python/autopep8
+  dev-python/backports
+  dev-python/black
+  dev-python/dnspython
+  dev-python/flake8
+  dev-python/ipython
+  dev-python/ipython_genutils
+  dev-python/isort
+  dev-python/jedi
+  dev-python/lazy-object-proxy
+  dev-python/logbook
+  dev-python/mccabe
+  dev-python/mypy_extensions
+  dev-python/path-py
+  dev-python/pep8
+  dev-python/pickleshare
+  dev-python/prompt_toolkit
+  dev-python/pyblake2
+  dev-python/pycodestyle
+  dev-python/pyflakes
+  dev-python/pyinotify
+  dev-python/pylint
+  dev-python/robotframework
+  dev-python/scandir
+  dev-python/setuptools_scm_git_archive
+  dev-python/simplegeneric
+  dev-python/traitlets
+  dev-python/trollius
+  dev-python/wcwidth
+  dev-util/codespell
+  dev-util/ctags
+  dev-util/diffstat
+  dev-util/google-web-toolkit
+  dev-util/provision-server
+  dev-util/tclint
+  dev-util/test-exec-server
+  gnome-extra/yelp-xsl
+  media-libs/gd
+  sys-devel/arc-cache-builder
+  sys-devel/bin86
+  sys-devel/dev86
+  sys-devel/smatch
+  sys-firmware/vgabios
+  sys-libs/libcxxabi
+  virtual/libffi
+  virtual/mailx
+  virtual/python-enum34
+  virtual/python-ipaddress
+  virtual/python-pathlib
+  virtual/python-singledispatch
+  virtual/python-typing
+  virtual/shadow
+  x11-apps/xcursorgen
+)
+mapfile -t installed_pkgs < <(qlist -IC "${pkgs[@]}" || :)
+if [[ ${#installed_pkgs[@]} -gt 0 ]]; then
+  sudo emerge --rage -q "${installed_pkgs[@]}"
+fi
diff --git a/common.sh b/common.sh
index 6de1573..5529fab 100644
--- a/common.sh
+++ b/common.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -16,8 +16,8 @@
 
 # Make sure we have the location and name of the calling script, using
 # the current value if it is already set.
-: ${SCRIPT_LOCATION:=$(dirname "$(readlink -f -- "$0")")}
-: ${SCRIPT_NAME:=$(basename -- "$0")}
+: "${SCRIPT_LOCATION:=$(dirname "$(readlink -f -- "$0")")}"
+: "${SCRIPT_NAME:=$(basename -- "$0")}"
 
 # Detect whether we're inside a chroot or not
 CHROOT_VERSION_FILE=/etc/cros_chroot_version
@@ -289,6 +289,9 @@
 # Default location for chroot
 DEFAULT_CHROOT_DIR=${CHROMEOS_CHROOT_DIR:-"${GCLIENT_ROOT}/chroot"}
 
+# Default output directory location
+DEFAULT_OUT_DIR=${CHROMEOS_OUT_DIR:-"${GCLIENT_ROOT}/out"}
+
 # All output files from build should go under ${DEFAULT_BUILD_ROOT}, so that
 # they don't pollute the source directory.
 DEFAULT_BUILD_ROOT=${CHROMEOS_BUILD_ROOT:-"${SRC_ROOT}/build"}
@@ -326,101 +329,6 @@
 CHROMEOS_FACTORY_INSTALL_SHIM_NAME="factory_install_shim.bin"
 SYSROOT_SETTINGS_FILE="/var/cache/edb/chromeos"
 
-# Install mask for portage ebuilds.  Used by build_image and gmergefs.
-# TODO: Is /usr/local/autotest-chrome still used by anyone?
-COMMON_INSTALL_MASK="
-  *.a
-  *.c
-  *.cc
-  *.cmake
-  *.go
-  *.la
-  *.h
-  *.hh
-  *.hpp
-  *.h++
-  *.hxx
-  *.proto
-  */.keep*
-  /build/bin
-  /build/initramfs
-  /build/libexec/tast
-  /build/manatee
-  /build/rootfs/dlc
-  /build/share
-  /etc/init.d
-  /etc/runlevels
-  /etc/selinux/intermediates
-  /etc/xinetd.d
-  /firmware
-  /lib/modules/*/vdso
-  /lib/rc
-  /opt/google/containers/android/vendor/lib*/pkgconfig
-  /opt/google/containers/android/build
-  /usr/bin/*-config
-  /usr/bin/Xnest
-  /usr/bin/Xvfb
-  /usr/include
-  /usr/lib/cros_rust_registry
-  /usr/lib/debug
-  /usr/lib/gopath
-  /usr/lib*/pkgconfig
-  /usr/local/autotest-chrome
-  /usr/man
-  /usr/share/aclocal
-  /usr/share/applications
-  /usr/share/cups/drv
-  /usr/share/doc
-  /usr/share/gettext
-  /usr/share/gtk-2.0
-  /usr/share/gtk-doc
-  /usr/share/info
-  /usr/share/man
-  /usr/share/ppd
-  /usr/share/openrc
-  /usr/share/pkgconfig
-  /usr/share/profiling
-  /usr/share/readline
-  /usr/src
-  "
-
-# Mask for base, dev, and test images (build_image, build_image --test)
-DEFAULT_INSTALL_MASK="
-  ${COMMON_INSTALL_MASK}
-  /boot/config-*
-  /boot/System.map-*
-  /usr/local/build/autotest
-  /lib/modules/*/build
-  /lib/modules/*/source
-  test_*.ko
-  "
-
-# Mask for factory install shim (build_image factory_install)
-FACTORY_SHIM_INSTALL_MASK="
-  ${DEFAULT_INSTALL_MASK}
-  /opt/google/chrome
-  /opt/google/containers
-  /opt/google/vms
-  /usr/lib64/dri
-  /usr/lib/dri
-  /usr/share/X11
-  /usr/share/chromeos-assets/[^i]*
-  /usr/share/chromeos-assets/i[^m]*
-  /usr/share/fonts
-  /usr/share/locale
-  /usr/share/mime
-  /usr/share/oem
-  /usr/share/sounds
-  /usr/share/tts
-  /usr/share/zoneinfo
-  "
-
-# Mask for images without systemd.
-SYSTEMD_INSTALL_MASK="
-  /lib/systemd/network
-  /usr/lib/systemd/system
-"
-
 # -----------------------------------------------------------------------------
 # Functions
 
@@ -567,66 +475,25 @@
 # Setup a loopback device for a file and scan for partitions, with retries.
 #
 # $1 - The file to back the new loopback device.
-# $2-$N - Additional arguments to pass to losetup.
 loopback_partscan() {
-  local lb_dev image="$1"
-  shift
+  if [[ $# -ne 1 ]]; then
+    die "${FUNCNAME[0]}: function only takes 1 argument (the image), not $#: $*"
+  fi
 
-  # We set up a binary backoff for adding the partitions. We give 10 attempts
-  # each time nth time we fail, backing off by 1<<n. The maximum time then
-  # spent sleeping will be sum(2^n) from [0...10] which is 1023 seconds,
-  # or ~37 minutes.
-  local i partx_out partx_d_out sleep_seconds=1
-  for (( i = 0; i <= 10; i++ )); do
-    # Flush any dirty pages in the image before we do partx commands.
-    info "Running sync -f ${image}"
-    sync -f "${image}"
-
-    # This (perhaps) mounts the partitions as well.
-    lb_dev=$(sudo losetup --show -f "$@" "${image}")
-
-    # Try to clean the slate by removing any existing parts (best effort).
-    partx_d_out=$(sudo partx -v -d "${lb_dev}") || true
-
-    # Try to add the partitions back.
-    if ! partx_out=$(sudo partx -v -a "${lb_dev}"); then
-      local proc_parts
-      warn "Adding partitions with 'partx -v -a ${lb_dev}' failed."
-      warn "partx -d output:\n${partx_d_out}"
-      warn "partx -a output:\n${partx_out}"
-      proc_parts=$(cat /proc/partitions)
-      warn "/proc/partitions before detaching loopback:\n ${proc_parts}"
-      # Detach the image.
-      sudo losetup -d "${lb_dev}" || true
-      proc_parts=$(cat /proc/partitions)
-      warn "/proc/partitions after detaching loopback:\n ${proc_parts}"
-      warn "Sleeping ${sleep_seconds} before trying again."
-      sleep "${sleep_seconds}"
-      : $(( sleep_seconds <<= 1 ))
-    else
-      break
-    fi
-  done
-
-  echo "${lb_dev}"
+  local output
+  output=$("${GCLIENT_ROOT}"/chromite/scripts/cros_losetup attach "$1")
+  echo "${output}" | jq --raw-output .path
 }
 
 # Detach a loopback device set up earlier.
 #
 # $1 - The loop device to detach.
-# $2-$N - Additional arguments to pass to losetup.
 loopback_detach() {
-  # Retry the deletes before we detach.  crbug.com/469259
-  local i
-  for (( i = 0; i < 10; i++ )); do
-    if sudo partx -d "$1"; then
-      break
-    fi
-    warn "Sleeping & retrying ..."
-    sync
-    sleep 1
-  done
-  sudo losetup --detach "$@"
+  if [[ $# -ne 1 ]]; then
+    die "${FUNCNAME[0]}: function only takes 1 argument (the image), not $#: $*"
+  fi
+
+  "${GCLIENT_ROOT}"/chromite/scripts/cros_losetup detach "$1"
 }
 
 # Sets up symlinks for the developer root. It is necessary to symlink
diff --git a/cros_show_stacks b/cros_show_stacks
index fcdadb2..28349b2 100755
--- a/cros_show_stacks
+++ b/cros_show_stacks
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Copyright 2010 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -8,7 +8,9 @@
 
 
 SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+# shellcheck source=common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
+# shellcheck source=remote_access.sh
 . "${SCRIPT_ROOT}/remote_access.sh" || exit 1
 
 assert_inside_chroot
@@ -105,7 +107,9 @@
     # duplicate crashes (in case cryptohome is not mounted) below.
     local remote_crash_dirs=(
         "/var/spool/crash"
+        "/home/chronos/crash"
         "/home/user/*/crash"
+        "/run/daemon-store/crash/*"
         "/mnt/stateful_partition/home/user/*/crash"
     )
     local remote_crash_patterns=()
diff --git a/cros_workon_make b/cros_workon_make
index 23684fd..8d7dfb0 100755
--- a/cros_workon_make
+++ b/cros_workon_make
@@ -1,12 +1,13 @@
 #!/bin/bash
 
-# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Copyright 2010 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
 # Simple wrapper script to build a cros_workon package incrementally.
 # You must already be cros_workon'ing the package in question.
 
+# shellcheck source=lib/shflags/shflags
 . /usr/share/misc/shflags || exit 1
 
 GCLIENT_ROOT="/mnt/host/source"
diff --git a/hooks/filesystem-layout.py b/hooks/filesystem-layout.py
index fada3fa..1e97cb8 100755
--- a/hooks/filesystem-layout.py
+++ b/hooks/filesystem-layout.py
@@ -1,13 +1,10 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Make sure packages don't create random paths outside of existing norms."""
 
-from __future__ import print_function
-
 import argparse
 import fnmatch
 import logging
@@ -24,15 +21,32 @@
 # always bind mounted with the host distro, and we don't want to pollute them.
 # Those are: /dev
 VALID_ROOT = {
-    'bin', 'etc', 'home', 'lib', 'lib32', 'lib64', 'media',
-    'mnt', 'opt', 'proc', 'root', 'run', 'sbin', 'sys', 'usr', 'var',
+    "bin",
+    "etc",
+    "home",
+    "lib",
+    "lib32",
+    "lib64",
+    "media",
+    "mnt",
+    "opt",
+    "proc",
+    "root",
+    "run",
+    "sbin",
+    "sys",
+    "usr",
+    "var",
 }
 
 # Paths that are allowed in the / dir for boards.
 VALID_BOARD_ROOT = {
-    'boot', 'build', 'dev', 'firmware',
+    "boot",
+    "build",
+    "dev",
+    "firmware",
     # TODO(): We should clean this up.
-    'postinst',
+    "postinst",
 }
 
 # Paths that are allowed in the / dir for the SDK chroot.
@@ -40,19 +54,30 @@
 
 # Paths under / that should not have any subdirs.
 NOSUBDIRS_ROOT = {
-    'bin', 'dev', 'proc', 'sbin', 'sys',
+    "bin",
+    "dev",
+    "proc",
+    "sbin",
+    "sys",
 }
 
 # Paths that are allowed in the /usr dir.
 VALID_USR = {
-    'bin', 'include', 'lib', 'lib32', 'lib64', 'libexec', 'sbin', 'share',
-    'src',
+    "bin",
+    "include",
+    "lib",
+    "lib32",
+    "lib64",
+    "libexec",
+    "sbin",
+    "share",
+    "src",
 }
 
 # Paths that are allowed in the /usr dir for boards.
 VALID_BOARD_USR = {
     # Boards install into /usr/local for test images.
-    'local',
+    "local",
 }
 
 # Paths that are allowed in the /usr dir for the SDK chroot.
@@ -60,237 +85,274 @@
 
 # Paths under /usr that should not have any subdirs.
 NOSUBDIRS_USR = {
-    'bin', 'sbin',
+    "bin",
+    "sbin",
 }
 
 # Valid toolchain targets.  We don't want to add any more non-standard ones.
 # targets that use *-cros-* as the vendor are OK to add more.
 KNOWN_TARGETS = {
     # These are historical names that we want to change to *-cros-* someday.
-    'arm-none-eabi',
+    "arm-none-eabi",
     # This is the host SDK name.
-    'x86_64-pc-linux-gnu',
-    '*-cros-eabi',
-    '*-cros-elf',
-    '*-cros-linux-gnu*',
+    "x86_64-pc-linux-gnu",
+    "*-cros-eabi",
+    "*-cros-elf",
+    "*-cros-linux-gnu*",
 }
 
 # These SDK packages need cleanup.
 # NB: Do *not* add more packages here.
 BAD_HOST_USR_LOCAL_PACKAGES = {
-    'app-crypt/nss',
+    "app-crypt/nss",
 }
 
 # Ignore some packages installing into /var for now.
 # NB: Do *not* add more packages here.
 BAD_VAR_PACKAGES = {
-    'app-accessibility/brltty',
-    'app-admin/eselect',
-    'app-admin/rsyslog',
-    'app-admin/sudo',
-    'app-admin/sysstat',
-    'app-admin/webapp-config',
-    'app-crypt/mit-krb5',
-    'app-crypt/trousers',
-    'app-containers/containerd',
-    'app-emulation/lxc',
-    'chromeos-base/chromeos-initramfs',
-    'dev-python/django',
-    'media-gfx/sane-backends',
-    'media-sound/alsa-utils',
-    'net-analyzer/netperf',
-    'net-dns/dnsmasq',
-    'net-firewall/iptables',
-    'net-firewall/nftables',
-    'net-fs/samba',
-    'net-misc/chrony',
-    'net-misc/dhcpcd',
-    'net-misc/openssh',
-    'net-print/cups',
-    'sys-apps/dbus',
-    'sys-apps/fwupd',
-    'sys-apps/iproute2',
-    'sys-apps/portage',
-    'sys-apps/sandbox',
-    'sys-apps/systemd',
-    'sys-apps/usbguard',
-    'sys-kernel/loonix-initramfs',
-    'sys-libs/glibc',
-    'sys-process/audit',
-    'www-servers/nginx',
-    'x11-base/xwayland',
+    "app-accessibility/brltty",
+    "app-admin/eselect",
+    "app-admin/rsyslog",
+    "app-admin/sudo",
+    "app-admin/sysstat",
+    "app-admin/webapp-config",
+    "app-containers/containerd",
+    "app-crypt/mit-krb5",
+    "app-crypt/trousers",
+    "app-emulation/containerd",
+    "app-emulation/lxc",
+    "chromeos-base/chromeos-initramfs",
+    "dev-python/django",
+    "media-gfx/sane-backends",  # nocheck
+    "media-sound/alsa-utils",
+    "net-analyzer/netperf",
+    "net-dns/dnsmasq",
+    "net-firewall/iptables",
+    "net-firewall/nftables",
+    "net-fs/samba",
+    "net-misc/chrony",
+    "net-misc/dhcpcd",
+    "net-misc/openssh",
+    "net-print/cups",
+    "sys-apps/dbus",
+    "sys-apps/iproute2",
+    "sys-apps/portage",
+    "sys-apps/sandbox",
+    "sys-apps/systemd",
+    "sys-apps/usbguard",
+    "sys-kernel/loonix-initramfs",
+    "sys-libs/glibc",
+    "sys-process/audit",
+    "www-servers/nginx",
+    "x11-base/xwayland",
 }
 
 # Ignore some packages installing into /run for now.
 # NB: Do *not* add more packages here.
 BAD_RUN_PACKAGES = {
-    'app-accessibility/brltty',
-    'net-fs/samba',
+    "app-accessibility/brltty",
+    "net-fs/samba",
 }
 
 
 def has_subdirs(path):
-  """See if |path| has any subdirs."""
-  # These checks are helpful for manually running the script when debugging.
-  if os.path.ismount(path):
-    logging.warning('Ignoring mounted dir for subdir check: %s', path)
+    """See if |path| has any subdirs."""
+    # These checks are helpful for manually running the script when debugging.
+    if os.path.ismount(path):
+        logging.warning("Ignoring mounted dir for subdir check: %s", path)
+        return False
+
+    if os.path.join(os.getenv("SYSROOT", "/"), "tmp") == path:
+        logging.warning("Ignoring live dir: %s", path)
+        return False
+
+    for _, dirs, _ in os.walk(path):
+        if dirs:
+            logging.error(
+                "Subdirs found in a dir that should be empty:\n  %s\n"
+                "  |-- %s",
+                path,
+                "\n  |-- ".join(sorted(dirs)),
+            )
+            return True
+        break
+
     return False
 
-  if os.path.join(os.getenv('SYSROOT', '/'), 'tmp') == path:
-    logging.warning('Ignoring live dir: %s', path)
-    return False
-
-  for _, dirs, _ in os.walk(path):
-    if dirs:
-      logging.error('Subdirs found in a dir that should be empty:\n  %s\n'
-                    '  |-- %s', path, '\n  |-- '.join(sorted(dirs)))
-      return True
-    break
-
-  return False
-
 
 def check_usr(usr, host=False):
-  """Check the /usr filesystem at |usr|."""
-  ret = True
+    """Check the /usr filesystem at |usr|."""
+    ret = True
 
-  # Not all packages install into /usr.
-  if not os.path.exists(usr):
+    # Not all packages install into /usr.
+    if not os.path.exists(usr):
+        return ret
+
+    atom = get_current_package()
+    paths = set(os.listdir(usr))
+    unknown = paths - VALID_USR
+    for target in KNOWN_TARGETS:
+        unknown = set(x for x in unknown if not fnmatch.fnmatch(x, target))
+    if host:
+        unknown -= VALID_HOST_USR
+
+        if atom in BAD_HOST_USR_LOCAL_PACKAGES:
+            logging.warning("Ignoring known bad /usr/local install for now")
+            unknown -= {"local"}
+    else:
+        unknown -= VALID_BOARD_USR
+
+        if atom in {"chromeos-base/ap-daemons"}:
+            logging.warning("Ignoring known bad /usr install for now")
+            unknown -= {"www"}
+
+    if unknown:
+        logging.error(
+            "Paths are not allowed in the /usr dir: %s", sorted(unknown)
+        )
+        ret = False
+
+    for path in NOSUBDIRS_USR:
+        if has_subdirs(os.path.join(usr, path)):
+            logging.error(
+                "%s: Path is not allowed to have subdirectories", path
+            )
+            ret = False
+
     return ret
 
-  atom = get_current_package()
-  paths = set(os.listdir(usr))
-  unknown = paths - VALID_USR
-  for target in KNOWN_TARGETS:
-    unknown = set(x for x in unknown if not fnmatch.fnmatch(x, target))
-  if host:
-    unknown -= VALID_HOST_USR
-
-    if atom in BAD_HOST_USR_LOCAL_PACKAGES:
-      logging.warning('Ignoring known bad /usr/local install for now')
-      unknown -= {'local'}
-  else:
-    unknown -= VALID_BOARD_USR
-
-    if atom in {'chromeos-base/ap-daemons'}:
-      logging.warning('Ignoring known bad /usr install for now')
-      unknown -= {'www'}
-
-  if unknown:
-    logging.error('Paths are not allowed in the /usr dir: %s', sorted(unknown))
-    ret = False
-
-  for path in NOSUBDIRS_USR:
-    if has_subdirs(os.path.join(usr, path)):
-      ret = False
-
-  return ret
-
 
 def check_root(root, host=False):
-  """Check the filesystem |root|."""
-  ret = True
+    """Check the filesystem |root|."""
+    ret = True
 
-  atom = get_current_package()
-  paths = set(os.listdir(root))
-  unknown = paths - VALID_ROOT
-  if host:
-    unknown -= VALID_HOST_ROOT
-  else:
-    unknown -= VALID_BOARD_ROOT
-
-  if unknown:
-    logging.error('Paths are not allowed in the root dir:\n  %s\n  |-- %s',
-                  root, '\n  |-- '.join(sorted(unknown)))
-    ret = False
-
-  # Some of these may have subdirs at runtime, but not from package installs.
-  for path in NOSUBDIRS_ROOT:
-    if has_subdirs(os.path.join(root, path)):
-      ret = False
-
-  # Special case /var due to so many misuses currently.
-  if os.path.exists(os.path.join(root, 'var')):
-    if atom in BAD_VAR_PACKAGES:
-      logging.warning('Ignoring known bad /var install for now')
-    elif os.environ.get('PORTAGE_REPO_NAME') == 'portage-stable':
-      logging.warning('Ignoring bad /var install with portage-stable package '
-                      'for now')
+    atom = get_current_package()
+    paths = set(os.listdir(root))
+    unknown = paths - VALID_ROOT
+    if host:
+        unknown -= VALID_HOST_ROOT
     else:
-      ret = False
-  else:
-    if atom in BAD_VAR_PACKAGES:
-      logging.warning('Package has improved; please update BAD_VAR_PACKAGES')
+        unknown -= VALID_BOARD_ROOT
 
-  # Special case /run due to so many misuses currently.
-  if os.path.exists(os.path.join(root, 'run')):
-    if atom in BAD_RUN_PACKAGES:
-      logging.warning('Ignoring known bad /run install for now')
-    elif os.environ.get('PORTAGE_REPO_NAME') == 'portage-stable':
-      logging.warning('Ignoring bad /run install with portage-stable package '
-                      'for now')
+    if unknown:
+        logging.error(
+            "Paths are not allowed in the root dir:\n  %s\n  |-- %s",
+            root,
+            "\n  |-- ".join(sorted(unknown)),
+        )
+        ret = False
+
+    # Some of these may have subdirs at runtime, but not from package installs.
+    for path in NOSUBDIRS_ROOT:
+        if has_subdirs(os.path.join(root, path)):
+            ret = False
+
+    # Special case /var due to so many misuses currently.
+    if os.path.exists(os.path.join(root, "var")):
+        if atom in BAD_VAR_PACKAGES:
+            logging.warning("Ignoring known bad /var install for now")
+        elif os.environ.get("PORTAGE_REPO_NAME") == "portage-stable":
+            logging.warning(
+                "Ignoring bad /var install with portage-stable package "
+                "for now"
+            )
+        else:
+            logging.error(
+                "Installing files or directories in /var is not allowed; "
+                "these must be created at runtime only (e.g. via tmpfiles.d)"
+            )
+            ret = False
     else:
-      ret = False
-  else:
-    if atom in BAD_RUN_PACKAGES:
-      logging.warning('Package has improved; please update BAD_RUN_PACKAGES')
+        if atom in BAD_VAR_PACKAGES:
+            logging.warning(
+                "Package has improved; please update BAD_VAR_PACKAGES"
+            )
 
-  if not check_usr(os.path.join(root, 'usr'), host):
-    ret = False
+    # Special case /run due to so many misuses currently.
+    if os.path.exists(os.path.join(root, "run")):
+        if atom in BAD_RUN_PACKAGES:
+            logging.warning("Ignoring known bad /run install for now")
+        elif os.environ.get("PORTAGE_REPO_NAME") == "portage-stable":
+            logging.warning(
+                "Ignoring bad /run install with portage-stable package "
+                "for now"
+            )
+        else:
+            logging.error(
+                "Installing files or directories in /run is not allowed; "
+                "these must be created at runtime only (e.g. via tmpfiles.d)"
+            )
+            ret = False
+    else:
+        if atom in BAD_RUN_PACKAGES:
+            logging.warning(
+                "Package has improved; please update BAD_RUN_PACKAGES"
+            )
 
-  return ret
+    if not check_usr(os.path.join(root, "usr"), host):
+        ret = False
+
+    return ret
 
 
 def get_current_package():
-  """Figure out what package is being built currently."""
-  if 'CATEGORY' in os.environ and 'PN' in os.environ:
-    return f'{os.environ.get("CATEGORY")}/{os.environ.get("PN")}'
-  else:
-    return None
+    """Figure out what package is being built currently."""
+    if "CATEGORY" in os.environ and "PN" in os.environ:
+        return f'{os.environ.get("CATEGORY")}/{os.environ.get("PN")}'
+    else:
+        return None
 
 
 def get_parser():
-  """Get a CLI parser."""
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('--host', default=None, action='store_true',
-                      help='the filesystem is the host SDK, not board sysroot')
-  parser.add_argument('--board', dest='host', action='store_false',
-                      help='the filesystem is a board sysroot')
-  parser.add_argument('root', nargs='?',
-                      help='the rootfs to scan')
-  return parser
+    """Get a CLI parser."""
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "--host",
+        default=None,
+        action="store_true",
+        help="the filesystem is the host SDK, not board sysroot",
+    )
+    parser.add_argument(
+        "--board",
+        dest="host",
+        action="store_false",
+        help="the filesystem is a board sysroot",
+    )
+    parser.add_argument("root", nargs="?", help="the rootfs to scan")
+    return parser
 
 
 def main(argv):
-  """The main func!"""
-  parser = get_parser()
-  opts = parser.parse_args(argv)
+    """The main func!"""
+    parser = get_parser()
+    opts = parser.parse_args(argv)
 
-  # Default to common portage env vars.
-  if opts.root is None:
-    for var in ('ED', 'D', 'ROOT'):
-      if var in os.environ:
-        logging.debug('Scanning filesystem root via $%s', var)
-        opts.root = os.environ[var]
-        break
-  if not opts.root:
-    parser.error('Need a valid rootfs to scan, but unable to detect one')
+    # Default to common portage env vars.
+    if opts.root is None:
+        for var in ("ED", "D", "ROOT"):
+            if var in os.environ:
+                logging.debug("Scanning filesystem root via $%s", var)
+                opts.root = os.environ[var]
+                break
+    if not opts.root:
+        parser.error("Need a valid rootfs to scan, but unable to detect one")
 
-  if opts.host is None:
-    if os.getenv('BOARD') == 'amd64-host':
-      opts.host = True
+    if opts.host is None:
+        if os.getenv("BOARD") == "amd64-host":
+            opts.host = True
+        else:
+            opts.host = not bool(os.getenv("SYSROOT"))
+
+    if not check_root(opts.root, opts.host):
+        logging.critical(
+            "Package '%s' does not conform to CrOS's filesystem conventions. "
+            "Please review the paths flagged above and adjust its layout.",
+            get_current_package(),
+        )
+        return 1
     else:
-      opts.host = not bool(os.getenv('SYSROOT'))
-
-  if not check_root(opts.root, opts.host):
-    logging.critical(
-        "Package '%s' does not conform to CrOS's filesystem conventions. "
-        'Please review the paths flagged above and adjust its layout.',
-        get_current_package())
-    return 1
-  else:
-    return 0
+        return 0
 
 
-if __name__ == '__main__':
-  sys.exit(main(sys.argv[1:]))
+if __name__ == "__main__":
+    sys.exit(main(sys.argv[1:]))
diff --git a/hooks/install/check-seccomp.sh b/hooks/install/check-seccomp.sh
new file mode 100755
index 0000000..076d54f
--- /dev/null
+++ b/hooks/install/check-seccomp.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+: "${D:=}"
+
+if [[ -z "${SYSROOT}" ]]; then
+  echo "SYSROOT is required" >&2
+  exit 1
+fi
+
+if [[ ! -f "${SYSROOT}/build/share/constants.json" ]]; then
+  echo "SKIPPING: Cannot find constants.json" >&2
+  exit 0
+fi
+
+shopt -s nullglob
+if [[ -n "${D}" ]]; then
+  set -- "${D}"/usr/share/policy/*.policy \
+    "${D}"/opt/google/touch/policies/*.policy
+fi
+
+for policy in "$@"; do
+  # TODO(b/267522710) move this over to the seccomp policy linter.
+  compile_seccomp_policy \
+    --arch-json "${SYSROOT}/build/share/constants.json" \
+    --default-action trap "${policy}" /dev/null \
+    || die "failed to compile seccomp policy $(basename "${policy}")"
+done
diff --git a/hooks/install/check-upstart-scripts.sh b/hooks/install/check-upstart-scripts.sh
index 7ff914f..1a756ff 100755
--- a/hooks/install/check-upstart-scripts.sh
+++ b/hooks/install/check-upstart-scripts.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -44,7 +44,6 @@
   chromeos-base/arc-sslh-init|\
   chromeos-base/arcvm-common-scripts|\
   chromeos-base/arcvm-forward-pstore|\
-  chromeos-base/arcvm-gce-l1-prebuilts|\
   chromeos-base/arcvm-launch|\
   chromeos-base/arcvm-mojo-proxy|\
   chromeos-base/arcvm-vsock-proxy|\
@@ -101,7 +100,6 @@
   chromeos-base/chromeos-test-init|\
   chromeos-base/chromeos-trim|\
   chromeos-base/chunnel|\
-  chromeos-base/crash-reporter|\
   chromeos-base/cros-camera|\
   chromeos-base/cros-camera-libs|\
   chromeos-base/cros-disks|\
@@ -151,7 +149,6 @@
   chromeos-base/patchpanel|\
   chromeos-base/pdfc-scripts|\
   chromeos-base/permission_broker|\
-  chromeos-base/power_manager|\
   chromeos-base/quickoffice|\
   chromeos-base/rialto-cellular-autoconnect|\
   chromeos-base/rialto-modem-watchdog|\
@@ -174,7 +171,6 @@
   chromeos-base/vm_host_tools|\
   chromeos-base/vpd|\
   chromeos-base/weaveauth|\
-  chromeos-base/webserver|\
   chromeos-base/whining|\
   dev-util/hdctools|\
   media-libs/arc-camera-service|\
@@ -182,7 +178,6 @@
   media-libs/dlm|\
   media-libs/img-ddk|\
   media-libs/img-ddk-bin|\
-  media-sound/adhd|\
   net-dns/avahi-daemon|\
   net-firewall/conntrack-tools|\
   net-libs/libqrtr|\
@@ -204,13 +199,11 @@
   sys-apps/haveged|\
   sys-apps/huddly-falcon-updater|\
   sys-apps/huddly-monitor|\
-  sys-apps/fwupd|\
   sys-apps/mimo-houston-mcu-updater|\
   sys-apps/mimo-monitor|\
   sys-apps/moblab|\
   sys-apps/satlab|\
   sys-apps/upstart|\
-  sys-apps/ureadahead|\
   sys-apps/usbguard|\
   sys-apps/viking-cleanup-logs|\
   sys-firmware/viking-firmware|\
diff --git a/hooks/install/filesystem-layout.sh b/hooks/install/filesystem-layout.sh
index 2877a68..ffdebfe 100755
--- a/hooks/install/filesystem-layout.sh
+++ b/hooks/install/filesystem-layout.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/hooks/install/gen-package-licenses.sh b/hooks/install/gen-package-licenses.sh
index 28fd3e9..062f57b 100755
--- a/hooks/install/gen-package-licenses.sh
+++ b/hooks/install/gen-package-licenses.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/hooks/install/large-file-support.sh b/hooks/install/large-file-support.sh
index fff8964..57be7e1 100755
--- a/hooks/install/large-file-support.sh
+++ b/hooks/install/large-file-support.sh
@@ -1,11 +1,22 @@
 #!/bin/bash
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 # Detect 32bit builds that are using legacy 32bit file interfaces.
 # https://en.wikipedia.org/wiki/Large_file_support
 
+# Set ebuild vars to make shellcheck happy.
+: "${ARCH:=}"
+: "${CATEGORY:=}"
+: "${D:=/}"
+: "${PN:=}"
+: "${PV:=}"
+: "${RESTRICT:=}"
+: "${WORKDIR:=}"
+
+DOC_URL="https://issuetracker.google.com/201531268"
+
 # Lists gleaned from headers and this doc:
 # http://people.redhat.com/berrange/notes/largefile.html
 # http://opengroup.org/platform/lfs.html
@@ -108,24 +119,421 @@
 SYMBOLS_REGEX=$(printf '%s|' "${SYMBOLS[@]}")
 SYMBOLS_REGEX="^(${SYMBOLS_REGEX%|})$"
 
+# These are packages that are known to DTRT.  This list should only be updated
+# with explicit review & documentation.
+known_good_pkg() {
+  case "${CATEGORY}/${PN}" in
+
+  # All of the binaries provided by v4l-utils are built with LFS flags enabled,
+  # except libv4l2tracer.so. This library wraps the interfaces for open, open64,
+  # mmap, and mmap64 for tracing purposes which means they're only called when
+  # the tracee program is not built with LFS.
+  media-tv/v4l-utils) ;;
+
+  # Provides wrappers to every C library interface, both LFS & non-LFS.
+  # Internally it handles LFS correctly.  Its non-LFS references are only via
+  # packages that are themselves broken.
+  sys-apps/sandbox) ;;
+
+  # https://bugs.gentoo.org/893656
+  # zlib is quite intelligent when it comes to the standard LFS flags. z_off_t
+  # is the only exported interface that uses off_t. In gzlib.c, a few APIs
+  # (e.g. gzseek) are defined in terms of z_off_t, so with a 32-bit ABI that
+  # splits values (e.g. arm 32-bit). This means the stack usage & return value
+  # are ABI incompatible.
+  #
+  # Annoyingly, the only thing tripping up the checker is the call to open() in
+  # gzlib.c, and zlib actually DTRT by using O_LARGEFILE when available.
+  # Unfortunately, it's impossible from a symbol analysis point of view to
+  # determine that. We would really need something that decompiles & analyzes
+  # the opcodes to detect that this particular usage is correct.
+  sys-libs/zlib) ;;
+
+  *) return 1;;
+  esac
+
+  return 0
+}
+
+known_bad_pkg() {
+  # Only allow this on arm as we have devices shipping that now.
+  case "${ARCH}" in
+  arm) ;;
+  *) return 1;;
+  esac
+
+  # TODO(b/260698283): Ignore ARC (bionic) packages for now.
+  case "${CATEGORY}/${PN}:${PV}" in
+  media-libs/arc-cros-gralloc:*|\
+  media-libs/arc-img-ddk:*|\
+  media-libs/arc-mali-drivers:*|\
+  media-libs/arc-mali-drivers-bifrost:*|\
+  media-libs/arc-mali-drivers-bifrost-bin:*|\
+  media-libs/arc-mali-drivers-valhall:*|\
+  media-libs/arc-mali-drivers-valhall-bin:*|\
+  media-libs/arc-mesa-freedreno:*|\
+  media-libs/arc-mesa-img:*|\
+  media-libs/arc-mesa-virgl:*|\
+  media-libs/arcvm-mesa-freedreno:*|\
+  x11-libs/arc-libdrm:*)
+    return 0
+    ;;
+  esac
+
+  # TODO(b/258669199): Ignore Rust packages for now.
+  case "${CATEGORY}/${PN}:${PV}" in
+  chromeos-base/crosvm:*|\
+  chromeos-base/hwsec-utils:*|\
+  chromeos-base/resourced:*|\
+  media-sound/adhd:*|\
+  media-sound/audio_processor:*|\
+  media-sound/audio_streams_conformance_test:*|\
+  media-sound/cras-client:*|\
+  media-sound/cras_rust:*|\
+  media-sound/cras_tests:*|\
+  sys-apps/kexec-lite:*)
+    return 0
+    ;;
+  esac
+
+  # Packages in upstream discussion.  Must link to an upstream tracker.
+  case "${CATEGORY}/${PN}:${PV}" in
+  dev-libs/expat:2.5*) return 0;;  # https://bugs.gentoo.org/904190
+  esac
+
+  # Do not add more packages here!
+  case "${CATEGORY}/${PN}:${PV}" in
+  app-accessibility/brltty:6.3|\
+  app-admin/sysstat:11.7.4|\
+  app-arch/brotli:1.0.9|\
+  app-benchmarks/blktests:20190430|\
+  app-benchmarks/blogbench:1.1.20200218|\
+  app-benchmarks/bootchart:0.9.2|\
+  app-benchmarks/pjdfstest:20190822|\
+  app-crypt/nss:3.68.2|\
+  app-crypt/tpm-tools:1.3.9.1|\
+  app-crypt/trousers:0.3.3|\
+  app-editors/qemacs:0.4.1_pre20170225|\
+  app-editors/vim-core:9.0.*|\
+  app-emulation/lx[cd]:3.*|\
+  app-emulation/lx[cd]:4.0.*|\
+  app-misc/ckermit:9.0.302|\
+  app-misc/edid-decode:20210514|\
+  app-misc/evtest:1.35|\
+  app-misc/figlet:2.2.5|\
+  app-misc/jq:1.4|\
+  app-misc/screen:4.9.0|\
+  app-misc/tmux:3.3a|\
+  app-misc/utouch-evemu:1.0.5|\
+  app-mobilephone/dfu-util:0.9|\
+  app-shells/dash:0.5.9.1|\
+  app-text/ghostscript-gpl:9.55.0|\
+  app-text/htmltidy:20090325|\
+  app-text/libpaper:1.1.28|\
+  app-text/poppler:22.03.0|\
+  chromeos-base/arc-key"ma"ster:0.0.1|\
+  chromeos-base/audiotest:0.0.1|\
+  chromeos-base/autotest-deps:0.0.4|\
+  chromeos-base/autotest-deps-cellular:0.0.1|\
+  chromeos-base/autotest-tests:0.0.4|\
+  chromeos-base/autotest-tests-graphics:0.0.1|\
+  chromeos-base/chromeos-chrome:*|\
+  chromeos-base/chromeos-cr50-dev:0.0.1|\
+  chromeos-base/chunnel:0.1.0|\
+  chromeos-base/crash-reporter:0.0.1|\
+  chromeos-base/cronista:0.24.52|\
+  chromeos-base/cros-camera:0.0.1|\
+  chromeos-base/cros-camera-libs:0.0.1|\
+  chromeos-base/croscomp:0.1.0|\
+  chromeos-base/crosh:0.24.52|\
+  chromeos-base/crostini_client:0.1.0|\
+  chromeos-base/ec-devutils:0.0.2|\
+  chromeos-base/ec-utils:0.0.2|\
+  chromeos-base/ec-utils-test:0.0.1|\
+  chromeos-base/factory:0.2.0|\
+  chromeos-base/factory_installer:0.0.1|\
+  chromeos-base/g2update_tool:1.2.4905|\
+  chromeos-base/gdix_hid_firmware_update:1.7.6|\
+  chromeos-base/glbench:0.0.1|\
+  chromeos-base/google-breakpad:2022.*|\
+  chromeos-base/google-breakpad:2023.0[12]*|\
+  chromeos-base/hps-firmware-tools:0.0.1|\
+  chromeos-base/infineon-firmware-updater:1.1.2459.0|\
+  chromeos-base/ippusb_bridge:0.0.1|\
+  chromeos-base/libevdev:0.0.1|\
+  chromeos-base/libhwsec:0.0.1|\
+  chromeos-base/manatee-runtime:0.1.0|\
+  chromeos-base/memd:0.1.0|\
+  chromeos-base/mttools:0.0.1|\
+  chromeos-base/perfetto:29.0|\
+  chromeos-base/perfetto_simple_producer:0.0.1|\
+  chromeos-base/pixart_tpfwup:0.0.3|\
+  chromeos-base/pixart_tpfwup:0.0.6|\
+  chromeos-base/sirenia:0.24.52|\
+  chromeos-base/sommelier:0.0.1|\
+  chromeos-base/tast-local-helpers-cros:0.0.1|\
+  chromeos-base/telemetry:0.0.1|\
+  chromeos-base/tensorflow-internal:2.8.0|\
+  chromeos-base/termina_container_tools:0.0.1|\
+  chromeos-base/toolchain-tests:0.0.1|\
+  chromeos-base/tpm2-simulator:0.0.1|\
+  chromeos-base/tremplin:0.0.1|\
+  chromeos-base/vkbench:0.0.1|\
+  chromeos-base/vpd:0.0.1|\
+  chromeos-base/wacom_fw_flash:1.4.0|\
+  chromeos-base/weida_wdt_util:0.9.9|\
+  dev-cpp/abseil-cpp:20211102.0|\
+  dev-cpp/gflags:2.2.0|\
+  dev-cpp/gtest:1.10.0|\
+  dev-embedded/dfu-programmer:0.7.2|\
+  dev-lang/tcl:8.6.12|\
+  dev-libs/boost:1.79.0|\
+  dev-libs/confuse:2.7|\
+  dev-libs/flatbuffers:2.0.0|\
+  dev-libs/fribidi:1.0.9|\
+  dev-libs/hidapi:0.8.0*|\
+  dev-libs/iniparser:3.1|\
+  dev-libs/json-c:0.14|\
+  dev-libs/leveldb:1.23|\
+  dev-libs/libconfig:1.5|\
+  dev-libs/libcroco:0.6.12|\
+  dev-libs/libev:4.33|\
+  dev-libs/libfastjson:0.99.8|\
+  dev-libs/libffi:3.1|\
+  dev-libs/libfmt:7.1.3|\
+  dev-libs/libgcrypt:1.8.8|\
+  dev-libs/libgpg-error:1.36|\
+  dev-libs/libgpiod:1.4.1|\
+  dev-libs/libltdl:2.4.6|\
+  dev-libs/libnl:1.1|\
+  dev-libs/libnl:3.4.0|\
+  dev-libs/libpcre2:10.34|\
+  dev-libs/libpcre:8.44|\
+  dev-libs/libunistring:0.9.10|\
+  dev-libs/libusb:1.0.26|\
+  dev-libs/libverto:0.3.0|\
+  dev-libs/libxslt:1.1.35|\
+  dev-libs/nettle:3.7.3|\
+  dev-libs/nspr:4.32|\
+  dev-libs/nss:3.68.2|\
+  dev-libs/opensc:0.21.0|\
+  dev-libs/openssl:1.1.1n|\
+  dev-libs/protobuf:3.19.3|\
+  dev-libs/tinyxml2:8.0.0|\
+  dev-python/grpcio:1.43.*|\
+  dev-python/numpy:1.19.4|\
+  dev-python/python-uinput:0.11.2|\
+  dev-python/selenium:3.0.2|\
+  dev-rust/bindgen:0.59.2|\
+  dev-rust/s9:0.1.0|\
+  dev-util/android-tools:9.0.0_p3|\
+  dev-util/apitrace:9.0|\
+  dev-util/cmocka:1.1.5|\
+  dev-util/glslang:1.3.211|\
+  dev-util/hdctools:0.0.1|\
+  dev-rust/manatee-client:0.24.52|\
+  dev-util/perf:5.15*|\
+  dev-util/rt-tests:2.2|\
+  dev-util/spirv-tools:1.3.211|\
+  dev-util/vulkan-tools:1.3.211|\
+  dev-util/xdelta:3.0.11|\
+  dev-util/xxd:1.10|\
+  games-util/joystick:1.4.2|\
+  gnome-base/librsvg:2.40.21|\
+  media-fonts/font-util:1.3.2|\
+  media-gfx/deqp-runner:0.13.1|\
+  media-gfx/qrencode:3.4.4|\
+  media-gfx/"sa"ne-backends:1.1.1|\
+  media-gfx/zbar:0.23.1|\
+  media-libs/alsa-lib:1.2.1.2|\
+  media-libs/clvk:0.0.1|\
+  media-libs/cros-camera-hal-qti:0.0.1|\
+  media-libs/cros-camera-libfs:0.0.1|\
+  media-libs/cros-camera-sw-privacy-switch-test:0.0.1|\
+  media-libs/dlm:0.0.1|\
+  media-libs/freeimage:3.15.3|\
+  media-libs/freetype:2.12*|\
+  media-libs/ladspa-sdk:1.13|\
+  media-libs/lcms:2.12|\
+  media-libs/libjpeg-turbo:2.1.1|\
+  media-libs/libpng:1.6.37|\
+  media-libs/libv4lplugins:0.0.1|\
+  media-libs/libvorbis:1.3.7|\
+  media-libs/libyuv-test:1774|\
+  media-libs/mali-drivers:1.20|\
+  media-libs/mali-drivers-bin:1.20*|\
+  media-libs/mali-drivers-bifrost:32.0|\
+  media-libs/mali-drivers-bifrost-bin:32.0*|\
+  media-libs/mali-drivers-valhall:32.0|\
+  media-libs/mali-drivers-valhall-bin:32.0*|\
+  media-libs/mesa-img:21.3*|\
+  media-libs/opencl-cts:0.0.1|\
+  media-libs/opencv:4.5.5|\
+  media-libs/openh264:2.1.1|\
+  media-libs/openjpeg:2.3.0|\
+  media-libs/qti-7c-camera-bins:20220401|\
+  media-libs/rockchip-isp1-3a-libs-bin:2018.06.28|\
+  media-libs/sbc:1.3|\
+  media-libs/shaderc:2022.1|\
+  media-libs/skia:106|\
+  media-libs/tiff:4.3.0|\
+  media-libs/vulkan-layers:1.3.211|\
+  media-libs/vulkan-loader:1.3.211|\
+  media-libs/waffle:1.6.0|\
+  media-plugins/alsa-plugins:1.1.6|\
+  media-sound/alsa-utils:1.2.1|\
+  media-sound/gsm:1.0.13|\
+  media-sound/sound_card_init:*|\
+  media-video/yavta:0.0.1|\
+  net-analyzer/netcat:110.20180111|\
+  net-analyzer/netdata:1.34.1|\
+  net-analyzer/netperf:2.7.0|\
+  net-analyzer/tcpdump:4.9.3|\
+  net-analyzer/traceroute:2.1.0|\
+  net-dialup/lrzsz:0.12.20|\
+  net-dialup/minicom:2.7|\
+  net-dialup/ppp:2.4.9|\
+  net-dialup/xl2tpd:1.3.12|\
+  net-dns/avahi:0.8|\
+  net-dns/bind-tools:9.11.2_p1|\
+  net-firewall/conntrack-tools:1.4.4|\
+  net-firewall/ebtables:2.0.11|\
+  net-libs/grpc:1.16.*|\
+  net-libs/grpc:1.43.*|\
+  net-libs/libiio:0.23|\
+  net-libs/libnetfilter_conntrack:1.0.6|\
+  net-libs/libnsl:1.2.0|\
+  net-libs/libsoup:2.58.2|\
+  net-libs/libtirpc:1.0.2|\
+  net-libs/libvncserver:0.9.13|\
+  net-libs/rpcsvc-proto:1.3.1|\
+  net-misc/bridge-utils:1.6|\
+  net-misc/chrony:4.2|\
+  net-misc/diag:0.1_p20210329|\
+  net-misc/htpdate:1.0.4|\
+  net-misc/iperf:2.0.9|\
+  net-misc/iperf:3.7|\
+  net-misc/iputils:20171016_pre|\
+  net-misc/pps-tools:0.0.20120407|\
+  net-misc/qc-netmgr:0.1_p20220118|\
+  net-misc/radvd:2.17|\
+  net-misc/rmtfs:0.3_p20210408|\
+  net-misc/socat:1.7.3.2|\
+  net-misc/sslh:1.18|\
+  net-misc/tlsdate:0.0.5|\
+  net-misc/uftp:4.10.1|\
+  net-misc/usbip:4.19|\
+  net-print/cups-filters:1.28.7|\
+  net-print/dymo-cups-drivers:1.4.0|\
+  net-print/epson-inkjet-printer-escpr:1.7.18|\
+  net-print/hplip:3.21.8|\
+  net-print/starcupsdrv:3.11.0|\
+  net-proxy/tinyproxy:1.10.0|\
+  net-vpn/openvpn:2.4.4|\
+  net-vpn/strongswan:5.9.4|\
+  net-vpn/wireguard-tools:1.0.20200319|\
+  net-wireless/bluez:5.54|\
+  net-wireless/crda:3.18|\
+  net-wireless/floss:0.0.2|\
+  net-wireless/hostapd:2.11_pre|\
+  net-wireless/iw:5.9|\
+  net-wireless/wireless-tools:30_pre9|\
+  net-wireless/wpa_supplicant-cros:2.11_pre|\
+  sci-geosciences/gpsd:3.17|\
+  sci-libs/tensorflow:2.8.0|\
+  sys-apps/coreboot-utils:0.0.1|\
+  sys-apps/debianutils:4.4|\
+  sys-apps/dmidecode:3.2|\
+  sys-apps/dtc:1.6.0|\
+  sys-apps/ethtool:4.13|\
+  sys-apps/flashmap:0.3|\
+  sys-apps/flashrom-tester:1.6.0|\
+  sys-apps/groff:1.22.4|\
+  sys-apps/haveged:1.9.14|\
+  sys-apps/hdparm:9.63|\
+  sys-apps/i2c-tools:4.0|\
+  sys-apps/install-xattr:0.5|\
+  sys-apps/iotools:1.5|\
+  sys-apps/kbd:1.15.5|\
+  sys-apps/keyutils:1.6.3|\
+  sys-apps/less:590|\
+  sys-apps/lshw:02.19.2b_p20210121|\
+  sys-apps/memtester:4.2.2|\
+  sys-apps/nvme-cli:1.6|\
+  sys-apps/pv:1.6.20|\
+  sys-apps/restorecon:2.7|\
+  sys-apps/smartmontools:7.3|\
+  sys-apps/toybox:0.8.6|\
+  sys-apps/usbguard:20210927|\
+  sys-apps/usbutils:014|\
+  sys-auth/nss-mdns:0.13|\
+  sys-auth/pam_pwdfile:0.99|\
+  sys-cluster/libqb:0.17.2|\
+  sys-devel/bc:1.07.1|\
+  sys-devel/binutils:2.36.1|\
+  sys-devel/flex:2.6.4|\
+  sys-devel/gdb:9.2.20200923|\
+  sys-devel/llvm:12.0.1|\
+  sys-devel/llvm-img:9.0.0|\
+  sys-fs/btrfs-progs:5.4.1|\
+  sys-fs/e2fsprogs:1.47.0|\
+  sys-fs/fuse:2.9.8|\
+  sys-libs/gcc-libs:10.2.0|\
+  sys-libs/libcap-ng:0.8.2|\
+  sys-libs/libcxx:15.*|\
+  sys-libs/libselinux:3.0|\
+  sys-libs/libsepol:3.0|\
+  sys-libs/mtdev:1.1.2|\
+  sys-libs/pam:1.3.1|\
+  sys-process/audit:3.0.6|\
+  sys-process/htop:1.0.2|\
+  sys-process/numactl:2.0.14|\
+  sys-process/psmisc:23.3|\
+  sys-process/time:1.9|\
+  x11-base/xwayland:1.20.8|\
+  x11-libs/pango:1.42.4)
+    return 0
+    ;;
+  esac
+
+  return 1
+}
+
 check_lfs()
 {
-  local files=$(scanelf -F '%s %p' -qRgs "-${SYMBOLS_REGEX}" "$@")
+  local files
 
-  if [[ -n ${files} ]]; then
+  if known_good_pkg; then
+    return
+  fi
+
+  files=$(scanelf -F '%s %p' -qyRgs "-${SYMBOLS_REGEX}" "$@")
+  if [[ -n "${files}" ]]; then
     echo
     eqawarn "QA Notice: The following files were not built with LFS support:"
-    eqawarn "  Please see http://crbug.com/464024 for details."
+    eqawarn "  Please see ${DOC_URL} for details."
     eqawarn "${files}"
+    eqawarn "Full build files:"
+    scanelf -F '%s %F' -qyRgs "-${SYMBOLS_REGEX}" "${WORKDIR:-}"
     echo
+
+    if ! known_bad_pkg; then
+      die "package needs LFS support enabled -- see ${DOC_URL}"
+    fi
+  else
+    if known_bad_pkg; then
+      eqawarn "Please remove ${PN} exception from large-file-support.sh hook."
+    fi
   fi
 }
 
-# Only check on 32bit systems.  Filtering by $ARCH here isn't perfect, but it
+# Only check on 32-bit systems.  Filtering by $ARCH here isn't perfect, but it
 # should be good enough for our needs so far.
-case ${ARCH} in
-arm|mips|ppc|sh|x86)
-  if [[ " ${RESTRICT} " == *" binchecks "* ]] ; then
+case "${ARCH}" in
+amd64|arm64|"")
+  ;;
+*)
+  if [[ " ${RESTRICT} " != *" binchecks "* ]]; then
     check_lfs "${D}"
   fi
   ;;
diff --git a/hooks/install/multilib-check.sh b/hooks/install/multilib-check.sh
index 438d656..f2a27d8 100755
--- a/hooks/install/multilib-check.sh
+++ b/hooks/install/multilib-check.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/hooks/install/qa-elf.sh b/hooks/install/qa-elf.sh
index 60fca47..9404b2d 100755
--- a/hooks/install/qa-elf.sh
+++ b/hooks/install/qa-elf.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/image_to_vm.sh b/image_to_vm.sh
index 46f3b5d..6490f33 100755
--- a/image_to_vm.sh
+++ b/image_to_vm.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Copyright 2010 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -8,7 +8,9 @@
 
 # Helper scripts should be run from the same location as this script.
 SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
+# shellcheck source=common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
+# shellcheck source=build_library/ext2_sb_util.sh
 . "${SCRIPT_ROOT}/build_library/ext2_sb_util.sh" || exit 1
 
 # Need to be inside the chroot to load chromeos-common.sh
@@ -24,7 +26,7 @@
   "Board for which the image was built"
 DEFINE_string from "" \
   "Directory containing rootfs.image and mbr.image"
-DEFINE_string disk_layout "2gb-rootfs-updatable" \
+DEFINE_string disk_layout "usb-updatable" \
   "The disk layout type to use for this image."
 DEFINE_boolean test_image "${FLAGS_FALSE}" \
   "Use ${CHROMEOS_TEST_IMAGE_NAME} instead of ${CHROMEOS_IMAGE_NAME}."
@@ -118,6 +120,7 @@
   # but preferring the board inferred from FLAGS_from over the default,
   # everywhere.
   FLAGS_board="$(
+    # shellcheck source=build_library/mount_gpt_util.sh
     . "${BUILD_LIBRARY_DIR}/mount_gpt_util.sh"
     get_board_from_image "${SRC_IMAGE}"
   )"
@@ -131,7 +134,9 @@
   setup_board --quiet --board="${FLAGS_board}" \
     --skip-toolchain-update --skip-chroot-upgrade --skip-board-pkg-init
 fi
+# shellcheck source=build_library/board_options.sh
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
+# shellcheck source=build_library/disk_layout_util.sh
 . "${SCRIPT_ROOT}/build_library/disk_layout_util.sh" || exit 1
 
 # Memory units are in MBs
diff --git a/make_netboot.sh b/make_netboot.sh
index 1a51a05..9f16107 100755
--- a/make_netboot.sh
+++ b/make_netboot.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -11,6 +11,7 @@
 # are placed in a "netboot" subfolder.
 
 SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+# shellcheck source=build_library/build_common.sh
 . "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
 
 # Script must be run inside the chroot.
@@ -24,12 +25,14 @@
 FLAGS "$@" || exit 1
 eval set -- "${FLAGS_ARGV}"
 
+# shellcheck source=build_library/build_common.sh
 . "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
+# shellcheck source=build_library/board_options.sh
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
 
 switch_to_strict_mode
 # build_packages artifact output.
-SYSROOT="${GCLIENT_ROOT}/chroot/build/${FLAGS_board}"
+SYSROOT="/build/${FLAGS_board}"
 # build_image artifact output.
 
 if [ -n "${FLAGS_image_dir}" ]; then
diff --git a/mod_for_test_scripts/000recordRootFsSize b/mod_for_test_scripts/000recordRootFsSize
index 3540b82..05950a2 100755
--- a/mod_for_test_scripts/000recordRootFsSize
+++ b/mod_for_test_scripts/000recordRootFsSize
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
+# Copyright 2009 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
diff --git a/mod_for_test_scripts/001changeBuildName b/mod_for_test_scripts/001changeBuildName
index c7ffc24..9b0a7ff 100755
--- a/mod_for_test_scripts/001changeBuildName
+++ b/mod_for_test_scripts/001changeBuildName
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Copyright 2010 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
diff --git a/mod_for_test_scripts/002changeUpdateChannel b/mod_for_test_scripts/002changeUpdateChannel
index b07422e..d06b0f7 100755
--- a/mod_for_test_scripts/002changeUpdateChannel
+++ b/mod_for_test_scripts/002changeUpdateChannel
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
diff --git a/mod_for_test_scripts/100setupTestingInterface b/mod_for_test_scripts/100setupTestingInterface
index 24530ec..b5fd688 100755
--- a/mod_for_test_scripts/100setupTestingInterface
+++ b/mod_for_test_scripts/100setupTestingInterface
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
+# Copyright 2009 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/mod_for_test_scripts/200disableIdleSuspend b/mod_for_test_scripts/200disableIdleSuspend
index bbeb5e2..722eaf7 100755
--- a/mod_for_test_scripts/200disableIdleSuspend
+++ b/mod_for_test_scripts/200disableIdleSuspend
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
+# Copyright 2009 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/mod_for_test_scripts/250enableForDemoMode b/mod_for_test_scripts/250enableForDemoMode
index 9c111f1..0721421 100755
--- a/mod_for_test_scripts/250enableForDemoMode
+++ b/mod_for_test_scripts/250enableForDemoMode
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
diff --git a/mod_for_test_scripts/300changePassword b/mod_for_test_scripts/300changePassword
index 70b2fdf..66bf513 100755
--- a/mod_for_test_scripts/300changePassword
+++ b/mod_for_test_scripts/300changePassword
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
+# Copyright 2009 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/mod_for_test_scripts/340enableFwupdDummy b/mod_for_test_scripts/340enableFwupdDummy
index 8ca83de..6534a32 100755
--- a/mod_for_test_scripts/340enableFwupdDummy
+++ b/mod_for_test_scripts/340enableFwupdDummy
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -14,7 +14,7 @@
 
 echo "Enabling dummy fwupd remote for tests."
 
-sed -e '/^DisabledPlugins=/s/^/#/' -i "${ROOT_FS_DIR}/etc/fwupd/daemon.conf"
+sed -e 's/^\(DisabledPlugins=\).*/\1/' -i "${ROOT_FS_DIR}/etc/fwupd/daemon.conf"
 
 cat > "${ROOT_FS_DIR}/usr/share/fwupd/remotes.d/vendor/fwupd-tests.xml" <<EOF
 <?xml version="1.0" encoding="UTF-8"?>
@@ -63,7 +63,7 @@
 EOF
 
 cat > "${ROOT_FS_DIR}/etc/fwupd/remotes.d/fwupd-tests.conf" <<EOF
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/mod_for_test_scripts/350addEnvironment b/mod_for_test_scripts/350addEnvironment
index 9268be3..180408d 100755
--- a/mod_for_test_scripts/350addEnvironment
+++ b/mod_for_test_scripts/350addEnvironment
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
diff --git a/mod_for_test_scripts/360setPagerLess b/mod_for_test_scripts/360setPagerLess
index 5eb8f0e..5d56a50 100755
--- a/mod_for_test_scripts/360setPagerLess
+++ b/mod_for_test_scripts/360setPagerLess
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
diff --git a/mod_for_test_scripts/370fixTestSSH b/mod_for_test_scripts/370fixTestSSH
index fbd95b4..f1cf7fa 100755
--- a/mod_for_test_scripts/370fixTestSSH
+++ b/mod_for_test_scripts/370fixTestSSH
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
diff --git a/mod_for_test_scripts/380archiveTestKeys b/mod_for_test_scripts/380archiveTestKeys
index 581f73e..fb17e79 100755
--- a/mod_for_test_scripts/380archiveTestKeys
+++ b/mod_for_test_scripts/380archiveTestKeys
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/mod_for_test_scripts/390removeLimitForSanitizers b/mod_for_test_scripts/390removeLimitForSanitizers
index 6a854e3..282f27d 100755
--- a/mod_for_test_scripts/390removeLimitForSanitizers
+++ b/mod_for_test_scripts/390removeLimitForSanitizers
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
@@ -11,7 +11,7 @@
 
 BOARD=${BOARD_ROOT#"/build/"}
 CONF_FILES_PATH="${ROOT_FS_DIR}/etc/init"
-SANTIZER_USE_FLAGS=( asan msan tsan ubsan )
+SANTIZER_USE_FLAGS=( asan msan tsan ubsan system_wide_scudo )
 
 if [[ ! -d "${CONF_FILES_PATH}" ]]; then
   exit 0
diff --git a/mod_for_test_scripts/920addTestcases b/mod_for_test_scripts/920addTestcases
index 1154547..6859aaf 100755
--- a/mod_for_test_scripts/920addTestcases
+++ b/mod_for_test_scripts/920addTestcases
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
diff --git a/mod_for_test_scripts/ssh_keys/testing_rsa b/mod_for_test_scripts/ssh_keys/testing_rsa
deleted file mode 100644
index d50a630..0000000
--- a/mod_for_test_scripts/ssh_keys/testing_rsa
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEoAIBAAKCAQEAvsNpFdK5lb0GfKx+FgsrsM/2+aZVFYXHMPdvGtTz63ciRhq0
-Jnw7nln1SOcHraSz3/imECBg8NHIKV6rA+B9zbf7pZXEv20x5Ul0vrcPqYWC44PT
-tgsgvi8s0KZUZN93YlcjZ+Q7BjQ/tuwGSaLWLqJ7hnHALMJ3dbEM9fKBHQBCrG5H
-OaWD2gtXj7jp04M/WUnDDdemq/KMg6E9jcrJOiQ39IuTpas4hLQzVkKAKSrpl6MY
-2etHyoNarlWhcOwitArEDwf3WgnctwKstI/MTKB5BTpO2WXUNUv4kXzA+g8/l1al
-jIG13vtd9A/IV3KFVx/sLkkjuZ7z2rQXyNKuJwIBIwKCAQA79EWZJPh/hI0CnJyn
-16AEXp4T8nKDG2p9GpCiCGnq6u2Dvz/u1pZk97N9T+x4Zva0GvJc1vnlST7objW/
-Y8/ET8QeGSCT7x5PYDqiVspoemr3DCyYTKPkADKn+cLAngDzBXGHDTcfNP4U6xfr
-Qc5JK8BsFR8kApqSs/zCU4eqBtp2FVvPbgUOv3uUrFnjEuGs9rb1QZ0K6o08L4Cq
-N+e2nTysjp78blakZfqlurqTY6iJb0ImU2W3T8sV6w5GP1NT7eicXLO3WdIRB15a
-evogPeqtMo8GcO62wU/D4UCvq4GNEjvYOvFmPzXHvhTxsiWv5KEACtleBIEYmWHA
-POwrAoGBAOKgNRgxHL7r4bOmpLQcYK7xgA49OpikmrebXCQnZ/kZ3QsLVv1QdNMH
-Rx/ex7721g8R0oWslM14otZSMITCDCMWTYVBNM1bqYnUeEu5HagFwxjQ2tLuSs8E
-SBzEr96JLfhwuBhDH10sQqn+OQG1yj5acs4Pt3L4wlYwMx0vs1BxAoGBANd9Owro
-5ONiJXfKNaNY/cJYuLR+bzGeyp8oxToxgmM4UuA4hhDU7peg4sdoKJ4XjB9cKMCz
-ZGU5KHKKxNf95/Z7aywiIJEUE/xPRGNP6tngRunevp2QyvZf4pgvACvk1tl9B3HH
-7J5tY/GRkT4sQuZYpx3YnbdP5Y6Kx33BF7QXAoGAVCzghVQR/cVT1QNhvz29gs66
-iPIrtQnwUtNOHA6i9h+MnbPBOYRIpidGTaqEtKTTKisw79JjJ78X6TR4a9ML0oSg
-c1K71z9NmZgPbJU25qMN80ZCph3+h2f9hwc6AjLz0U5wQ4alP909VRVIX7iM8paf
-q59wBiHhyD3J16QAxhsCgYBu0rCmhmcV2rQu+kd4lCq7uJmBZZhFZ5tny9MlPgiK
-zIJkr1rkFbyIfqCDzyrU9irOTKc+iCUA25Ek9ujkHC4m/aTU3lnkNjYp/OFXpXF3
-XWZMY+0Ak5uUpldG85mwLIvATu3ivpbyZCTFYM5afSm4StmaUiU5tA+oZKEcGily
-jwKBgBdFLg+kTm877lcybQ04G1kIRMf5vAXcConzBt8ry9J+2iX1ddlu2K2vMroD
-1cP/U/EmvoCXSOGuetaI4UNQwE/rGCtkpvNj5y4twVLh5QufSOl49V0Ut0mwjPXw
-HfN/2MoO07vQrjgsFylvrw9A79xItABaqKndlmqlwMZWc9Ne
------END RSA PRIVATE KEY-----
diff --git a/mod_for_test_scripts/ssh_keys/testing_rsa.pub b/mod_for_test_scripts/ssh_keys/testing_rsa.pub
deleted file mode 100644
index 7a4d033..0000000
--- a/mod_for_test_scripts/ssh_keys/testing_rsa.pub
+++ /dev/null
@@ -1 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvsNpFdK5lb0GfKx+FgsrsM/2+aZVFYXHMPdvGtTz63ciRhq0Jnw7nln1SOcHraSz3/imECBg8NHIKV6rA+B9zbf7pZXEv20x5Ul0vrcPqYWC44PTtgsgvi8s0KZUZN93YlcjZ+Q7BjQ/tuwGSaLWLqJ7hnHALMJ3dbEM9fKBHQBCrG5HOaWD2gtXj7jp04M/WUnDDdemq/KMg6E9jcrJOiQ39IuTpas4hLQzVkKAKSrpl6MY2etHyoNarlWhcOwitArEDwf3WgnctwKstI/MTKB5BTpO2WXUNUv4kXzA+g8/l1aljIG13vtd9A/IV3KFVx/sLkkjuZ7z2rQXyNKuJw== ChromeOS test key
diff --git a/mod_for_test_scripts/test_setup.sh b/mod_for_test_scripts/test_setup.sh
index d80eac7..7f91b99 100755
--- a/mod_for_test_scripts/test_setup.sh
+++ b/mod_for_test_scripts/test_setup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/mod_image_for_recovery.sh b/mod_image_for_recovery.sh
index 2988b5a..5aec33f 100755
--- a/mod_image_for_recovery.sh
+++ b/mod_image_for_recovery.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -9,14 +9,17 @@
 # kernel.  Alternatively, a signed recovery kernel can be used to
 # create a Chromium OS recovery image.
 
-SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+SCRIPT_ROOT="$(dirname "$(readlink -f "$0")")"
+# shellcheck source=build_library/build_common.sh
 . "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
+# shellcheck source=build_library/disk_layout_util.sh
 . "${SCRIPT_ROOT}/build_library/disk_layout_util.sh" || exit 1
 
 # Default recovery kernel name.
 RECOVERY_KERNEL_NAME=recovery_vmlinuz.image
 
-DEFINE_string board "$DEFAULT_BOARD" \
+# shellcheck disable=SC2154
+DEFINE_string board "${DEFAULT_BOARD}" \
   "board for which the image was built" \
   b
 DEFINE_integer statefulfs_sectors 4096 \
@@ -24,29 +27,33 @@
 DEFINE_string kernel_image "" \
   "path to a pre-built recovery kernel"
 DEFINE_string kernel_outfile "" \
-  "emit recovery kernel to path/file ($RECOVERY_KERNEL_NAME if empty)"
+  "emit recovery kernel to path/file (${RECOVERY_KERNEL_NAME} if empty)"
+# shellcheck disable=SC2154
 DEFINE_string image "" \
-  "source image to use ($CHROMEOS_IMAGE_NAME if empty)"
+  "source image to use (${CHROMEOS_IMAGE_NAME} if empty)"
+# shellcheck disable=SC2154
 DEFINE_string to "" \
-  "emit recovery image to path/file ($CHROMEOS_RECOVERY_IMAGE_NAME if empty)"
-DEFINE_boolean kernel_image_only $FLAGS_FALSE \
+  "emit recovery image to path/file (${CHROMEOS_RECOVERY_IMAGE_NAME} if empty)"
+DEFINE_boolean kernel_image_only "${FLAGS_FALSE}" \
   "only emit recovery kernel"
-DEFINE_boolean sync_keys $FLAGS_TRUE \
+DEFINE_boolean sync_keys "${FLAGS_TRUE}" \
   "update install kernel with the vblock from stateful"
-DEFINE_boolean minimize_image $FLAGS_TRUE \
+DEFINE_boolean minimize_image "${FLAGS_TRUE}" \
   "create a minimized recovery image from source image"
-DEFINE_boolean modify_in_place $FLAGS_FALSE \
+DEFINE_boolean modify_in_place "${FLAGS_FALSE}" \
   "modify source image in place"
+# shellcheck disable=SC2034
 DEFINE_integer jobs -1 \
   "how many packages to build in parallel at maximum" \
   j
+# shellcheck disable=SC2034
 DEFINE_string build_root "/build" \
   "root location for board sysroots"
 DEFINE_string keys_dir "${VBOOT_DEVKEYS_DIR}" \
   "directory containing the signing keys"
-DEFINE_boolean verbose $FLAGS_FALSE \
+DEFINE_boolean verbose "${FLAGS_FALSE}" \
   "log all commands to stdout" v
-DEFINE_boolean decrypt_stateful $FLAGS_FALSE \
+DEFINE_boolean decrypt_stateful "${FLAGS_FALSE}" \
   "request a decryption of the stateful partition (implies --nominimize_image)"
 DEFINE_string enable_serial "" \
   "Enable serial output (same as build_kernel_image.sh). Example: ttyS0"
@@ -59,20 +66,23 @@
 # so will die prematurely if 'switch_to_strict_mode' is specified before now.
 switch_to_strict_mode
 
-if [ $FLAGS_verbose -eq $FLAGS_TRUE ]; then
+if [ "${FLAGS_verbose}" -eq "${FLAGS_TRUE}" ]; then
   # Make debugging with -v easy.
   set -x
 fi
 
 # We need space for copying decrypted files to the recovery image, so force
 # --nominimize_image when using --decrypt_stateful.
-if [ $FLAGS_decrypt_stateful -eq $FLAGS_TRUE ]; then
-  FLAGS_minimize_image=$FLAGS_FALSE
+if [ "${FLAGS_decrypt_stateful}" -eq "${FLAGS_TRUE}" ]; then
+  FLAGS_minimize_image="${FLAGS_FALSE}"
 fi
 
 # Load board options.
+# shellcheck source=build_library/board_options.sh
+# shellcheck disable=SC2154
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
-EMERGE_BOARD_CMD="emerge-$BOARD"
+# shellcheck disable=SC2034,SC2154
+EMERGE_BOARD_CMD="emerge-${BOARD}"
 
 # Files to preserve from original stateful, if minimize_image is true.
 # If minimize_image is false, everything is always preserved.
@@ -85,22 +95,23 @@
 get_install_vblock() {
   # If it exists, we need to copy the vblock over to stateful
   # This is the real vblock and not the recovery vblock.
-  local partition_num_state=$(get_image_partition_number "${FLAGS_image}" \
-    "STATE")
+  local partition_num_state stateful_mnt out
+
+  partition_num_state=$(get_image_partition_number "${FLAGS_image}" "STATE")
   IMAGE_DEV=$(loopback_partscan "${FLAGS_image}")
-  local stateful_mnt=$(mktemp -d)
-  local out=$(mktemp)
+  stateful_mnt=$(mktemp -d)
+  out=$(mktemp)
 
   set +e
-  sudo mount ${IMAGE_DEV}p${partition_num_state} $stateful_mnt
-  sudo cp "$stateful_mnt/vmlinuz_hd.vblock"  "$out"
-  sudo chown $USER "$out"
+  sudo mount "${IMAGE_DEV}"p"${partition_num_state}" "${stateful_mnt}"
+  sudo cp "${stateful_mnt}/vmlinuz_hd.vblock"  "${out}"
+  sudo chown "${USER}" "${out}"
 
-  safe_umount "$stateful_mnt"
-  sudo losetup -d ${IMAGE_DEV}
-  rmdir "$stateful_mnt"
+  safe_umount "${stateful_mnt}"
+  rmdir "${stateful_mnt}"
+  loopback_detach "${IMAGE_DEV}"
   switch_to_strict_mode
-  echo "$out"
+  echo "${out}"
 }
 
 calculate_kernel_hash() {
@@ -124,8 +135,8 @@
 }
 
 create_recovery_kernel_image() {
-  local sysroot="$FACTORY_ROOT"
-  local vmlinuz="$sysroot/boot/vmlinuz"
+  local sysroot="${FACTORY_ROOT}"
+  local vmlinuz="${sysroot}/boot/vmlinuz"
 
   local enable_rootfs_verification_flag=--noenable_rootfs_verification
   if grep -q enable_rootfs_verification "${IMAGE_DIR}/boot.desc"; then
@@ -147,17 +158,19 @@
   kern_hash="$(calculate_kernel_hash "${FLAGS_image}")"
 
   # TODO(wad) add FLAGS_boot_args support too.
-  ${SCRIPTS_DIR}/build_kernel_image.sh \
+  # shellcheck source=build_kernel_image.sh
+  # shellcheck disable=SC2154
+  "${SCRIPTS_DIR}"/build_kernel_image.sh \
     --board="${FLAGS_board}" \
     --arch="${ARCH}" \
-    --to="$RECOVERY_KERNEL_IMAGE" \
-    --vmlinuz="$vmlinuz" \
+    --to="${RECOVERY_KERNEL_IMAGE}" \
+    --vmlinuz="${vmlinuz}" \
     --working_dir="${IMAGE_DIR}" \
-    --boot_args="noinitrd panic=60 cros_recovery kern_b_hash=$kern_hash" \
+    --boot_args="noinitrd panic=60 cros_recovery kern_b_hash=${kern_hash}" \
     --enable_serial="${FLAGS_enable_serial}" \
     --keep_work \
     --keys_dir="${FLAGS_keys_dir}" \
-    ${enable_rootfs_verification_flag} \
+    "${enable_rootfs_verification_flag}" \
     --public="recovery_key.vbpubk" \
     --private="recovery_kernel_data_key.vbprivk" \
     --keyblock="recovery_kernel.keyblock" 1>&2 || die "build_kernel_image"
@@ -166,96 +179,141 @@
 update_efi_partition() {
   # Update the EFI System Partition configuration so that the kern_hash check
   # passes.
-  RECOVERY_DEV=$(loopback_partscan "${RECOVERY_IMAGE}")
-  local partition_num_efi_system=$(get_image_partition_number \
-    "${RECOVERY_IMAGE}" "EFI-SYSTEM")
+  local partition_num_efi_system efi_size kern_hash
 
-  local efi_size kern_hash
+  RECOVERY_DEV=$(loopback_partscan "${RECOVERY_IMAGE}")
+  partition_num_efi_system=$(get_image_partition_number "${RECOVERY_IMAGE}" \
+    "EFI-SYSTEM")
+
   efi_size=$(partsize "${RECOVERY_IMAGE}" "${partition_num_efi_system}")
   kern_hash="$(calculate_kernel_hash "${RECOVERY_IMAGE}")"
 
   if [[ ${efi_size} -ne 0 ]]; then
-    local efi_dir=$(mktemp -d)
-    sudo mount ${RECOVERY_DEV}p${partition_num_efi_system} "${efi_dir}"
+    local efi_dir
 
-    sudo sed  -i -e "s/cros_legacy/cros_legacy kern_b_hash=$kern_hash/g" \
-      "$efi_dir/syslinux/usb.A.cfg" || true
+    efi_dir=$(mktemp -d)
+    sudo mount "${RECOVERY_DEV}p${partition_num_efi_system}" "${efi_dir}"
+
+    sudo sed  -i -e "s/cros_legacy/cros_legacy kern_b_hash=${kern_hash}/g" \
+      "${efi_dir}/syslinux/usb.A.cfg" || true
     # This will leave the hash in the kernel for all boots, but that should be
     # safe.
-    sudo sed  -i -e "s/cros_efi/cros_efi kern_b_hash=$kern_hash/g" \
-      "$efi_dir/efi/boot/grub.cfg" || true
-    safe_umount "$efi_dir"
-    rmdir "$efi_dir"
+    sudo sed  -i -e "s/cros_efi/cros_efi kern_b_hash=${kern_hash}/g" \
+      "${efi_dir}/efi/boot/grub.cfg" || true
+    safe_umount "${efi_dir}"
+    rmdir "${efi_dir}"
   fi
-  sudo losetup -d "${RECOVERY_DEV}"
+  loopback_detach "${RECOVERY_DEV}"
+}
+
+install_recovery_kernel_once() {
+  local kern_offset="$1"
+  local kern_size="$2"
+
+  local kernel_img_bytes
+  kernel_img_bytes="$(stat -c %s "${RECOVERY_KERNEL_IMAGE}")"
+  if [[ "${kernel_img_bytes}" -gt "$(( kern_size * 512 ))" ]]; then
+    die "Kernel image size ($(( kernel_img_bytes / 1048576)) MiB) is " \
+      "larger than kernel partition size ($(( kern_size * 512 / 1048576 )) MiB)"
+  fi
+
+  dd if="${RECOVERY_KERNEL_IMAGE}" of="${RECOVERY_IMAGE}" bs=512 \
+     seek="${kern_offset}" \
+     count="${kern_size}" \
+     conv=notrunc
 }
 
 install_recovery_kernel() {
-  local partition_num_kern_a=$(get_image_partition_number "${RECOVERY_IMAGE}" \
+  local partition_num_kern_a kern_a_offset kern_a_size \
+        partition_num_kern_b kern_b_offset kern_b_size \
+        partition_num_kern_c kern_c_offset kern_c_size \
+        has_kern_c
+
+  partition_num_kern_a=$(get_image_partition_number "${RECOVERY_IMAGE}" \
     "KERN-A")
-  local kern_a_offset=$(partoffset "$RECOVERY_IMAGE" "${partition_num_kern_a}")
-  local kern_a_size=$(partsize "$RECOVERY_IMAGE" "${partition_num_kern_a}")
+  kern_a_offset=$(partoffset "${RECOVERY_IMAGE}" "${partition_num_kern_a}")
+  kern_a_size=$(partsize "${RECOVERY_IMAGE}" "${partition_num_kern_a}")
 
-  local partition_num_kern_b=$(get_image_partition_number "${RECOVERY_IMAGE}" \
+  partition_num_kern_b=$(get_image_partition_number "${RECOVERY_IMAGE}" \
     "KERN-B")
-  local kern_b_offset=$(partoffset "$RECOVERY_IMAGE" "${partition_num_kern_b}")
-  local kern_b_size=$(partsize "$RECOVERY_IMAGE" "${partition_num_kern_b}")
+  kern_b_offset=$(partoffset "${RECOVERY_IMAGE}" "${partition_num_kern_b}")
+  kern_b_size=$(partsize "${RECOVERY_IMAGE}" "${partition_num_kern_b}")
 
-  if [ $kern_b_size -eq 1 ]; then
+  if [ "${kern_b_size}" -eq 1 ]; then
     echo "Image was created with no KERN-B partition reserved!" 1>&2
     echo "Cannot proceed." 1>&2
     return 1
   fi
 
+  # Only some devices have a KERN-C. If it exists and has size > 1 sector,
+  # the same recovery kernel is installed as both KERN-A and KERN-C. If not,
+  # the recovery kernel is installed as KERN-A only. (See b/266502803).
+  has_kern_c="true"
+  partition_num_kern_c=$(get_image_partition_number "${RECOVERY_IMAGE}" \
+    "KERN-C")
+  if [[ -z "${partition_num_kern_c}" ]]; then
+    has_kern_c="false"
+  else
+    kern_c_offset=$(partoffset "${RECOVERY_IMAGE}" "${partition_num_kern_c}")
+    kern_c_size=$(partsize "${RECOVERY_IMAGE}" "${partition_num_kern_c}")
+    if [[ "${kern_c_size}" -le 1 ]]; then
+      has_kern_c="false"
+    fi
+  fi
+
   # We're going to use the real signing block.
-  if [ $FLAGS_sync_keys -eq $FLAGS_TRUE ]; then
-    dd if="$INSTALL_VBLOCK" of="$RECOVERY_IMAGE" bs=512 \
-       seek=$kern_b_offset \
+  if [ "${FLAGS_sync_keys}" -eq "${FLAGS_TRUE}" ]; then
+    dd if="${INSTALL_VBLOCK}" of="${RECOVERY_IMAGE}" bs=512 \
+       seek="${kern_b_offset}" \
        conv=notrunc
   fi
 
-  local kernel_img_bytes
-  kernel_img_bytes="$(stat -c %s "${RECOVERY_KERNEL_IMAGE}")"
-  if [[ "${kernel_img_bytes}" -gt "$(( kern_a_size * 512 ))" ]]; then
-    die "Kernel image is larger than $(( kern_a_size * 512 / 1048576 )) MiB."
+  install_recovery_kernel_once "${kern_a_offset}" "${kern_a_size}"
+  if [[ "${has_kern_c}" == "true" ]]; then
+    install_recovery_kernel_once "${kern_c_offset}" "${kern_c_size}"
   fi
 
-  # Install the recovery kernel as primary.
-  dd if="$RECOVERY_KERNEL_IMAGE" of="$RECOVERY_IMAGE" bs=512 \
-     seek=$kern_a_offset \
-     count=$kern_a_size \
-     conv=notrunc
   # Force all of the file writes to complete, in case it's necessary for
   # crbug.com/954188
   sync
 
   # Set the 'Success' flag to 1 (to prevent the firmware from updating
   # the 'Tries' flag).
-  sudo $GPT add -i "${partition_num_kern_a}" -S 1 "$RECOVERY_IMAGE"
+  # shellcheck disable=SC2154
+  sudo "${GPT}" add -i "${partition_num_kern_a}" -S 1 "${RECOVERY_IMAGE}"
+  if [[ "${has_kern_c}" == "true" ]]; then
+    sudo "${GPT}" add -i "${partition_num_kern_c}" -S 1 "${RECOVERY_IMAGE}"
+
+    # Set the KERN-C priority non-zero, otherwise the firmware won't try it.
+    sudo "${GPT}" add -i "${partition_num_kern_c}" -P 1 "${RECOVERY_IMAGE}"
+  fi
 
   # Repeat for the legacy bioses.
   # Replace vmlinuz.A with the recovery version we built.
   # TODO(wad): Extract the $RECOVERY_KERNEL_IMAGE and grab vmlinuz from there.
-  local sysroot="$FACTORY_ROOT"
-  local vmlinuz="$sysroot/boot/vmlinuz"
-  local failed=0
+  local sysroot vmlinuz failed
+  sysroot="${FACTORY_ROOT}"
+  vmlinuz="${sysroot}/boot/vmlinuz"
+  failed=0
 
-  if [ "$ARCH" = "x86" ]; then
+  if [ "${ARCH}" = "x86" ]; then
     RECOVERY_DEV=$(loopback_partscan "${RECOVERY_IMAGE}")
     # There is no syslinux on ARM, so this copy only makes sense for x86.
+    local partition_num_efi_system esp_mnt
+
     set +e
-    local partition_num_efi_system=$(get_image_partition_number \
+    partition_num_efi_system=$(get_image_partition_number \
       "${RECOVERY_IMAGE}" "EFI-SYSTEM")
-    local esp_mnt=$(mktemp -d)
-    sudo mount ${RECOVERY_DEV}p${partition_num_efi_system} "$esp_mnt"
-    sudo cp "$vmlinuz" "$esp_mnt/syslinux/vmlinuz.A" || failed=1
-    safe_umount "$esp_mnt"
-    rmdir "$esp_mnt"
-    sudo losetup -d ${RECOVERY_DEV}
+    esp_mnt=$(mktemp -d)
+    sudo mount "${RECOVERY_DEV}"p"${partition_num_efi_system}" "${esp_mnt}"
+    sudo cp "${vmlinuz}" "${esp_mnt}/syslinux/vmlinuz.A" || failed=1
+    safe_umount "${esp_mnt}"
+    rmdir "${esp_mnt}"
+    loopback_detach "${RECOVERY_DEV}"
     switch_to_strict_mode
   fi
 
-  if [ $failed -eq 1 ]; then
+  if [ "${failed}" -eq 1 ]; then
     echo "Failed to copy recovery kernel to ESP"
     return 1
   fi
@@ -265,15 +323,17 @@
 find_sectors_needed() {
   # Find the minimum disk sectors needed for a file system to hold a list of
   # files or directories.
-  local base_dir="$1"
-  local file_list="$2"
+  local base_dir file_list in_use sectors_needed
+
+  base_dir="$1"
+  read -r -a file_list <<< "$2"
 
   # Calculate space needed by the files we'll be copying, plus
   # a reservation for recovery logs or other runtime data.
-  local in_use=$(cd "${base_dir}"
-                 du -s -B512 ${file_list} |
-                   awk '{ sum += $1 } END { print sum }')
-  local sectors_needed=$(( in_use + FLAGS_statefulfs_sectors ))
+  in_use=$(cd "${base_dir}" || die "${base_dir} doesn't exists."
+            du -s -B512 "${file_list[@]}" |
+            awk '{ sum += $1 } END { print sum }')
+  sectors_needed=$(( in_use + FLAGS_statefulfs_sectors ))
 
   # Add 10% overhead for the FS, rounded down.  There's some
   # empirical justification for this number, but at heart, it's a
@@ -290,7 +350,7 @@
   local src_img="$1"
   local dst_img="$2"
 
-  local old_stateful_offset old_stateful_mnt sectors_needed
+  local old_stateful_mnt sectors_needed
   local small_stateful new_stateful_mnt
 
   # Mount the old stateful partition so we can copy selected values
@@ -308,7 +368,8 @@
   small_stateful=$(mktemp)
   dd if=/dev/zero of="${small_stateful}" bs=512 \
     count="${sectors_needed}" 1>&2
-  trap "rm ${small_stateful}; sudo losetup -d ${IMAGE_DEV} || true; cleanup" \
+  trap \
+    'rm "${small_stateful}"; loopback_detach "${IMAGE_DEV}" || true; cleanup' \
     EXIT
 
   # Don't bother with ext3 for such a small image.
@@ -321,7 +382,7 @@
   # Force all of the file writes to complete, in case it's necessary for
   # crbug.com/954188
   sync
-  sudo mount -o loop $small_stateful $new_stateful_mnt
+  sudo mount -o loop "${small_stateful}" "${new_stateful_mnt}"
 
   # Create the directories that are going to be needed below. With correct
   # permissions and ownership.
@@ -335,11 +396,12 @@
   done
 
   # Cleanup everything.
-  safe_umount "$old_stateful_mnt"
-  safe_umount "$new_stateful_mnt"
-  rmdir "$old_stateful_mnt"
-  rmdir "$new_stateful_mnt"
-  sudo losetup -d ${IMAGE_DEV}
+  safe_umount "${old_stateful_mnt}"
+  # Delete the loop device associated with this mount.
+  safe_umount -d "${new_stateful_mnt}"
+  rmdir "${old_stateful_mnt}"
+  rmdir "${new_stateful_mnt}"
+  loopback_detach "${IMAGE_DEV}"
   trap cleanup EXIT
   switch_to_strict_mode
 
@@ -376,7 +438,7 @@
   # Cleanup everything.
   safe_umount "${old_stateful_mnt}"
   rmdir "${old_stateful_mnt}"
-  sudo losetup -d "${IMAGE_DEV}"
+  loopback_detach "${IMAGE_DEV}"
 
   return 0
 }
@@ -439,7 +501,8 @@
       if [[ "${size}" -gt "${dst_size}" ]]; then
         die "Partition #${part} larger than the destination partition"
       fi
-      local src_start="$(cgpt show -i "${part}" -b "${src_img}")"
+      local src_start
+      src_start="$(cgpt show -i "${part}" -b "${src_img}")"
       dd if="${src_img}" of="${dst_img}" conv=notrunc bs=512 \
          skip="${src_start}" seek="${dst_start}" count="${size}" \
          status=none
@@ -452,10 +515,10 @@
 cleanup() {
   set +e
   if [[ -n "${RECOVERY_DEV}" ]]; then
-    sudo losetup -d "${RECOVERY_DEV}"
+    loopback_detach "${RECOVERY_DEV}" || true
   fi
   if [[ -n "${IMAGE_DEV}" ]]; then
-    sudo losetup -d "${IMAGE_DEV}"
+    loopback_detach "${IMAGE_DEV}" || true
   fi
   if [[ "${FLAGS_image}" != "${RECOVERY_IMAGE}" ]]; then
     rm "${RECOVERY_IMAGE}"
@@ -468,58 +531,59 @@
 set -u
 
 # No image was provided, use standard latest image path.
-if [ -z "$FLAGS_image" ]; then
+if [ -z "${FLAGS_image}" ]; then
+  # Ignore SC2153, since IMAGES_DIR is defined in common.sh
+  # shellcheck disable=SC2153,SC2154
   FLAGS_image="${IMAGES_DIR}/${BOARD}/latest/${CHROMEOS_IMAGE_NAME}"
 fi
 
 # Turn path into an absolute path.
-FLAGS_image=$(readlink -f "$FLAGS_image")
+FLAGS_image=$(readlink -f "${FLAGS_image}")
 
 # Abort early if we can't find the image.
-if [ ! -f "$FLAGS_image" ]; then
-  die_notrace "Image not found: $FLAGS_image"
+if [ ! -f "${FLAGS_image}" ]; then
+  die_notrace "Image not found: ${FLAGS_image}"
 fi
 
-IMAGE_DIR="$(dirname "$FLAGS_image")"
-IMAGE_NAME="$(basename "$FLAGS_image")"
-RECOVERY_IMAGE="${FLAGS_to:-$IMAGE_DIR/$CHROMEOS_RECOVERY_IMAGE_NAME}"
+IMAGE_DIR="$(dirname "${FLAGS_image}")"
+RECOVERY_IMAGE="${FLAGS_to:-${IMAGE_DIR}/${CHROMEOS_RECOVERY_IMAGE_NAME}}"
 RECOVERY_KERNEL_IMAGE=\
-"${FLAGS_kernel_outfile:-$IMAGE_DIR/$RECOVERY_KERNEL_NAME}"
-STATEFUL_DIR="$IMAGE_DIR/stateful_partition"
-SCRIPTS_DIR=${SCRIPT_ROOT}
+"${FLAGS_kernel_outfile:-${IMAGE_DIR}/${RECOVERY_KERNEL_NAME}}"
+SCRIPTS_DIR="${SCRIPT_ROOT}"
 RECOVERY_DEV=""
 IMAGE_DEV=""
 
-if [ $FLAGS_kernel_image_only -eq $FLAGS_TRUE -a \
-     -n "$FLAGS_kernel_image" ]; then
+if [ "${FLAGS_kernel_image_only}" -eq "${FLAGS_TRUE}" ] && \
+  [ -n "${FLAGS_kernel_image}" ]; then
   die_notrace "Cannot use --kernel_image_only with --kernel_image"
 fi
 
 echo "Creating recovery image from ${FLAGS_image}"
 
 INSTALL_VBLOCK=$(get_install_vblock)
-if [ -z "$INSTALL_VBLOCK" ]; then
+if [ -z "${INSTALL_VBLOCK}" ]; then
   die "Could not copy the vblock from stateful."
 fi
 
+# shellcheck disable=SC2154
 FACTORY_ROOT="${BOARD_ROOT}/factory-root"
 : "${USE:=}"
 
 if [ -z "${FLAGS_kernel_image}" ]; then
   # Build the recovery kernel.
   RECOVERY_KERNEL_FLAGS="recovery_ramfs tpm i2cdev vfat kernel_compress_xz"
-  RECOVERY_KERNEL_FLAGS="${RECOVERY_KERNEL_FLAGS} -kernel_afdo"
-  USE="${USE} ${RECOVERY_KERNEL_FLAGS}" emerge_custom_kernel "$FACTORY_ROOT" ||
-    die "Cannot emerge custom kernel"
+  RECOVERY_KERNEL_FLAGS="${RECOVERY_KERNEL_FLAGS} -kernel_afdo -kern_arm_afdo"
+  USE="${USE} ${RECOVERY_KERNEL_FLAGS}" emerge_custom_kernel "${FACTORY_ROOT}" \
+    || die "Cannot emerge custom kernel"
   create_recovery_kernel_image
-  echo "Recovery kernel created at $RECOVERY_KERNEL_IMAGE"
+  echo "Recovery kernel created at ${RECOVERY_KERNEL_IMAGE}"
 else
-  RECOVERY_KERNEL_IMAGE="$FLAGS_kernel_image"
+  RECOVERY_KERNEL_IMAGE="${FLAGS_kernel_image}"
 fi
 
-if [ $FLAGS_kernel_image_only -eq $FLAGS_TRUE ]; then
+if [ "${FLAGS_kernel_image_only}" -eq "${FLAGS_TRUE}" ]; then
   echo "Kernel emitted. Stopping there."
-  rm "$INSTALL_VBLOCK"
+  rm "${INSTALL_VBLOCK}"
   exit 0
 fi
 
@@ -534,16 +598,16 @@
 copy_partitions "${FLAGS_image}" "${RECOVERY_IMAGE}"
 sync
 
-if [ $FLAGS_decrypt_stateful -eq $FLAGS_TRUE ]; then
+if [ "${FLAGS_decrypt_stateful}" -eq "${FLAGS_TRUE}" ]; then
   stateful_mnt=$(mktemp -d)
   RECOVERY_DEV=$(loopback_partscan "${RECOVERY_IMAGE}")
   partition_num_state=$(get_image_partition_number \
     "${RECOVERY_IMAGE}" "STATE")
-  sudo mount ${RECOVERY_DEV}p${partition_num_state} "${stateful_mnt}"
+  sudo mount "${RECOVERY_DEV}p${partition_num_state}" "${stateful_mnt}"
   echo -n "1" | sudo tee "${stateful_mnt}"/decrypt_stateful >/dev/null
-  sudo umount "$stateful_mnt"
-  rmdir "$stateful_mnt"
-  sudo losetup -d ${RECOVERY_DEV}
+  sudo umount "${stateful_mnt}"
+  rmdir "${stateful_mnt}"
+  loopback_detach "${RECOVERY_DEV}"
 fi
 
 install_recovery_kernel
@@ -553,6 +617,6 @@
   mv "${RECOVERY_IMAGE}" "${FLAGS_image}"
 fi
 
-echo "Recovery image created at $RECOVERY_IMAGE"
+echo "Recovery image created at ${RECOVERY_IMAGE}"
 command_completed
 trap - EXIT
diff --git a/mount_gpt_image.sh b/mount_gpt_image.sh
index d5663f4..bb588a0 100755
--- a/mount_gpt_image.sh
+++ b/mount_gpt_image.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -11,9 +11,13 @@
 echo "Entering $0 $*" >&2
 
 SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
+# shellcheck source=common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
+# shellcheck source=build_library/filesystem_util.sh
 . "${SCRIPT_ROOT}/build_library/filesystem_util.sh" || exit 1
+# shellcheck source=build_library/disk_layout_util.sh
 . "${SCRIPT_ROOT}/build_library/disk_layout_util.sh" || exit 1
+# shellcheck source=build_library/ext2_sb_util.sh
 . "${SCRIPT_ROOT}/build_library/ext2_sb_util.sh" || exit 1
 
 if [[ ${INSIDE_CHROOT} -ne 1 ]]; then
@@ -22,6 +26,7 @@
   INSTALL_ROOT=/usr/share/misc
 fi
 # Load functions and constants for chromeos-install
+# shellcheck source=../../platform2/chromeos-common-script/share/chromeos-common.sh
 . "${INSTALL_ROOT}/chromeos-common.sh" || exit 1
 
 locate_gpt
diff --git a/remote_access.sh b/remote_access.sh
index ac859af..ef845af 100644
--- a/remote_access.sh
+++ b/remote_access.sh
@@ -1,12 +1,20 @@
 #!/bin/bash
-# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
+# Copyright 2009 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 # Library for setting up remote access and running remote commands.
 
-DEFAULT_PRIVATE_KEY="${GCLIENT_ROOT}/src/scripts/mod_for_test_scripts/\
-ssh_keys/testing_rsa"
+case ${SCRIPT_NAME} in
+cros_show_stacks|update_kernel.sh)
+  ;;
+*)
+  echo "remote_access.sh: This script will be removed by July 2023." >&2
+  ;;
+esac
+
+DEFAULT_PRIVATE_KEY="${GCLIENT_ROOT}/chromite/ssh_keys/testing_rsa"
+PARTNER_PRIVATE_KEY="${GCLIENT_ROOT}/sshkeys/partner_testing_rsa"
 
 DEFINE_string remote "" "remote hostname/IP of running Chromium OS instance"
 DEFINE_string private_key "$DEFAULT_PRIVATE_KEY" \
@@ -59,6 +67,9 @@
       "ControlMaster=auto"
       "ControlPersist=45"
     )
+    if [[ -f "${TMP_PRIVATE_PARTNER_KEY}" ]]; then
+      settings+=("IdentityFile=${TMP_PRIVATE_PARTNER_KEY}")
+    fi
     printf -- '-o %s ' "${settings[@]}"
 
     if [[ "${FLAGS_ssh_port}" -ne 0 ]]; then
@@ -110,9 +121,10 @@
 # Send a directory from $1 to $2 on remote host
 #
 # Tries to use rsync -a but will fall back to tar if the remote doesn't
-# have rsync.
+# have rsync.  The optional rsync flags are ignored if we fall back to tar.
 #
-# Use like: remote_send_to /build/board/lib/modules/ /lib/modules/
+# Use like:
+# remote_send_to /build/board/lib/modules/ /lib/modules/ [optional rsync flags]
 remote_send_to() {
   local rsync_rem
   if [ ! -d "$1" ]; then
@@ -121,7 +133,7 @@
 
   if remote_sh rsync --version >/dev/null 2>&1; then
     rsync_rem="$(brackets_enclosed_if_ipv6 "${FLAGS_remote}")"
-    remote_rsync_raw -a "$1/" root@"${rsync_rem}:$2/"
+    remote_rsync_raw -a "${@:3}" "$1/" root@"${rsync_rem}:$2/"
   else
     tar -C "$1" -cz . | remote_sh tar -C "$2" -xz
   fi
@@ -161,6 +173,10 @@
 set_up_remote_access() {
   cp $FLAGS_private_key $TMP_PRIVATE_KEY
   chmod 0400 $TMP_PRIVATE_KEY
+  if [[ -f "${PARTNER_PRIVATE_KEY}" ]]; then
+      cp "${PARTNER_PRIVATE_KEY}" "${TMP_PRIVATE_PARTNER_KEY}"
+      chmod 0400 "${TMP_PRIVATE_PARTNER_KEY}"
+  fi
 
   # Verify the client is reachable before continuing
   local output
@@ -268,6 +284,7 @@
 
 remote_access_init() {
   TMP_PRIVATE_KEY=$TMP/private_key
+  TMP_PRIVATE_PARTNER_KEY="${TMP}/partner_private_key"
   TMP_KNOWN_HOSTS=$TMP/known_hosts
   TMP_CONTROL_FILE="${TMP}/ssh_control-%C"
 
diff --git a/sdk_lib/enter_chroot.sh b/sdk_lib/enter_chroot.sh
index 90306e0..4b7bbb9 100755
--- a/sdk_lib/enter_chroot.sh
+++ b/sdk_lib/enter_chroot.sh
@@ -1,23 +1,28 @@
 #!/bin/bash
 
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 # Script to enter the chroot environment
 
-SCRIPT_ROOT=$(readlink -f $(dirname "$0")/..)
+SCRIPT_ROOT=$(readlink -f "$(dirname "$0")"/..)
+# shellcheck source=../common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
 
+: "${SUDO_USER:=${USER}}"
+
 # Script must be run outside the chroot and as root.
 assert_outside_chroot
 assert_root_user
 
 # Define command line flags
 # See http://code.google.com/p/shflags/wiki/Documentation10x
-DEFINE_string chroot "$DEFAULT_CHROOT_DIR" \
+DEFINE_string chroot "${DEFAULT_CHROOT_DIR}" \
   "The destination dir for the chroot environment." "d"
-DEFINE_string trunk "$GCLIENT_ROOT" \
+DEFINE_string out_dir "${DEFAULT_OUT_DIR}" \
+  "The destination dir for build output and state."
+DEFINE_string trunk "${GCLIENT_ROOT}" \
   "The source trunk to bind mount within the chroot." "s"
 DEFINE_string build_number "" \
   "The build-bot build number (when called by buildbot only)." "b"
@@ -27,17 +32,18 @@
   "The mount point of the chrome broswer source in the chroot."
 DEFINE_string cache_dir "" "Directory to use for caching."
 DEFINE_string goma_dir "" "Goma installed directory."
-DEFINE_string goma_client_json "" "Service account json file for goma."
 DEFINE_string reclient_dir "" "Reclient binaries installed directory."
 DEFINE_string reproxy_cfg_file "" "Config file for re-client's reproxy."
 DEFINE_string working_dir "" \
   "The working directory relative to ${CHROOT_TRUNK_DIR} for the command in \
 chroot, must start with '/' if set."
 
-DEFINE_boolean ssh_agent $FLAGS_TRUE "Import ssh agent."
-DEFINE_boolean early_make_chroot $FLAGS_FALSE \
+DEFINE_boolean ssh_agent "${FLAGS_TRUE}" "Import ssh agent."
+DEFINE_boolean early_make_chroot "${FLAGS_FALSE}" \
   "Internal flag.  If set, the command is run as root without sudo."
-DEFINE_boolean verbose $FLAGS_FALSE "Print out actions taken"
+DEFINE_boolean verbose "${FLAGS_FALSE}" "Print out actions taken"
+DEFINE_boolean pivot_root "${FLAGS_TRUE}" \
+  "Use pivot_root to change the root file system."
 
 # More useful help
 FLAGS_HELP="USAGE: $0 [flags] [VAR=value] [-- command [arg1] [arg2] ...]
@@ -57,11 +63,11 @@
 "
 
 CROS_LOG_PREFIX=cros_sdk:enter_chroot
-SUDO_HOME=$(eval echo ~${SUDO_USER})
+SUDO_HOME=$(eval echo "~${SUDO_USER}")
 
 # Version of info from common.sh that only echos if --verbose is set.
 debug() {
-  if [ $FLAGS_verbose -eq $FLAGS_TRUE ]; then
+  if [[ "${FLAGS_verbose}" -eq "${FLAGS_TRUE}" ]]; then
     info "$*"
   fi
 }
@@ -92,33 +98,33 @@
   .goma_client_oauth2_config  # Auth token for Goma
   .inputrc                    # Preserve command line customizations
 )
-if [[ "${SUDO_USER:-${USER}}" == "chrome-bot" ]]; then
+if [[ "${SUDO_USER}" == "chrome-bot" ]]; then
   # Builders still haven't migrated fully to gitcookies.
   # https://crbug.com/1032944
   FILES_TO_COPY_TO_CHROOT+=( .netrc )
 fi
 
-INNER_CHROME_ROOT=$FLAGS_chrome_root_mount  # inside chroot
-CHROME_ROOT_CONFIG="/var/cache/chrome_root"  # inside chroot
-FUSE_DEVICE="/dev/fuse"
+INNER_CHROME_ROOT=${FLAGS_chrome_root_mount}  # inside chroot
+CHROME_ROOT_CONFIG="/var/cache/chrome_root"   # inside chroot
 
 # We can't use /var/lock because that might be a symlink to /run/lock outside
 # of the chroot.  Or /run on the host system might not exist.
 LOCKFILE="${FLAGS_chroot}/.enter_chroot.lock"
-MOUNTED_PATH=$(readlink -f "$FLAGS_chroot")
+MOUNTED_PATH=$(readlink -f "${FLAGS_chroot}")
 
 # Writes stdin to the given file name as the sudo user in overwrite mode.
 #
 # $@ - The output file names.
 user_clobber() {
-  install -m644 -o ${SUDO_UID} -g ${SUDO_GID} /dev/stdin "$@"
+  # shellcheck disable=SC2154
+  install -m644 -o "${SUDO_UID}" -g "${SUDO_GID}" /dev/stdin "$@"
 }
 
 # Copies the specified file owned by the user to the specified location.
 # If the copy fails as root (e.g. due to root_squash and NFS), retry the copy
 # with the user's account before failing.
 user_cp() {
-  cp -p "$@" 2>/dev/null || sudo -u ${SUDO_USER} -- cp -p "$@"
+  cp -p "$@" 2>/dev/null || sudo -u "${SUDO_USER}" -- cp -p "$@"
 }
 
 # Appends stdin to the given file name as the sudo user.
@@ -126,14 +132,14 @@
 # $1 - The output file name.
 user_append() {
   cat >> "$1"
-  chown ${SUDO_UID}:${SUDO_GID} "$1"
+  chown "${SUDO_UID}:${SUDO_GID}" "$1"
 }
 
 # Create the specified directory, along with parents, as the sudo user.
 #
 # $@ - The directories to create.
 user_mkdir() {
-  install -o ${SUDO_UID} -g ${SUDO_GID} -d "$@"
+  install -o "${SUDO_UID}" -g "${SUDO_GID}" -d "$@"
 }
 
 # Create the specified symlink as the sudo user.
@@ -142,7 +148,7 @@
 # $2 - Link name
 user_symlink() {
   ln -sfT "$1" "$2"
-  chown -h ${SUDO_UID}:${SUDO_GID} "$2"
+  chown -h "${SUDO_UID}:${SUDO_GID}" "$2"
 }
 
 setup_mount() {
@@ -151,10 +157,16 @@
   # these mounts are all contained within an unshare and are therefore
   # inaccessible to other namespaces (e.g. the host desktop system).
   local source="$1"
-  local mount_args="-n $2"
-  local target="$3"
+  local target="$2"
+  shift 2
+  local mount_args=( -n )
+  if [[ $# -gt 0 ]]; then
+    mount_args+=( "$@" )
+  else
+    mount_args+=( --bind )
+  fi
 
-  local mounted_path="${MOUNTED_PATH}$target"
+  local mounted_path="${MOUNTED_PATH}${target}"
 
   case " ${MOUNT_CACHE} " in
   *" ${mounted_path} "*)
@@ -171,15 +183,56 @@
       fi
     fi
     # The args are left unquoted on purpose.
-    if [[ -n ${source} ]]; then
-      mount ${mount_args} "${source}" "${mounted_path}"
+    if [[ -n "${source}" ]]; then
+      mount "${mount_args[@]}" "${source}" "${mounted_path}"
     else
-      mount ${mount_args} "${mounted_path}"
+      mount "${mount_args[@]}" "${mounted_path}"
     fi
     ;;
   esac
 }
 
+symlink_or_bind() {
+  local outside_source="$1"
+  local inside_target="$2"
+
+  # Try to compute the source relative to trunk.
+  local relative_source=${outside_source#"${FLAGS_trunk}"}
+
+  # If the link target is outside the source tree, fall back to a bind mount.
+  if [[ ${outside_source} == "${relative_source}" ]]; then
+    info "Falling back to bind mount for:"
+    info "  '${inside_target}' -> '${outside_source}'"
+    setup_mount "${outside_source}" "${inside_target}"
+    return
+  fi
+
+  # Compute the outside path of the target.
+  target="${FLAGS_chroot}${inside_target}"
+
+  # Compute the inside path of the source.
+  local source="${CHROOT_TRUNK_DIR}${relative_source}"
+
+  # If the target is already a non-empty directory, skip it.
+  if [[ ! -L "${target}" ]] && [[ -d "${target}" ]] && ! ls -A "${target}"; then
+    info "Skipping link for '${inside_target}'"
+    return
+  fi
+
+  # If the symlink is already correct we are done.
+  if [[ -L "${target}" ]] && [[ "$(readlink "${target}")" == "${source}" ]]; then
+    return
+  fi
+
+  # Clear empty directories, incorrect links, etc.
+  if [[ -L "${target}" ]] || [[ -e "${target}" ]]; then
+    info "Cleaning up '${inside_target}'"
+    rm -r "${target}"
+  fi
+
+  ln -s "${source}" "${target}"
+}
+
 copy_ssh_config() {
   # Copy user .ssh/config into the chroot filtering out strings not supported
   # by the chroot ssh. The chroot .ssh directory is passed in as the first
@@ -266,61 +319,6 @@
   fi
 }
 
-# Sanity check the user's environment locale settings.
-check_locale() {
-  if [[ "$(locale -k charmap)" != 'charmap="UTF-8"' ]]; then
-    error "Your locale appears to not support UTF-8 which is required."
-    error "The output from 'locale':"
-    locale
-    error "The output from 'locale -k LC_CTYPE':"
-    locale -k LC_CTYPE
-    error "The language env vars:"
-    env | grep -E '^(LC_|LANG)'
-    if [[ -n "${LC_ALL}" ]]; then
-      error "Never set LC_ALL (=${LC_ALL}) in your environment; only set LANG."
-    fi
-    die_notrace "Please fix your locale settings by setting LANG to UTF-8" \
-      "compatible locale and removing any LC_ variable settings."
-  fi
-}
-
-generate_locales() {
-  # Make sure user's requested locales are available
-  # http://crosbug.com/19139
-  # And make sure en_US{,.UTF-8} are always available as
-  # that what buildbot forces internally
-  local l locales gen_locales=()
-
-  locales=$(printf '%s\n' en_US en_US.UTF-8 ${LANG} \
-    $LC_{ADDRESS,ALL,COLLATE,CTYPE,IDENTIFICATION,MEASUREMENT,MESSAGES} \
-    $LC_{MONETARY,NAME,NUMERIC,PAPER,TELEPHONE,TIME} | \
-    sort -u | sed '/^C$/d')
-  for l in ${locales}; do
-    if [[ ${l} == *.* ]]; then
-      enc=${l#*.}
-    else
-      enc="ISO-8859-1"
-    fi
-    case $(echo ${enc//-} | tr '[:upper:]' '[:lower:]') in
-    utf8) enc="UTF-8";;
-    esac
-    gen_locales+=("${l} ${enc}")
-  done
-  if [[ ${#gen_locales[@]} -gt 0 ]] ; then
-    # Force LC_ALL=C to workaround slow string parsing in bash
-    # with long multibyte strings.  Newer setups have this fixed,
-    # but locale-gen doesn't need to be run in any locale in the
-    # first place, so just go with C to keep it fast.
-    # Force jobs=1 to work around locale-gen weirdness where it simultaneously
-    # wants to and doesn't want to run multiple jobs (and complains about it).
-    # crbug.com/761153
-    # Force PATH to the right value inside the SDK in case the host distro has
-    # its own incompatible setup.
-    chroot "${FLAGS_chroot}" env LC_ALL=C PATH='/bin:/sbin:/usr/bin:/usr/sbin' \
-      locale-gen -q -u -j 1 -G "$(printf '%s\n' "${gen_locales[@]}")"
-  fi
-}
-
 git_config() {
   USER="${SUDO_USER:-${USER}}" \
   HOME="${SUDO_HOME:-${HOME}}" \
@@ -349,8 +347,8 @@
   # If the user didn't set up their username in their gitconfig, look
   # at the default git settings for the user.
   if ! git config -f "${chroot_gitconfig}" user.email >& /dev/null; then
-    local ident=$(cd /; sudo -u ${SUDO_USER} -- git var GIT_COMMITTER_IDENT || \
-                  :)
+    local ident
+    ident=$(cd /; sudo -u "${SUDO_USER}" -- git var GIT_COMMITTER_IDENT || :)
     local ident_name=${ident%% <*}
     local ident_email=${ident%%>*}; ident_email=${ident_email##*<}
     git config -f "${chroot_gitconfig}" --replace-all user.name \
@@ -361,9 +359,8 @@
 
   # Copy the gitcookies file, updating the user's gitconfig to point to it.
   local gitcookies
-  gitcookies="$(git_config_path --file "${chroot_gitconfig}" \
-                  --get http.cookiefile)"
-  if [[ $? -ne 0 ]]; then
+  if ! gitcookies="$(git_config_path --file "${chroot_gitconfig}" \
+                     --get http.cookiefile)"; then
     # Try the default location anyway.
     gitcookies="${SUDO_HOME}/.gitcookies"
   fi
@@ -375,7 +372,7 @@
   fi
   # This line must be at the end because using `git config` changes ownership of
   # the .gitconfig.
-  chown ${SUDO_UID}:${SUDO_GID} "${chroot_gitconfig}"
+  chown "${SUDO_UID}:${SUDO_GID}" "${chroot_gitconfig}"
 }
 
 setup_gclient_cache_dir_mount() {
@@ -387,8 +384,9 @@
     return 0
   fi
 
-  local cache_dir=$(sed -n -E "s/^ *cache_dir *= *'(.*)'/\1/p" \
-                    "${checkout_root}/.gclient")
+  local cache_dir
+  cache_dir=$(sed -n -E "s/^ *cache_dir *= *'(.*)'/\1/p" \
+              "${checkout_root}/.gclient")
   if [[ -z "${cache_dir}" ]]; then
     return 0
   fi
@@ -403,22 +401,23 @@
     return 0
   fi
 
-  setup_mount "${cache_dir}" "--bind" "${cache_dir}"
+  setup_mount "${cache_dir}" "${cache_dir}"
 }
 
 setup_env() {
+  # shellcheck disable=SC2094
   (
     flock 200
 
     # Make the lockfile writable for backwards compatibility.
-    chown ${SUDO_UID}:${SUDO_GID} "${LOCKFILE}"
+    chown "${SUDO_UID}:${SUDO_GID}" "${LOCKFILE}"
 
     # Refresh /etc/resolv.conf and /etc/hosts in the chroot.
-    install -C -m644 /etc/resolv.conf ${FLAGS_chroot}/etc/resolv.conf
-    install -C -m644 /etc/hosts ${FLAGS_chroot}/etc/hosts
+    install -C -m644 /etc/resolv.conf "${FLAGS_chroot}/etc/resolv.conf"
+    install -C -m644 /etc/hosts "${FLAGS_chroot}/etc/hosts"
 
     debug "Mounting chroot environment."
-    MOUNT_CACHE=$(echo $(awk '{print $2}' /proc/mounts))
+    mapfile -t MOUNT_CACHE < <(awk '{print $2}' /proc/mounts)
     # We shouldn't need access to any /run state, so don't mount it.  Some
     # distros (e.g. Ubuntu) might have /dev/shm symlinked to /run/shm.
     local run_shm="${FLAGS_chroot}/run/shm"
@@ -433,9 +432,6 @@
       chmod 1777 "${run_lock}"
     fi
 
-    # Do this early as it's slow and only needs basic mounts (above).
-    generate_locales &
-
     debug "Setting up referenced repositories if required."
     REFERENCE_DIR=$(git_config_path --file  \
       "${FLAGS_trunk}/.repo/manifests.git/config" \
@@ -449,13 +445,11 @@
 
       unset ALTERNATES
 
-      IFS=$'\n';
-      required=( $( sudo -u "${SUDO_USER}" -- \
+      mapfile -t required < <( sudo -u "${SUDO_USER}" -- \
         "${FLAGS_trunk}/chromite/lib/rewrite_git_alternates" \
-        "${FLAGS_trunk}" "${REFERENCE_DIR}" "${CHROOT_TRUNK_DIR}" ) )
-      unset IFS
+        "${FLAGS_trunk}" "${REFERENCE_DIR}" "${CHROOT_TRUNK_DIR}" )
 
-      setup_mount "${FLAGS_trunk}/.repo/chroot/alternates" --bind \
+      setup_mount "${FLAGS_trunk}/.repo/chroot/alternates" \
         "${CHROOT_TRUNK_DIR}/.repo/alternates"
 
       # Note that as we're bringing up each referened repo, we also
@@ -466,16 +460,16 @@
       #
       # Finally note that if you're unfamiliar w/ chroot/vfs semantics,
       # the bind is visible only w/in the chroot.
-      user_mkdir ${FLAGS_trunk}/.repo/chroot/empty
+      user_mkdir "${FLAGS_trunk}/.repo/chroot/empty"
       position=1
       for x in "${required[@]}"; do
         base="${CHROOT_TRUNK_DIR}/.repo/chroot/external${position}"
-        setup_mount "${x}" "--bind" "${base}"
+        setup_mount "${x}" "${base}"
         if [ -e "${x}/.repo/alternates" ]; then
-          setup_mount "${FLAGS_trunk}/.repo/chroot/empty" "--bind" \
+          setup_mount "${FLAGS_trunk}/.repo/chroot/empty" \
             "${base}/.repo/alternates"
         fi
-        position=$(( ${position} + 1 ))
+        position=$(( position + 1 ))
       done
       unset required position base
     fi
@@ -485,7 +479,7 @@
     debug "Setting up shared cache dir directory."
     user_mkdir "${FLAGS_cache_dir}"/distfiles
     user_mkdir "${FLAGS_chroot}/${chroot_cache}"
-    setup_mount "${FLAGS_cache_dir}" "--bind" "${chroot_cache}"
+    setup_mount "${FLAGS_cache_dir}" "${chroot_cache}"
     # Create /var/log/asan directory (b/222311476).
     user_mkdir "${FLAGS_chroot}/var/log/asan"
     # TODO(build): remove this as of 12/01/12.
@@ -499,18 +493,20 @@
       p="${FLAGS_chroot}/etc/profile.d/chromeos-cachedir.sh"
       rm -rf "${distfiles_path}"
       ln -s chromeos-cache/distfiles "${distfiles_path}"
+      # shellcheck disable=SC2174
       mkdir -p -m 775 "${p%/*}"
+      # shellcheck disable=SC2016
       echo 'export CHROMEOS_CACHEDIR=${chroot_cache}' > "${p}"
       chmod 0644 "${p}"
     fi
 
     if [ -d "${SUDO_HOME}/.cidb_creds" ]; then
-      setup_mount "${SUDO_HOME}/.cidb_creds" --bind \
+      setup_mount "${SUDO_HOME}/.cidb_creds" \
         "/home/${SUDO_USER}/.cidb_creds"
     fi
 
-    if [ $FLAGS_ssh_agent -eq $FLAGS_TRUE ]; then
-      if [ -n "${SSH_AUTH_SOCK}" -a -d "${SUDO_HOME}/.ssh" ]; then
+    if [[ "${FLAGS_ssh_agent}" -eq "${FLAGS_TRUE}" ]]; then
+      if [[ -n "${SSH_AUTH_SOCK}" ]] && [[ -d "${SUDO_HOME}/.ssh" ]]; then
         local target_ssh="/home/${SUDO_USER}/.ssh"
         TARGET_DIR="${FLAGS_chroot}${target_ssh}"
         user_mkdir "${TARGET_DIR}"
@@ -519,75 +515,63 @@
         if [[ -e ${known_hosts} ]]; then
           # Ensure there is a file to bind mount onto for setup_mount.
           touch "${TARGET_DIR}/known_hosts"
-          setup_mount "${known_hosts}" --bind "${target_ssh}/known_hosts"
+          setup_mount "${known_hosts}" "${target_ssh}/known_hosts"
         fi
         copy_ssh_config "${TARGET_DIR}"
-        chown -R ${SUDO_UID}:${SUDO_GID} "${TARGET_DIR}"
+        chown -R "${SUDO_UID}:${SUDO_GID}" "${TARGET_DIR}"
 
         if [ -S "${SSH_AUTH_SOCK}" ]; then
           touch "${FLAGS_chroot}/tmp/ssh-auth-sock"
-          setup_mount "${SSH_AUTH_SOCK}" "--bind" "/tmp/ssh-auth-sock"
+          setup_mount "${SSH_AUTH_SOCK}" "/tmp/ssh-auth-sock"
         fi
       fi
     fi
 
     if [[ -d "${SUDO_HOME}/.config/chromite" ]]; then
-      setup_mount "${SUDO_HOME}/.config/chromite" "--bind" \
+      setup_mount "${SUDO_HOME}/.config/chromite" \
         "/home/${SUDO_USER}/.config/chromite"
     fi
 
     # A reference to the DEPOT_TOOLS path may be passed in by cros_sdk.
     if [ -n "${DEPOT_TOOLS}" ]; then
-      debug "Mounting depot_tools"
-      setup_mount "${DEPOT_TOOLS}" --bind "${DEPOT_TOOLS_DIR}"
+      debug "Setting up depot_tools"
+      symlink_or_bind "${DEPOT_TOOLS}" "${DEPOT_TOOLS_DIR}"
     fi
 
     if [[ -n "${FLAGS_reclient_dir}" ]]; then
       debug "Mounting re-client"
-      setup_mount "${FLAGS_reclient_dir}" --bind "/home/${SUDO_USER}/reclient"
+      setup_mount "${FLAGS_reclient_dir}" "/home/${SUDO_USER}/reclient"
     fi
 
     if [[ -n "${FLAGS_reproxy_cfg_file}" ]]; then
       debug "Mounting reproxy config file."
-      setup_mount "${FLAGS_reproxy_cfg_file}" --bind "/home/${SUDO_USER}/reclient_cfgs/reproxy_chroot.cfg"
+      setup_mount "${FLAGS_reproxy_cfg_file}" \
+        "/home/${SUDO_USER}/reclient_cfgs/reproxy_chroot.cfg"
     fi
 
     if [[ -n "${FLAGS_goma_dir}" ]]; then
       debug "Mounting goma"
       # $HOME/goma is the default directory for goma.
       # It is used by goma if GOMA_DIR is not provide.
-      setup_mount "${FLAGS_goma_dir}" --bind "/home/${SUDO_USER}/goma"
-    fi
-
-    if [[ -n "${FLAGS_goma_client_json}" ]]; then
-      debug "Mounting service-account-goma-client.json"
-      local dest_path="/creds/service_accounts/service-account-goma-client.json"
-      # setup_mount assumes the target is a directory by default.
-      # So here, touch the file in advance.
-      local mounted_path="${MOUNTED_PATH}${dest_path}"
-      mkdir -p "$(dirname "${mounted_path}")"
-      touch "${mounted_path}"
-      # Note: Original UID:GID and permission should be inherited even after
-      # mounting.
-      setup_mount "${FLAGS_goma_client_json}" --bind "${dest_path}"
+      setup_mount "${FLAGS_goma_dir}" "/home/${SUDO_USER}/goma"
     fi
 
     # Mount additional directories as specified in .local_mounts file.
     local local_mounts="${FLAGS_trunk}/src/scripts/.local_mounts"
-    if [[ -f ${local_mounts} ]]; then
+    if [[ -f "${local_mounts}" ]]; then
       debug "Mounting local folders"
       # format: mount_source
       #      or mount_source mount_point
       #      or # comments
       local mount_source mount_point
-      while read mount_source mount_point; do
-        if [[ -z ${mount_source} ]]; then
+      while read -r mount_source mount_point; do
+        if [[ -z "${mount_source}" ]]; then
           continue
         fi
         # if only source is assigned, use source as mount point.
-        : ${mount_point:=${mount_source}}
+        : "${mount_point:=${mount_source}}"
         debug "  mounting ${mount_source} on ${mount_point}"
-        setup_mount "${mount_source}" "--bind" "${mount_point}"
+        setup_mount "${mount_source}" "${mount_point}"
       done < <(sed -e 's:#.*::' "${local_mounts}" | xargs -0)
     fi
 
@@ -596,39 +580,32 @@
         die_notrace "${FLAGS_chrome_root} does not exist."
       fi
     fi
-    if [ -z "$CHROME_ROOT" ]; then
+    if [ -z "${CHROME_ROOT}" ]; then
       CHROME_ROOT="$(cat "${FLAGS_chroot}${CHROME_ROOT_CONFIG}" \
         2>/dev/null || :)"
       CHROME_ROOT_AUTO=1
     fi
-    if [[ -n "$CHROME_ROOT" ]]; then
+    if [[ -n "${CHROME_ROOT}" ]]; then
       if [[ ! -d "${CHROME_ROOT}/src" ]]; then
         error "Not mounting chrome source: could not find CHROME_ROOT/src dir."
         error "Full path we tried: ${CHROME_ROOT}/src"
         rm -f "${FLAGS_chroot}${CHROME_ROOT_CONFIG}"
-        if [[ ! "$CHROME_ROOT_AUTO" ]]; then
+        if [[ -z "${CHROME_ROOT_AUTO}" ]]; then
           exit 1
         fi
       else
-        debug "Mounting chrome source at: $INNER_CHROME_ROOT"
-        echo $CHROME_ROOT > "${FLAGS_chroot}${CHROME_ROOT_CONFIG}"
-        setup_mount "$CHROME_ROOT" --bind "$INNER_CHROME_ROOT"
-        setup_gclient_cache_dir_mount "$CHROME_ROOT"
+        debug "Mounting chrome source at: ${INNER_CHROME_ROOT}"
+        echo "${CHROME_ROOT}" > "${FLAGS_chroot}${CHROME_ROOT_CONFIG}"
+        setup_mount "${CHROME_ROOT}" "${INNER_CHROME_ROOT}"
+        setup_gclient_cache_dir_mount "${CHROME_ROOT}"
       fi
     fi
 
-    # Install fuse module.  Skip modprobe when possible for slight
-    # speed increase when initializing the env.
-    if [ -c "${FUSE_DEVICE}" ] && ! grep -q fuse /proc/filesystems; then
-      modprobe fuse 2> /dev/null ||\
-        warn "-- Note: modprobe fuse failed.  gmergefs will not work"
-    fi
-
     # Bind mount the host kernel modules read-only so modprobe can be used
     # inside the chroot for things like usbip-host.
     local modules_dir="/lib/modules"
     if [ -d "${modules_dir}" ]; then
-      setup_mount "${modules_dir}" "--bind -o ro" "${modules_dir}"
+      setup_mount "${modules_dir}" "${modules_dir}" --bind -o ro
     fi
 
     # Fix permissions on ccache tree.  If this is a fresh chroot, then they
@@ -637,16 +614,11 @@
     # on demand, but only when it updates.
     ccache_dir="${FLAGS_chroot}/var/cache/distfiles/ccache"
     if [[ ! -d ${ccache_dir} ]]; then
+      # shellcheck disable=SC2174
       mkdir -p -m 2775 "${ccache_dir}"
     fi
-    (
-      find -H "${ccache_dir}" '(' -type d -a '!' -perm 2775 ')' \
-        -exec chmod 2775 {} +
-      find -H "${ccache_dir}" -gid 0 -exec chgrp 250 {} +
-      # These settings are kept in sync with the gcc ebuild.
-      chroot "${FLAGS_chroot}" env CCACHE_DIR=/var/cache/distfiles/ccache \
-        CCACHE_UMASK=002 ccache -F 0 -M 11G >/dev/null
-    ) &
+    unshare --mount "${SCRIPT_ROOT}/sdk_lib/fix_ccache.sh" --chroot \
+      "${FLAGS_chroot}" &
 
     # Certain files get copied into the chroot when entering.
     for fn in "${FILES_TO_COPY_TO_CHROOT[@]}"; do
@@ -673,26 +645,26 @@
     # to /root for sudoed invocations.
     chroot_user_boto="${FLAGS_chroot}/home/${SUDO_USER}/.boto"
     chroot_root_boto="${FLAGS_chroot}/root/.boto"
-    if [ -f ${SUDO_HOME}/.boto ]; then
+    if [ -f "${SUDO_HOME}/.boto" ]; then
       # Pass --remote-destination to overwrite a symlink.
-      user_cp "--remove-destination" "${SUDO_HOME}/.boto" "$chroot_user_boto"
-      cp "--remove-destination" "$chroot_user_boto" "$chroot_root_boto"
+      user_cp "--remove-destination" "${SUDO_HOME}/.boto" "${chroot_user_boto}"
+      cp "--remove-destination" "${chroot_user_boto}" "${chroot_root_boto}"
     elif [ -f "/etc/boto.cfg" ]; then
       # For GCE instances, the non-chroot .boto file is not deployed so
       # use the system /etc/boto.cfg if it exists.
-      user_cp "--remove-destination" "/etc/boto.cfg" "$chroot_user_boto"
-      cp "--remove-destination" "$chroot_user_boto" "$chroot_root_boto"
+      user_cp "--remove-destination" "/etc/boto.cfg" "${chroot_user_boto}"
+      cp "--remove-destination" "${chroot_user_boto}" "${chroot_root_boto}"
     fi
 
     # If user doesn't have a boto file, check if the private overlays
     # are installed and use those credentials.
     boto='src/private-overlays/chromeos-overlay/googlestorage_account.boto'
     if [ -s "${FLAGS_trunk}/${boto}" ]; then
-      if [ ! -e "$chroot_user_boto" ]; then
-        user_symlink "trunk/${boto}" "$chroot_user_boto"
+      if [ ! -e "${chroot_user_boto}" ]; then
+        user_symlink "trunk/${boto}" "${chroot_user_boto}"
       fi
-      if [ ! -e "$chroot_root_boto" ]; then
-        ln -sf "${CHROOT_TRUNK_DIR}/${boto}" "$chroot_root_boto"
+      if [ ! -e "${chroot_root_boto}" ]; then
+        ln -sf "${CHROOT_TRUNK_DIR}/${boto}" "${chroot_root_boto}"
       fi
     fi
 
@@ -702,16 +674,19 @@
     # actually write to their directory.
     gsutil_dir="${FLAGS_chroot}/home/${SUDO_USER}/.gsutil"
     if [ -d "${gsutil_dir}" ]; then
-      chown -R ${SUDO_UID}:${SUDO_GID} "${gsutil_dir}"
+      chown -R "${SUDO_UID}:${SUDO_GID}" "${gsutil_dir}"
     fi
-  ) 200>>"$LOCKFILE" || die "setup_env failed"
+  ) 200>>"${LOCKFILE}" || die "setup_env failed"
+
+  # shellcheck disable=SC2086
+  # Clear locale related variables, since C.UTF-8 will be used in the chroot.
+  unset -v LANGUAGE ${!LC_*}
 }
 
-check_locale
 setup_env
 
 CHROOT_PASSTHRU=(
-  "BUILDBOT_BUILD=$FLAGS_build_number"
+  "BUILDBOT_BUILD=${FLAGS_build_number}"
   "CHROMEOS_RELEASE_APPID=${CHROMEOS_RELEASE_APPID:-{DEV-BUILD}}"
   "EXTERNAL_TRUNK_PATH=${FLAGS_trunk}"
 
@@ -721,6 +696,9 @@
 
   # We don't want to auto-update depot_tools inside of the SDK as we manage it.
   "DEPOT_TOOLS_UPDATE=0"
+
+  # Force LANG=C.UTF-8, so locales do not need to be generated.
+  "LANG=C.UTF-8"
 )
 
 # Needs to be set here because setup_env runs in a subshell.
@@ -743,16 +721,28 @@
 # the source trunk for scripts that may need to print it (e.g.
 # build_image.sh).
 
-if [ $FLAGS_early_make_chroot -eq $FLAGS_TRUE ]; then
+if [ "${FLAGS_early_make_chroot}" -eq "${FLAGS_TRUE}" ]; then
   cmd=( /bin/bash -l -c 'env "$@"' -- )
 elif [ ! -x "${FLAGS_chroot}/usr/bin/sudo" ]; then
   # Complain that sudo is missing.
   error "Failing since the chroot lacks sudo."
-  error "Requested enter_chroot command was: $@"
+  error "Requested enter_chroot command was: $*"
   exit 127
 else
   cmd=( sudo -i -u "${SUDO_USER}" )
 fi
 
 cmd+=( "${CHROOT_PASSTHRU[@]}" "$@" )
-exec chroot "${FLAGS_chroot}" "${cmd[@]}"
+
+if [[ "${FLAGS_pivot_root}" -eq "${FLAGS_TRUE}" ]]; then
+  # See pivot_root(8) man page for the safe usage of pivot_root.
+  # See also pivot_root(".", ".") section of pivot_root(2) man page.
+  cd "${FLAGS_chroot}" || exit 1
+  pivot_root . .
+  umount -l .
+  chroot="."
+else
+  chroot=${FLAGS_chroot}
+fi
+
+exec chroot "${chroot}" "${cmd[@]}"
diff --git a/sdk_lib/fix_ccache.sh b/sdk_lib/fix_ccache.sh
new file mode 100755
index 0000000..c036386
--- /dev/null
+++ b/sdk_lib/fix_ccache.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Script to fix permissions on ccache tree.
+
+SCRIPT_ROOT=$(readlink -f "$(dirname "$0")"/..)
+# shellcheck source=../common.sh
+. "${SCRIPT_ROOT}/common.sh" || exit 1
+
+# Define command line flags
+# See http://code.google.com/p/shflags/wiki/Documentation10x
+DEFINE_string chroot "" "The destination dir for the chroot environment."
+
+# Parse command line flags.
+FLAGS_HELP="usage: ${SCRIPT_NAME} [flags]"
+FLAGS "$@" || exit 1
+eval set -- "${FLAGS_ARGV}"
+
+if [ -z "${FLAGS_chroot}" ]; then
+  die "--chroot is required"
+fi
+
+# Walking ccache dir can be expensive, so only do it once, but make sure
+# to run both sets of tests+execs independently.
+ccache_dir="${FLAGS_chroot}/var/cache/distfiles/ccache"
+find -H "${ccache_dir}" \
+  '(' -type d -a '!' -perm 2775 ')' -exec chmod 2775 {} + \
+  , \
+  -gid 0 -exec chgrp 250 {} +
+
+# These settings are kept in sync with the gcc ebuild.
+chroot "${FLAGS_chroot}" env CCACHE_DIR=/var/cache/distfiles/ccache \
+  CCACHE_UMASK=002 ccache -F 0 -M 11G >/dev/null
diff --git a/sdk_lib/make_chroot.sh b/sdk_lib/make_chroot.sh
index 6771a80..50f83ca 100755
--- a/sdk_lib/make_chroot.sh
+++ b/sdk_lib/make_chroot.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -10,6 +10,7 @@
 # One can enter the chrooted environment for work by running enter_chroot.sh.
 
 SCRIPT_ROOT=$(readlink -f $(dirname "$0")/..)
+# shellcheck source=../common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
 
 ENTER_CHROOT=$(readlink -f $(dirname "$0")/enter_chroot.sh)
@@ -59,6 +60,7 @@
 
 [[ -z "${FLAGS_cache_dir}" ]] && die "--cache_dir is required"
 
+# shellcheck source=make_conf_util.sh
 . "${SCRIPT_ROOT}"/sdk_lib/make_conf_util.sh
 
 USEPKG=""
@@ -83,8 +85,8 @@
 # Invoke enter_chroot.  This can only be used after sudo has been installed.
 enter_chroot() {
   echo "$(date +%H:%M:%S) [enter_chroot] $*"
-  "$ENTER_CHROOT" --cache_dir "${FLAGS_cache_dir}" --chroot "$FLAGS_chroot" \
-    -- "${ENTER_CHROOT_ARGS[@]}" "$@"
+  "${ENTER_CHROOT}" --cache_dir "${FLAGS_cache_dir}" --chroot \
+    "${FLAGS_chroot}" --nopivot_root -- "${ENTER_CHROOT_ARGS[@]}" "$@"
 }
 
 # Invoke enter_chroot running the command as root, and w/out sudo.
@@ -92,8 +94,8 @@
 early_env=()
 early_enter_chroot() {
   echo "$(date +%H:%M:%S) [early_enter_chroot] $*"
-  "$ENTER_CHROOT" --chroot "$FLAGS_chroot" --early_make_chroot \
-    --cache_dir "${FLAGS_cache_dir}" \
+  "${ENTER_CHROOT}" --chroot "${FLAGS_chroot}" --early_make_chroot \
+    --cache_dir "${FLAGS_cache_dir}" --nopivot_root \
     -- "${ENTER_CHROOT_ARGS[@]}" "${early_env[@]}" "$@"
 }
 
@@ -140,11 +142,6 @@
    # Fix bad group for some.
    chown -R root:root "${FLAGS_chroot}/etc/"sudoers*
 
-   info "Setting up hosts/resolv..."
-   # Copy config from outside chroot into chroot.
-   cp /etc/{hosts,resolv.conf} "$FLAGS_chroot/etc/"
-   chmod 0644 "$FLAGS_chroot"/etc/{hosts,resolv.conf}
-
    # Setup host make.conf. This includes any overlay that we may be using
    # and a pointer to pre-built packages.
    # TODO: This should really be part of a profile in the portage.
@@ -190,40 +187,10 @@
    # these are defined as w/in the chroot.
    bare_chroot chown "${SUDO_USER}:portage" /var/cache/chromeos-chrome
 
-   # Add chromite/bin and depot_tools into the path globally; note that the
-   # chromite wrapper itself might also be found in depot_tools.
-   # We rely on 'env-update' getting called below.
-   target="${FLAGS_chroot}/etc/env.d/99chromiumos"
-   cat <<EOF > "${target}"
-PATH="${CHROOT_TRUNK_DIR}/chromite/bin:${DEPOT_TOOLS_DIR}"
-CROS_WORKON_SRCROOT="${CHROOT_TRUNK_DIR}"
-PORTAGE_USERNAME="${SUDO_USER}"
-EOF
-
    # TODO(zbehan): Configure stuff that is usually configured in postinst's,
    # but wasn't. Fix the postinst's.
    info "Running post-inst configuration hacks"
    early_enter_chroot env-update
-
-   target="${FLAGS_chroot}/etc/profile.d"
-   mkdir -p "${target}"
-   ln -sfT \
-     "/mnt/host/source/chromite/sdk/etc/profile.d/50-chromiumos-niceties.sh" \
-     "${target}/50-chromiumos-niceties.sh"
-
-   # Select a small set of locales for the user if they haven't done so
-   # already.  This makes glibc upgrades cheap by only generating a small
-   # set of locales.  The ones listed here are basically for the buildbots
-   # which always assume these are available.  This works in conjunction
-   # with `cros_sdk --enter`.
-   # http://crosbug.com/20378
-   local localegen="$FLAGS_chroot/etc/locale.gen"
-   if ! grep -q -v -e '^#' -e '^$' "${localegen}" ; then
-     cat <<EOF >> "${localegen}"
-en_US ISO-8859-1
-en_US.UTF-8 UTF-8
-EOF
-   fi
 }
 
 CHROOT_TRUNK="${CHROOT_TRUNK_DIR}"
@@ -296,9 +263,10 @@
   early_enter_chroot ${EMERGE_CMD} -uNv ${USEPKG} ${USEPKGONLY} ${EMERGE_JOBS} \
     sys-kernel/linux-headers sys-libs/glibc
 
-  # Next libcxx. This is required due to the migration to LLVM runtime builds.
+  # Next libcxx and libunwind. This is required due to the migration to LLVM
+  # runtime builds.
   early_enter_chroot ${EMERGE_CMD} -uN --nodeps ${USEPKG} \
-    sys-libs/libcxx
+    sys-libs/llvm-libunwind sys-libs/libcxx
 
   # XXX(ovt): ebuild_license_hook requires yaml module that is available
   # only for python3.6 in the existing SDK
diff --git a/sdk_lib/make_conf_util.sh b/sdk_lib/make_conf_util.sh
index 9865032..b8344f7 100644
--- a/sdk_lib/make_conf_util.sh
+++ b/sdk_lib/make_conf_util.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
diff --git a/sdk_lib/rewrite-sudoers.d.sh b/sdk_lib/rewrite-sudoers.d.sh
index 0219b91..613854c 100755
--- a/sdk_lib/rewrite-sudoers.d.sh
+++ b/sdk_lib/rewrite-sudoers.d.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -35,10 +35,9 @@
 cat > "${root}/etc/sudoers.d/90_cros" <<EOF
 Defaults env_keep += "$*"
 
-# We need adm currently to let sudo work inside ebuilds.
-%adm ALL=(ALL) ALL
-root ALL=(ALL) ALL
-${username} ALL=NOPASSWD: ALL
+# adm lets users & ebuilds run sudo (e.g. platform2 sysroot test runners).
+%adm ALL=(ALL) NOPASSWD: ALL
+${username} ALL=(ALL) NOPASSWD: ALL
 
 # Simplify the -v option checks due to overlap of the adm group and the user's
 # supplementary groups.  We don't set any passwords, so disable asking.
@@ -46,5 +45,5 @@
 Defaults verifypw = any
 EOF
 
-chmod 0440 "${root}/etc/sudoers.d/90_cros"
+chmod 0444 "${root}/etc/sudoers.d/90_cros"
 chown root:root "${root}/etc/sudoers.d/90_cros"
diff --git a/set_shared_user_password.sh b/set_shared_user_password.sh
index 91ee8d5..173cefe 100755
--- a/set_shared_user_password.sh
+++ b/set_shared_user_password.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -9,6 +9,7 @@
 
 # Make sure common.sh is here.
 SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+# shellcheck source=common.sh
 . "${SCRIPT_ROOT}/common.sh" || { echo "Unable to load common.sh"; exit 1; }
 
 assert_inside_chroot
diff --git a/termina_build_image b/termina_build_image
index 3593ab0..0af0389 100755
--- a/termina_build_image
+++ b/termina_build_image
@@ -1,10 +1,12 @@
 #!/bin/bash
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
+# shellcheck source=build_library/build_common.sh
 . "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
+# shellcheck source=build_library/filesystem_util.sh
 . "${SCRIPT_ROOT}/build_library/filesystem_util.sh" || exit 1
 
 TERMINA_BUILD_IMAGE_PY="${SCRIPT_ROOT}/../platform/container-guest-tools/termina/termina_build_image.py"
diff --git a/update_bootloaders.sh b/update_bootloaders.sh
index 371859e..2c3ed4d 100755
--- a/update_bootloaders.sh
+++ b/update_bootloaders.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -8,13 +8,16 @@
 # It does not populate the templates, but can update a loop device.
 
 SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+# shellcheck source=common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
+# shellcheck source=build_library/disk_layout_util.sh
 . "${BUILD_LIBRARY_DIR}/disk_layout_util.sh" || exit 1
 
 # Need to be inside the chroot to load chromeos-common.sh
 assert_inside_chroot
 
 # Load functions and constants for chromeos-install
+# shellcheck source=../../platform2/chromeos-common-script/share/chromeos-common.sh
 . /usr/share/misc/chromeos-common.sh || exit 1
 
 # Flags.
@@ -59,6 +62,7 @@
 eval set -- "${FLAGS_ARGV}"
 switch_to_strict_mode
 
+# shellcheck source=build_library/board_options.sh
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
 load_board_specific_script "board_specific_setup.sh"
 
diff --git a/update_chroot b/update_chroot
index 782365f..be96d9b 100755
--- a/update_chroot
+++ b/update_chroot
@@ -1,10 +1,10 @@
 #!/bin/bash
 
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-
+# shellcheck source=common.sh
 . "$(dirname "$0")/common.sh" || exit 1
 
 # Script must run inside the chroot
@@ -17,7 +17,7 @@
 DEFINE_boolean usepkg "${FLAGS_TRUE}" \
   "Use binary packages to bootstrap."
 
-FLAGS_HELP="usage: $(basename $0) [flags]
+FLAGS_HELP="usage: $(basename "$0") [flags]
 Performs an update of the chroot. This script is called as part of
 build_packages, so there is typically no need to call this script directly.
 "
@@ -42,6 +42,7 @@
 # so will die prematurely if 'switch_to_strict_mode' is specified before now.
 switch_to_strict_mode
 
+# shellcheck source=sdk_lib/make_conf_util.sh
 . "${SCRIPTS_DIR}"/sdk_lib/make_conf_util.sh
 
 # Run version hooks as pre-update
@@ -62,9 +63,8 @@
 
 sudo_clear_shadow_locks /
 
-# First make sure the cross-compilers have the right config settings.
-# We don't actually build them yet though as we want to wait for the
-# sdk to have all up-to-date packages.
+# First update the cross-compilers.
+# Note that this uses binpkgs only, unless we pass --nousepkg below.
 if [ "${FLAGS_skip_toolchain_update}" -eq "${FLAGS_FALSE}" ]; then
   info "Updating cross-compilers"
   TOOLCHAIN_FLAGS=()
@@ -80,14 +80,19 @@
     TOOLCHAIN_FLAGS+=( --nousepkg )
   fi
   # Expand the path before sudo, as root doesn't have the same path magic.
-  info_run sudo -E $(type -p cros_setup_toolchains) "${TOOLCHAIN_FLAGS[@]}"
+  info_run sudo -E "$(type -p cros_setup_toolchains)" "${TOOLCHAIN_FLAGS[@]}"
 fi
 
 # Make sure depot_tools is bootstrapped, so that it can build chromeos-chrome.
 info "Bootstrapping depot_tools"
-ensure_bootstrap
+"${DEPOT_TOOLS_DIR}"/ensure_bootstrap
 
-# Perform an update of all the sdk packages in the chroot.
+# Clean outdated packages in SDK.
+if [[ -e ~/.config/chromite/autocop ]] || [[ "${USER}" == "chrome-bot" ]]; then
+  # Use "|| true" to not exit on errors for one command.
+  cros clean-outdated-pkgs --host || true
+fi
+
 EMERGE_CMD="${CHROMITE_BIN}/parallel_emerge"
 
 info "Rebuilding Portage cache"
@@ -108,15 +113,14 @@
 if [ "${FLAGS_usepkg}" -eq "${FLAGS_TRUE}" ]; then
   EMERGE_FLAGS+=( --getbinpkg )
 
-  # Only update toolchain when binpkgs are available. Toolchain rollout
-  # process only takes place when the chromiumos sdk builder finishes
-  # a successful build.
+  # Avoid building toolchain packages or "post-cross" packages from
+  # source. The toolchain rollout process only takes place when the
+  # chromiumos-sdk builder finishes a successful build.
   PACKAGES=(
     $("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host)
   )
   # Sanity check we got some valid results.
   [[ ${#PACKAGES[@]} -eq 0 ]] && die_notrace "cros_setup_toolchains failed"
-  # Update post cross-packages. This is needed to update rust.
   PACKAGES+=(
     $("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host-post-cross)
   )
@@ -142,8 +146,8 @@
 EMERGE_FLAGS+=( --deep )
 info_run sudo -E "${EMERGE_CMD}" "${EMERGE_FLAGS[@]}" virtual/target-sdk world
 
-# Install post cross packages if binary pkgs are available.
 if [ "${FLAGS_usepkg}" -eq "${FLAGS_TRUE}" ]; then
+  # Update "post-cross" packages.
   # Use --usepkgonly to ensure that packages are not built from source.
   EMERGE_FLAGS=( -uNv --with-bdeps=y --oneshot --getbinpkg --deep )
   EMERGE_FLAGS+=( --usepkgonly --rebuilt-binaries=n )
diff --git a/update_kernel.sh b/update_kernel.sh
index acc29db..96c5580 100755
--- a/update_kernel.sh
+++ b/update_kernel.sh
@@ -1,13 +1,15 @@
 #!/bin/bash
 
-# Copyright (c) 2009-2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2009-2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 # Script to update the kernel on a live running ChromiumOS instance.
 
 SCRIPT_ROOT="$(dirname "$(readlink -f "$0")")"
+# shellcheck source=common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
+# shellcheck source=remote_access.sh
 . "${SCRIPT_ROOT}/remote_access.sh" || exit 1
 
 # Script must be run inside the chroot.
@@ -27,8 +29,7 @@
 DEFINE_string rootoff "" "Override root offset"
 DEFINE_string rootfs "" "Override rootfs partition reported by target"
 DEFINE_string arch "" "Override architecture reported by target"
-DEFINE_boolean clean "${FLAGS_FALSE}" \
-  "Remove old files before sending new files"
+DEFINE_boolean clean "${FLAGS_TRUE}" "Remove old files before sending new files"
 DEFINE_boolean hv "${FLAGS_TRUE}" "Use hypervisor kernel if available."
 DEFINE_boolean ignore_verity "${FLAGS_FALSE}" "Update kernel even if system \
 is using verity (WARNING: likely to make the system unable to boot)"
@@ -232,17 +233,24 @@
 
 copy_kernelmodules() {
   local basedir="$1" # rootfs directory (could be in /tmp) or empty string
+  local old_kern_ver="$2"
+  local new_kern_ver="$3"
   local modules_dir=/build/"${FLAGS_board}"/lib/modules/
   if [ ! -d "${modules_dir}" ]; then
     info "No modules. Skipping."
     return
   fi
   if [[ ${FLAGS_clean} -eq ${FLAGS_TRUE} ]]; then
-    info "Cleaning /lib/modules"
-    remote_sh rm -rf "${basedir}/lib/modules/*"
+    info "Cleaning and copying modules"
+    if [[ "${old_kern_ver}" != "${new_kern_ver}" ]]; then
+      remote_sh mv "${basedir}/lib/modules/${old_kern_ver}" \
+        "${basedir}/lib/modules/${new_kern_ver}" || true
+    fi
+    remote_send_to "${modules_dir}" "${basedir}"/lib/modules --delete
+  else
+    info "Copying modules"
+    remote_send_to "${modules_dir}" "${basedir}"/lib/modules
   fi
-  info "Copying modules"
-  remote_send_to "${modules_dir}" "${basedir}"/lib/modules
 }
 
 copy_kernelimage() {
@@ -250,9 +258,8 @@
 }
 
 check_kernelbuildtime() {
-  local version
+  local version="$1"
   local build_dir
-  version=$(readlink "/build/${FLAGS_board}/boot/vmlinuz" | cut -d- -f2-)
   build_dir="/build/${FLAGS_board}/lib/modules/${version}/build"
   if [ "${build_dir}/Makefile" -nt "/build/${FLAGS_board}/boot/vmlinuz" ]; then
     warn "Your build directory has been built more recently than"
@@ -314,6 +321,9 @@
 }
 
 main() {
+  local old_kern_ver
+  local new_kern_ver
+
   # If there are commas in the --remote, run the script in parallel.
   if [[ ${FLAGS_remote} == *,* ]]; then
     multi_main
@@ -347,10 +357,11 @@
   fi
 
   remote_sh uname -r -v
-
   old_kernel="${REMOTE_OUT}"
 
-  check_kernelbuildtime
+  new_kern_ver=$(readlink "/build/${FLAGS_board}/boot/vmlinuz" | cut -d- -f2-)
+
+  check_kernelbuildtime "${new_kern_ver}"
 
   if [[ "${FLAGS_vboot}" -eq "${FLAGS_TRUE}" ]]; then
     make_kernelimage
@@ -366,29 +377,47 @@
       remote_sh mount -o remount,rw /
     fi
 
+    # Getting the old kernel version from /boot/vmlinuz is the most reliable,
+    # but this file doesn't exist on ARM.  In that case we fall back to the
+    # most recently installed kernel in /lib/modules.
+    if remote_sh "test -f '${remote_basedir}/boot/vmlinuz'"; then
+      remote_sh readlink "${remote_basedir}/boot/vmlinuz"
+      old_kern_ver="$(echo "${REMOTE_OUT}" | cut -d- -f2-)"
+    else
+      remote_sh "ls -t ${remote_basedir}/lib/modules | head -1"
+      old_kern_ver="${REMOTE_OUT}"
+    fi
+
     if [[ ${FLAGS_syslinux} -eq ${FLAGS_TRUE} ]]; then
       if [[ ${FLAGS_clean} -eq ${FLAGS_TRUE} ]]; then
-        info "Cleaning /boot"
-        remote_sh rm -rf \
-                  "${remote_basedir}"/boot/{Image,System,config,vmlinuz}'*'
+        info "Cleaning /boot, copying syslinux and /boot"
+        if [[ "${old_kern_ver}" != "${new_kern_ver}" ]]; then
+          remote_sh rename "${old_kern_ver}" "${new_kern_ver}" \
+                    "${remote_basedir}/boot/* 2>/dev/null" || true
+        fi
+        remote_send_to /build/"${FLAGS_board}"/boot/ "${remote_basedir}"/boot/ \
+          --delete
+      else
+        info "Copying syslinux and /boot"
+        remote_send_to /build/"${FLAGS_board}"/boot/ "${remote_basedir}"/boot/
       fi
-      info "Copying syslinux and /boot"
-      remote_send_to /build/"${FLAGS_board}"/boot/ "${remote_basedir}"/boot/
       update_syslinux_kernel "${remote_basedir}"
     else
       info "Skipping syslinux and /boot (per request)"
     fi
 
-    copy_kernelmodules "${remote_basedir}"
+    copy_kernelmodules "${remote_basedir}" "${old_kern_ver}" "${new_kern_ver}"
 
     if [[ ${FLAGS_firmware} -eq ${FLAGS_TRUE} ]]; then
       if [[ ${FLAGS_clean} -eq ${FLAGS_TRUE} ]]; then
-        info "Cleaning /lib/firmware"
-        remote_sh rm -rf "${remote_basedir}/lib/firmware/*"
+        info "Cleaning and copying firmware (per request)"
+        remote_send_to /build/"${FLAGS_board}"/lib/firmware/ \
+                       "${remote_basedir}"/lib/firmware/ --delete
+      else
+        info "Copying firmware (per request)"
+        remote_send_to /build/"${FLAGS_board}"/lib/firmware/ \
+                       "${remote_basedir}"/lib/firmware/
       fi
-      info "Copying firmware (per request)"
-      remote_send_to /build/"${FLAGS_board}"/lib/firmware/ \
-                     "${remote_basedir}"/lib/firmware/
     fi
     if [[ ${REMOTE_NEEDS_ROOTFS_MOUNTED} -eq ${FLAGS_TRUE} ]]; then
       remote_sh umount "${remote_basedir}"