Sync cos-sdk branch to tip

BUG=b/347026719
TEST=presubmit
RELEASE_NOTE=None

Change-Id: Ic8236893cf78ebaecf4ac9f974c4f12d39eaaa4b
Reviewed-on: https://cos-review.googlesource.com/c/third_party/platform/crosutils/+/74174
Reviewed-by: He Gao <hegao@google.com>
Tested-by: Cusky Presubmit Bot <presubmit@cos-infra-prod.iam.gserviceaccount.com>
diff --git a/bin/cros_make_image_bootable b/bin/cros_make_image_bootable
index 45d6dd3..a6cd9e2 100755
--- a/bin/cros_make_image_bootable
+++ b/bin/cros_make_image_bootable
@@ -82,30 +82,18 @@
   "Type of image we're building for (base/factory_install)."
 DEFINE_string output_dir "/tmp" \
   "Directory to place output in."
-DEFINE_string image "chromiumos_base.img" \
-  "Full path to the chromiumos image to make bootable."
 DEFINE_string arch "x86" \
   "Architecture to make bootable for: arm, mips, x86, or amd64"
-DEFINE_boolean cleanup_dirs ${FLAGS_TRUE} \
+DEFINE_boolean cleanup_dirs "${FLAGS_TRUE}" \
   "Whether the mount dirs should be removed on completion."
 
 DEFINE_string boot_args "noinitrd" \
   "Additional boot arguments to pass to the commandline"
 
-DEFINE_integer rootfs_size 720 \
-  "rootfs filesystem size in MBs."
-# ceil(0.1 * rootfs_size) is a good minimum.
-DEFINE_integer rootfs_hash_pad 8 \
-  "MBs reserved at the end of the rootfs image."
-
 DEFINE_string rootfs_hash "/tmp/rootfs.hash" \
   "Path where the rootfs hash should be stored."
-# TODO(taysom): when we turn on boot cache, both verification and
-# bootcache should have their default be FLAGS_TRUE.
-DEFINE_boolean enable_rootfs_verification ${FLAGS_FALSE} \
+DEFINE_boolean enable_rootfs_verification "${FLAGS_FALSE}" \
   "Default all bootloaders to NOT use kernel-based root fs integrity checking."
-DEFINE_boolean enable_bootcache ${FLAGS_FALSE} \
-  "Default all bootloaders to NOT use bootcache."
 DEFINE_integer verity_error_behavior 3 \
   "Kernel verified boot error behavior (0: I/O errors, 1: reboot, 2: nothing)"
 DEFINE_integer verity_max_ios -1 \
@@ -125,20 +113,13 @@
 DEFINE_string espfs_mountpoint "/tmp/espfs" \
   "Path where the espfs can be safely mounted"
 
-DEFINE_boolean use_dev_keys ${FLAGS_FALSE} \
+DEFINE_boolean use_dev_keys "${FLAGS_FALSE}" \
   "Use developer keys for signing. (Default: false)"
 
-DEFINE_boolean fsck_rootfs ${FLAGS_FALSE} \
+DEFINE_boolean fsck_rootfs "${FLAGS_FALSE}" \
   "Check integrity of the rootfs on the modified image."
 
-# TODO(pkumar): Remove once known that no images are using this flag
-DEFINE_boolean crosbug12352_arm_kernel_signing ${FLAGS_FALSE} \
-  "This flag is deprecated but the bots still need parse old images."
-
-# TODO(sosa):  Remove once known images no longer use this in their config.
-DEFINE_string arm_extra_bootargs "" "DEPRECATED FLAG.  Do not use."
-
-DEFINE_boolean force_developer_mode ${FLAGS_FALSE} \
+DEFINE_boolean force_developer_mode "${FLAGS_FALSE}" \
   "Add cros_debug to boot args."
 
 DEFINE_string enable_serial "" \
@@ -167,8 +148,8 @@
   local pattern="$2"
   local base_pattern="$3"
 
-  [ -f "${file}" ] || return ${FLAGS_TRUE}
-  sudo grep -wq "${pattern}" "${file}" && return ${FLAGS_TRUE}
+  [[ -f "${file}" ]] || return "${FLAGS_TRUE}"
+  sudo grep -wq "${pattern}" "${file}" && return "${FLAGS_TRUE}"
   sudo sed -i "s/\b${base_pattern}\b/& ${pattern}/g" "${file}"
 }
 
@@ -176,9 +157,11 @@
   local kernel_image_size="$1"
   local kernel_part="$2"
   local kernel_slot="$3"
-  local kernel_partition_size=$(get_partition_size ${FLAGS_image_type} \
-                                                   ${kernel_part})
-  local kernel_partition_size_90=$(( kernel_partition_size * 90 / 100 ))
+  local kernel_partition_size
+  kernel_partition_size=$(get_partition_size "${FLAGS_image_type}" \
+                                              "${kernel_part}")
+  local kernel_partition_size_90
+  kernel_partition_size_90=$(( kernel_partition_size * 90 / 100 ))
   info "Kernel partition ${kernel_slot} size is ${kernel_partition_size} bytes."
   if [[ ${kernel_image_size} -gt ${kernel_partition_size} ]]; then
     die "Kernel image won't fit in partition ${kernel_slot}!"
@@ -195,22 +178,20 @@
   local private="$5"
   local public="$6"
   local vblock=${7:-""}
+  local extra_arguments=()
 
-  # Default to non-verified
-  local enable_rootfs_verification_flag=--noenable_rootfs_verification
-  if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
-    enable_rootfs_verification_flag=--enable_rootfs_verification
-  fi
-  local enable_bootcache_flag=--noenable_bootcache
-  if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
-    enable_bootcache_flag=--enable_bootcache
+  if [[ "${FLAGS_enable_rootfs_verification}" -eq "${FLAGS_TRUE}" ]]; then
+    extra_arguments+=(--enable_rootfs_verification)
+  else
+    # Default to non-verified
+    extra_arguments+=(--noenable_rootfs_verification)
   fi
 
   if [[ -n "${vblock}" ]]; then
-    vblock=--hd_vblock="${FLAGS_output_dir}/${vblock}"
+    extra_arguments+=(--hd_vblock="${FLAGS_output_dir}/${vblock}")
   fi
 
-  ${SCRIPTS_DIR}/build_kernel_image.sh \
+  "${SCRIPTS_DIR}"/build_kernel_image.sh \
     --board="${FLAGS_board}" \
     --arch="${FLAGS_arch}" \
     --to="${FLAGS_output_dir}/${image_name}" \
@@ -218,21 +199,19 @@
     --working_dir="${FLAGS_output_dir}" \
     --boot_args="${FLAGS_boot_args}" \
     --keep_work \
-    --rootfs_image=${root_dev} \
-    --rootfs_image_size=${root_dev_size} \
-    --rootfs_hash=${FLAGS_rootfs_hash} \
-    --verity_hash_alg=${FLAGS_verity_algorithm} \
-    --verity_max_ios=${FLAGS_verity_max_ios} \
-    --verity_error_behavior=${FLAGS_verity_error_behavior} \
-    --verity_salt=${FLAGS_verity_salt} \
+    --rootfs_image="${root_dev}" \
+    --rootfs_image_size="${root_dev_size}" \
+    --rootfs_hash="${FLAGS_rootfs_hash}" \
+    --verity_hash_alg="${FLAGS_verity_algorithm}" \
+    --verity_max_ios="${FLAGS_verity_max_ios}" \
+    --verity_error_behavior="${FLAGS_verity_error_behavior}" \
+    --verity_salt="${FLAGS_verity_salt}" \
     --keys_dir="${FLAGS_keys_dir}" \
     --keyblock="${keyblock}" \
     --private="${private}" \
     --public="${public}" \
     --enable_serial="${FLAGS_enable_serial}" \
-    ${vblock} \
-    ${enable_rootfs_verification_flag} \
-    ${enable_bootcache_flag}
+    "${extra_arguments[@]}"
 }
 
 make_image_bootable() {
@@ -242,7 +221,7 @@
   # This is required because postinst will copy new legacy boot configurations
   # from rootfs partition instead of modifying existing entries in EFI
   # partition.
-  if [ ${FLAGS_force_developer_mode} -eq ${FLAGS_TRUE} ]; then
+  if [[ "${FLAGS_force_developer_mode}" -eq "${FLAGS_TRUE}" ]]; then
     trap "unmount_image ; die 'cros_make_image_bootable failed.'" EXIT
     mount_image "${image}" "${FLAGS_rootfs_mountpoint}" \
       "${FLAGS_statefulfs_mountpoint}"
@@ -263,17 +242,18 @@
   # Make the filesystem un-mountable as read-write.
   # mount_gpt_image.sh will undo this as needed.
   # TODO(wad) make sure there is parity in the signing scripts.
-  if [ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]; then
+  if [[ "${FLAGS_enable_rootfs_verification}" -eq "${FLAGS_TRUE}" ]]; then
     # TODO(wad) this would be a good place to reset any other ext2 metadata.
     info "Disabling r/w mount of the root filesystem"
-    local rootfs_offset="$(partoffset ${image} 3)"
+    local rootfs_offset
+    rootfs_offset="$(partoffset "${image}" 3)"
     disable_rw_mount "${image}" "$(( rootfs_offset * 512 ))"
 
     # For factory_install images, override FLAGS_enable_rootfs_verification
     # here, so the following build_img calls won't make kernel set up the
     # device mapper on initialization.
     if [[ "${FLAGS_image_type}" == "factory_install" ]]; then
-      FLAGS_enable_rootfs_verification=${FLAGS_FALSE}
+      FLAGS_enable_rootfs_verification="${FLAGS_FALSE}"
     fi
   fi
 
@@ -298,7 +278,7 @@
   # For dev install shim, we need to use the installer keyblock instead of
   # the recovery keyblock because of the difference in flags.
   local keyblock
-  if [ ${FLAGS_use_dev_keys} -eq ${FLAGS_TRUE} ]; then
+  if [[ "${FLAGS_use_dev_keys}" -eq "${FLAGS_TRUE}" ]]; then
     keyblock=installer_kernel.keyblock
     info "DEBUG: use dev install keyblock"
   else
@@ -306,14 +286,16 @@
     info "DEBUG: use recovery keyblock"
   fi
 
-  if [ ${FLAGS_force_developer_mode} -eq ${FLAGS_TRUE} ]; then
+  if [[ "${FLAGS_force_developer_mode}" -eq "${FLAGS_TRUE}" ]]; then
     FLAGS_boot_args="${FLAGS_boot_args} cros_debug"
   fi
 
   # Builds the kernel partition image.
-  local partition_num_root_a="$(get_layout_partition_number \
+  local partition_num_root_a
+  partition_num_root_a="$(get_layout_partition_number \
     "${FLAGS_image_type}" ROOT-A)"
-  local rootfs_fs_size=$(get_filesystem_size "${FLAGS_image_type}" \
+  local rootfs_fs_size
+  rootfs_fs_size=$(get_filesystem_size "${FLAGS_image_type}" \
     "${partition_num_root_a}")
 
   # Usually we need to ensure that there will always be a regular kernel on
@@ -333,36 +315,45 @@
 
   # Check the size of kernel image and issue warning when image size is
   # near the limit.
-  local kernel_image_size_A=$(stat -c '%s' ${FLAGS_output_dir}/${kern_a_image})
+  local kernel_image_size_A
+  kernel_image_size_A=$(stat -c '%s' "${FLAGS_output_dir}/${kern_a_image}")
   info "Kernel image A   size is ${kernel_image_size_A} bytes."
-  local kernel_image_size_B=$(stat -c '%s' ${FLAGS_output_dir}/${kern_b_image})
+  local kernel_image_size_B
+  kernel_image_size_B=$(stat -c '%s' "${FLAGS_output_dir}/${kern_b_image}")
   info "Kernel image B   size is ${kernel_image_size_B} bytes."
-  local partition_num_kern_a="$(get_layout_partition_number \
+  local partition_num_kern_a
+  partition_num_kern_a="$(get_layout_partition_number \
     "${FLAGS_image_type}" KERN-A)"
-  check_kernel_size ${kernel_image_size_A} ${partition_num_kern_a} A
-  local partition_num_kern_b="$(get_layout_partition_number \
+  check_kernel_size "${kernel_image_size_A}" "${partition_num_kern_a}" A
+  local partition_num_kern_b
+  partition_num_kern_b="$(get_layout_partition_number \
     "${FLAGS_image_type}" KERN-B)"
 
   # Since the kernel-b of factory shim is optional, ignore kernel-b if the size
   # of kernel-b is less than or equal to the default value 1MiB.
-  local need_kern_b=${FLAGS_TRUE}
+  local need_kern_b="${FLAGS_TRUE}"
   if [[ "${FLAGS_image_type}" == "factory_install" ]]; then
-    local kernel_partition_size=$(get_partition_size ${FLAGS_image_type} \
-                                                   ${partition_num_kern_b})
-    local block_size="$(get_block_size)"
+    local kernel_partition_size
+    kernel_partition_size=$(get_partition_size "${FLAGS_image_type}" \
+                                                 "${partition_num_kern_b}")
+    local block_size
+    block_size="$(get_block_size)"
     if [[ "${kernel_partition_size}" -le "${block_size}" ]]; then
-      need_kern_b=${FLAGS_FALSE}
+      need_kern_b="${FLAGS_FALSE}"
       warn "Kernel partition B is skipped!"
     fi
   fi
   if [[ ${need_kern_b} -eq ${FLAGS_TRUE} ]]; then
-    check_kernel_size ${kernel_image_size_B} ${partition_num_kern_b} B
+    check_kernel_size "${kernel_image_size_B}" "${partition_num_kern_b}" B
   fi
 
-  local rootfs_hash_size=$(stat -c '%s' ${FLAGS_rootfs_hash})
-  local rootfs_partition_size=$(get_partition_size ${FLAGS_image_type} \
-      ${partition_num_root_a})
-  local rootfs_hash_pad=$(( rootfs_partition_size - rootfs_fs_size ))
+  local rootfs_hash_size
+  rootfs_hash_size=$(stat -c '%s' "${FLAGS_rootfs_hash}")
+  local rootfs_partition_size
+  rootfs_partition_size=$(get_partition_size "${FLAGS_image_type}" \
+      "${partition_num_root_a}")
+  local rootfs_hash_pad
+  rootfs_hash_pad=$(( rootfs_partition_size - rootfs_fs_size ))
   info "Appending rootfs.hash (${rootfs_hash_size} bytes) to the root fs"
   if [[ ${rootfs_hash_size} -gt ${rootfs_hash_pad} ]]; then
     die "rootfs_partition_size - rootfs_fs_size is less than the needed " \
@@ -372,10 +363,11 @@
   # Unfortunately, mount_gpt_image uses mount and not losetup to create the
   # loop devices.  This means that they are not the correct size.  We have to
   # write directly to the image to append the hash tree data.
-  local hash_offset="$(partoffset ${image} ${partition_num_root_a})"
-  hash_offset=$((hash_offset + (${rootfs_fs_size} / 512)))
+  local hash_offset
+  hash_offset="$(partoffset "${image}" "${partition_num_root_a}")"
+  hash_offset=$((hash_offset + ("${rootfs_fs_size}" / 512)))
   sudo dd bs=512 \
-          seek=${hash_offset} \
+          seek="${hash_offset}" \
           if="${FLAGS_rootfs_hash}" \
           of="${image}" \
           conv=notrunc \
@@ -394,40 +386,49 @@
   fi
 
   # Install the kernel to both slots A and B.
-  local koffset="$(partoffset ${image} ${partition_num_kern_a})"
+  local koffset
+  koffset="$(partoffset "${image}" "${partition_num_kern_a}")"
   sudo dd if="${FLAGS_output_dir}/${kern_a_image}" of="${image}" \
-    conv=notrunc bs=512 seek=${koffset} status=none
+    conv=notrunc bs=512 seek="${koffset}" status=none
   if [[ ${need_kern_b} -eq ${FLAGS_TRUE} ]]; then
-    koffset="$(partoffset ${image} ${partition_num_kern_b})"
+    koffset="$(partoffset "${image}" "${partition_num_kern_b}")"
     sudo dd if="${FLAGS_output_dir}/${kern_b_image}" of="${image}" \
-      conv=notrunc bs=512 seek=${koffset} status=none
+      conv=notrunc bs=512 seek="${koffset}" status=none
   fi
 
   # Update the bootloaders.  The EFI system partition will be updated.
   local kernel_part=
 
   # We should update the esp in place in the image.
-  local partition_num_efi_system="$(get_layout_partition_number \
+  local partition_num_efi_system
+  partition_num_efi_system="$(get_layout_partition_number \
     "${FLAGS_image_type}" EFI-SYSTEM)"
-  local image_dev=$(loopback_partscan "${image}")
-  trap "loopback_detach ${image_dev}; unmount_image ; \
-    die 'cros_make_image_bootable failed.'" EXIT
+  local image_dev
+  image_dev=$(loopback_partscan "${image}")
+  loopback_detach_and_die() {
+    loopback_detach "${image_dev}"
+    unmount_image
+    die "cros_make_image_bootable failed."
+  }
+  trap loopback_detach_and_die EXIT
   local bootloader_to="${image_dev}"
   # Make this writable by the user, it will get deleted by losetup -d.
-  sudo chown $(id -u) ${bootloader_to} ${bootloader_to}p*
-  local esp_size="$(partsize ${image} ${partition_num_efi_system})"
+  sudo chown "$(id -u)" "${bootloader_to}" "${bootloader_to}"p*
+  local esp_size
+  esp_size="$(partsize "${image}" "${partition_num_efi_system}")"
   esp_size=$((esp_size * 512))  # sectors to bytes
 
-  if [[ "${FLAGS_arch}" = "x86" || "${FLAGS_arch}" = "amd64" ]]; then
+  if [[ "${FLAGS_arch}" == "x86" || "${FLAGS_arch}" == "amd64" ]]; then
     # Use the kernel partition to acquire configuration flags.
-    kernel_part="--kernel_partition='${FLAGS_output_dir}/${kern_a_image}'"
+    kernel_part=("--kernel_partition='${FLAGS_output_dir}/${kern_a_image}'")
     # Install syslinux on the EFI System Partition.
-    kernel_part="${kernel_part} --install_syslinux"
-  elif [[ "${FLAGS_arch}" = "arm64" ]]; then
-    kernel_part="--kernel_partition='${FLAGS_output_dir}/vmlinuz.image'"
-  elif [[ "${FLAGS_arch}" = "arm" || "${FLAGS_arch}" = "mips" ]]; then
+    kernel_part+=(--install_syslinux)
+  elif [[ "${FLAGS_arch}" == "arm64" ]]; then
+    # Use the kernel partition to acquire configuration flags.
+    kernel_part=("--kernel_partition='${FLAGS_output_dir}/${kern_a_image}'")
+  elif [[ "${FLAGS_arch}" == "arm" || "${FLAGS_arch}" == "mips" ]]; then
     # These flags are not used for ARM / MIPS update_bootloaders.sh
-    kernel_part=""
+    kernel_part=()
   fi
 
   # Force all of the file writes to complete, in case it's necessary for
@@ -436,7 +437,7 @@
 
   if [[ ${esp_size} -gt 0 ]]; then
     # Update EFI partition
-    ${SCRIPTS_DIR}/update_bootloaders.sh \
+    "${SCRIPTS_DIR}"/update_bootloaders.sh \
       --arch="${FLAGS_arch}" \
       --board="${BOARD}" \
       --image_type="${FLAGS_image_type}" \
@@ -445,7 +446,7 @@
       --from="${FLAGS_rootfs_mountpoint}"/boot \
       --vmlinuz="${VMLINUZ}" \
       --zimage="${ZIMAGE}" \
-      ${kernel_part}
+      "${kernel_part[@]}"
   fi
 
   # We don't need to keep these files around anymore.
@@ -453,7 +454,7 @@
              "${FLAGS_output_dir}/${kern_b_image}" \
              "${FLAGS_output_dir}/vmlinuz_hd.vblock"
 
-  sudo losetup -d ${image_dev}
+  sudo losetup -d "${image_dev}"
   unmount_image
 
   # Since the kern-b will be signed with another recovery key, need to make
@@ -469,12 +470,17 @@
 
 verify_image_rootfs() {
   local image=$1
-  local partition_num_root_a="$(get_layout_partition_number \
+  local partition_num_root_a
+  partition_num_root_a="$(get_layout_partition_number \
     "${FLAGS_image_type}" ROOT-A)"
-  local rootfs_offset="$(partoffset ${image} ${partition_num_root_a})"
+  local rootfs_offset
+  rootfs_offset="$(partoffset "${image}" "${partition_num_root_a}")"
 
-  local rootfs_tmp_file=$(mktemp)
-  trap "rm ${rootfs_tmp_file}" EXIT
+  local rootfs_tmp_file
+  rootfs_tmp_file=$(mktemp)
+  # Immediately resolve the local variable for the trap.
+  # shellcheck disable=SC2064
+  trap "rm '${rootfs_tmp_file}'" EXIT
   sudo dd if="${image}" of="${rootfs_tmp_file}" bs=512 skip="${rootfs_offset}" \
           status=none
 
@@ -495,9 +501,9 @@
 FLAGS_espfs_mountpoint="${IMAGE_DIR}/esp"
 
 # Create the directories if they don't exist.
-mkdir -p ${FLAGS_rootfs_mountpoint}
-mkdir -p ${FLAGS_statefulfs_mountpoint}
-mkdir -p ${FLAGS_espfs_mountpoint}
+mkdir -p "${FLAGS_rootfs_mountpoint}"
+mkdir -p "${FLAGS_statefulfs_mountpoint}"
+mkdir -p "${FLAGS_espfs_mountpoint}"
 
 # base_image_utils.sh always places the kernel images in
 # ${IMAGE_DIR}/boot_images.
@@ -508,11 +514,11 @@
 if type -p board_make_image_bootable; then
   board_make_image_bootable "${IMAGE}"
 fi
-if [ ${FLAGS_fsck_rootfs} -eq ${FLAGS_TRUE} ]; then
+if [[ "${FLAGS_fsck_rootfs}" -eq "${FLAGS_TRUE}" ]]; then
   verify_image_rootfs "${IMAGE}"
 fi
 
-if [ ${FLAGS_cleanup_dirs} -eq ${FLAGS_TRUE} ]; then
+if [[ "${FLAGS_cleanup_dirs}" -eq "${FLAGS_TRUE}" ]]; then
   # These paths are already cleaned up by make_image_bootable when unmounting
   # the image. This is a fallback in case there are errors in that script.
   for path in ${FLAGS_rootfs_mountpoint} ${FLAGS_statefulfs_mountpoint} \
diff --git a/build_image b/build_image
deleted file mode 100755
index 8c0258f..0000000
--- a/build_image
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-# Copyright 2022 The ChromiumOS Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# shellcheck source=common.sh
-. "$(dirname "$0")/common.sh" || exit 1
-
-new_script="build_image"
-warn "$0: This script is deprecated and will be removed."
-warn "All users must migrate to ${new_script} in chromite/bin."
-warn "You can simply change all references of $0 to \`${new_script}\`" \
-  "from \$PATH (in chromite/bin/)."
-warn "This old script will be removed by January 2023."
-warn "If you have questions or found code that needs updating, please" \
-  "contact chromium-os-dev@, or file a bug at go/cros-build-bug."
-exec "${CHROMITE_BIN}/${new_script}" "$@"
diff --git a/build_image.sh b/build_image.sh
index ac706aa..0f5f917 100755
--- a/build_image.sh
+++ b/build_image.sh
@@ -15,7 +15,7 @@
 
 if [[ "$1" != "--script-is-run-only-by-chromite-and-not-users" ]]; then
   die_notrace 'This script must not be run by users.' \
-    'Please run `build_images` from $PATH (in chromite/bin/) instead.'
+    'Please run `cros build-image` instead.'
 fi
 
 # Discard the 'script-is-run-only-by-chromite-and-not-users' flag.
@@ -28,8 +28,6 @@
   "The board to build an image for."
 DEFINE_string boot_args "noinitrd" \
   "Additional boot arguments to pass to the commandline"
-DEFINE_boolean enable_bootcache ${FLAGS_FALSE} \
-  "Default all bootloaders to NOT use boot cache."
 DEFINE_boolean enable_rootfs_verification ${FLAGS_TRUE} \
   "Default all bootloaders to use kernel-based root fs integrity checking." \
   r
@@ -110,8 +108,7 @@
 # TODO: <prebuild hook>
 
 # Create the base image.
-create_base_image "${PRISTINE_IMAGE_NAME}" \
-  "${FLAGS_enable_rootfs_verification}" "${FLAGS_enable_bootcache}"
+create_base_image "${PRISTINE_IMAGE_NAME}" "${FLAGS_enable_rootfs_verification}"
 
 # Running board-specific setup if any exists.
 if type board_setup &>/dev/null; then
diff --git a/build_kernel_image.sh b/build_kernel_image.sh
index 9519d51..a1cc67b 100755
--- a/build_kernel_image.sh
+++ b/build_kernel_image.sh
@@ -63,8 +63,6 @@
   "Salt to use for rootfs hash (Default: \"\")"
 DEFINE_boolean enable_rootfs_verification "${FLAGS_TRUE}" \
   "Enable kernel-based root fs integrity checking. (Default: true)"
-DEFINE_boolean enable_bootcache "${FLAGS_FALSE}" \
-  "Enable boot cache to accelerate booting. (Default: false)"
 DEFINE_string enable_serial "" \
   "Enable serial port for printks. Example values: ttyS0"
 DEFINE_integer loglevel 7 \
@@ -124,8 +122,6 @@
   echo 'PARTUUID=%U/PARTNROFF=1'
 }
 
-load_board_specific_script "build_kernel_image.sh"
-
 base_root=$(get_base_root)
 
 device_mapper_args=
@@ -205,27 +201,10 @@
   # Don't claim the root device unless verity is enabled.
   # Doing so will claim /dev/sdDP out from under the system.
   if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
-    if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
-      base_root='254:0'  # major:minor numbers for /dev/dm-0
-    fi
     table=${table//HASH_DEV/${base_root}}
     table=${table//ROOT_DEV/${base_root}}
   fi
-  verity_dev="vroot,,,ro,${table}"
-  if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
-    signature=$(rootdigest)
-    cachestart=$(($(hashstart) + $(veritysize)))
-    size_limit=512
-    max_trace=20000
-    max_pages=100000
-    bootcache_args="PARTUUID=%U/PARTNROFF=1"
-    bootcache_args+=" ${cachestart} ${signature} ${size_limit}"
-    bootcache_args+=" ${max_trace} ${max_pages}"
-    bootcache_dev="vboot none ro 1,0 ${cachestart} bootcache ${bootcache_args}"
-    device_mapper_args="dm-mod.create=\"${bootcache_dev}, ${verity_dev}\""
-  else
-    device_mapper_args="dm-mod.create=\"${verity_dev}\""
-  fi
+  device_mapper_args="dm-mod.create=\"vroot,,,ro,${table}\""
   info "device mapper configuration: ${device_mapper_args}"
 fi
 
@@ -239,15 +218,7 @@
 root_dev=${base_root}
 if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
   dev_wait=1
-  if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
-    root_dev=/dev/dm-1
-  else
-    root_dev=/dev/dm-0
-  fi
-else
-  if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
-    die "Having bootcache without verity is not supported"
-  fi
+  root_dev=/dev/dm-0
 fi
 
 # kern_guid should eventually be changed to use PARTUUID
@@ -264,7 +235,7 @@
 kern_guid=%U
 EOF
 
-WORK="${WORK} ${FLAGS_working_dir}/boot.config"
+WORK=("${FLAGS_working_dir}/boot.config")
 info "Emitted cross-platform boot params to ${FLAGS_working_dir}/boot.config"
 
 # Add common boot options first.
@@ -300,29 +271,17 @@
 drm.trace=0x106
 EOF
 
+WORK+=("${config}")
 if [[ "${FLAGS_arch}" == "x86" || "${FLAGS_arch}" == "amd64" ]]; then
   # Legacy BIOS will use the kernel in the rootfs (via syslinux), as will
   # standard EFI BIOS (via grub, from the EFI System Partition). Chrome OS
   # BIOS will use a separate signed kernel partition, which we'll create now.
   cat <<EOF >> "${config}"
 add_efi_memmap
-boot=local
 noresume
-noswap
 i915.modeset=1
 EOF
-  WORK="${WORK} ${config}"
-
-  bootloader_path="/lib64/bootstub/bootstub.efi"
-elif [[ "${FLAGS_arch}" == "arm" || "${FLAGS_arch}" == "mips"  || "${FLAGS_arch}" == "arm64" ]]; then
-  WORK="${WORK} ${config}"
-
-  # arm does not need/have a bootloader in kernel partition
-  dd if="/dev/zero" of="${FLAGS_working_dir}/bootloader.bin" bs=512 count=1
-  WORK="${WORK} ${FLAGS_working_dir}/bootloader.bin"
-
-  bootloader_path="${FLAGS_working_dir}/bootloader.bin"
-else
+elif [[ "${FLAGS_arch}" != "arm" && "${FLAGS_arch}" != "mips"  && "${FLAGS_arch}" != "arm64" ]]; then
   error "Unknown arch: ${FLAGS_arch}"
 fi
 kernel_image="${FLAGS_vmlinuz}"
@@ -362,7 +321,11 @@
   done
 done
 
-modify_kernel_command_line "${config}"
+(
+  # Run in a subshell so we know build_kernel_image.sh can't set env vars.
+  load_board_specific_script "build_kernel_image.sh"
+  modify_kernel_command_line "${config}"
+)
 
 # Create and sign the kernel blob
 vbutil_kernel \
@@ -371,7 +334,6 @@
   --signprivate "${FLAGS_keys_dir}/${FLAGS_private}" \
   --version 1 \
   --config "${config}" \
-  --bootloader "${bootloader_path}" \
   --vmlinuz "${kernel_image}" \
   --arch "${FLAGS_arch}"
 
@@ -387,8 +349,8 @@
 set +e  # cleanup failure is a-ok
 
 if [[ ${FLAGS_keep_work} -eq ${FLAGS_FALSE} ]]; then
-  info "Cleaning up temporary files: ${WORK}"
-  rm ${WORK}
+  info "Cleaning up temporary files: ${WORK[*]}"
+  rm "${WORK[@]}"
   rmdir "${FLAGS_working_dir}"
 fi
 
diff --git a/build_library/base_image_util.sh b/build_library/base_image_util.sh
index 92e49e6..b3949f3 100755
--- a/build_library/base_image_util.sh
+++ b/build_library/base_image_util.sh
@@ -334,7 +334,6 @@
 create_base_image() {
   local image_name=$1
   local rootfs_verification_enabled=$2
-  local bootcache_enabled=$3
   local image_type="usb"
 
   info "Entering create_base_image $*"
@@ -354,6 +353,7 @@
   get_disk_layout_path
   info "Using disk layout ${DISK_LAYOUT_PATH}"
   root_fs_dir="${BUILD_DIR}/rootfs"
+  root_cros_vdb_strip_prefix="${root_fs_dir}"
   stateful_fs_dir="${BUILD_DIR}/stateful"
   esp_fs_dir="${BUILD_DIR}/esp"
 
@@ -570,10 +570,6 @@
   if [[ ${rootfs_verification_enabled} -eq ${FLAGS_TRUE} ]]; then
     enable_rootfs_verification="--enable_rootfs_verification"
   fi
-  local enable_bootcache=
-  if [[ ${bootcache_enabled} -eq ${FLAGS_TRUE} ]]; then
-    enable_bootcache="--enable_bootcache"
-  fi
 
   # If the KERN-A partition is of type "reserved", there is no kernel.
   # Skip the preparation of a bootable kernel partition.
@@ -593,8 +589,7 @@
       --boot_args="${FLAGS_boot_args}" \
       --enable_serial="${FLAGS_enable_serial}" \
       --loglevel="${FLAGS_loglevel}" \
-      ${enable_rootfs_verification} \
-      ${enable_bootcache}
+      ${enable_rootfs_verification}
   fi
 
   # Run board-specific build image function, if available.
@@ -631,7 +626,11 @@
     info_run sudo tar -cf "${BOARD_ROOT}/packages/dev-only-extras.tar.xz" \
       -I 'xz -9 -T0' --exclude=var -C "${root_fs_dir}/usr/local" .
 
-    create_dev_install_lists "${root_fs_dir}"
+    if should_build_image ${CHROMEOS_FACTORY_INSTALL_SHIM_NAME}; then
+      info "Skipping dev-install lists for factory shim images."
+    else
+      create_dev_install_lists "${root_fs_dir}"
+    fi
   fi
 
   # Generate DLCs and copy their metadata to the rootfs + factory install DLC
@@ -650,35 +649,24 @@
   # the bootable partitions later.
   mkdir -p "${BUILD_DIR}/boot_images"
 
-  # Bootable kernel image for ManaTEE enabled targets is located at directory
-  # ${BOARD_ROOT}/build/manatee/boot and included only in bootable partition.
-  # If no manatee USE flag is specified the standard /boot location is used,
-  # optionally including kernel image in final build image:
+  # The kernel image from the standard /boot location may optionally be
+  # included in the final build image:
+  #
   # - boards that boot with legacy bioses need the kernel on the boot image
   # - boards with coreboot/depthcharge boot from a boot partition.
-  local boot_dir
   local cpmv
-  if has "manatee" "$(portageq-"${BOARD}" envvar USE)"; then
-    boot_dir="${BOARD_ROOT}/build/manatee/boot"
+  local boot_dir="${root_fs_dir}/boot"
+  if has "include_vmlinuz" "$(portageq-"${BOARD}" envvar USE)"; then
     cpmv="cp"
-    # The CrOS VM (guest) kernel goes into initramfs, so we should drop it
-    # from rootfs to save space.
-    sudo rm "${root_fs_dir}"/boot/vmlinuz-*
-    sudo rm "${root_fs_dir}"/boot/vmlinuz
   else
-    boot_dir="${root_fs_dir}/boot"
-    if has "include_vmlinuz" "$(portageq-"${BOARD}" envvar USE)"; then
-      cpmv="cp"
-    else
-      cpmv="mv"
-    fi
+    cpmv="mv"
   fi
 
   [ -e "${boot_dir}"/Image-* ] && \
     sudo "${cpmv}" "${boot_dir}"/Image-* "${BUILD_DIR}/boot_images"
   [ -L "${boot_dir}"/zImage-* ] && \
     sudo "${cpmv}" "${boot_dir}"/zImage-* "${BUILD_DIR}/boot_images"
-  [ -e "${boot_dir}"/vmlinuz-* ] && \
+  find "${boot_dir}"/vmlinuz-* >/dev/null 2>&1 && \
     sudo "${cpmv}" "${boot_dir}"/vmlinuz-* "${BUILD_DIR}/boot_images"
   [ -L "${boot_dir}"/vmlinuz ] && \
     sudo "${cpmv}" "${boot_dir}"/vmlinuz "${BUILD_DIR}/boot_images"
diff --git a/build_library/build_common.sh b/build_library/build_common.sh
index 8fefe8b..386911f 100644
--- a/build_library/build_common.sh
+++ b/build_library/build_common.sh
@@ -73,6 +73,9 @@
   local root=/build/${FLAGS_board}
   local tmp_pkgdir=${root}/custom-packages
 
+  info "Emerging custom kernel into ${install_root}"
+  info "Setting PKGDIR=${tmp_pkgdir} to avoid conflicts in ${root}"
+
   # Clean up any leftover state in custom directories.
   sudo rm -rf "${tmp_pkgdir}"
 
diff --git a/build_library/build_image_util.sh b/build_library/build_image_util.sh
index 2b79116..9f6ab03 100755
--- a/build_library/build_image_util.sh
+++ b/build_library/build_image_util.sh
@@ -33,10 +33,6 @@
   if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
     enable_rootfs_verification_flag="--enable_rootfs_verification"
   fi
-  local enable_bootcache_flag=""
-  if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
-    enable_bootcache_flag=--enable_bootcache
-  fi
 
   [ -z "${FLAGS_verity_salt}" ] && FLAGS_verity_salt=$(make_salt)
   cat <<EOF > ${BUILD_DIR}/boot.desc
@@ -50,7 +46,6 @@
   --enable_serial="${FLAGS_enable_serial}"
   --loglevel="${FLAGS_loglevel}"
   ${enable_rootfs_verification_flag}
-  ${enable_bootcache_flag}
 EOF
 }
 
@@ -121,8 +116,8 @@
 # Arguments to this command are passed as addition options/arguments
 # to the basic emerge command.
 emerge_to_image() {
-  set -- ${EMERGE_BOARD_CMD} --root-deps=rdeps --usepkgonly -v --with-bdeps=n \
-    "$@" ${EMERGE_JOBS}
+  set -- ${EMERGE_BOARD_CMD} --root-cros-vdb-strip-prefix="${root_cros_vdb_strip_prefix}" \
+    --root-deps=rdeps --usepkgonly -v --with-bdeps=n "$@" ${EMERGE_JOBS}
   info_run sudo -E "$@"
 }
 
@@ -135,6 +130,7 @@
   local passwd="${root}/etc/passwd"
   local line
   local cmds
+  local sed_cmds=()
 
   # Remove the file completely so we know it is fully initialized
   # with the correct permissions.  Note: we're just making it writable
@@ -171,10 +167,20 @@
     # Password is set directly.
     *)
       echo "${acct}:${pass}:::::::" >> "${shadow}"
+
+      # This is a sed command to replace the password field with an 'x' instead
+      # of the existing hashed password. We use ! as the sed separator because
+      # it's guaranteed not to appear in a hashed password field (see passwd(5)
+      # and crypt(3) man pages).
+      sed_cmds+=("-e" "s!^${acct}:${pass}:!${acct}:x:!")
       ;;
     esac
   done <"${passwd}"
 
+  if [[ "${#sed_cmds[@]}" -gt 0 ]]; then
+    sudo sed -i "${sed_cmds[@]}" "${passwd}"
+  fi
+
   # Now make the settings sane.
   cmds=(
     "chown 0:0 '${shadow}'"
diff --git a/build_library/create_legacy_bootloader_templates.sh b/build_library/create_legacy_bootloader_templates.sh
index 330214d..8b248de 100755
--- a/build_library/create_legacy_bootloader_templates.sh
+++ b/build_library/create_legacy_bootloader_templates.sh
@@ -25,8 +25,6 @@
   "Path to populate with bootloader templates (Default: /tmp/boot)"
 DEFINE_string boot_args "" \
   "Additional boot arguments to pass to the commandline (Default: '')"
-DEFINE_boolean enable_bootcache "${FLAGS_FALSE}" \
-  "Default all bootloaders to NOT use boot cache."
 DEFINE_boolean enable_rootfs_verification "${FLAGS_FALSE}" \
   "Controls if verity is used for root filesystem checking (Default: false)"
 DEFINE_string enable_serial "tty2" \
@@ -52,9 +50,6 @@
 ROOTDEV=/dev/dm-0
 if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
   dev_wait=1
-  if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
-    ROOTDEV=/dev/dm-1
-  fi
 fi
 
 # Common kernel command-line args. Write them to a temporary config_file so that
@@ -70,11 +65,9 @@
 
 cat <<EOF > "${config_file}"
 init=/sbin/init
-boot=local
 rootwait
 ro
 noresume
-noswap
 loglevel=${FLAGS_loglevel}
 ${FLAGS_boot_args}
 console=${FLAGS_enable_serial}
@@ -92,8 +85,11 @@
 
 # shellcheck source=board_options.sh
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
-load_board_specific_script "build_kernel_image.sh"
-modify_kernel_command_line "${config_file}"
+(
+  # Run in a subshell so we know build_kernel_image.sh can't set env vars.
+  load_board_specific_script "build_kernel_image.sh"
+  modify_kernel_command_line "${config_file}"
+)
 # Read back the config_file; translate newlines to space
 common_args="$(tr "\n" " " < "${config_file}")"
 cleanup
@@ -114,10 +110,93 @@
 partition_num_root_a="$(get_layout_partition_number \
     "${FLAGS_image_type}" ROOT-A)"
 
+# Create grub image and common grub.cfg template for EFI on x86/amd64/arm64.
+install_grub_efi_template() {
+  # To cover all of our bases, now populate templated boot support for efi.
+  sudo mkdir -p "${FLAGS_to}"/efi/boot
+
+  # /boot/syslinux must be installed in partition 12 as /syslinux/.
+  SYSLINUX_DIR="${FLAGS_to}/syslinux"
+  sudo mkdir -p "${SYSLINUX_DIR}"
+
+  grub_args=(
+    -p /efi/boot
+    part_gpt gptpriority test fat ext2 normal boot chain
+    efi_gop configfile linux
+    # For more context on SBAT, see chromiumos-overlay/sys-boot/grub/README.md
+    -s "${SRC_ROOT}/third_party/chromiumos-overlay/sys-boot/grub/files/sbat.csv"
+  )
+
+  if [[ "${FLAGS_arch}" == "arm64" ]]; then
+    # GRUB for arm64 is installed inside board overlay, since cross compilation
+    # tools are not available in base SDK.
+    sudo grub-mkimage -O arm64-efi \
+      -d "/build/${FLAGS_board}/lib64/grub/arm64-efi/" \
+      -o "${FLAGS_to}/efi/boot/bootaa64.efi" "${grub_args[@]}"
+  else
+    sudo grub-mkimage -O x86_64-efi \
+      -o "${FLAGS_to}/efi/boot/bootx64.efi" "${grub_args[@]}"
+    sudo grub-mkimage -O i386-efi \
+      -o "${FLAGS_to}/efi/boot/bootia32.efi" "${grub_args[@]}"
+  fi
+
+  # Templated variables:
+  #  DMTABLEA, DMTABLEB -> '0 xxxx verity ... '
+  # This should be replaced during postinst when updating the ESP.
+  cat <<EOF | sudo dd of="${FLAGS_to}/efi/boot/grub.cfg" 2>/dev/null
+defaultA=0
+defaultB=1
+gptpriority \$grubdisk ${partition_num_kern_a} prioA
+gptpriority \$grubdisk ${partition_num_kern_b} prioB
+
+if [ \$prioA -lt \$prioB ]; then
+  set default=\$defaultB
+else
+  set default=\$defaultA
+fi
+
+set timeout=2
+
+# NOTE: These magic grub variables are a Chrome OS hack. They are not portable.
+
+menuentry "local image A" {
+  linux /syslinux/vmlinuz.A ${common_args} i915.modeset=1 cros_efi \
+      root=/dev/\$linuxpartA
+}
+
+menuentry "local image B" {
+  linux /syslinux/vmlinuz.B ${common_args} i915.modeset=1 cros_efi \
+      root=/dev/\$linuxpartB
+}
+
+menuentry "verified image A" {
+  linux /syslinux/vmlinuz.A ${common_args} ${verity_common} \
+      i915.modeset=1 cros_efi root=${ROOTDEV} dm-mod.create="DMTABLEA"
+}
+
+menuentry "verified image B" {
+  linux /syslinux/vmlinuz.B ${common_args} ${verity_common} \
+      i915.modeset=1 cros_efi root=${ROOTDEV} dm-mod.create="DMTABLEB"
+}
+
+# FIXME: usb doesn't support verified boot for now
+menuentry "Alternate USB Boot" {
+  linux (hd0,${partition_num_root_a})/boot/vmlinuz ${common_args} root=HDROOTUSB i915.modeset=1 cros_efi
+}
+EOF
+  if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
+    sudo sed -i \
+      -e '/^defaultA=/s:=.*:=2:' \
+      -e '/^defaultB=/s:=.*:=3:' \
+      "${FLAGS_to}/efi/boot/grub.cfg"
+  fi
+  info "Emitted ${FLAGS_to}/efi/boot/grub.cfg"
+}
+
 # Populate the x86 rootfs to support legacy and EFI bios config templates.
 # The templates are used by the installer to populate partition 12 with
 # the correct bootloader configuration.
-if [[ "${FLAGS_arch}" = "x86" || "${FLAGS_arch}" = "amd64"  ]]; then
+if [[ "${FLAGS_arch}" == "x86" || "${FLAGS_arch}" == "amd64"  ]]; then
   # TODO: For some reason the /dev/disk/by-uuid is not being generated by udev
   # in the initramfs. When we figure that out, switch to root=UUID=${UUID}.
   sudo mkdir -p "${FLAGS_to}"
@@ -209,71 +288,11 @@
 EOF
   info "Emitted ${SYSLINUX_DIR}/README"
 
-  # To cover all of our bases, now populate templated boot support for efi.
-  sudo mkdir -p "${FLAGS_to}"/efi/boot
-
-  grub_args=(
-    -p /efi/boot
-    part_gpt gptpriority test fat ext2 normal boot chain
-    efi_gop configfile linux
-  # For more context on SBAT, see chromiumos-overlay/sys-boot/grub/README.md
-    -s "${SRC_ROOT}/third_party/chromiumos-overlay/sys-boot/grub/files/sbat.csv"
-  )
-  sudo grub-mkimage -O x86_64-efi \
-    -o "${FLAGS_to}/efi/boot/bootx64.efi" "${grub_args[@]}"
-  sudo grub-mkimage -O i386-efi \
-    -o "${FLAGS_to}/efi/boot/bootia32.efi" "${grub_args[@]}"
-  # Templated variables:
-  #  DMTABLEA, DMTABLEB -> '0 xxxx verity ... '
-  # This should be replaced during postinst when updating the ESP.
-  cat <<EOF | sudo dd of="${FLAGS_to}/efi/boot/grub.cfg" 2>/dev/null
-defaultA=0
-defaultB=1
-gptpriority \$grubdisk ${partition_num_kern_a} prioA
-gptpriority \$grubdisk ${partition_num_kern_b} prioB
-
-if [ \$prioA -lt \$prioB ]; then
-  set default=\$defaultB
-else
-  set default=\$defaultA
-fi
-
-set timeout=2
-
-# NOTE: These magic grub variables are a Chrome OS hack. They are not portable.
-
-menuentry "local image A" {
-  linux /syslinux/vmlinuz.A ${common_args} i915.modeset=1 cros_efi \
-      root=/dev/\$linuxpartA
-}
-
-menuentry "local image B" {
-  linux /syslinux/vmlinuz.B ${common_args} i915.modeset=1 cros_efi \
-      root=/dev/\$linuxpartB
-}
-
-menuentry "verified image A" {
-  linux /syslinux/vmlinuz.A ${common_args} ${verity_common} \
-      i915.modeset=1 cros_efi root=${ROOTDEV} dm-mod.create="DMTABLEA"
-}
-
-menuentry "verified image B" {
-  linux /syslinux/vmlinuz.B ${common_args} ${verity_common} \
-      i915.modeset=1 cros_efi root=${ROOTDEV} dm-mod.create="DMTABLEB"
-}
-
-# FIXME: usb doesn't support verified boot for now
-menuentry "Alternate USB Boot" {
-  linux (hd0,${partition_num_root_a})/boot/vmlinuz ${common_args} root=HDROOTUSB i915.modeset=1 cros_efi
-}
-EOF
-  if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
-    sudo sed -i \
-      -e '/^defaultA=/s:=.*:=2:' \
-      -e '/^defaultB=/s:=.*:=3:' \
-      "${FLAGS_to}/efi/boot/grub.cfg"
-  fi
-  info "Emitted ${FLAGS_to}/efi/boot/grub.cfg"
+  install_grub_efi_template
+  exit 0
+elif [[ "${FLAGS_arch}" == "arm64" ]] && \
+     [[ -d "/build/${FLAGS_board}/lib64/grub/arm64-efi/" ]]; then
+  install_grub_efi_template
   exit 0
 elif [[ "${FLAGS_arch}" = "arm64" ]]; then
   sudo mkdir -p "${FLAGS_to}"/efi/boot
diff --git a/build_library/disk_layout_v2.json b/build_library/disk_layout_v2.json
index 40f4da1..470e652 100644
--- a/build_library/disk_layout_v2.json
+++ b/build_library/disk_layout_v2.json
@@ -164,15 +164,15 @@
       },
       {
         "num": 3,
-        "size": "420 MiB",
-        "fs_size": "400 MiB",
+        "size": "480 MiB",
+        "fs_size": "460 MiB",
         "fs_options": {
           "ext2": "-i 32768"
         }
       },
       {
         "num": 1,
-        "size": "140 MiB"
+        "size": "200 MiB"
       }
     ],
     # Used for recovery images.
diff --git a/build_library/disk_layout_v3.json b/build_library/disk_layout_v3.json
index afb8953..de79713 100644
--- a/build_library/disk_layout_v3.json
+++ b/build_library/disk_layout_v3.json
@@ -155,8 +155,8 @@
     "factory_install": [
       {
         "num": 3,
-        "size": "420 MiB",
-        "fs_size": "400 MiB",
+        "size": "480 MiB",
+        "fs_size": "460 MiB",
         "fs_options": {
           "ext2": "-i 32768"
         }
@@ -171,7 +171,7 @@
       },
       {
         "num": 1,
-        "size": "140 MiB"
+        "size": "200 MiB"
       }
     ],
     "recovery": [
diff --git a/build_library/legacy_disk_layout.json b/build_library/legacy_disk_layout.json
index 705ab79..18d4b24 100644
--- a/build_library/legacy_disk_layout.json
+++ b/build_library/legacy_disk_layout.json
@@ -123,7 +123,8 @@
         "fs_format": "ext2",
         "fs_options": {
           "squashfs": "-noI -no-exports -comp lzo -Xalgorithm lzo1x_999 -Xcompression-level 9",
-          "ext2": "-i 65536",
+	  # lakitu: set bytes per inode at 32768 because we run out of inodes with 65536
+          "ext2": "-i 32768",
           "btrfs": "skinny-metadata"
         },
         "size": "2 GiB",
@@ -169,8 +170,8 @@
       },
       {
         "num": 3,
-        "size": "420 MiB",
-        "fs_size": "400 MiB",
+        "size": "480 MiB",
+        "fs_size": "460 MiB",
         "fs_options": {
           "ext2": "-i 32768"
         }
diff --git a/build_sdk_board b/build_sdk_board
index fe2af33..bb6542a 100755
--- a/build_sdk_board
+++ b/build_sdk_board
@@ -23,7 +23,7 @@
 
 FLAGS_HELP="usage: $(basename "$0") [flags]
 
-setup_host_board builds the chroot for the amd64-host (chroot) board.
+build_sdk_board builds the chroot for the amd64-host (chroot) board.
 This should not need to be called except by the SDK Builder.
 "
 
@@ -37,77 +37,18 @@
 
 BOARD=${FLAGS_board}
 
+CREATE_SDK_BOARD_ROOT_ARGS=("--board" "${BOARD}")
+if [[ ${FLAGS_force} -eq ${FLAGS_TRUE} ]]; then
+  CREATE_SDK_BOARD_ROOT_ARGS+=("--force")
+fi
+info_run "${SCRIPTS_DIR}/create_sdk_board_root" \
+  "${CREATE_SDK_BOARD_ROOT_ARGS[@]}"
+
 # Locations we will need
 BOARD_ROOT="/build/${BOARD}"
-CHROMIUMOS_OVERLAY="${CHROOT_TRUNK_DIR}/src/third_party/chromiumos-overlay"
-CHROMIUMOS_CONFIG="${CHROMIUMOS_OVERLAY}/chromeos/config"
-BOARD_ETC="${BOARD_ROOT}/etc"
-BOARD_SETUP="${BOARD_ETC}/make.conf.board_setup"
-BOARD_PROFILE="${BOARD_ETC}/portage/profile"
 
 eval "$(portageq envvar -v CHOST PKGDIR)"
 
-if [ -d "${BOARD_ROOT}" ]; then
-  if [[ ${FLAGS_force} -eq ${FLAGS_TRUE} ]]; then
-    echo "--force set.  Re-creating ${BOARD_ROOT}..."
-    # Removal takes long. Make it asynchronous.
-    TEMP_DIR=$(mktemp -d)
-    sudo mv "${BOARD_ROOT}" "${TEMP_DIR}"
-    sudo rm -rf --one-file-system "${TEMP_DIR}" &
-  fi
-fi
-
-# Setup the make.confs. We use the following:
-#    make.conf             <- Overall target make.conf [arm, x86, etc. version]
-#    make.conf.board_setup <- Declares CHOST, ROOT, etc.
-#    make.conf.board       <- Optional board-supplied make.conf.
-#    make.conf.user        <- User specified parameters.
-cmds=(
-  "mkdir -p '${BOARD_ROOT}' '${BOARD_ETC}' '${BOARD_PROFILE}' /usr/local/bin"
-  "ln -sf /etc/make.conf.user '${BOARD_ROOT}/etc/make.conf.user'"
-  "mkdir -p '${BOARD_ROOT}/etc/portage/hooks'"
-)
-for d in "${SCRIPTS_DIR}"/hooks/*; do
-  cmds+=( "ln -sfT '${d}' '${BOARD_ROOT}/etc/portage/hooks/${d##*/}'" )
-done
-
-cmds+=(
-  "ln -sf '${CHROMIUMOS_CONFIG}/make.conf.${BOARD}' \
-    '${BOARD_ETC}/make.conf'"
-  "cp -f '/etc/make.conf.host_setup' '${BOARD_ETC}/'"
-
-  # Setting up symlinks for bootstrapping multilib.
-  # See http://crosbug.com/14498
-  "mkdir -p '${BOARD_ROOT}'{/usr,}/lib64"
-  "ln -sfT lib64 '${BOARD_ROOT}/lib'"
-  "rm -rf '${BOARD_ROOT}/usr/lib'"
-  "ln -sfT lib64 '${BOARD_ROOT}/usr/lib'"
-
-  # Copying some files for bootstrapping empty chroot.
-  # See http://crosbug.com/14499
-  "mkdir -p '${BOARD_ETC}'/{init.d,xml}"
-  "cp /etc/xml/catalog '${BOARD_ETC}'/xml/"
-  "cp /etc/init.d/functions.sh '${BOARD_ETC}'/init.d/"
-)
-sudo_multi "${cmds[@]}"
-
-# Generating the standard configuration file (make.conf.board_setup) for the
-# sysroot.
-info_run cros_sysroot_utils generate-config --sysroot="${BOARD_ROOT}" \
-  --board="${BOARD}" --out-file="${BOARD_SETUP}"
-
-# Generate wrappers for portage helpers (equery, portageq, emerge, etc...).
-# Those are used to generate make.conf.board.
-info_run cros_sysroot_utils create-wrappers --sysroot="${BOARD_ROOT}" \
-  --friendlyname="${BOARD}"
-
-# Choose the default profile.
-if ! info_run cros_choose_profile --profile "" \
-      --board-root "${BOARD_ROOT}" --board "${BOARD}"; then
-  sudo rm -rf --one-file-system "${BOARD_ROOT}"
-  die "Selecting profile failed, removing incomplete board directory!"
-fi
-
 EMERGE_CMD="${CHROMITE_BIN}/parallel_emerge"
 mapfile -t TOOLCHAIN_PACKAGES < \
   <("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host)
@@ -150,26 +91,108 @@
 info_run sudo cp -a "${PKGDIR}" "${BOARD_ROOT}/packages"
 
 # Copy our chroot version into the newly packaged chroot.
-sudo cp -a "${CHROOT_VERSION_FILE}" "${BOARD_ROOT}${CHROOT_VERSION_FILE}"
+info_run sudo cp -a \
+  "${CHROOT_VERSION_FILE}" \
+  "${BOARD_ROOT}${CHROOT_VERSION_FILE}"
 
 # Now cleanup paths referencing the ROOT from the *.la files.
-sudo find "${BOARD_ROOT}" -type f -name '*.la' -exec \
+info_run sudo find "${BOARD_ROOT}" -type f -name '*.la' -exec \
   sed -i -e "s|${BOARD_ROOT}/|/|g" {} +
 
-# Remove wrapper scripts left behind in the sysroot. These are not supposed to
-# be part of the final filesystem.
-sudo rm -rf "${BOARD_ROOT}/build"
+# Remove wrapper scripts and any home directory contents left behind in the
+# sysroot. These are not supposed to be part of the final filesystem.
+info_run sudo rm -rf \
+  "${BOARD_ROOT}/build" \
+  "${BOARD_ROOT}"/run/* \
+  "${BOARD_ROOT}"/home/* \
+  "${BOARD_ROOT}"/etc/{,portage/}make.{conf,profile} \
+  "${BOARD_ROOT}/etc/make.conf.user" \
+  "${BOARD_ROOT}/var/cache/distfiles"
+
+# Setup host make.conf. This includes any overlay that we may be using and a
+# pointer to pre-built packages.
+cros_overlay="${CHROOT_TRUNK_DIR}/src/third_party/chromiumos-overlay"
+info_run sudo mkdir -p "${BOARD_ROOT}/etc/portage"
+info_run sudo ln -sf \
+  "${cros_overlay}/chromeos/config/make.conf.amd64-host" \
+  "${BOARD_ROOT}/etc/make.conf"
+info_run sudo ln -sf \
+  "${cros_overlay}/profiles/default/linux/amd64/10.0/sdk" \
+  "${BOARD_ROOT}/etc/portage/make.profile"
+
+# Create make.conf.user.
+cat <<\EOF | info_run sudo tee "${BOARD_ROOT}/etc/make.conf.user" >/dev/null
+# This file is useful for doing global (chroot and all board) changes.
+# Tweak emerge settings, ebuild env, etc...
+#
+# Make sure to append variables unless you really want to clobber all
+# existing settings.  e.g. You most likely want:
+#   FEATURES="${FEATURES} ..."
+#   USE="${USE} foo"
+# and *not*:
+#   USE="foo"
+#
+# This also is a good place to setup ACCEPT_LICENSE.
+EOF
+info_run sudo chmod 0644 "${BOARD_ROOT}/etc/make.conf.user"
 
 # Enable locale that some Chrome scripts assume exist.
-sudo sed -i -e '/^#en_US.UTF-8/s:#::' "${BOARD_ROOT}/etc/locale.gen"
-sudo mount --bind /dev "${BOARD_ROOT}/dev"
-sudo chroot "${BOARD_ROOT}" locale-gen -u
-sudo umount "${BOARD_ROOT}/dev"
+info_run sudo sed -i -e '/^#en_US.UTF-8/s:#::' "${BOARD_ROOT}/etc/locale.gen"
+info_run sudo mount --bind /dev "${BOARD_ROOT}/dev"
+info_run sudo chroot "${BOARD_ROOT}" locale-gen -u
+info_run sudo umount "${BOARD_ROOT}/dev"
 
 # b/278101251: /build/amd64-host doesn't include ccache's link tree by default,
 # which makes `FEATURES=ccache` quietly fail for host packages. Ensure it's
 # built here.
-sudo ROOT="${BOARD_ROOT}" "${BOARD_ROOT}/usr/bin/ccache-config" --install-links
+info_run sudo ROOT="${BOARD_ROOT}" \
+  "${BOARD_ROOT}/usr/bin/ccache-config" --install-links
+
+# Add chromite into python path.
+for python_path in "${BOARD_ROOT}/usr/lib/"python*.*/site-packages; do
+  info_run sudo mkdir -p "${python_path}"
+  info_run sudo ln -sfT "${CHROOT_TRUNK_DIR}"/chromite "${python_path}"/chromite
+done
+
+# Newer portage complains about bare overlays.  Create the file that crossdev
+# will also create later on.
+OVERLAYS_ROOT="/usr/local/portage"
+CROSSDEV_OVERLAY="${OVERLAYS_ROOT}/crossdev"
+CROSSDEV_METADATA="${BOARD_ROOT}/${CROSSDEV_OVERLAY}/metadata"
+info_run sudo mkdir -p -m 755 "${CROSSDEV_METADATA}"
+cat <<EOF | sudo tee "${CROSSDEV_METADATA}/layout.conf" >/dev/null
+# Autogenerated and managed by crossdev
+# Delete the above line if you want to manage this file yourself
+masters = portage-stable eclass-overlay chromiumos
+repo-name = crossdev
+use-manifests = true
+thin-manifests = true
+EOF
+
+PORTAGE_CACHE_DIRS=(
+  "${BOARD_ROOT}/var/lib/portage/pkgs"
+  "${BOARD_ROOT}/var/cache/"chromeos-{cache,chrome}
+)
+
+# Setup stable paths.
+info_run sudo mkdir -p -m 755 \
+  "${PORTAGE_CACHE_DIRS[@]}" \
+  "${BOARD_ROOT}/var/cache" \
+  "${BOARD_ROOT}/etc/profile.d" \
+  "${BOARD_ROOT}/run" \
+  "${BOARD_ROOT}/mnt/host" \
+  "${BOARD_ROOT}/mnt/host/out" \
+  "${BOARD_ROOT}/mnt/host/source"
+
+info_run sudo ln -sfT \
+  /mnt/host/source/src/chromium/depot_tools \
+  "${BOARD_ROOT}/mnt/host/depot_tools"
+
+info_run sudo ln -sfT \
+  chromeos-cache/distfiles "${BOARD_ROOT}/var/cache/distfiles"
+
+# Setup cache dirs.
+info_run sudo chmod 775 "${PORTAGE_CACHE_DIRS[@]}"
 
 command_completed
 echo "Done!"
diff --git a/chroot_version_hooks.d/210_rustc_upgrade b/chroot_version_hooks.d/210_rustc_upgrade
deleted file mode 100644
index 4af5386..0000000
--- a/chroot_version_hooks.d/210_rustc_upgrade
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2023 The ChromiumOS Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Clean local portage caches. We have stale mojom headers in chroot after camera
-# package consolidation (b/177958529).
-# Clean local portage caches. We have stale intermediates after the protobuf
-# # upgrade (crbug.com/1346059).
-# Clean caches again because of stale intermediates from dev-lang/rust upgrade
-# prior to droping the slot operator from dev-rust DEPENDs (b/245844306).
-sudo rm -rf /var/cache/portage/* /build/*/var/cache/portage/*
diff --git a/chroot_version_hooks.d/211_cleanup_dev_rust b/chroot_version_hooks.d/211_cleanup_dev_rust
deleted file mode 100644
index 2871d29..0000000
--- a/chroot_version_hooks.d/211_cleanup_dev_rust
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2021 The ChromiumOS Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Clean up rust packages which no longer have corresponding ebuilds
-#
-# This is achieved by:
-# * Running `eclean` to remove binary packages without a corresponding ebuild
-# * Running `equery l -b` to find ebuilds with no binary package
-# * Running `emerge -C` to remove those revisions.
-
-export CLEAN_DELAY=0
-
-do_clean() {
-  local suffix="$1"
-  local emerge_cmd=()
-  local args
-
-  if [[ -z "${suffix}" ]]; then
-    emerge_cmd+=( sudo )
-
-    qlist -IC ^cross- | sudo eclean -e /dev/stdin packages
-  else
-    "eclean${suffix}" packages
-  fi
-  emerge_cmd+=( "emerge${suffix}" )
-
-  args=( $("equery${suffix}" l -b --format='=$cpv' 'dev-rust/*' ) )
-  if [[ ${#args[@]} -eq 0 ]]; then
-    return
-  fi
-
-  "${emerge_cmd[@]}" -C "${args[@]}"
-}
-
-do_clean
-for board_root in /build/*; do
-  board_name=${board_root##*/}
-  if [[ -d "${board_root}/var/db/pkg/dev-rust" ]]; then
-    do_clean "-${board_name}"
-  fi
-done
diff --git a/chroot_version_hooks.d/212_update_third_party_crates_src b/chroot_version_hooks.d/212_update_third_party_crates_src
deleted file mode 100644
index 919ee90..0000000
--- a/chroot_version_hooks.d/212_update_third_party_crates_src
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright 2022 The ChromiumOS Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Update third-party-crates-src to sidestep b/247596883. See comment 7.
-sudo emerge -ug dev-rust/third-party-crates-src
diff --git a/chroot_version_hooks.d/213_cleanup_dev_rust b/chroot_version_hooks.d/213_cleanup_dev_rust
deleted file mode 120000
index 0fb3240..0000000
--- a/chroot_version_hooks.d/213_cleanup_dev_rust
+++ /dev/null
@@ -1 +0,0 @@
-211_cleanup_dev_rust
\ No newline at end of file
diff --git a/chroot_version_hooks.d/223_rewrite_sudoers b/chroot_version_hooks.d/223_rewrite_sudoers
new file mode 120000
index 0000000..bf2318e
--- /dev/null
+++ b/chroot_version_hooks.d/223_rewrite_sudoers
@@ -0,0 +1 @@
+../sdk_lib/rewrite-sudoers.d.sh
\ No newline at end of file
diff --git a/chroot_version_hooks.d/224_reset_crossdev b/chroot_version_hooks.d/224_reset_crossdev
new file mode 100644
index 0000000..ccda7ba
--- /dev/null
+++ b/chroot_version_hooks.d/224_reset_crossdev
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Force clean up cross-*/libcxx packages, as we need to remove libcxx-9999.
+# See: b/293898274
+mapfile -t pkgs < <(qlist -IC 'cross-.*/libcxx-9999$')
+if [[ ${#pkgs[@]} -gt 0 ]]; then
+  sudo emerge --quiet --rage-clean "${pkgs[@]}" || true
+fi
+
+# We also need to rewrite all the keyword files to what's listed in
+# toolchains-overlay.
+if [[ -d /etc/portage/package.accept_keywords ]]; then
+  sudo cp \
+    /mnt/host/source/src/third_party/toolchains-overlay/profiles/base/package.accept_keywords/cross-* \
+    /etc/portage/package.accept_keywords/
+fi
diff --git a/chroot_version_hooks.d/225_purge_sdk b/chroot_version_hooks.d/225_purge_sdk
new file mode 100644
index 0000000..2579690
--- /dev/null
+++ b/chroot_version_hooks.d/225_purge_sdk
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Force clean up some SDK packages that have been removed.
+pkgs=(
+  app-admin/eselect-mesa
+  app-admin/eselect-opengl
+  app-editors/qemacs
+  app-portage/eclass-manpages
+  chromeos-base/sirenia-tools
+  dev-cpp/gflags
+  dev-libs/dbus-glib
+  dev-embedded/cbootimage
+  dev-embedded/tegrarcm
+  dev-go/test
+  dev-libs/libyaml
+  dev-python/mysqlclient
+  dev-python/pypy3
+  dev-python/pypy3-exe-bin
+  dev-util/bazel:0
+  dev-util/sh
+  dev-util/unifdef
+  sys-apps/dbus
+  sys-apps/man-pages
+  sys-apps/ripgrep
+  sys-boot/bootstub
+  '<sys-devel/automake-1.15'
+  sys-fs/libfat
+)
+mapfile -t installed_pkgs < <(qlist -IC "${pkgs[@]}" || :)
+if [[ ${#installed_pkgs[@]} -gt 0 ]]; then
+  sudo emerge --rage -q "${installed_pkgs[@]}"
+fi
diff --git a/chroot_version_hooks.d/226_purge_sdk b/chroot_version_hooks.d/226_purge_sdk
new file mode 100644
index 0000000..f870f2f
--- /dev/null
+++ b/chroot_version_hooks.d/226_purge_sdk
@@ -0,0 +1,94 @@
+#!/bin/bash
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Force clean up some SDK packages that have been removed.
+pkgs=(
+  app-arch/pbzip2
+  app-arch/unrar
+  app-benchmarks/sysbench
+  app-doc/doxygen
+  app-shells/dash
+  app-shells/push
+  app-text/tree
+  chromeos-base/chromeos-activate-date
+  chromeos-base/crosvm-base
+  chromeos-base/update-policy-chromeos
+  chromeos-base/vpd
+  dev-db/mariadb-connector-c
+  dev-embedded/meta-embedded-toolkit
+  dev-go/brotli
+  dev-go/go-uuid
+  dev-go/mitmproxy
+  dev-lang/luajit
+  dev-libs/concurrencykit
+  dev-libs/crypto++
+  dev-libs/iniparser
+  dev-libs/libnl
+  dev-libs/libusb-compat
+  dev-libs/wayland
+  dev-python/appdirs
+  dev-python/argcomplete
+  dev-python/boto
+  dev-python/click
+  dev-python/entrypoints
+  dev-python/fasteners
+  dev-python/future
+  dev-python/gcs-oauth2-boto-plugin
+  dev-python/google-apitools
+  dev-python/google-cloud-logging
+  dev-python/google-reauth-python
+  dev-python/keyring
+  dev-python/linecache2
+  dev-python/monotonic
+  dev-python/oauth2client
+  dev-python/pathlib2
+  dev-python/pbr
+  dev-python/pip
+  dev-python/pyshark
+  dev-python/python-gflags
+  dev-python/pyu2f
+  dev-python/pyxattr
+  dev-python/retry-decorator
+  dev-python/secretstorage
+  dev-python/selenium
+  dev-python/toml
+  dev-python/traceback2
+  dev-python/unittest2
+  dev-rust/cros_async
+  dev-rust/io_uring
+  dev-rust/minijail
+  dev-rust/minijail-sys
+  dev-rust/serde_keyvalue
+  dev-rust/serde_keyvalue_derive
+  dev-rust/sync
+  dev-util/gob
+  dev-util/ragel
+  net-analyzer/wireshark
+  net-fs/sshfs
+  net-libs/grpc-web
+  net-libs/http-parser
+  net-libs/libpcap
+  net-misc/gsutil
+  sys-apps/hdparm
+  sys-apps/mmc-utils
+  sys-apps/smartmontools
+  sys-auth/pambase
+  '<sys-devel/autoconf-2.71'
+  '<sys-devel/automake-1.16'
+  sys-fs/fuse
+  sys-fs/fuse-common
+  sys-libs/pam
+  virtual/chromeos-activate-date
+  virtual/libusb:0
+  virtual/modutils
+  virtual/pam
+  virtual/update-policy
+  x11-libs/libXcursor
+  x11-libs/libxkbcommon
+)
+mapfile -t installed_pkgs < <(qlist -eICS "${pkgs[@]}" || :)
+if [[ ${#installed_pkgs[@]} -gt 0 ]]; then
+  sudo emerge --rage -q "${installed_pkgs[@]}"
+fi
diff --git a/chroot_version_hooks.d/227_force_yacc b/chroot_version_hooks.d/227_force_yacc
new file mode 100644
index 0000000..9b80f95
--- /dev/null
+++ b/chroot_version_hooks.d/227_force_yacc
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Workaround parallel cros_setup_toolchains race.
+# b/299321780
+set -x
+sudo emerge -gu app-alternatives/yacc virtual/yacc
diff --git a/common.sh b/common.sh
index 5529fab..b49174b 100644
--- a/common.sh
+++ b/common.sh
@@ -411,12 +411,6 @@
   fi
 }
 
-# Clears out stale shadow-utils locks in the given target root.
-sudo_clear_shadow_locks() {
-  info "Clearing shadow utils lockfiles under $1"
-  sudo rm -f "$1/etc/"{passwd,group,shadow,gshadow}.lock*
-}
-
 # Locate all mounts below a specified directory.
 #
 # $1 - The root tree.
diff --git a/cos/README.md b/cos/README.md
deleted file mode 100644
index a4895f0..0000000
--- a/cos/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-This folder contains image utilities from Container-Optimized OS(COS) team
-to fullfill the functionalities to support image formwat convertion between
-different platfroms:
-
-* COS on vSphere
-* COS on AWS
-* COS on AZure
-* COS on Borg
diff --git a/cos/convert_image.sh b/cos/convert_image.sh
deleted file mode 100755
index 508b9b1..0000000
--- a/cos/convert_image.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# convert_image.sh --board=[board] --image_type=[type] --image_format=[format]
-#
-# This script converts a board's image(base, test, dev) to the specified format
-# like vmdk, vhd so that the image can be used by platform other than GCP.
-#
-SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
-SCRIPT_ROOT=${SCRIPT_ROOT%cos}
-. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
-
-# Script must be run inside the chroot.
-restart_in_chroot_if_needed "$@"
-
-DEFINE_string board "${DEFAULT_BOARD}" \
-  "The board to build an image for."
-DEFINE_string image_type "base" \
-  "Image type to process, base, test or dev."
-DEFINE_string image_format "" \
-  "Image format to be converted to, vmdk or vhd."
-DEFINE_string image_dir "" "Path to the folder to store netboot images."
-
-# Parse command line.
-FLAGS "$@" || exit 1
-eval set -- "${FLAGS_ARGV}"
-
-. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
-. "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
-
-switch_to_strict_mode
-
-set -x
-# build_packages artifact output.
-SYSROOT="${GCLIENT_ROOT}/chroot/build/${FLAGS_board}"
-# build_image artifact output.
-
-IMAGE_DIR="${CHROOT_TRUNK_DIR}"/src/build/images/"${FLAGS_board}"/latest
-if [ -n "${FLAGS_image_dir}" ]; then
-  IMAGE_DIR=${FLAGS_image_dir}
-fi
-IMAGE_TYPE=${FLAGS_image_type}
-
-case ${FLAGS_image_format} in
-  "vmdk")
-  qemu-img convert -p -o subformat=streamOptimized -O vmdk\
-    ${IMAGE_DIR}/chromiumos_${IMAGE_TYPE}_image.bin \
-    ${IMAGE_DIR}/chromiumos_${IMAGE_TYPE}_image.vmdk
-  ;;
-
-  "vhd")
-  qemu-img convert -f raw -o subformat=fixed,force_size -O vpc \
-    ${IMAGE_DIR}/chromiumos_${IMAGE_TYPE}_image.bin \
-    ${IMAGE_DIR}/chromiumos_${IMAGE_TYPE}_image.vhd
-  ;;
-
-  *)
-  ;;
-esac
diff --git a/cos/cos.json b/cos/cos.json
deleted file mode 100644
index a22b87e..0000000
--- a/cos/cos.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-    "DiskProvisioning": "thin",
-    "IPAllocationPolicy": "dhcpPolicy",
-    "IPProtocol": "IPv4",
-    "InjectOvfEnv": false,
-    "MarkAsTemplate": false,
-    "Name": null,
-    "NetworkMapping": [
-        {
-            "Name": "VM Network",
-            "Network": ""
-        }
-    ],
-    "PowerOn": false,
-    "PropertyMapping": [
-        {
-            "Key": "instance-id",
-            "Value": "id-ovf"
-        },
-        {
-            "Key": "hostname",
-            "Value": ""
-        },
-        {
-            "Key": "seedfrom",
-            "Value": ""
-        },
-        {
-            "Key": "public-keys",
-            "Value": ""
-        },
-        {
-            "Key": "user-data",
-            "Value": ""
-        },
-        {
-            "Key": "password",
-            "Value": ""
-        }
-    ],
-    "WaitForIP": false
-}
diff --git a/cos/make_ova.sh b/cos/make_ova.sh
deleted file mode 100755
index 543a890..0000000
--- a/cos/make_ova.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# make_ova.sh -d [vmdk file] -o [ova file] -p[product-name] \
-#  -n[image-name] -t ${TEMPLATE_OVF}
-#
-# This scripts creates .ova file from given disk image and OVA template.
-#
-
-set -o xtrace
-set -o errexit
-set -o nounset
-
-SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
-TEMPLATE_PATH=${SCRIPT_ROOT}/template.ovf
-WORKSPACE=${SCRIPT_ROOT%\/src\/scripts\/cos}
-BOARD=anthos-amd64-vsphere
-PRODUCT_NAME="Anthos OnPrem on COS"
-IMAGE_NAME="COS"
-IMAGE_TYPE="test"
-IMAGE_ROOT=${WORKSPACE}/src/build/images/${BOARD}/latest
-DISK_FILE=${IMAGE_ROOT}/chromiumos_${IMAGE_TYPE}_image.vmdk
-OUTPUT_FILE=${IMAGE_ROOT}/chromiumos_${IMAGE_TYPE}_image.ova
-
-usage() {
-  echo "Usage: $0 -b board -d disk.vmdk \
-    -p product-name -n image-name \
-    -o output-file [-t template.ovf]"
-}
-
-while getopts ":b:d:p:n:t:o:h" arg; do
-  case $arg in
-    b) BOARDD=$OPTARG ;;
-    d) DISK_FILE=$OPTARG ;;
-    p) PRODUCT_NAME=$OPTARG ;;
-    n) IMAGE_NAME=$OPTARG ;;
-    t) TEMPLATE_PATH=$OPTARG ;;
-    o) OUTPUT_FILE=$OPTARG ;;
-    h)
-      usage
-      exit 0
-      ;;
-    *)
-      usage
-      exit 1
-      ;;
-  esac
-done
-
-: "${BOARD?Missing -d BOARD value}"
-: "${DISK_FILE?Missing -d DISK_FILE value}"
-: "${PRODUCT_NAME?Missing -p PRODUCT_NAME value}"
-: "${IMAGE_NAME?Missing -n IMAGE_NAME value}"
-: "${TEMPLATE_PATH?Missing -t TEMPLATE_PATH value}"
-: "${OUTPUT_FILE?Missing -o OUTPUT_FILE value}"
-
-if [[ ! -f ${TEMPLATE_PATH} ]]; then
-  echo "Cannot find template at ${TEMPLATE_PATH}"
-  exit 1
-fi
-
-XML_NS=(
-  -N 'x=http://schemas.dmtf.org/ovf/envelope/1'
-  -N 'ovf=http://schemas.dmtf.org/ovf/envelope/1'
-  -N 'vssd=http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData'
-)
-
-WORK_DIR=$(mktemp -d)
-trap 'rm -rf "${WORK_DIR}"' EXIT
-
-# xmlstar does not support multiple updates at once, and we need to provide
-# namespaces to every invocation, so disable quoting warning.
-# shellcheck disable=SC2086
-xmlstarlet ed ${XML_NS[*]} \
-  --update '//x:VirtualSystem/@ovf:id' --value "${IMAGE_NAME}" \
-  "${TEMPLATE_PATH}" \
-  | xmlstarlet ed ${XML_NS[*]} \
-    --update '//x:VirtualSystem/x:Name' --value "${IMAGE_NAME}" \
-  | xmlstarlet ed ${XML_NS[*]} \
-    --update '//vssd:VirtualSystemIdentifier' --value "${IMAGE_NAME}" \
-    > "${WORK_DIR}/tmp.ovf"
-
-# Add a disk image to temporary .ovf
-cot --force add-disk "${DISK_FILE}" "${WORK_DIR}/tmp.ovf" \
-  -o "${WORK_DIR}/image.ovf" \
-  -f vmdisk1 -t harddisk -c scsi
-
-# Add product information and convert .ovf to .ova
-cot --force edit-product "${WORK_DIR}/image.ovf" \
-  -o "${OUTPUT_FILE}" \
-  --product "${PRODUCT_NAME}"
-
diff --git a/cos/run_vmtests.sh b/cos/run_vmtests.sh
deleted file mode 100755
index ee25665..0000000
--- a/cos/run_vmtests.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# cos/run_vmtests.sh --board=[anthos-amd64-vsphere]
-#
-# This script builds and runs VMTests for a given board.
-
-SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
-SCRIPT_ROOT=${SCRIPT_ROOT%cos}
-. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
-
-# Script must be run inside the chroot.
-restart_in_chroot_if_needed "$@"
-
-DEFINE_string board "${DEFAULT_BOARD}" \
-  "The board to build an image for."
-DEFINE_string image_type "test" \
-  "Image type to process, base, test or dev."
-DEFINE_string image_dir "" "Path to the folder to store netboot images."
-
-# Parse command line.
-FLAGS "$@" || exit 1
-eval set -- "${FLAGS_ARGV}"
-
-. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
-. "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
-
-switch_to_strict_mode
-
-set -x
-# build_packages artifact output.
-SYSROOT="${GCLIENT_ROOT}/chroot/build/${FLAGS_board}"
-# build_image artifact output.
-
-IMAGE_DIR="${CHROOT_TRUNK_DIR}"/src/build/images/"${FLAGS_board}"/latest
-if [ -n "${FLAGS_image_dir}" ]; then
-  IMAGE_DIR=${FLAGS_image_dir}
-fi
-
-BOARD_ARCH=$(portageq-${FLAGS_board} envvar ARCH)
-if [[ ${BOARD_ARCH} == "amd64" ]]; then
-  BOARD_ARCH="x86_64"
-elif [[ ${BOARD_ARCH} == "arm64" ]]; then
-  BOARD_ARCH="aarch64"
-else
-  echo "Unsupported ${BOARD_ARCH}"
-  exit 1
-fi
-
-cros_run_vm_test --board ${BOARD} \
-  --image-path ${IMAGE_DIR}/chromiumos_${FLAGS_image_type}_image.bin \
-  --private-key ${IMAGE_DIR}/id_rsa \
-  --test_that-args=--model=ad_hoc_model \
-  --copy-on-write \
-  --start-vm \
-  --qemu-arch ${BOARD_ARCH} \
-  --autotest 'suite:smoke'
diff --git a/cos/template.ovf b/cos/template.ovf
deleted file mode 100644
index f8b5220..0000000
--- a/cos/template.ovf
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Envelope xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:cim="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <References>
-  </References>
-  <DiskSection>
-    <Info>Virtual disk information</Info>
-  </DiskSection>
-  <NetworkSection>
-    <Info>The list of logical networks</Info>
-    <Network ovf:name="VM Network">
-      <Description>The VM Network network</Description>
-    </Network>
-  </NetworkSection>
-  <VirtualSystem ovf:id="__NAME__">
-    <Info>A virtual machine</Info>
-    <Name>__NAME__</Name>
-    <OperatingSystemSection ovf:id="94" vmw:osType="genericLinuxGuest">
-      <Info>The kind of installed guest operating system</Info>
-      <Description>Other Linux</Description>
-    </OperatingSystemSection>
-
-    <ProductSection ovf:required="false">
-      <Info>Cloud-Init customization</Info>
-      <Product>__PRODUCT_REPLACED_BY_COT__</Product>
-      <Property ovf:key="instance-id" ovf:type="string" ovf:userConfigurable="true" ovf:value="id-ovf">
-          <Label>A Unique Instance ID for this instance</Label>
-          <Description>Specifies the instance id.  This is required and used to determine if the machine should take "first boot" actions</Description>
-      </Property>
-      <Property ovf:key="hostname" ovf:type="string" ovf:userConfigurable="true" ovf:value="cosguest">
-          <Description>Specifies the hostname for the appliance</Description>
-      </Property>
-      <Property ovf:key="seedfrom" ovf:type="string" ovf:userConfigurable="true">
-          <Label>Url to seed instance data from</Label>
-          <Description>This field is optional, but indicates that the instance should 'seed' user-data and meta-data from the given url.  If set to 'http://tinyurl.com/sm-' is given, meta-data will be pulled from http://tinyurl.com/sm-meta-data and user-data from http://tinyurl.com/sm-user-data.  Leave this empty if you do not want to seed from a url.</Description>
-      </Property>
-      <Property ovf:key="public-keys" ovf:type="string" ovf:userConfigurable="true" ovf:value="">
-          <Label>ssh public keys</Label>
-          <Description>This field is optional, but indicates that the instance should populate the default user's 'authorized_keys' with this value</Description>
-      </Property>
-      <Property ovf:key="user-data" ovf:type="string" ovf:userConfigurable="true" ovf:value="">
-          <Label>Encoded user-data</Label>
-          <Description>In order to fit into a xml attribute, this value is base64 encoded . It will be decoded, and then processed normally as user-data.</Description>
-          <!--  The following represents '#!/bin/sh\necho "hi world"'
-          ovf:value="IyEvYmluL3NoCmVjaG8gImhpIHdvcmxkIgo="
-        -->
-      </Property>
-      <Property ovf:key="password" ovf:type="string" ovf:userConfigurable="true" ovf:value="">
-          <Label>Default User's password</Label>
-          <Description>If set, the default user's password will be set to this value to allow password based login.  The password will be good for only a single login.  If set to the string 'RANDOM' then a random password will be generated, and written to the console.</Description>
-      </Property>
-    </ProductSection>
-
-    <VirtualHardwareSection ovf:transport="com.vmware.guestInfo">
-      <Info>Virtual hardware requirements</Info>
-      <System>
-        <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
-        <vssd:InstanceID>0</vssd:InstanceID>
-        <vssd:VirtualSystemIdentifier>__NAME__</vssd:VirtualSystemIdentifier>
-        <vssd:VirtualSystemType>vmx-13</vssd:VirtualSystemType>
-      </System>
-      <Item>
-        <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
-        <rasd:Description>Number of Virtual CPUs</rasd:Description>
-        <rasd:ElementName>2 virtual CPU(s)</rasd:ElementName>
-        <rasd:InstanceID>1</rasd:InstanceID>
-        <rasd:ResourceType>3</rasd:ResourceType>
-        <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
-      </Item>
-      <Item>
-        <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
-        <rasd:Description>Memory Size</rasd:Description>
-        <rasd:ElementName>1024MB of memory</rasd:ElementName>
-        <rasd:InstanceID>2</rasd:InstanceID>
-        <rasd:ResourceType>4</rasd:ResourceType>
-        <rasd:VirtualQuantity>1024</rasd:VirtualQuantity>
-      </Item>
-      <Item>
-        <rasd:Address>0</rasd:Address>
-        <rasd:Description>SCSI Controller</rasd:Description>
-        <rasd:ElementName>SCSI Controller 0</rasd:ElementName>
-        <rasd:InstanceID>3</rasd:InstanceID>
-        <rasd:ResourceSubType>VirtualSCSI</rasd:ResourceSubType>
-        <rasd:ResourceType>6</rasd:ResourceType>
-      </Item>
-      <Item>
-        <rasd:Address>1</rasd:Address>
-        <rasd:Description>IDE Controller</rasd:Description>
-        <rasd:ElementName>VirtualIDEController 1</rasd:ElementName>
-        <rasd:InstanceID>4</rasd:InstanceID>
-        <rasd:ResourceType>5</rasd:ResourceType>
-      </Item>
-      <Item>
-        <rasd:Address>0</rasd:Address>
-        <rasd:Description>IDE Controller</rasd:Description>
-        <rasd:ElementName>VirtualIDEController 0</rasd:ElementName>
-        <rasd:InstanceID>5</rasd:InstanceID>
-        <rasd:ResourceType>5</rasd:ResourceType>
-      </Item>
-      <Item ovf:required="false">
-        <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
-        <rasd:ElementName>VirtualVideoCard</rasd:ElementName>
-        <rasd:InstanceID>6</rasd:InstanceID>
-        <rasd:ResourceType>24</rasd:ResourceType>
-        <vmw:Config ovf:required="false" vmw:key="enable3DSupport" vmw:value="false"/>
-        <vmw:Config ovf:required="false" vmw:key="use3dRenderer" vmw:value="automatic"/>
-        <vmw:Config ovf:required="false" vmw:key="useAutoDetect" vmw:value="false"/>
-        <vmw:Config ovf:required="false" vmw:key="videoRamSizeInKB" vmw:value="4096"/>
-      </Item>
-      <Item ovf:required="false">
-        <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
-        <rasd:ElementName>VirtualVMCIDevice</rasd:ElementName>
-        <rasd:InstanceID>7</rasd:InstanceID>
-        <rasd:ResourceSubType>vmware.vmci</rasd:ResourceSubType>
-        <rasd:ResourceType>1</rasd:ResourceType>
-        <vmw:Config ovf:required="false" vmw:key="allowUnrestrictedCommunication" vmw:value="false"/>
-      </Item>
-      <Item>
-        <rasd:AddressOnParent>7</rasd:AddressOnParent>
-        <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
-        <rasd:Connection>VM Network</rasd:Connection>
-        <rasd:Description>VMXNET3 ethernet adapter on &quot;VM Network&quot;</rasd:Description>
-        <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
-        <rasd:InstanceID>11</rasd:InstanceID>
-        <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
-        <rasd:ResourceType>10</rasd:ResourceType>
-        <vmw:Config ovf:required="false" vmw:key="wakeOnLanEnabled" vmw:value="true"/>
-      </Item>
-      <vmw:Config ovf:required="false" vmw:key="cpuHotAddEnabled" vmw:value="false"/>
-      <vmw:Config ovf:required="false" vmw:key="cpuHotRemoveEnabled" vmw:value="false"/>
-      <vmw:Config ovf:required="false" vmw:key="firmware" vmw:value="bios"/>
-      <vmw:Config ovf:required="false" vmw:key="virtualICH7MPresent" vmw:value="false"/>
-      <vmw:Config ovf:required="false" vmw:key="virtualSMCPresent" vmw:value="false"/>
-      <vmw:Config ovf:required="false" vmw:key="memoryHotAddEnabled" vmw:value="false"/>
-      <vmw:Config ovf:required="false" vmw:key="nestedHVEnabled" vmw:value="false"/>
-      <vmw:Config ovf:required="false" vmw:key="powerOpInfo.powerOffType" vmw:value="preset"/>
-      <vmw:Config ovf:required="false" vmw:key="powerOpInfo.resetType" vmw:value="preset"/>
-      <vmw:Config ovf:required="false" vmw:key="powerOpInfo.standbyAction" vmw:value="checkpoint"/>
-      <vmw:Config ovf:required="false" vmw:key="powerOpInfo.suspendType" vmw:value="preset"/>
-      <vmw:Config ovf:required="false" vmw:key="tools.afterPowerOn" vmw:value="true"/>
-      <vmw:Config ovf:required="false" vmw:key="tools.afterResume" vmw:value="true"/>
-      <vmw:Config ovf:required="false" vmw:key="tools.beforeGuestShutdown" vmw:value="true"/>
-      <vmw:Config ovf:required="false" vmw:key="tools.beforeGuestStandby" vmw:value="true"/>
-      <vmw:Config ovf:required="false" vmw:key="tools.syncTimeWithHost" vmw:value="false"/>
-      <vmw:Config ovf:required="false" vmw:key="tools.toolsUpgradePolicy" vmw:value="manual"/>
-    </VirtualHardwareSection>
-  </VirtualSystem>
-</Envelope>
diff --git a/create_sdk_board_root b/create_sdk_board_root
new file mode 100755
index 0000000..15d5140
--- /dev/null
+++ b/create_sdk_board_root
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# shellcheck source=common.sh
+. "$(dirname "$0")/common.sh" || exit 1
+
+# Script must run inside the chroot
+restart_in_chroot_if_needed "$@"
+
+assert_not_root_user
+
+# Developer-visible flags.
+DEFINE_string board "amd64-host" \
+  "The name of the board to set up."
+DEFINE_string profile "" \
+  "The name of the profile to set up."
+DEFINE_boolean force "${FLAGS_FALSE}" \
+  "Force re-creating board root."
+
+FLAGS_HELP="usage: $(basename "$0") [flags]
+
+create_sdk_board_root creates the board root for the amd64-host (chroot) board.
+This should not need to be called except by the SDK Builder or Alchemy builders.
+"
+
+# Parse command line flags
+FLAGS "$@" || exit 1
+eval set -- "${FLAGS_ARGV}"
+
+# Only now can we die on error.  shflags functions leak non-zero error codes,
+# so will die prematurely if 'switch_to_strict_mode' is specified before now.
+switch_to_strict_mode
+
+BOARD=${FLAGS_board}
+
+# Locations we will need
+BOARD_ROOT="/build/${BOARD}"
+CHROMIUMOS_OVERLAY="${CHROOT_TRUNK_DIR}/src/third_party/chromiumos-overlay"
+CHROMIUMOS_CONFIG="${CHROMIUMOS_OVERLAY}/chromeos/config"
+BOARD_ETC="${BOARD_ROOT}/etc"
+BOARD_SETUP="${BOARD_ETC}/make.conf.board_setup"
+BOARD_PROFILE="${BOARD_ETC}/portage/profile"
+
+if [ -d "${BOARD_ROOT}" ]; then
+  if [[ ${FLAGS_force} -eq ${FLAGS_TRUE} ]]; then
+    echo "--force set.  Re-creating ${BOARD_ROOT}..."
+    # Removal can take a long time, so do it asynchronously.
+    TEMP_DIR=$(mktemp -d -p "$(dirname "${BOARD_ROOT}")")
+    info_run sudo mv "${BOARD_ROOT}" "${TEMP_DIR}"
+    info_run sudo rm -rf --one-file-system "${TEMP_DIR}" &
+  fi
+fi
+
+# Setup the make.confs. We use the following:
+#    make.conf             <- Overall target make.conf [arm, x86, etc. version]
+#    make.conf.board_setup <- Declares CHOST, ROOT, etc.
+#    make.conf.board       <- Optional board-supplied make.conf.
+#    make.conf.user        <- User specified parameters.
+cmds=(
+  "mkdir -p '${BOARD_ROOT}' '${BOARD_ETC}' '${BOARD_PROFILE}' /usr/local/bin"
+  "ln -sf /etc/make.conf.user '${BOARD_ROOT}/etc/make.conf.user'"
+  "mkdir -p '${BOARD_ROOT}/etc/portage/hooks'"
+)
+for d in "${SCRIPTS_DIR}"/hooks/*; do
+  cmds+=( "ln -sfT '${d}' '${BOARD_ROOT}/etc/portage/hooks/${d##*/}'" )
+done
+
+cmds+=(
+  "ln -sf '${CHROMIUMOS_CONFIG}/make.conf.${BOARD}' \
+    '${BOARD_ETC}/make.conf'"
+  "cp -f '/etc/make.conf.host_setup' '${BOARD_ETC}/'"
+
+  # Setting up symlinks for bootstrapping multilib.
+  # See http://crosbug.com/14498
+  "mkdir -p '${BOARD_ROOT}'{/usr,}/lib64"
+  "ln -sfT lib64 '${BOARD_ROOT}/lib'"
+  "rm -rf '${BOARD_ROOT}/usr/lib'"
+  "ln -sfT lib64 '${BOARD_ROOT}/usr/lib'"
+
+  # Copying some files for bootstrapping empty chroot.
+  # See http://crosbug.com/14499
+  "mkdir -p '${BOARD_ETC}'/{init.d,xml}"
+  "cp /etc/xml/catalog '${BOARD_ETC}'/xml/"
+  "cp /etc/init.d/functions.sh '${BOARD_ETC}'/init.d/"
+)
+sudo_multi "${cmds[@]}"
+
+# Generating the standard configuration file (make.conf.board_setup) for the
+# sysroot.
+info_run cros_sysroot_utils generate-config --sysroot="${BOARD_ROOT}" \
+  --board="${BOARD}" --out-file="${BOARD_SETUP}"
+
+# Generate wrappers for portage helpers (equery, portageq, emerge, etc...).
+# Those are used to generate make.conf.board.
+info_run cros_sysroot_utils create-wrappers --sysroot="${BOARD_ROOT}" \
+  --friendlyname="${BOARD}"
+
+# Choose the profile.
+if ! info_run cros_choose_profile --profile "${FLAGS_profile}" \
+      --board-root "${BOARD_ROOT}" --board "${BOARD}"; then
+  info_run sudo rm -rf --one-file-system "${BOARD_ROOT}"
+  die "Selecting profile failed, removing incomplete board directory!"
+fi
+
+command_completed
+echo "Created SDK board root at ${BOARD_ROOT}"
diff --git a/cros_show_stacks b/cros_show_stacks
index 28349b2..70577c5 100755
--- a/cros_show_stacks
+++ b/cros_show_stacks
@@ -6,8 +6,7 @@
 
 # Script to generate stackdumps from a machine or dmp files.
 
-
-SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+SCRIPT_ROOT="$(dirname "$(readlink -f "$0")")"
 # shellcheck source=common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
 # shellcheck source=remote_access.sh
diff --git a/cros_workon_make b/cros_workon_make
deleted file mode 100755
index 8d7dfb0..0000000
--- a/cros_workon_make
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/bin/bash
-
-# Copyright 2010 The ChromiumOS Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# Simple wrapper script to build a cros_workon package incrementally.
-# You must already be cros_workon'ing the package in question.
-
-# shellcheck source=lib/shflags/shflags
-. /usr/share/misc/shflags || exit 1
-
-GCLIENT_ROOT="/mnt/host/source"
-DEFAULT_BOARD="$(cat "${GCLIENT_ROOT}"/src/scripts/.default_board 2>/dev/null)"
-
-info() { echo "INFO: $*"; }
-warn() { echo "WARN: $*"; }
-error() { echo "ERROR: $*"; }
-die() { error "$@"; exit 1; }
-
-DEFINE_string board "${DEFAULT_BOARD}" \
-    "Board for which to build the package."
-DEFINE_boolean test "${FLAGS_FALSE}" \
-  "Compile and run tests as well."
-DEFINE_boolean reconf "${FLAGS_FALSE}" \
-  "Re-run configure and prepare steps."
-DEFINE_boolean install "${FLAGS_FALSE}" \
-  "Incrementally build and install your package."
-DEFINE_boolean scrub "${FLAGS_FALSE}" \
-  "Blow away all in-tree files not managed by git."
-
-set -e
-# Parse command line.
-FLAGS "$@" || exit 1
-eval set -- "${FLAGS_ARGV}"
-
-if [ $# -lt 1 ]; then
-  echo "Usage: ${0} [OPTIONS] <package (read: ebuild) basename> [target args]"
-  exit 1
-fi
-
-if [ -z "${FLAGS_board}" ]; then
-  die "--board is required"
-fi
-
-if [ -n "${FLAGS_board}" ]; then
-  EBUILDCMD=ebuild-"${FLAGS_board}"
-  EMERGECMD=emerge-"${FLAGS_board}"
-  EQUERYCMD=equery-"${FLAGS_board}"
-  BOARD="${FLAGS_board}"
-fi
-
-pkg="${1}"
-shift
-if [ "${pkg}" = "." ]; then
-  if ! pkg=$(git config workon.pkg); then
-    die "workon.pkg not set in git config for this project"
-  fi
-fi
-
-unstable_suffix="9999"
-workon_name="${pkg}-${unstable_suffix}"
-pkgfile=
-
-# Find the ebuild file, ensure the caller is workon'ing the package.
-if ! pkgfile=$("${EQUERYCMD}" which "${workon_name}" 2> /dev/null); then
-  BOARD_KEYWORD="$(portageq-${FLAGS_board} envvar ARCH)"
-  if ACCEPT_KEYWORDS="~${BOARD_KEYWORD}" "${EQUERYCMD}" which "${workon_name}" \
-      > /dev/null 2>&1; then
-    die "run 'cros_workon --board ${BOARD} start ${pkg}' first!" 1>&2
-  fi
-  die "error looking up package ${pkg}"
-fi
-
-if [ "${FLAGS_scrub}" = "${FLAGS_TRUE}" ]; then
-  warn "--scrub will destroy ALL FILES unknown to git!"
-  read -p "Are you sure you want to do this? [y|N]" resp
-  if egrep -qi "^y(es)?$" <(echo -n "${resp}"); then
-    eval $(${EBUILDCMD} $(${EQUERYCMD} which ${workon_name}) info)
-    srcdir=$(readlink -m ${CROS_WORKON_SRCDIR})
-    project_path=${srcdir#${GCLIENT_ROOT}/}
-    if ! (cd "${GCLIENT_ROOT}/${project_path}" && git clean -dxf); then
-      die "Could not scrub source directory"
-    fi
-  else
-    info "Not scrubbing; exiting gracefully"
-  fi
-  exit 0
-fi
-
-# Find the portage work directory for this package.
-workpath=$(\
-    echo "${pkgfile}" | \
-        awk -F '/' '{ print $(NF-2) "/" $(NF-1) }')-"${unstable_suffix}"
-workpath="/build/${BOARD}/tmp/portage/${workpath}"
-
-# Export vars that the ebuild env needs from us.
-export SANDBOX_WRITE=~/chromiumos
-export CROS_WORKON_INPLACE=1
-export CROS_WORKON_MAKE_COMPILE_ARGS="$*"
-
-# The ebuild commands we run rely on portage automatically running earlier
-# phases for us.  Append in case there is something already in the env.
-FEATURES+=" -noauto"
-export FEATURES
-
-# Vars that we want to pass through for the user.
-PASS_THROUGH_VARS=(
-  # cros-workon.eclass vars.
-  CROS_WORKON_MAKE_COMPILE_ARGS
-  # Common test vars.
-  GTEST_ARGS
-  # Platform eclass vars.
-  P2_TEST_FILTER
-  P2_VMODULE
-)
-
-# Determine if we're going to do tests, set up commands appropriately.
-to_do="compile"
-if [ "${FLAGS_test}" = "${FLAGS_TRUE}" ]; then
-  to_do="test"
-  FEATURES+=" test"
-  rm -f "${workpath}/.tested"
-fi
-
-# See if the ebuild declares CROS_WORKON_OUTOFTREE_BUILD=1.
-is_cros_workon_outoftree_build() {
-  grep -qs '^CROS_WORKON_OUTOFTREE_BUILD=.*1' "${pkgfile}"
-}
-
-workdir="${workpath}/work/${workon_name}"
-if [[ ! -h "${workdir}" ]] && ! is_cros_workon_outoftree_build; then
-  warn "Cleaning up stale workdir: ${workdir}"
-  FLAGS_reconf="${FLAGS_TRUE}"  # To force symlinking in the user's src dir.
-fi
-
-if [ "${FLAGS_install}" = "${FLAGS_TRUE}" ]; then
-  exec "${EMERGECMD}" --nodeps "${pkg}"
-fi
-
-clean=
-if [ "${FLAGS_reconf}" = "${FLAGS_TRUE}" ]; then
-  clean="clean"
-else
-  rm -f "${workpath}/.compiled"
-  envf="${workpath}/temp/environment"
-  if [[ -f "${envf}" ]]; then
-    for v in ${PASS_THROUGH_VARS[@]}; do
-      # We delete it independently in case the var wasn't set initially.
-      sed -i -e "/^declare .. ${v}=/d" "${envf}"
-      printf 'declare -x %s="%s"\n' "${v}" "${!v}" >> "${envf}"
-    done
-  fi
-fi
-exec "${EBUILDCMD}" "${pkgfile}" ${clean} "${to_do}"
diff --git a/hooks/filesystem-layout.py b/hooks/filesystem-layout.py
index 1e97cb8..9e09712 100755
--- a/hooks/filesystem-layout.py
+++ b/hooks/filesystem-layout.py
@@ -44,11 +44,20 @@
     "boot",
     "build",
     "dev",
+    "efi",
     "firmware",
     # TODO(): We should clean this up.
     "postinst",
 }
 
+# Paths that are allowed in the / dir for EdgeOS.
+VALID_EDGEOS_ROOT = {
+    "data",
+    "export",
+    "logs",
+    "user",
+}
+
 # Paths that are allowed in the / dir for the SDK chroot.
 VALID_HOST_ROOT = set()
 
@@ -83,6 +92,12 @@
 # Paths that are allowed in the /usr dir for the SDK chroot.
 VALID_HOST_USR = set()
 
+# Paths that are allowed in the /usr dir for the EdgeOS boards
+VALID_EDGEOS_USR = {
+    "crosstool",
+    "grte",
+}
+
 # Paths under /usr that should not have any subdirs.
 NOSUBDIRS_USR = {
     "bin",
@@ -110,7 +125,7 @@
 # Ignore some packages installing into /var for now.
 # NB: Do *not* add more packages here.
 BAD_VAR_PACKAGES = {
-    "app-accessibility/brltty",
+    ("app-accessibility/brltty", "6.5"),
     "app-admin/eselect",
     "app-admin/rsyslog",
     "app-admin/sudo",
@@ -129,7 +144,6 @@
     "net-dns/dnsmasq",
     "net-firewall/iptables",
     "net-firewall/nftables",
-    "net-fs/samba",
     "net-misc/chrony",
     "net-misc/dhcpcd",
     "net-misc/openssh",
@@ -150,11 +164,25 @@
 # Ignore some packages installing into /run for now.
 # NB: Do *not* add more packages here.
 BAD_RUN_PACKAGES = {
-    "app-accessibility/brltty",
-    "net-fs/samba",
+    ("app-accessibility/brltty", "6.5"),
 }
 
 
+def is_edgeos():
+    """Check if the ebuild has declared itself an EdgeOS ebuild."""
+    # True if $USE_EDGEOS_FS_LAYOUT is set to any non-empty string.
+    return bool(os.environ.get("USE_EDGEOS_FS_LAYOUT"))
+
+
+def is_known_bad(allowlist):
+    """See if the current package is allowed."""
+    atom = get_current_package()
+    if atom in allowlist:
+        return True
+
+    return (atom, os.environ.get("PV")) in allowlist
+
+
 def has_subdirs(path):
     """See if |path| has any subdirs."""
     # These checks are helpful for manually running the script when debugging.
@@ -180,7 +208,7 @@
     return False
 
 
-def check_usr(usr, host=False):
+def check_usr(usr, host=False, edgeos=False):
     """Check the /usr filesystem at |usr|."""
     ret = True
 
@@ -188,7 +216,6 @@
     if not os.path.exists(usr):
         return ret
 
-    atom = get_current_package()
     paths = set(os.listdir(usr))
     unknown = paths - VALID_USR
     for target in KNOWN_TARGETS:
@@ -196,15 +223,14 @@
     if host:
         unknown -= VALID_HOST_USR
 
-        if atom in BAD_HOST_USR_LOCAL_PACKAGES:
+        if is_known_bad(BAD_HOST_USR_LOCAL_PACKAGES):
             logging.warning("Ignoring known bad /usr/local install for now")
             unknown -= {"local"}
     else:
         unknown -= VALID_BOARD_USR
 
-        if atom in {"chromeos-base/ap-daemons"}:
-            logging.warning("Ignoring known bad /usr install for now")
-            unknown -= {"www"}
+    if edgeos:
+        unknown -= VALID_EDGEOS_USR
 
     if unknown:
         logging.error(
@@ -222,11 +248,10 @@
     return ret
 
 
-def check_root(root, host=False):
+def check_root(root, host=False, edgeos=False):
     """Check the filesystem |root|."""
     ret = True
 
-    atom = get_current_package()
     paths = set(os.listdir(root))
     unknown = paths - VALID_ROOT
     if host:
@@ -234,6 +259,9 @@
     else:
         unknown -= VALID_BOARD_ROOT
 
+    if edgeos:
+        unknown -= VALID_EDGEOS_ROOT
+
     if unknown:
         logging.error(
             "Paths are not allowed in the root dir:\n  %s\n  |-- %s",
@@ -249,7 +277,7 @@
 
     # Special case /var due to so many misuses currently.
     if os.path.exists(os.path.join(root, "var")):
-        if atom in BAD_VAR_PACKAGES:
+        if is_known_bad(BAD_VAR_PACKAGES):
             logging.warning("Ignoring known bad /var install for now")
         elif os.environ.get("PORTAGE_REPO_NAME") == "portage-stable":
             logging.warning(
@@ -263,14 +291,14 @@
             )
             ret = False
     else:
-        if atom in BAD_VAR_PACKAGES:
+        if is_known_bad(BAD_VAR_PACKAGES):
             logging.warning(
                 "Package has improved; please update BAD_VAR_PACKAGES"
             )
 
     # Special case /run due to so many misuses currently.
     if os.path.exists(os.path.join(root, "run")):
-        if atom in BAD_RUN_PACKAGES:
+        if is_known_bad(BAD_RUN_PACKAGES):
             logging.warning("Ignoring known bad /run install for now")
         elif os.environ.get("PORTAGE_REPO_NAME") == "portage-stable":
             logging.warning(
@@ -284,12 +312,12 @@
             )
             ret = False
     else:
-        if atom in BAD_RUN_PACKAGES:
+        if is_known_bad(BAD_RUN_PACKAGES):
             logging.warning(
                 "Package has improved; please update BAD_RUN_PACKAGES"
             )
 
-    if not check_usr(os.path.join(root, "usr"), host):
+    if not check_usr(os.path.join(root, "usr"), host, edgeos):
         ret = False
 
     return ret
@@ -343,7 +371,7 @@
         else:
             opts.host = not bool(os.getenv("SYSROOT"))
 
-    if not check_root(opts.root, opts.host):
+    if not check_root(opts.root, opts.host, is_edgeos()):
         logging.critical(
             "Package '%s' does not conform to CrOS's filesystem conventions. "
             "Please review the paths flagged above and adjust its layout.",
diff --git a/hooks/install/check-upstart-scripts.sh b/hooks/install/check-upstart-scripts.sh
index 1a756ff..7cfdba1 100755
--- a/hooks/install/check-upstart-scripts.sh
+++ b/hooks/install/check-upstart-scripts.sh
@@ -21,13 +21,6 @@
   app-accessibility/googletts|\
   app-benchmarks/bootchart|\
   app-crypt/trousers|\
-  chromeos-base/actions|\
-  chromeos-base/ap-daemons|\
-  chromeos-base/ap-infra|\
-  chromeos-base/ap-net|\
-  chromeos-base/ap-scm|\
-  chromeos-base/ap-security|\
-  chromeos-base/ap-wireless|\
   chromeos-base/apollo-ptp|\
   chromeos-base/arc-adbd|\
   chromeos-base/arc-apk-cache|\
@@ -49,7 +42,6 @@
   chromeos-base/arcvm-vsock-proxy|\
   chromeos-base/atrusctl|\
   chromeos-base/attestation|\
-  chromeos-base/authpolicy|\
   chromeos-base/biod|\
   chromeos-base/bluetooth|\
   chromeos-base/bootcomplete-embedded|\
@@ -114,7 +106,6 @@
   chromeos-base/factory_installer|\
   chromeos-base/fastrpc|\
   chromeos-base/feedback|\
-  chromeos-base/gdisp|\
   chromeos-base/goldfishd|\
   chromeos-base/hammerd|\
   chromeos-base/hermes|\
@@ -129,7 +120,6 @@
   chromeos-base/lorgnette|\
   chromeos-base/metrics|\
   chromeos-base/midis|\
-  chromeos-base/ml|\
   chromeos-base/modemfwd|\
   chromeos-base/modemfwd-helpers-coral|\
   chromeos-base/modemfwd-helpers-dedede|\
@@ -139,7 +129,6 @@
   chromeos-base/modemfwd-helpers-octopus|\
   chromeos-base/modemfwd-helpers-sarien|\
   chromeos-base/modemfwd-helpers-zork|\
-  chromeos-base/mri_package|\
   chromeos-base/mtpd|\
   chromeos-base/nodejs-scripts|\
   chromeos-base/oobe_config|\
@@ -150,12 +139,9 @@
   chromeos-base/pdfc-scripts|\
   chromeos-base/permission_broker|\
   chromeos-base/quickoffice|\
-  chromeos-base/rialto-cellular-autoconnect|\
-  chromeos-base/rialto-modem-watchdog|\
   chromeos-base/runtime_probe|\
   chromeos-base/sirenia|\
   chromeos-base/smbprovider|\
-  chromeos-base/swap-init|\
   chromeos-base/thermald|\
   chromeos-base/timberslide|\
   chromeos-base/tpm_manager|\
@@ -168,10 +154,6 @@
   chromeos-base/viking-hid|\
   chromeos-base/virtual-file-provider|\
   chromeos-base/virtual-usb-printer|\
-  chromeos-base/vm_host_tools|\
-  chromeos-base/vpd|\
-  chromeos-base/weaveauth|\
-  chromeos-base/whining|\
   dev-util/hdctools|\
   media-libs/arc-camera-service|\
   media-libs/cros-camera-libcab|\
@@ -210,8 +192,7 @@
   sys-power/dptf|\
   sys-process/audit|\
   virtual/chromeos-bootcomplete|\
-  virtual/chromeos-firewall|\
-  virtual/target-jetstream-test-root)
+  virtual/chromeos-firewall)
     return 0
     ;;
   chromeos-base/arc-keymaster)  # We don't control the package name.  nocheck
@@ -225,7 +206,7 @@
 # Require an oom score line.
 check_oom() {
   local config="$1"
-  local relconfig="${config#${D}}"
+  local relconfig="${config#"${D}"}"
 
   if ! grep -q '^oom score ' "${config}"; then
     local msg="${relconfig}: missing 'oom score' line."
diff --git a/hooks/install/find-missing-deps.sh b/hooks/install/find-missing-deps.sh
new file mode 100755
index 0000000..21df71d
--- /dev/null
+++ b/hooks/install/find-missing-deps.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2023 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Generate an error if a cros-workon-ed ebuild has missing dependencies.
+
+# Set ebuild vars to make shellcheck happy.
+: "${PV:=}" "${SYSROOT:=/}" "${T:=}"
+
+if [[ "${PV}" == "9999" ]]; then
+  args=()
+  if [[ "${SYSROOT}" == "/build/"* ]]; then
+    args+=( "--board=${SYSROOT##*/}" )
+  fi
+  args+=( --match --build-info="${T}/../build-info/" )
+  einfo "Checking dependencies: ${args[*]}"
+  /mnt/host/source/chromite/scripts/package_has_missing_deps \
+    "${args[@]}" || die 'found missing dependencies'
+fi
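The --board argument above is derived from the sysroot path by stripping everything through the last slash; a small illustration with a hypothetical sysroot:

  SYSROOT="/build/amd64-generic"
  echo "--board=${SYSROOT##*/}"   # prints: --board=amd64-generic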
diff --git a/hooks/install/large-file-support.sh b/hooks/install/large-file-support.sh
index 57be7e1..aeef225 100755
--- a/hooks/install/large-file-support.sh
+++ b/hooks/install/large-file-support.sh
@@ -183,29 +183,39 @@
   # TODO(b/258669199): Ignore Rust packages for now.
   case "${CATEGORY}/${PN}:${PV}" in
   chromeos-base/crosvm:*|\
+  chromeos-base/chunnel:*|\
+  dev-rust/s9:*|\
+  chromeos-base/factory_fai:*|\
+  chromeos-base/ippusb_bridge:*|\
   chromeos-base/hwsec-utils:*|\
   chromeos-base/resourced:*|\
+  media-gfx/deqp-runner:*|\
   media-sound/adhd:*|\
   media-sound/audio_processor:*|\
   media-sound/audio_streams_conformance_test:*|\
   media-sound/cras-client:*|\
   media-sound/cras_rust:*|\
   media-sound/cras_tests:*|\
-  sys-apps/kexec-lite:*)
+  sys-apps/kexec-lite:*|\
+  sys-firmware/sunplus-fwverify:*)
     return 0
     ;;
   esac
 
   # Packages in upstream discussion.  Must link to an upstream tracker.
   case "${CATEGORY}/${PN}:${PV}" in
-  dev-libs/expat:2.5*) return 0;;  # https://bugs.gentoo.org/904190
+  # https://bugs.gentoo.org/904190
+  dev-libs/expat:2.5*) return 0;;
+  # https://github.com/alsa-project/alsa-lib/pull/333
+  media-libs/alsa-lib:*) return 0;;
+  # https://github.com/alsa-project/alsa-utils/pull/223
+  media-sound/alsa-utils:*) return 0;;
   esac
 
   # Do not add more packages here!
   case "${CATEGORY}/${PN}:${PV}" in
   app-accessibility/brltty:6.3|\
   app-admin/sysstat:11.7.4|\
-  app-arch/brotli:1.0.9|\
   app-benchmarks/blktests:20190430|\
   app-benchmarks/blogbench:1.1.20200218|\
   app-benchmarks/bootchart:0.9.2|\
@@ -239,7 +249,6 @@
   chromeos-base/autotest-tests-graphics:0.0.1|\
   chromeos-base/chromeos-chrome:*|\
   chromeos-base/chromeos-cr50-dev:0.0.1|\
-  chromeos-base/chunnel:0.1.0|\
   chromeos-base/crash-reporter:0.0.1|\
   chromeos-base/cronista:0.24.52|\
   chromeos-base/cros-camera:0.0.1|\
@@ -259,7 +268,6 @@
   chromeos-base/google-breakpad:2023.0[12]*|\
   chromeos-base/hps-firmware-tools:0.0.1|\
   chromeos-base/infineon-firmware-updater:1.1.2459.0|\
-  chromeos-base/ippusb_bridge:0.0.1|\
   chromeos-base/libevdev:0.0.1|\
   chromeos-base/libhwsec:0.0.1|\
   chromeos-base/manatee-runtime:0.1.0|\
@@ -283,15 +291,12 @@
   chromeos-base/wacom_fw_flash:1.4.0|\
   chromeos-base/weida_wdt_util:0.9.9|\
   dev-cpp/abseil-cpp:20211102.0|\
-  dev-cpp/gflags:2.2.0|\
-  dev-cpp/gtest:1.10.0|\
   dev-embedded/dfu-programmer:0.7.2|\
   dev-lang/tcl:8.6.12|\
   dev-libs/boost:1.79.0|\
   dev-libs/confuse:2.7|\
   dev-libs/flatbuffers:2.0.0|\
   dev-libs/fribidi:1.0.9|\
-  dev-libs/hidapi:0.8.0*|\
   dev-libs/iniparser:3.1|\
   dev-libs/json-c:0.14|\
   dev-libs/leveldb:1.23|\
@@ -325,17 +330,12 @@
   dev-python/python-uinput:0.11.2|\
   dev-python/selenium:3.0.2|\
   dev-rust/bindgen:0.59.2|\
-  dev-rust/s9:0.1.0|\
   dev-util/android-tools:9.0.0_p3|\
   dev-util/apitrace:9.0|\
-  dev-util/cmocka:1.1.5|\
-  dev-util/glslang:1.3.211|\
   dev-util/hdctools:0.0.1|\
   dev-rust/manatee-client:0.24.52|\
   dev-util/perf:5.15*|\
   dev-util/rt-tests:2.2|\
-  dev-util/spirv-tools:1.3.211|\
-  dev-util/vulkan-tools:1.3.211|\
   dev-util/xdelta:3.0.11|\
   dev-util/xxd:1.10|\
   games-util/joystick:1.4.2|\
@@ -345,7 +345,6 @@
   media-gfx/qrencode:3.4.4|\
   media-gfx/"sa"ne-backends:1.1.1|\
   media-gfx/zbar:0.23.1|\
-  media-libs/alsa-lib:1.2.1.2|\
   media-libs/clvk:0.0.1|\
   media-libs/cros-camera-hal-qti:0.0.1|\
   media-libs/cros-camera-libfs:0.0.1|\
@@ -369,19 +368,14 @@
   media-libs/mesa-img:21.3*|\
   media-libs/opencl-cts:0.0.1|\
   media-libs/opencv:4.5.5|\
-  media-libs/openh264:2.1.1|\
-  media-libs/openjpeg:2.3.0|\
   media-libs/qti-7c-camera-bins:20220401|\
   media-libs/rockchip-isp1-3a-libs-bin:2018.06.28|\
   media-libs/sbc:1.3|\
   media-libs/shaderc:2022.1|\
   media-libs/skia:106|\
   media-libs/tiff:4.3.0|\
-  media-libs/vulkan-layers:1.3.211|\
-  media-libs/vulkan-loader:1.3.211|\
   media-libs/waffle:1.6.0|\
   media-plugins/alsa-plugins:1.1.6|\
-  media-sound/alsa-utils:1.2.1|\
   media-sound/gsm:1.0.13|\
   media-sound/sound_card_init:*|\
   media-video/yavta:0.0.1|\
@@ -436,7 +430,7 @@
   net-wireless/crda:3.18|\
   net-wireless/floss:0.0.2|\
   net-wireless/hostapd:2.11_pre|\
-  net-wireless/iw:5.9|\
+  net-wireless/iw:5.19|\
   net-wireless/wireless-tools:30_pre9|\
   net-wireless/wpa_supplicant-cros:2.11_pre|\
   sci-geosciences/gpsd:3.17|\
@@ -470,7 +464,7 @@
   sys-auth/pam_pwdfile:0.99|\
   sys-cluster/libqb:0.17.2|\
   sys-devel/bc:1.07.1|\
-  sys-devel/binutils:2.36.1|\
+  sys-devel/binutils:2.39*|\
   sys-devel/flex:2.6.4|\
   sys-devel/gdb:9.2.20200923|\
   sys-devel/llvm:12.0.1|\
@@ -507,7 +501,10 @@
     return
   fi
 
-  files=$(scanelf -F '%s %p' -qyRgs "-${SYMBOLS_REGEX}" "$@")
+  # Exclude /build, since such files don't go into the final image.
+  files="$(for d in "$@"; do \
+           find "${d}" -path "${d}/build" -prune -o -type f -print0; done | \
+           xargs -0 scanelf -F '%s %p' -qyRgs "-${SYMBOLS_REGEX}")"
   if [[ -n "${files}" ]]; then
     echo
     eqawarn "QA Notice: The following files were not built with LFS support:"
diff --git a/hooks/install/sbom_info_lib/download_url.py b/hooks/install/sbom_info_lib/download_url.py
index cd97fd6..74ecce9 100644
--- a/hooks/install/sbom_info_lib/download_url.py
+++ b/hooks/install/sbom_info_lib/download_url.py
@@ -143,7 +143,10 @@
 def is_uri_valid(uri):
     if not uri.strip().startswith("http"):
         return False
-    request = requests.get(uri, stream=True)
+    try:
+        request = requests.get(uri, stream=True)
+    except requests.RequestException:
+        return False
     if request.status_code == 200:
         return True
     return False
diff --git a/hooks/install/sbom_info_lib/licenses.py b/hooks/install/sbom_info_lib/licenses.py
index d430a0a..f219748 100644
--- a/hooks/install/sbom_info_lib/licenses.py
+++ b/hooks/install/sbom_info_lib/licenses.py
@@ -12,8 +12,9 @@
 
 # This script is used to parse licenses of a package.
 
-import re
+import json
 import os
+import re
 from chromite.lib import cros_build_lib
 from chromite.licensing import licenses_lib
 from sbom_info_lib import license_data
@@ -131,6 +132,35 @@
     return found, saved_scanned_txt, saved_license_files
 
 
+# Parse license.json.
+def parse_license_json(data, pkg_name):
+    parsed_data = json.loads(data)
+    # Try to find scanned license text in license.json.
+    saved_scanned_txt = ", ".join(parsed_data["license_text_scanned"])
+    saved_license_files = []
+    license_file_match = re.findall(
+        REGEX_LICENSE_FILE_NAME, saved_scanned_txt, re.DOTALL
+    )
+    for n in license_file_match:
+        saved_license_files.append(n.strip())
+
+    # Try to find scanned license names in license.json.
+    found = []
+    licenses = parsed_data["license_names"]
+    for license in licenses:
+        # Being in the public domain is not a license.
+        if not license or license == "public-domain" or license == "metapackage":
+            continue
+        if license in LICENSE_MAP:
+            license = LICENSE_MAP[license]
+        found.append(license)
+    # There are cases where license.json contains no license names,
+    # only scanned license text, e.g. dev-libs/libpcre.
+    if not found and saved_license_files:
+        found.append(pkg_name)
+    return found, saved_scanned_txt, saved_license_files
+
+
 def extract_other_licenses(licenses, src_path, saved_scanned_txt, saved_license_files):
     # other_license_list format: [
     # {
@@ -262,12 +292,20 @@
 
 
 def get_licenses(build_info_dir, src_path, pkg_name):
-    if not os.path.exists(os.path.join(build_info_dir, "license.yaml")):
+    has_yaml = os.path.exists(os.path.join(build_info_dir, "license.yaml"))
+    has_json = os.path.exists(os.path.join(build_info_dir, "license.json"))
+    if not has_yaml and not has_json:
         return ""
-    with open(os.path.join(build_info_dir, "license.yaml"), "r") as l:
-        licenses, saved_scanned_txt, saved_license_files = parse_license_yaml(
-            l.read(), pkg_name
-        )
+    if has_json:
+        with open(os.path.join(build_info_dir, "license.json"), "r") as l:
+            licenses, saved_scanned_txt, saved_license_files = parse_license_json(
+                l.read(), pkg_name
+            )
+    elif has_yaml:
+        with open(os.path.join(build_info_dir, "license.yaml"), "r") as l:
+            licenses, saved_scanned_txt, saved_license_files = parse_license_yaml(
+                l.read(), pkg_name
+            )
 
     other_license_list = extract_other_licenses(
         licenses, src_path, saved_scanned_txt, saved_license_files
diff --git a/make_netboot.sh b/make_netboot.sh
index 9f16107..fff7ebd 100755
--- a/make_netboot.sh
+++ b/make_netboot.sh
@@ -10,13 +10,14 @@
 # included as initramfs. Generated image, along with the netboot firmware
 # are placed in a "netboot" subfolder.
 
-SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+SCRIPT_ROOT="$(dirname "$(readlink -f "$0")")"
 # shellcheck source=build_library/build_common.sh
 . "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
 
 # Script must be run inside the chroot.
 restart_in_chroot_if_needed "$@"
 
+# shellcheck disable=SC2154
 DEFINE_string board "${DEFAULT_BOARD}" \
   "The board to build an image for."
 DEFINE_string image_dir "" "Path to the folder to store netboot images."
@@ -25,8 +26,7 @@
 FLAGS "$@" || exit 1
 eval set -- "${FLAGS_ARGV}"
 
-# shellcheck source=build_library/build_common.sh
-. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
+# shellcheck disable=SC2154
 # shellcheck source=build_library/board_options.sh
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
 
@@ -36,50 +36,48 @@
 # build_image artifact output.
 
 if [ -n "${FLAGS_image_dir}" ]; then
-  cd ${FLAGS_image_dir}
+  cd "${FLAGS_image_dir}" || die
 else
-  cd "${CHROOT_TRUNK_DIR}"/src/build/images/"${FLAGS_board}"/latest
+  # shellcheck disable=SC2154
+  cd "${CHROOT_TRUNK_DIR}/src/build/images/${FLAGS_board}/latest" || die
 fi
 
 # Generate staging dir for netboot files.
-sudo rm -rf netboot
-mkdir -p netboot
+info_run sudo rm -rf netboot
+info_run mkdir -p netboot
 
 # Get netboot firmware.
 FIRMWARE_PATTERN="firmware/image*.net.bin"
+# shellcheck disable=SC2206
 FIRMWARE_PATHS=("${SYSROOT}"/${FIRMWARE_PATTERN})
 # When there is no netboot firmware found, filename expansion fails and the
 # array still contains the original pattern string, so we need to check if the
 # first file in the array actually exists to know if we find any firmware.
 if [ -e "${FIRMWARE_PATHS[0]}" ]; then
+  info "Copying netboot firmware"
   for firmware_path in "${FIRMWARE_PATHS[@]}"; do
-    echo "Copying netboot firmware ${firmware_path}..."
-    cp -v "${firmware_path}" netboot/
+    info_run cp "${firmware_path}" netboot/
   done
 else
-  echo "Skipping netboot firmware: ${SYSROOT}/${FIRMWARE_PATTERN} not present?"
+  warn "Skipping netboot firmware: ${SYSROOT}/${FIRMWARE_PATTERN} not present?"
 fi
 
 # Create temporary emerge root
-temp_build_path="$(mktemp -d bk_XXXXXXXX)"
-if ! [ -d "${temp_build_path}" ]; then
-  echo "Failed to create temporary directory."
-  exit 1
-fi
+temp_build_path="$(mktemp -d bk_XXXXXXXX)" || die "Failed to create tempdir."
 
 # Build initramfs network boot image
-echo "Building kernel"
-export USE="fbconsole vtconsole factory_netboot_ramfs tpm i2cdev vfat"
+info "Building kernel"
+export USE="fbconsole vtconsole factory_netboot_ramfs i2cdev vfat"
 export EMERGE_BOARD_CMD="emerge-${FLAGS_board}"
-emerge_custom_kernel ${temp_build_path}
+emerge_custom_kernel "${temp_build_path}"
 
 # Place kernel image under 'netboot'
 KERNEL_PATH="/boot/vmlinuz"
 if [ -f "${temp_build_path}${KERNEL_PATH}" ]; then
-  echo "Generating netboot kernel ${KERNEL_PATH}"
-  cp -v "${temp_build_path}${KERNEL_PATH}" netboot/
+  info "Generating netboot kernel ${KERNEL_PATH}"
+  info_run cp "${temp_build_path}${KERNEL_PATH}" netboot/
 else
-  echo "No ${KERNEL_PATH} found in your board."
+  warn "No ${KERNEL_PATH} found in your board."
 fi
 
-sudo rm -rf "${temp_build_path}"
+info_run sudo rm -rf "${temp_build_path}"
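The firmware detection above depends on default shell globbing (nullglob off): an unquoted pattern that matches nothing is kept verbatim as the array's only element, so testing that element with -e distinguishes "firmware found" from "no firmware". A reduced sketch with a hypothetical sysroot:

  SYSROOT="/build/someboard"                 # hypothetical path
  FIRMWARE_PATTERN="firmware/image*.net.bin"
  FIRMWARE_PATHS=("${SYSROOT}"/${FIRMWARE_PATTERN})
  if [ -e "${FIRMWARE_PATHS[0]}" ]; then
    echo "copying ${#FIRMWARE_PATHS[@]} netboot firmware image(s)"
  else
    echo "no netboot firmware under ${SYSROOT}"
  fi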
diff --git a/mod_for_test_scripts/340enableFwupdDummy b/mod_for_test_scripts/340enableFwupdDummy
index 6534a32..f567490 100755
--- a/mod_for_test_scripts/340enableFwupdDummy
+++ b/mod_for_test_scripts/340enableFwupdDummy
@@ -4,17 +4,19 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-if [[ ! -f ${ROOT_FS_DIR}/etc/fwupd/daemon.conf ]]; then
+CONFIG_FILE="${ROOT_FS_DIR}/etc/fwupd/fwupd.conf"
+
+if [[ ! -f ${CONFIG_FILE} ]]; then
   exit 0
 fi
 
 echo "Enabling verbose debug output for fwupd."
 
-sed -e 's/^\(VerboseDomains=\).*/\1*/' -i "${ROOT_FS_DIR}/etc/fwupd/daemon.conf"
+sed -e 's/^\(VerboseDomains=\).*/\1*/' -i "${CONFIG_FILE}"
 
 echo "Enabling dummy fwupd remote for tests."
 
-sed -e 's/^\(DisabledPlugins=\).*/\1/' -i "${ROOT_FS_DIR}/etc/fwupd/daemon.conf"
+sed -e 's/^\(DisabledPlugins=\).*/\1/' -i "${CONFIG_FILE}"
 
 cat > "${ROOT_FS_DIR}/usr/share/fwupd/remotes.d/vendor/fwupd-tests.xml" <<EOF
 <?xml version="1.0" encoding="UTF-8"?>
@@ -54,6 +56,17 @@
             <checksum type="sha256">3fab34cfa1ef97238fb24c5e40a979bc544bb2b0967b863e43e7d58e0d9a923f</checksum>
             <size type="installed">10</size>
             <size type="download">24493</size>
+            <testing>
+              <test_result date="2023-06-22">
+                <vendor_name id="1">LVFS</vendor_name>
+                <device>Google Voxel</device>
+                <os version="113">chromeos</os>
+                <previous_version>1.2.3</previous_version>
+                <custom>
+                  <value key="RuntimeVersion(org.freedesktop.fwupd)">1.8.12</value>
+                </custom>
+              </test_result>
+            </testing>
           </artifact>
         </artifacts>
       </release>
diff --git a/mod_for_test_scripts/350addEnvironment b/mod_for_test_scripts/350addEnvironment
index 180408d..6243472 100755
--- a/mod_for_test_scripts/350addEnvironment
+++ b/mod_for_test_scripts/350addEnvironment
@@ -4,6 +4,10 @@
 # found in the LICENSE file.
 #
 # The 'su' command requires a correct PATH setting in /etc/environment.
-touch ${ROOT_FS_DIR}/etc/environment
+touch "${ROOT_FS_DIR}"/etc/environment
 echo "PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin" \
   >> "${ROOT_FS_DIR}/etc/environment"
+
+# INTEL_DEBUG is a mesa flag to capture more logs when we see a GPU hang.
+# https://docs.mesa3d.org/envvars.html
+echo "INTEL_DEBUG=capture-all" >> "${ROOT_FS_DIR}/etc/environment"
diff --git a/remote_access.sh b/remote_access.sh
index ef845af..f70245e 100644
--- a/remote_access.sh
+++ b/remote_access.sh
@@ -10,6 +10,7 @@
   ;;
 *)
   echo "remote_access.sh: This script will be removed by July 2023." >&2
+  exit 1
   ;;
 esac
 
diff --git a/sdk_lib/enter_chroot.sh b/sdk_lib/enter_chroot.sh
index 4b7bbb9..43edddd 100755
--- a/sdk_lib/enter_chroot.sh
+++ b/sdk_lib/enter_chroot.sh
@@ -20,6 +20,7 @@
 # See http://code.google.com/p/shflags/wiki/Documentation10x
 DEFINE_string chroot "${DEFAULT_CHROOT_DIR}" \
   "The destination dir for the chroot environment." "d"
+# shellcheck disable=SC2034 # Mostly here for plumbing. Not used.
 DEFINE_string out_dir "${DEFAULT_OUT_DIR}" \
   "The destination dir for build output and state."
 DEFINE_string trunk "${GCLIENT_ROOT}" \
@@ -95,6 +96,7 @@
 
   .gdata_cred.txt             # User/password for Google Docs on chromium.org
   .gdata_token                # Auth token for Google Docs on chromium.org
+  .googleapikeys              # Google API keys for Chrome
   .goma_client_oauth2_config  # Auth token for Goma
   .inputrc                    # Preserve command line customizations
 )
@@ -127,14 +129,6 @@
   cp -p "$@" 2>/dev/null || sudo -u "${SUDO_USER}" -- cp -p "$@"
 }
 
-# Appends stdin to the given file name as the sudo user.
-#
-# $1 - The output file name.
-user_append() {
-  cat >> "$1"
-  chown "${SUDO_UID}:${SUDO_GID}" "$1"
-}
-
 # Create the specified directory, along with parents, as the sudo user.
 #
 # $@ - The directories to create.
@@ -290,35 +284,6 @@
   fi
 }
 
-# Usage: promote_api_keys
-# This takes care of getting the developer API keys into the chroot where
-# chrome can build with them.  It needs to take it from the places a dev
-# is likely to put them, and recognize that older chroots may or may not
-# have been used since the concept of keys got added, as well as before
-# and after the developer deciding to grab their own keys.
-promote_api_keys() {
-  local destination="${FLAGS_chroot}/home/${SUDO_USER}/.googleapikeys"
-  # Don't disturb existing keys.  They could be set differently
-  if [[ -s "${destination}" ]]; then
-    return 0
-  fi
-  if [[ -r "${SUDO_HOME}/.googleapikeys" ]]; then
-    cp -p "${SUDO_HOME}/.googleapikeys" "${destination}"
-    if [[ -s "${destination}" ]] ; then
-      info "Copied Google API keys into chroot."
-    fi
-  elif [[ -r "${SUDO_HOME}/.gyp/include.gypi" ]]; then
-    local NAME="('google_(api_key|default_client_(id|secret))')"
-    local WS="[[:space:]]*"
-    local CONTENTS="('[^\\\\']*')"
-    sed -nr -e "/^${WS}${NAME}${WS}[:=]${WS}${CONTENTS}.*/{s//\1: \4,/;p;}" \
-         "${SUDO_HOME}/.gyp/include.gypi" | user_clobber "${destination}"
-    if [[ -s "${destination}" ]]; then
-      info "Put discovered Google API keys into chroot."
-    fi
-  fi
-}
-
 git_config() {
   USER="${SUDO_USER:-${USER}}" \
   HOME="${SUDO_HOME:-${HOME}}" \
@@ -482,23 +447,6 @@
     setup_mount "${FLAGS_cache_dir}" "${chroot_cache}"
     # Create /var/log/asan directory (b/222311476).
     user_mkdir "${FLAGS_chroot}/var/log/asan"
-    # TODO(build): remove this as of 12/01/12.
-    # Because of how distfiles -> cache_dir was deployed, if this isn't
-    # a symlink, we *know* the ondisk pathways aren't compatible- thus
-    # fix it now.
-    distfiles_path="${FLAGS_chroot}/var/cache/distfiles"
-    if [ ! -L "${distfiles_path}" ]; then
-      # While we're at it, ensure the var is exported w/in the chroot; it
-      # won't exist if distfiles isn't a symlink.
-      p="${FLAGS_chroot}/etc/profile.d/chromeos-cachedir.sh"
-      rm -rf "${distfiles_path}"
-      ln -s chromeos-cache/distfiles "${distfiles_path}"
-      # shellcheck disable=SC2174
-      mkdir -p -m 775 "${p%/*}"
-      # shellcheck disable=SC2016
-      echo 'export CHROMEOS_CACHEDIR=${chroot_cache}' > "${p}"
-      chmod 0644 "${p}"
-    fi
 
     if [ -d "${SUDO_HOME}/.cidb_creds" ]; then
       setup_mount "${SUDO_HOME}/.cidb_creds" \
@@ -631,9 +579,7 @@
     copy_into_chroot_if_exists "${log_cert_dir}${log_cert_file}" \
       "/home/${SUDO_USER}/${log_cert_file}"
 
-
     setup_git
-    promote_api_keys
 
     # Fix permissions on shared memory to allow non-root users access to POSIX
     # semaphores. Take special care to only change the permissions on the
@@ -661,7 +607,7 @@
     boto='src/private-overlays/chromeos-overlay/googlestorage_account.boto'
     if [ -s "${FLAGS_trunk}/${boto}" ]; then
       if [ ! -e "${chroot_user_boto}" ]; then
-        user_symlink "trunk/${boto}" "${chroot_user_boto}"
+        user_symlink "${CHROOT_TRUNK_DIR}/${boto}" "${chroot_user_boto}"
       fi
       if [ ! -e "${chroot_root_boto}" ]; then
         ln -sf "${CHROOT_TRUNK_DIR}/${boto}" "${chroot_root_boto}"
@@ -739,6 +685,12 @@
   # See also pivot_root(".", ".") section of pivot_roo(2) man page.
   cd "${FLAGS_chroot}" || exit 1
   pivot_root . .
+  # After pivot_root, we're running inside the CrOS SDK.  Reset PATH to match,
+  # so we don't rely on the host distro's PATH bleeding in and requiring it to
+  # be compatible. Once PATH has changed, force bash to clear its lookup cache
+  # in case a program is later installed in a different location.
+  PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin"
+  hash -r
   umount -l .
   chroot="."
 else
diff --git a/sdk_lib/make_chroot.sh b/sdk_lib/make_chroot.sh
index 50f83ca..43b05ae 100755
--- a/sdk_lib/make_chroot.sh
+++ b/sdk_lib/make_chroot.sh
@@ -9,11 +9,11 @@
 # setup for development. Once created, the password is set to PASSWORD (below).
 # One can enter the chrooted environment for work by running enter_chroot.sh.
 
-SCRIPT_ROOT=$(readlink -f $(dirname "$0")/..)
+SCRIPT_ROOT=$(readlink -f "$(dirname "$0")/..")
 # shellcheck source=../common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
 
-ENTER_CHROOT=$(readlink -f $(dirname "$0")/enter_chroot.sh)
+ENTER_CHROOT=$(readlink -f "$(dirname "$0")/enter_chroot.sh")
 
 if [ -n "${USE}" ]; then
   echo "$SCRIPT_NAME: Building with a non-empty USE: ${USE}"
@@ -41,7 +41,6 @@
 DEFINE_boolean usepkg $FLAGS_TRUE "Use binary packages to bootstrap."
 DEFINE_integer jobs -1 "How many packages to build in parallel at maximum."
 DEFINE_string cache_dir "" "Directory to store caches within."
-DEFINE_boolean eclean "${FLAGS_TRUE}" "Run eclean to delete old binpkgs."
 
 # Parse command line flags.
 FLAGS_HELP="usage: $SCRIPT_NAME [flags]"
@@ -110,14 +109,6 @@
     chroot "${FLAGS_chroot}" "$@"
 }
 
-# Appends stdin to the given file name as the sudo user.
-#
-# $1 - The output file name.
-user_append() {
-  cat >> "$1"
-  chown ${SUDO_UID}:${SUDO_GID} "$1"
-}
-
 init_setup () {
    info "Running init_setup()..."
    mkdir -p -m 755 "${FLAGS_chroot}/usr" \
@@ -127,7 +118,7 @@
    cat <<EOF > "${FLAGS_chroot}/${CROSSDEV_OVERLAY}/metadata/layout.conf"
 # Autogenerated and managed by crossdev
 # Delete the above line if you want to manage this file yourself
-masters = portage-stable chromiumos
+masters = portage-stable eclass-overlay chromiumos
 repo-name = crossdev
 use-manifests = true
 thin-manifests = true
@@ -217,16 +208,6 @@
 # Run all the init stuff to setup the env.
 init_setup
 
-# Clean out any stale binpkgs that might be in a warm cache. This is done
-# immediately after unpacking the tarball in case ebuilds have been removed
-# (e.g. from a revert).
-if [[ "${FLAGS_eclean}" -eq "${FLAGS_TRUE}" ]]; then
-  info "Cleaning stale binpkgs"
-  early_enter_chroot /bin/bash -c '
-    source /mnt/host/source/src/scripts/common.sh &&
-    eclean -e <(get_eclean_exclusions) packages'
-fi
-
 if [[ "${FLAGS_skip_chroot_upgrade}" -eq "${FLAGS_FALSE}" ]]; then
   info "Updating portage"
   early_enter_chroot emerge -uNv --quiet --ignore-world portage
@@ -291,7 +272,7 @@
   if [[ "${FLAGS_jobs}" -ne -1 ]]; then
     UPDATE_ARGS+=( --jobs="${FLAGS_jobs}" )
   fi
-  enter_chroot "${CHROOT_TRUNK_DIR}/src/scripts/update_chroot" \
+  enter_chroot "${CHROOT_TRUNK_DIR}/chromite/bin/update_chroot" \
     "${UPDATE_ARGS[@]}"
 else
   warn "SDK and toolchain update were skipped. It will eventually stop working."
diff --git a/termina_build_image b/termina_build_image
deleted file mode 100755
index 0af0389..0000000
--- a/termina_build_image
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-# Copyright 2017 The ChromiumOS Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
-# shellcheck source=build_library/build_common.sh
-. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
-# shellcheck source=build_library/filesystem_util.sh
-. "${SCRIPT_ROOT}/build_library/filesystem_util.sh" || exit 1
-
-TERMINA_BUILD_IMAGE_PY="${SCRIPT_ROOT}/../platform/container-guest-tools/termina/termina_build_image.py"
-TERMINA_BUILD_IMAGE_PY="$(readlink -f "${TERMINA_BUILD_IMAGE_PY}")"
-
-assert_inside_chroot "$@"
-
-DEFINE_string arch "amd64" \
-  "Architecture of the VM image"
-DEFINE_string filesystem "ext4" \
-  "Filesystem for the rootfs image"
-DEFINE_string image "" \
-  "Chromium OS disk image to build the Termina image from"
-DEFINE_string output "" \
-  "Output directory"
-
-FLAGS_HELP="USAGE: ${SCRIPT_NAME} [flags]
-
-To build a tatl test image, try:
-$ ./build_image --board=tatl test
-$ ${SCRIPT_NAME} --image=../build/images/tatl/latest/chromiumos_test_image.bin --output=tatl
-"
-FLAGS "$@" || exit 1
-eval set -- "${FLAGS_ARGV}"
-switch_to_strict_mode
-
-main() {
-  warn "termina_build_image is deprecated. Please use termina_build_image.py."
-
-  if [[ -z "${FLAGS_image}" ]]; then
-    die_notrace "Please provide an image using --image"
-  elif [[ ! -f "${FLAGS_image}" ]]; then
-    die_notrace "'${FLAGS_image}' does not exist"
-  fi
-
-  if [[ "${FLAGS_arch}" != "amd64" && "${FLAGS_arch}" != "arm" ]]; then
-    die_notrace "Architecture '${FLAGS_arch}' is not valid. Options are 'amd64' and 'arm'"
-  fi
-
-  if [[ "${FLAGS_filesystem}" != "ext4" ]]; then
-    die_notrace "Filesystem '${FLAGS_filesystem}' is not valid. 'ext4' is valid."
-  fi
-
-  info "Equivalent termina_build_image.py command:"
-  info "${TERMINA_BUILD_IMAGE_PY} ${FLAGS_image} ${FLAGS_output}"
-
-  sudo "${TERMINA_BUILD_IMAGE_PY}" "${FLAGS_image}" "${FLAGS_output}"
-
-  info "Done! The resulting image is in '${FLAGS_output}'"
-}
-
-main "$@"
diff --git a/update_bootloaders.sh b/update_bootloaders.sh
index 2c3ed4d..5d37ed6 100755
--- a/update_bootloaders.sh
+++ b/update_bootloaders.sh
@@ -7,7 +7,7 @@
 # Helper script that generates the legacy/efi bootloader partitions.
 # It does not populate the templates, but can update a loop device.
 
-SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+SCRIPT_ROOT="$(dirname "$(readlink -f "$0")")"
 # shellcheck source=common.sh
 . "${SCRIPT_ROOT}/common.sh" || exit 1
 # shellcheck source=build_library/disk_layout_util.sh
@@ -64,7 +64,6 @@
 
 # shellcheck source=build_library/board_options.sh
 . "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
-load_board_specific_script "board_specific_setup.sh"
 
 part_index_to_uuid() {
   local image="$1"
@@ -72,63 +71,56 @@
   cgpt show -i "$index" -u "$image"
 }
 
-# If not provided by chromeos-common.sh, this will update all of the
-# boot loader files (both A and B) with the data pulled
-# from the kernel_partition.  The default boot target should
-# be set when the rootfs is stuffed.
-if ! type -p update_x86_bootloaders; then
-  update_x86_bootloaders() {
-    local old_root="$1"  # e.g., /dev/sd%D%P or %U+1
-    local kernel_cmdline="$2"
-    local esp_fs_dir="$3"
-    local template_dir="$4"
-    local to="$5"
+# Common logic on x86/arm64 to replace placeholders in bootloader configs.
+update_bootloaders() {
+  local old_root="$1"  # e.g., /dev/sd%D%P or %U+1
+  local kernel_cmdline="$2"
+  local esp_fs_dir="$3"
+  local template_dir="$4"
+  local to="$5"
 
-    # Pull out the dm-mod.create="" values
-    dm_table=
-    if echo "$kernel_cmdline" | grep -q 'dm-mod.create="'; then
-      dm_table=$(echo "$kernel_cmdline" | sed -s 's/.*dm-mod.create="\([^"]*\)".*/\1/')
-    fi
+  # Pull out the dm-mod.create="" values
+  dm_table=
+  if echo "$kernel_cmdline" | grep -q 'dm-mod.create="'; then
+    dm_table=$(echo "$kernel_cmdline" | sed -s 's/.*dm-mod.create="\([^"]*\)".*/\1/')
+  fi
 
-    # Discover last known partition numbers.
-    local partition_num_root_a="$(get_layout_partition_number \
-      "${FLAGS_image_type}" ROOT-A)"
-    local partition_num_root_b="$(get_layout_partition_number \
-      "${FLAGS_image_type}" ROOT-B)"
-    root_a_uuid="PARTUUID=$(part_index_to_uuid "$to" ${partition_num_root_a})"
-    root_b_uuid="PARTUUID=$(part_index_to_uuid "$to" ${partition_num_root_b})"
+  # Discover last known partition numbers.
+  local partition_num_root_a="$(get_layout_partition_number \
+    "${FLAGS_image_type}" ROOT-A)"
+  local partition_num_root_b="$(get_layout_partition_number \
+    "${FLAGS_image_type}" ROOT-B)"
+  root_a_uuid="PARTUUID=$(part_index_to_uuid "$to" ${partition_num_root_a})"
+  root_b_uuid="PARTUUID=$(part_index_to_uuid "$to" ${partition_num_root_b})"
 
-    # Rewrite grub table
-    grub_dm_table_a=${dm_table//${old_root}/${root_a_uuid}}
-    grub_dm_table_b=${dm_table//${old_root}/${root_b_uuid}}
-    sudo sed -e "s|DMTABLEA|${grub_dm_table_a}|g" \
-        -e "s|DMTABLEB|${grub_dm_table_b}|g" \
-        -e "s|/dev/\\\$linuxpartA|${root_a_uuid}|g" \
-        -e "s|/dev/\\\$linuxpartB|${root_b_uuid}|g" \
-        -e "s|HDROOTUSB|${root_a_uuid}|g" \
-        "${template_dir}"/efi/boot/grub.cfg |
-        sudo dd of="${esp_fs_dir}"/efi/boot/grub.cfg status=none
+  # Rewrite grub table
+  grub_dm_table_a=${dm_table//${old_root}/${root_a_uuid}}
+  grub_dm_table_b=${dm_table//${old_root}/${root_b_uuid}}
+  sudo sed -e "s|DMTABLEA|${grub_dm_table_a}|g" \
+      -e "s|DMTABLEB|${grub_dm_table_b}|g" \
+      -e "s|/dev/\\\$linuxpartA|${root_a_uuid}|g" \
+      -e "s|/dev/\\\$linuxpartB|${root_b_uuid}|g" \
+      -e "s|HDROOTUSB|${root_a_uuid}|g" \
+      "${template_dir}"/efi/boot/grub.cfg |
+      sudo dd of="${esp_fs_dir}"/efi/boot/grub.cfg status=none
 
+  if [[ ${FLAGS_install_syslinux} -eq ${FLAGS_TRUE} ]]; then
     # Rewrite syslinux DM_TABLE and HDROOTUSB for USB booting.
     syslinux_dm_table_usb=${dm_table//${old_root}/${root_a_uuid}}
     sed -e "s|DMTABLEA|${syslinux_dm_table_usb}|g" \
         -e "s|HDROOTUSB|${root_a_uuid}|g" \
         "${template_dir}"/syslinux/usb.A.cfg |
         sudo dd of="${esp_fs_dir}"/syslinux/usb.A.cfg status=none
+  fi
 
-    # Note DMTABLE for root.A and root.B does not need to be updated because
-    # postinst will discard all changes in EFI partition and copy from
-    # rootfs:boot/syslinux/root.?.cfg again after installation or AU, because
-    # new rootfs will be apparently different.
+  # Note DMTABLE for root.A and root.B does not need to be updated because
+  # postinst will discard all changes in the EFI partition and copy from
+  # rootfs:boot/syslinux/root.?.cfg again after installation or AU, since
+  # the new rootfs will be different.
 
-    # Copy the vmlinuz's into place for syslinux
-    sudo cp -f "${FLAGS_vmlinuz}" "${esp_fs_dir}"/syslinux/vmlinuz.A
-    sudo cp -f "${FLAGS_vmlinuz}" "${esp_fs_dir}"/syslinux/vmlinuz.B
-
-    # The only work left for the installer is to pick the correct defaults
-    # and replace HDROOTA and HDROOTB with the correct /dev/sd%D%P/%U+1
-  }
-fi
+  # The only work left for the installer is to pick the correct defaults
+  # and replace HDROOTA and HDROOTB with the correct /dev/sd%D%P/%U+1
+}
 
 if ! type -p update_arm64_bootloaders; then
   update_arm64_bootloaders() {
@@ -216,7 +208,7 @@
 trap cleanup EXIT
 sudo mount "${ESP_DEV}" "${ESP_FS_DIR}"
 
-if [[ "${FLAGS_arch}" = "x86" || "${FLAGS_arch}" = "amd64" ]]; then
+if [[ "${FLAGS_arch}" == "x86" || "${FLAGS_arch}" == "amd64" ]]; then
   # Populate the EFI bootloader configuration
   sudo mkdir -p "${ESP_FS_DIR}/efi/boot"
   sudo cp -r "${FLAGS_from}"/efi/boot/. "${ESP_FS_DIR}"/efi/boot
@@ -240,11 +232,12 @@
     info "Extracting the kernel command line from ${FLAGS_kernel_partition}"
     kernel_cfg=$(dump_kernel_config "${FLAGS_kernel_partition}")
   fi
-  update_x86_bootloaders "${old_root}" \
-                         "${kernel_cfg}" \
-                         "${ESP_FS_DIR}" \
-                         "${FLAGS_from}" \
-                         "${FLAGS_to}"
+
+  update_bootloaders "${old_root}" \
+                     "${kernel_cfg}" \
+                     "${ESP_FS_DIR}" \
+                     "${FLAGS_from}" \
+                     "${FLAGS_to}"
 
   # Install the syslinux loader on the ESP image (part 12) so it is ready when
   # we cut over from rootfs booting (extlinux).
@@ -254,10 +247,10 @@
     # mount again for cleanup to free resource gracefully
     sudo mount -o ro "${ESP_DEV}" "${ESP_FS_DIR}"
   fi
-elif [[ "${FLAGS_arch}" = "arm64" ]]; then
-  set -x
+elif [[ "${FLAGS_arch}" == "arm64" ]]; then
   # Populate the EFI bootloader configuration
   sudo mkdir -p "${ESP_FS_DIR}/efi/boot"
+  sudo cp -r "${FLAGS_from}"/efi/boot/. "${ESP_FS_DIR}"/efi/boot
 
   # Extract kernel flags
   kernel_cfg=
@@ -269,12 +262,13 @@
     info "Extracting the kernel command line from ${FLAGS_kernel_partition}"
     kernel_cfg=$(dump_kernel_config "${FLAGS_kernel_partition}")
   fi
-  update_arm64_bootloaders "${old_root}" \
-                         "${kernel_cfg}" \
-                         "${ESP_FS_DIR}" \
-                         "${FLAGS_from}" \
-                         "${FLAGS_to}"
-elif [[ "${FLAGS_arch}" = "arm" || "${FLAGS_arch}" = "mips" ]]; then
+
+  update_bootloaders "${old_root}" \
+                     "${kernel_cfg}" \
+                     "${ESP_FS_DIR}" \
+                     "${FLAGS_from}" \
+                     "${FLAGS_to}"
+elif [[ "${FLAGS_arch}" == "arm" || "${FLAGS_arch}" == "mips" ]]; then
   # Copy u-boot script to ESP partition
   if [ -r "${FLAGS_from}/boot-A.scr.uimg" ]; then
     sudo mkdir -p "${ESP_FS_DIR}/u-boot"
@@ -287,8 +281,4 @@
   fi
 fi
 
-if type board_update_bootloaders >&/dev/null; then
-  board_update_bootloaders "${BOARD_ROOT}" "${ESP_FS_DIR}"
-fi
-
 set +e
diff --git a/update_chroot b/update_chroot
index be96d9b..a8a1446 100755
--- a/update_chroot
+++ b/update_chroot
@@ -1,186 +1,17 @@
 #!/bin/bash
-
-# Copyright 2012 The ChromiumOS Authors
+# Copyright 2023 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 # shellcheck source=common.sh
 . "$(dirname "$0")/common.sh" || exit 1
 
-# Script must run inside the chroot
-assert_inside_chroot "$@"
-
-# Do not run as root
-assert_not_root_user
-
-# Developer-visible flags.
-DEFINE_boolean usepkg "${FLAGS_TRUE}" \
-  "Use binary packages to bootstrap."
-
-FLAGS_HELP="usage: $(basename "$0") [flags]
-Performs an update of the chroot. This script is called as part of
-build_packages, so there is typically no need to call this script directly.
-"
-
-# The following options are advanced options, only available to those willing
-# to read the source code. They are not shown in help output, since they are
-# not needed for the typical developer workflow.
-DEFINE_integer jobs -1 \
-  "How many packages to build in parallel at maximum."
-DEFINE_boolean skip_toolchain_update "${FLAGS_FALSE}" \
-  "Don't update the toolchains."
-DEFINE_string toolchain_boards "" \
-  "Extra toolchains to setup for the specified boards."
-DEFINE_boolean eclean "${FLAGS_TRUE}" "Run eclean to delete old binpkgs."
-DEFINE_integer backtrack 10 "See emerge --backtrack."
-
-# Parse command line flags
-FLAGS "$@" || exit 1
-eval set -- "${FLAGS_ARGV}"
-
-# Only now can we die on error.  shflags functions leak non-zero error codes,
-# so will die prematurely if 'switch_to_strict_mode' is specified before now.
-switch_to_strict_mode
-
-# shellcheck source=sdk_lib/make_conf_util.sh
-. "${SCRIPTS_DIR}"/sdk_lib/make_conf_util.sh
-
-# Run version hooks as pre-update
-run_chroot_version_hooks
-
-info "Updating chroot"
-
-# Create /etc/make.conf.host_setup.  The file content is regenerated
-# from scratch every update.  There are various reasons to do this:
-#  + It's cheap, so this is an easy way to guarantee correct content
-#    after an upgrade.
-#  + Inside make_chroot.sh, we use a temporary version of the file
-#    which must be updated before the script completes; that final
-#    update happens here.
-#  + If the repositories change to add or remove the private
-#    overlay, the file may need to be regenerated.
-create_host_setup
-
-sudo_clear_shadow_locks /
-
-# First update the cross-compilers.
-# Note that this uses binpkgs only, unless we pass --nousepkg below.
-if [ "${FLAGS_skip_toolchain_update}" -eq "${FLAGS_FALSE}" ]; then
-  info "Updating cross-compilers"
-  TOOLCHAIN_FLAGS=()
-
-  if [[ -n ${FLAGS_toolchain_boards} ]]; then
-    TOOLCHAIN_FLAGS+=(
-      "--include-boards=${FLAGS_toolchain_boards}"
-    )
-  fi
-
-  # This should really only be skipped while bootstrapping.
-  if [ "${FLAGS_usepkg}" -eq "${FLAGS_FALSE}" ]; then
-    TOOLCHAIN_FLAGS+=( --nousepkg )
-  fi
-  # Expand the path before sudo, as root doesn't have the same path magic.
-  info_run sudo -E "$(type -p cros_setup_toolchains)" "${TOOLCHAIN_FLAGS[@]}"
-fi
-
-# Make sure depot_tools is bootstrapped, so that it can build chromeos-chrome.
-info "Bootstrapping depot_tools"
-"${DEPOT_TOOLS_DIR}"/ensure_bootstrap
-
-# Clean outdated packages in SDK.
-if [[ -e ~/.config/chromite/autocop ]] || [[ "${USER}" == "chrome-bot" ]]; then
-  # Use "|| true" to not exit on errors for one command.
-  cros clean-outdated-pkgs --host || true
-fi
-
-EMERGE_CMD="${CHROMITE_BIN}/parallel_emerge"
-
-info "Rebuilding Portage cache"
-# Before running any emerge operations, regenerate the Portage dependency cache
-# in parallel.
-info_run "${EMERGE_CMD[@]}" --regen --quiet
-
-# Clean out any stale binpkgs we've accumulated. This is done immediately after
-# regenerating the cache in case ebuilds have been removed (e.g. from a revert).
-if [[ "${FLAGS_eclean}" -eq "${FLAGS_TRUE}" ]]; then
-  info "Cleaning stale binpkgs"
-  get_eclean_exclusions | sudo eclean -e /dev/stdin packages
-fi
-
-info "Updating the SDK"
-
-EMERGE_FLAGS=( -uNv --with-bdeps=y --backtrack="${FLAGS_backtrack}" )
-if [ "${FLAGS_usepkg}" -eq "${FLAGS_TRUE}" ]; then
-  EMERGE_FLAGS+=( --getbinpkg )
-
-  # Avoid building toolchain packages or "post-cross" packages from
-  # source. The toolchain rollout process only takes place when the
-  # chromiumos-sdk builder finishes a successful build.
-  PACKAGES=(
-    $("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host)
-  )
-  # Sanity check we got some valid results.
-  [[ ${#PACKAGES[@]} -eq 0 ]] && die_notrace "cros_setup_toolchains failed"
-  PACKAGES+=(
-    $("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host-post-cross)
-  )
-  EMERGE_FLAGS+=(
-    $(printf ' --useoldpkg-atoms=%s' "${PACKAGES[@]}")
-  )
-fi
-if [[ "${FLAGS_jobs}" -ne -1 ]]; then
-  EMERGE_FLAGS+=( --jobs="${FLAGS_jobs}" )
-fi
-
-# Build cros_workon packages when they are changed.
-for pkg in $("${CHROMITE_BIN}/cros_list_modified_packages" --host); do
-  EMERGE_FLAGS+=( --reinstall-atoms="${pkg}" --usepkg-exclude="${pkg}" )
-done
-
-# Regenerate docbook catalog to avoid manpage compilation errors.
-info_run sudo -E "${EMERGE_CMD}" "${EMERGE_FLAGS[@]}" \
-  app-text/docbook-xml-dtd app-text/build-docbook-catalog
-info_run sudo build-docbook-catalog
-
-# Second pass, update everything else.
-EMERGE_FLAGS+=( --deep )
-info_run sudo -E "${EMERGE_CMD}" "${EMERGE_FLAGS[@]}" virtual/target-sdk world
-
-if [ "${FLAGS_usepkg}" -eq "${FLAGS_TRUE}" ]; then
-  # Update "post-cross" packages.
-  # Use --usepkgonly to ensure that packages are not built from source.
-  EMERGE_FLAGS=( -uNv --with-bdeps=y --oneshot --getbinpkg --deep )
-  EMERGE_FLAGS+=( --usepkgonly --rebuilt-binaries=n )
-  EMERGE_FLAGS+=(
-    $("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host-post-cross)
-  )
-  sudo -E "${EMERGE_CMD}" "${EMERGE_FLAGS[@]}"
-
-  # Install nobdeps packages only when binary pkgs are available, since we don't
-  # want to accidentally pull in build deps for a rebuild.
-  EMERGE_FLAGS=( -uNv --with-bdeps=n --oneshot --getbinpkg --deep )
-  EMERGE_FLAGS+=( --usepkgonly --rebuilt-binaries=n )
-  info_run sudo -E "${EMERGE_CMD}" "${EMERGE_FLAGS[@]}" \
-    virtual/target-sdk-nobdeps
-fi
-
-# Automatically discard all CONFIG_PROTECT'ed files. Those that are
-# protected should not be overwritten until the variable is changed.
-# Autodiscard is option "-9" followed by the "YES" confirmation.
-printf '%s\nYES\n' -9 | sudo etc-update
-
-# If the user still has old perl modules installed, update them.
-"${SCRIPTS_DIR}/build_library/perl_rebuild.sh"
-
-# Deep clean any stale binpkgs. This includes any binary packages that do not
-# correspond to a currently installed package (different versions are kept).
-if [[ "${FLAGS_eclean}" -eq "${FLAGS_TRUE}" ]]; then
-  info "Deep cleaning stale binpkgs"
-  get_eclean_exclusions | sudo eclean -e /dev/stdin -d packages
-fi
-
-# Generate /usr/bin/remote_toolchain_inputs file for Reclient used by Chrome
-# for distributed builds, go/rbe/dev/x/reclient .
-info_run generate_reclient_inputs
-
-command_completed
+new_script="update_chroot"
+warn "$0: This script is deprecated and will be removed."
+warn "All users must migrate to ${new_script} in chromite/bin."
+warn "You can simply change all references of $0 to \`${new_script}\`" \
+  "from \$PATH (in chromite/bin/)."
+warn "This old script will be removed by June 2024."
+warn "If you have questions or found code that needs updating, please" \
+  "contact chromium-os-dev@, or file a bug at go/cros-build-bug."
+exec "${CHROMITE_BIN}/${new_script}" "$@"
diff --git a/update_chroot.sh b/update_chroot.sh
new file mode 100755
index 0000000..e64b4f1
--- /dev/null
+++ b/update_chroot.sh
@@ -0,0 +1,182 @@
+#!/bin/bash
+
+# Copyright 2012 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# shellcheck source=common.sh
+. "$(dirname "$0")/common.sh" || exit 1
+
+if [[ "$1" != "--script-is-run-only-by-chromite-and-not-users" ]]; then
+  die_notrace 'This script must not be run by users.' \
+    'Please run `update_chroot` instead.'
+fi
+
+# Discard the 'script-is-run-only-by-chromite-and-not-users' flag.
+shift
+
+# Script must run inside the chroot
+assert_inside_chroot "$@"
+
+# Do not run as root
+assert_not_root_user
+
+# Developer-visible flags.
+DEFINE_boolean usepkg "${FLAGS_TRUE}" \
+  "Use binary packages to bootstrap."
+
+FLAGS_HELP="usage: $(basename "$0") [flags]
+Performs an update of the chroot. This script is called as part of
+build_packages, so there is typically no need to call this script directly.
+"
+
+# The following options are advanced options, only available to those willing
+# to read the source code. They are not shown in help output, since they are
+# not needed for the typical developer workflow.
+DEFINE_integer jobs -1 \
+  "How many packages to build in parallel at maximum."
+DEFINE_boolean skip_toolchain_update "${FLAGS_FALSE}" \
+  "Don't update the toolchains."
+DEFINE_string toolchain_boards "" \
+  "Extra toolchains to setup for the specified boards."
+DEFINE_boolean eclean "${FLAGS_TRUE}" "Run eclean to delete old binpkgs."
+DEFINE_integer backtrack 10 "See emerge --backtrack."
+
+# Parse command line flags
+FLAGS "$@" || exit 1
+eval set -- "${FLAGS_ARGV}"
+
+# Only now can we die on error.  shflags functions leak non-zero error codes,
+# so will die prematurely if 'switch_to_strict_mode' is specified before now.
+switch_to_strict_mode
+
+# shellcheck source=sdk_lib/make_conf_util.sh
+. "${SCRIPTS_DIR}"/sdk_lib/make_conf_util.sh
+
+info "Updating chroot"
+
+# Create /etc/make.conf.host_setup.  The file content is regenerated
+# from scratch every update.  There are various reasons to do this:
+#  + It's cheap, so this is an easy way to guarantee correct content
+#    after an upgrade.
+#  + Inside make_chroot.sh, we use a temporary version of the file
+#    which must be updated before the script completes; that final
+#    update happens here.
+#  + If the repositories change to add or remove the private
+#    overlay, the file may need to be regenerated.
+create_host_setup
+
+# First update the cross-compilers.
+# Note that this uses binpkgs only, unless we pass --nousepkg below.
+if [ "${FLAGS_skip_toolchain_update}" -eq "${FLAGS_FALSE}" ]; then
+  info "Updating cross-compilers"
+  TOOLCHAIN_FLAGS=()
+
+  if [[ -n ${FLAGS_toolchain_boards} ]]; then
+    TOOLCHAIN_FLAGS+=(
+      "--include-boards=${FLAGS_toolchain_boards}"
+    )
+  fi
+
+  # This should really only be skipped while bootstrapping.
+  if [ "${FLAGS_usepkg}" -eq "${FLAGS_FALSE}" ]; then
+    TOOLCHAIN_FLAGS+=( --nousepkg )
+  fi
+  # Expand the path before sudo, as root doesn't have the same path magic.
+  info_run sudo -E "$(type -p cros_setup_toolchains)" "${TOOLCHAIN_FLAGS[@]}"
+fi
+
+# Clean outdated packages in SDK.
+CONFIG_DIR="${HOME}/.config"
+if [[ "${USER}" == "chrome-bot" ]]; then
+  CONFIG_DIR=$(python -c "import tempfile; print(tempfile.gettempdir())")
+  CONFIG_DIR+="/.config/"
+fi
+if [[ ! -e "${CONFIG_DIR}/chromite/autocop-off" ]] && \
+   [[ "${CROS_CLEAN_OUTDATED_PKGS}" != "0" ]]; then
+  # Use "|| true" to not exit on errors for one command.
+  cros clean-outdated-pkgs --host || true
+fi
+
+EMERGE_CMD="${CHROMITE_BIN}/parallel_emerge"
+
+info "Rebuilding Portage cache"
+# Before running any emerge operations, regenerate the Portage dependency cache
+# in parallel.
+info_run "${EMERGE_CMD[@]}" --regen --quiet
+
+# Clean out any stale binpkgs we've accumulated. This is done immediately after
+# regenerating the cache in case ebuilds have been removed (e.g. from a revert).
+if [[ "${FLAGS_eclean}" -eq "${FLAGS_TRUE}" ]]; then
+  info "Cleaning stale binpkgs"
+  get_eclean_exclusions | sudo eclean -e /dev/stdin packages
+fi
+
+info "Updating the SDK"
+
+EMERGE_FLAGS=( -uNv --with-bdeps=y --backtrack="${FLAGS_backtrack}" )
+if [ "${FLAGS_usepkg}" -eq "${FLAGS_TRUE}" ]; then
+  EMERGE_FLAGS+=( --getbinpkg )
+
+  # Avoid building toolchain packages or "post-cross" packages from
+  # source. The toolchain rollout process only takes place when the
+  # chromiumos-sdk builder finishes a successful build.
+  PACKAGES=(
+    $("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host)
+  )
+  # Sanity check we got some valid results.
+  [[ ${#PACKAGES[@]} -eq 0 ]] && die_notrace "cros_setup_toolchains failed"
+  PACKAGES+=(
+    $("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host-post-cross)
+  )
+  EMERGE_FLAGS+=(
+    $(printf ' --useoldpkg-atoms=%s' "${PACKAGES[@]}")
+  )
+fi
+if [[ "${FLAGS_jobs}" -ne -1 ]]; then
+  EMERGE_FLAGS+=( --jobs="${FLAGS_jobs}" )
+fi
+
+# Build cros_workon packages when they are changed.
+for pkg in $("${CHROMITE_BIN}/cros_list_modified_packages" --host); do
+  EMERGE_FLAGS+=( --reinstall-atoms="${pkg}" --usepkg-exclude="${pkg}" )
+done
+
+# Second pass, update everything else.
+EMERGE_FLAGS+=( --deep )
+info_run sudo -E "${EMERGE_CMD}" "${EMERGE_FLAGS[@]}" virtual/target-sdk world
+
+if [ "${FLAGS_usepkg}" -eq "${FLAGS_TRUE}" ]; then
+  # Update "post-cross" packages.
+  # Use --usepkgonly to ensure that packages are not built from source.
+  EMERGE_FLAGS=( -uNv --with-bdeps=y --oneshot --getbinpkg --deep )
+  EMERGE_FLAGS+=( --usepkgonly --rebuilt-binaries=n )
+  EMERGE_FLAGS+=(
+    $("${CHROMITE_BIN}/cros_setup_toolchains" --show-packages host-post-cross)
+  )
+  sudo -E "${EMERGE_CMD}" "${EMERGE_FLAGS[@]}"
+
+  # Install nobdeps packages only when binary pkgs are available, since we don't
+  # want to accidentally pull in build deps for a rebuild.
+  EMERGE_FLAGS=( -uNv --with-bdeps=n --oneshot --getbinpkg --deep )
+  EMERGE_FLAGS+=( --usepkgonly --rebuilt-binaries=n )
+  info_run sudo -E "${EMERGE_CMD}" "${EMERGE_FLAGS[@]}" \
+    virtual/target-sdk-nobdeps
+fi
+
+# Automatically discard all CONFIG_PROTECT'ed files. Those that are
+# protected should not be overwritten until the variable is changed.
+# Autodiscard is option "-9" followed by the "YES" confirmation.
+printf '%s\nYES\n' -9 | sudo etc-update
+
+# If the user still has old perl modules installed, update them.
+"${SCRIPTS_DIR}/build_library/perl_rebuild.sh"
+
+# Deep clean any stale binpkgs. This includes any binary packages that do not
+# correspond to a currently installed package (different versions are kept).
+if [[ "${FLAGS_eclean}" -eq "${FLAGS_TRUE}" ]]; then
+  info "Deep cleaning stale binpkgs"
+  get_eclean_exclusions | sudo eclean -e /dev/stdin -d packages
+fi
+
+command_completed
diff --git a/update_kernel.sh b/update_kernel.sh
index 96c5580..906a435 100755
--- a/update_kernel.sh
+++ b/update_kernel.sh
@@ -30,11 +30,11 @@
 DEFINE_string rootfs "" "Override rootfs partition reported by target"
 DEFINE_string arch "" "Override architecture reported by target"
 DEFINE_boolean clean "${FLAGS_TRUE}" "Remove old files before sending new files"
-DEFINE_boolean hv "${FLAGS_TRUE}" "Use hypervisor kernel if available."
 DEFINE_boolean ignore_verity "${FLAGS_FALSE}" "Update kernel even if system \
 is using verity (WARNING: likely to make the system unable to boot)"
 DEFINE_boolean reboot "${FLAGS_TRUE}" "Reboot system after update"
 DEFINE_boolean vboot "${FLAGS_TRUE}" "Update the vboot kernel"
+DEFINE_boolean vmlinux "${FLAGS_FALSE}" "Update the vmlinux.debug symbol"
 DEFINE_boolean syslinux "${FLAGS_TRUE}" \
   "Update the syslinux kernel (including /boot)"
 DEFINE_boolean bootonce "${FLAGS_FALSE}" "Mark kernel partition as boot once"
@@ -81,22 +81,7 @@
 
 # Ask the target what the kernel partition is
 learn_partition_and_ro() {
-  remote_sh rootdev || die_notrace
-  if [ "${REMOTE_OUT%%-*}" == "/dev/dm" ]; then
-    remote_sh rootdev -s
-    REMOTE_VERITY=${FLAGS_TRUE}
-    if [[ "${FLAGS_ignore_verity}" -eq "${FLAGS_TRUE}" ]]; then
-        warn "System is using verity: not updating firmware/modules"
-    else
-        warn "System is using verity: First remove rootfs verification using"
-        warn "/usr/share/vboot/bin/make_dev_ssd.sh --remove_rootfs_verification"
-        warn "on the DUT."
-        die_notrace
-    fi
-  else
-    REMOTE_VERITY=${FLAGS_FALSE}
-    info "System is not using verity: updating firmware and modules"
-  fi
+  remote_sh rootdev -s || die_notrace
   if [[ "${FLAGS_ab_update}" -eq "${FLAGS_TRUE}" ]]; then
     if [[ "${REMOTE_OUT}" == "${FLAGS_device}${PARTITION_NUM_ROOT_A}" ]]; then
       FLAGS_partition="${FLAGS_device}${PARTITION_NUM_KERN_B}"
@@ -105,25 +90,44 @@
       FLAGS_partition="${FLAGS_device}${PARTITION_NUM_KERN_A}"
       FLAGS_rootfs="${FLAGS_device}${PARTITION_NUM_ROOT_A}"
     fi
+  else
+    if [[ -z "${FLAGS_rootfs}" ]]; then
+      FLAGS_rootfs="${REMOTE_OUT}"
+    fi
+
+    if [[ -z "${FLAGS_partition}" ]]; then
+      if [ "${REMOTE_OUT}" == "${FLAGS_device}${PARTITION_NUM_ROOT_A}" ]; then
+        FLAGS_partition="${FLAGS_device}${PARTITION_NUM_KERN_A}"
+      else
+        FLAGS_partition="${FLAGS_device}${PARTITION_NUM_KERN_B}"
+      fi
+    fi
   fi
-  if [[ -z "${FLAGS_rootfs}" ]]; then
-    FLAGS_rootfs="${REMOTE_OUT}"
-  fi
+
   # If rootfs is for different partition than we're currently running on
   # mount it manually to update the right modules, firmware, etc.
   REMOTE_NEEDS_ROOTFS_MOUNTED=${FLAGS_FALSE}
   if [[ "${REMOTE_OUT}" != "${FLAGS_rootfs}" ]]; then
     REMOTE_NEEDS_ROOTFS_MOUNTED=${FLAGS_TRUE}
   fi
-  [ -n "${FLAGS_partition}" ] && return
-  if [ "${REMOTE_OUT}" == "${FLAGS_device}${PARTITION_NUM_ROOT_A}" ]; then
-    FLAGS_partition="${FLAGS_device}${PARTITION_NUM_KERN_A}"
+
+  # Check whether the kernel partition still has rootfs verification enabled.
+  remote_sh dump_kernel_config "${FLAGS_partition}"
+  if [[ "${REMOTE_OUT}" =~ root=/dev/dm-[0-9] ]]; then
+    REMOTE_VERITY=${FLAGS_TRUE}
+    if [[ "${FLAGS_ignore_verity}" -eq "${FLAGS_TRUE}" ]]; then
+        warn "System is using verity: not updating firmware/modules"
+    else
+        warn "System is using verity: First remove rootfs verification using"
+        warn "/usr/share/vboot/bin/make_dev_ssd.sh --remove_rootfs_verification --partitions ${FLAGS_partition: -1}"
+        warn "on the DUT."
+        die_notrace
+    fi
   else
-    FLAGS_partition="${FLAGS_device}${PARTITION_NUM_KERN_B}"
+    REMOTE_VERITY=${FLAGS_FALSE}
+    info "System is not using verity: updating firmware and modules"
   fi
-  if [ -z "${FLAGS_partition}" ]; then
-    die_notrace "Partition required"
-  fi
+
   if [[ "${REMOTE_VERITY}" -eq "${FLAGS_TRUE}" ]]; then
     info "Target reports kernel partition is ${FLAGS_partition}"
     if [[ "${FLAGS_vboot}" -eq "${FLAGS_FALSE}" ]]; then
@@ -170,39 +174,17 @@
 }
 
 make_kernelimage() {
-  local bootloader_path
   local kernel_image
-  local boot_path="/build/${FLAGS_board}"
+  local boot_path="/build/${FLAGS_board}/boot"
   local config_path
   config_path="$(mktemp /tmp/config.txt.XXXXX)"
-  if [[ "${FLAGS_hv}" -eq "${FLAGS_TRUE}" && \
-        -d "${boot_path}/build/manatee/boot" ]]; then
-    boot_path+="/build/manatee/boot"
-  else
-    boot_path+="/boot"
-  fi
   kernel_image="${boot_path}/vmlinuz"
-  if [[ "${FLAGS_arch}" == "arm" || "${FLAGS_arch}" == "arm64" ]]; then
-    name="bootloader.bin"
-    bootloader_path="${SRC_ROOT}/build/images/${FLAGS_board}/latest/${name}"
-    # If there is no local bootloader stub, create a dummy file.  This matches
-    # build_kernel_image.sh.  If we wanted to be super paranoid, we could copy
-    # and extract it from the remote image, if it had one.
-    if [[ ! -e "${bootloader_path}" ]]; then
-      warn "Bootloader does not exist; creating a stub"
-      bootloader_path="${TMP}/${name}"
-      truncate -s 512 "${bootloader_path}"
-    fi
-  else
-    bootloader_path="/lib64/bootstub/bootstub.efi"
-  fi
   get_bootargs > "${config_path}"
   vbutil_kernel --pack "${TMP}"/new_kern.bin \
     --keyblock /usr/share/vboot/devkeys/kernel.keyblock \
     --signprivate /usr/share/vboot/devkeys/kernel_data_key.vbprivk \
     --version 1 \
     --config "${config_path}" \
-    --bootloader "${bootloader_path}" \
     --vmlinuz "${kernel_image}" \
     --arch "${FLAGS_arch}"
   rm "${config_path}"
@@ -257,6 +239,18 @@
   remote_sh dd of="${FLAGS_partition}" bs=4K < "${TMP}/new_kern.bin"
 }
 
+copy_vmlinux() {
+  local symbol_dir="/usr/lib/debug/boot"
+  local symbol="/build/${FLAGS_board}/${symbol_dir}/vmlinux.debug"
+  if [[ ! -f "${symbol}" ]]; then
+    warn "Can't find vmlinux.debug. Skipping update vmlinux.debug."
+    return
+  fi
+
+  remote_sh mkdir -p "${symbol_dir}"
+  remote_cp_to "${symbol}" "${symbol_dir}"/vmlinux.debug
+}
+
 check_kernelbuildtime() {
   local version="$1"
   local build_dir
@@ -432,6 +426,13 @@
     info "Skipping update of vboot (per request)"
   fi
 
+  if [[ "${FLAGS_vmlinux}" -eq "${FLAGS_TRUE}" ]]; then
+    info "Copying vmlinux.debug symbol to /usr/lib/debug/boot/ (per request)"
+    copy_vmlinux
+  else
+    info "Skipping update of vmlinux.debug symbol"
+  fi
+
   if [[ "${FLAGS_bootonce}" -eq "${FLAGS_TRUE}" || \
         "${FLAGS_ab_update}" -eq "${FLAGS_TRUE}" ]]; then
     info "Marking kernel partition ${FLAGS_partition} as boot once"