Update to ChromeOS 15047.0.0

Merge commit '46719703aba399152fb53ee1f87617487e546d64' into sdk-update-pre

BUG=b/257271340
TEST=presubmit
RELEASE_NOTE=None

Change-Id: Ib77db38480338ae11aa95d6ff8e00e523844452d
diff --git a/bin/cros_make_image_bootable b/bin/cros_make_image_bootable
index 5b32793..542229f 100755
--- a/bin/cros_make_image_bootable
+++ b/bin/cros_make_image_bootable
@@ -161,7 +161,7 @@
   local base_pattern="$3"
 
   [ -f "${file}" ] || return ${FLAGS_TRUE}
-  grep -wq "${pattern}" "${file}" && return ${FLAGS_TRUE}
+  sudo grep -wq "${pattern}" "${file}" && return ${FLAGS_TRUE}
   sudo sed -i "s/\b${base_pattern}\b/& ${pattern}/g" "${file}"
 }
 
@@ -384,6 +384,8 @@
     kernel_part="--kernel_partition='${FLAGS_output_dir}/vmlinuz.image'"
     # Install syslinux on the EFI System Partition.
     kernel_part="${kernel_part} --install_syslinux"
+  elif [[ "${FLAGS_arch}" = "arm64" ]]; then
+    kernel_part="--kernel_partition='${FLAGS_output_dir}/vmlinuz.image'"
   elif [[ "${FLAGS_arch}" = "arm" || "${FLAGS_arch}" = "mips" ]]; then
     # These flags are not used for ARM / MIPS update_bootloaders.sh
     kernel_part=""
diff --git a/build_kernel_image.sh b/build_kernel_image.sh
index 63984cf..dcc4034 100755
--- a/build_kernel_image.sh
+++ b/build_kernel_image.sh
@@ -179,13 +179,23 @@
   info "rootfs is ${root_fs_blocks} blocks of 4096 bytes."
 
   info "Generating root fs hash tree (salt '${FLAGS_verity_salt}')."
+  info "sudo verity mode=create \
+                      alg=${FLAGS_verity_hash_alg} \
+                      payload=${FLAGS_rootfs_image} \
+                      payload_blocks=${root_fs_blocks} \
+                      hashtree=${FLAGS_rootfs_hash} \
+                      salt=${FLAGS_verity_salt} \
+                      version=1"
+
   # Runs as sudo in case the image is a block device.
   table=$(sudo verity mode=create \
                       alg=${FLAGS_verity_hash_alg} \
                       payload=${FLAGS_rootfs_image} \
                       payload_blocks=${root_fs_blocks} \
                       hashtree=${FLAGS_rootfs_hash} \
-                      salt=${FLAGS_verity_salt})
+                      salt=${FLAGS_verity_salt} \
+                      version=1)
+  info "dm-verity table ${table}"
   if [[ -f "${FLAGS_rootfs_hash}" ]]; then
     sudo chmod a+r "${FLAGS_rootfs_hash}"
   fi
@@ -198,7 +208,7 @@
     table=${table//HASH_DEV/${base_root}}
     table=${table//ROOT_DEV/${base_root}}
   fi
-  verity_dev="vroot none ro 1,${table}"
+  verity_dev="vroot,,,ro,${table}"
   if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
     signature=$(rootdigest)
     cachestart=$(($(hashstart) + $(veritysize)))
@@ -209,9 +219,9 @@
     bootcache_args+=" ${cachestart} ${signature} ${size_limit}"
     bootcache_args+=" ${max_trace} ${max_pages}"
     bootcache_dev="vboot none ro 1,0 ${cachestart} bootcache ${bootcache_args}"
-    device_mapper_args="dm=\"2 ${bootcache_dev}, ${verity_dev}\""
+    device_mapper_args="dm-mod.create=\"${bootcache_dev}, ${verity_dev}\""
   else
-    device_mapper_args="dm=\"1 ${verity_dev}\""
+    device_mapper_args="dm-mod.create=\"${verity_dev}\""
   fi
   info "device mapper configuration: ${device_mapper_args}"
 fi
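
The hunks above replace the legacy Chrome OS `dm=` boot parameter (roughly "<count> <name> <uuid> <mode> <num-targets>,<table>") with the upstream `dm-mod.create=` syntax ("<name>,<uuid>,<minor>,<flags>,<table>"), matching the change from `vroot none ro 1,<table>` to `vroot,,,ro,<table>`. A minimal bash sketch of the two forms, using an illustrative verity table rather than a real one:

  #!/bin/bash
  # Illustrative dm-verity table; real tables are emitted by `verity mode=create` above.
  table='0 3891200 verity payload=ROOT_DEV hashtree=HASH_DEV hashstart=3891200 alg=sha256 root_hexdigest=<digest> salt=<salt>'

  # Legacy Chrome OS form removed by this change: dm="<count> <name> <uuid> <mode> <num-targets>,<table>"
  echo "dm=\"1 vroot none ro 1,${table}\""

  # Upstream form now emitted: dm-mod.create="<name>,<uuid>,<minor>,<flags>,<table>"
  echo "dm-mod.create=\"vroot,,,ro,${table}\""
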
diff --git a/build_library/create_legacy_bootloader_templates.sh b/build_library/create_legacy_bootloader_templates.sh
index f8fcc6f..e3c47c3 100755
--- a/build_library/create_legacy_bootloader_templates.sh
+++ b/build_library/create_legacy_bootloader_templates.sh
@@ -164,7 +164,7 @@
   menu label chromeos-vusb.A
   kernel vmlinuz.A
   append ${common_args} ${verity_common} root=${ROOTDEV} \
-      i915.modeset=1 cros_legacy dm="DMTABLEA"
+      i915.modeset=1 cros_legacy dm-mod.create="DMTABLEA"
 EOF
   info "Emitted ${SYSLINUX_DIR}/usb.A.cfg"
 
@@ -181,7 +181,7 @@
   menu label chromeos-vhd.A
   kernel vmlinuz.A
   append ${common_args} ${verity_common} root=${ROOTDEV} \
-      i915.modeset=1 cros_legacy dm="DMTABLEA"
+      i915.modeset=1 cros_legacy dm-mod.create="DMTABLEA"
 EOF
   info "Emitted ${SYSLINUX_DIR}/root.A.cfg"
 
@@ -195,7 +195,7 @@
   menu label chromeos-vhd.B
   kernel vmlinuz.B
   append ${common_args} ${verity_common} root=${ROOTDEV} \
-      i915.modeset=1 cros_legacy dm="DMTABLEB"
+      i915.modeset=1 cros_legacy dm-mod.create="DMTABLEB"
 EOF
   info "Emitted ${SYSLINUX_DIR}/root.B.cfg"
 
@@ -251,12 +251,12 @@
 
 menuentry "verified image A" {
   linux /syslinux/vmlinuz.A ${common_args} ${verity_common} \
-      i915.modeset=1 cros_efi root=${ROOTDEV} dm="DMTABLEA"
+      i915.modeset=1 cros_efi root=${ROOTDEV} dm-mod.create="DMTABLEA"
 }
 
 menuentry "verified image B" {
   linux /syslinux/vmlinuz.B ${common_args} ${verity_common} \
-      i915.modeset=1 cros_efi root=${ROOTDEV} dm="DMTABLEB"
+      i915.modeset=1 cros_efi root=${ROOTDEV} dm-mod.create="DMTABLEB"
 }
 
 # FIXME: usb doesn't support verified boot for now
@@ -272,6 +272,57 @@
   fi
   info "Emitted ${FLAGS_to}/efi/boot/grub.cfg"
   exit 0
+elif [[ "${FLAGS_arch}" = "arm64" ]]; then
+  sudo mkdir -p "${FLAGS_to}"/efi/boot
+
+  # Templated variables:
+  #  DMTABLEA, DMTABLEB -> '0 xxxx verity ... '
+  # This should be replaced during postinst when updating the ESP.
+  cat <<EOF | sudo dd of="${FLAGS_to}/efi/boot/grub.cfg" 2>/dev/null
+defaultA=0
+defaultB=1
+gptpriority \$grubdisk ${partition_num_kern_a} prioA
+gptpriority \$grubdisk ${partition_num_kern_b} prioB
+
+if [ \$prioA -lt \$prioB ]; then
+  set default=\$defaultB
+else
+  set default=\$defaultA
+fi
+
+set timeout=0
+
+# NOTE: These magic grub variables are a Chrome OS hack. They are not portable.
+
+menuentry "local image A" {
+  linux /syslinux/vmlinuz.A ${common_args} cros_efi \
+      root=/dev/\$linuxpartA
+}
+
+menuentry "local image B" {
+  linux /syslinux/vmlinuz.B ${common_args} cros_efi \
+      root=/dev/\$linuxpartB
+}
+
+menuentry "verified image A" {
+  linux /syslinux/vmlinuz.A ${common_args} ${verity_common} \
+      cros_efi root=${ROOTDEV} dm-mod.create="DMTABLEA"
+}
+
+menuentry "verified image B" {
+  linux /syslinux/vmlinuz.B ${common_args} ${verity_common} \
+      cros_efi root=${ROOTDEV} dm-mod.create="DMTABLEB"
+}
+
+EOF
+  if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
+    sudo sed -i \
+      -e '/^defaultA=/s:=.*:=2:' \
+      -e '/^defaultB=/s:=.*:=3:' \
+      "${FLAGS_to}/efi/boot/grub.cfg"
+  fi
+  info "Emitted ${FLAGS_to}/efi/boot/grub.cfg"
+  exit 0
 fi
 
 info "The target platform does not use bootloader templates."
diff --git a/build_library/disk_layout_anthos.json b/build_library/disk_layout_anthos.json
new file mode 100644
index 0000000..db4cf1c
--- /dev/null
+++ b/build_library/disk_layout_anthos.json
@@ -0,0 +1,255 @@
+{
+  # See README_disk_layout
+  "parent": "common_disk_layout.json",
+
+  "metadata": {
+    "block_size": 512,
+    "fs_block_size": 4096,
+    "fs_align": "2 MiB"
+  },
+  "layouts": {
+    # common is the standard layout template.
+    "common": [
+      {
+        # Reserve space for RW firmware. Not used on modern boards.
+        # BIOS Boot Partition for COS.
+        "num": 11,
+        "label": "RWFW",
+        "type": "firmware",
+        "size": "8 MiB"
+      },
+      {
+        # Unused partition, reserved for software slot C.
+        "num": 6,
+        "label": "KERN-C",
+        "type": "kernel"
+      },
+      {
+        # Unused partition, reserved for software slot C.
+        "num": 7,
+        "label": "ROOT-C",
+        "type": "rootfs"
+      },
+      {
+        # Unused partition, reserved for future changes.
+        "num": 9,
+        "type": "reserved",
+        "label": "reserved"
+      },
+      {
+        # Unused partition, reserved for future changes.
+        "num": 10,
+        "type": "reserved",
+        "label": "reserved"
+      },
+      {
+        # Kernel for Slot A, no file system.
+        # Not used by COS.
+        "num": 2,
+        "label": "KERN-A",
+        "type": "kernel",
+        "size": "16 MiB"
+      },
+      {
+        # Kernel for Slot B, no file system.
+        # Not used by COS.
+        "num": 4,
+        "label": "KERN-B",
+        "type": "kernel",
+        "size": "16 MiB"
+      },
+      {
+        # Board specific files, OEM partition for COS/Anthos.
+        # Used for installing application binaries like Anthos.
+        "num": 8,
+        "label": "OEM",
+        "type": "data",
+        "fs_format": "ext4",
+        "size": "2048 MiB",
+        "uuid": "random"
+      },
+      {
+        # Used for Legacy Bios, and EFI Bios, not ChromeOS hardware
+        "num": 12,
+        "label": "EFI-SYSTEM",
+        "type": "efi",
+        "fs_format": "vfat",
+        "size": "64 MiB",
+        "uuid": "clear"
+      },
+      {
+        # Slot B rootfs. Must match Root A in size for normal
+        # updates. Will contain a copy of Root A after install, and
+        # new rootfs contents after runtime updates.
+        # Shrink to 16 MiB since we disable update-engine.
+        "num": 5,
+        "label": "ROOT-B",
+        "type": "rootfs",
+        "size": "16 MiB"
+      },
+      {
+        # Slot A rootfs. Rootfs + extras (AKA verity) must fit, AKA:
+        #   size <= FS size + Verity size
+        #
+        # Verity's size can be found by:
+        #   verity_bytes = div_round_up(fs_bytes, 128) +
+        #                  div_round_up(fs_bytes, 16384) + 4096
+        #
+        # That means that the FS MiB should be:
+        #   ((total_MiB * 1024 * 1024 - 4096) * 16384 / 16513) / (1024 * 1024)
+        #
+        # The reason to not set your fs_size to be exactly what is specified
+        # by the formula above is to make builds start failing a little bit
+        # before we're truly out of space, allowing a quick release valve to
+        # relieve some of the pressure while we try to find other ways to save
+        # space.
+        #
+        # Note that in the past it was observed that updates would fail if the
+        # rootfs size shrunk (crbug.com/192136).  There are no known reasons to
+        # shrink the rootfs size, but if you come up with one you should
+        # revisit that bug and make sure it won't affect you.
+        #
+        # Changes to the offset of this partition may require
+        # changes in cros-signing/security_test_baselines/
+        # ensure_secure_kernelparams.config to allow secure boot.
+        "num": 3,
+        "label": "ROOT-A",
+        "type": "rootfs",
+        "fs_format": "ext2",
+        "fs_options": {
+          "squashfs": "-noI -comp lzo -Xalgorithm lzo1x_999 -Xcompression-level 9",
+          "btrfs": "skinny-metadata"
+        },
+        "size": "2048 MiB",
+        "fs_size": "2000 MiB",
+        "uuid": "clear"
+      },
+      {
+        # User data, stateful partition.
+        # User data, fills all remaining space on drive.
+        "num": 1,
+        "label": "STATE",
+        "type": "data",
+        "fs_format": "ext4",
+        "fs_options": {
+          # A consequence of this option is that some file system
+          # structures are initialized lazily when resizing,
+          # resulting in much faster resize2fs invocations.
+          "ext4": "-O metadata_csum"
+         },
+        "size": "5000 MiB",
+        "features": ["expand"],
+        "uuid": "random"
+      }
+    ],
+    # Used for installs on main device
+    # by default, same as 'common'.
+    "base": [
+    ],
+    # Used for bootable USB installs (not recovery).
+    "usb": [
+      {
+        # Slot B rootfs, unused on USB, but pad to 2M.
+        # installation will expand this to size from base.
+        "num": 5,
+        "size": "2 MiB"
+      }
+    ],
+    # Used for factory install images.
+    "factory_install": [
+      {
+        "num": 12,
+        "size": "32 MiB"
+      },
+      {
+        "num": 5,
+        "size": "2 MiB"
+      },
+      {
+        "num": 3,
+        "size": "420 MiB",
+        "fs_size": "400 MiB"
+      },
+      {
+        "num": 1,
+        "size": "140 MiB"
+      }
+    ],
+    # Used for recovery images.
+    "recovery": [
+      {
+        # Slot B rootfs, unused on USB, but pad to 2M.
+        # installation will expand this to size from base.
+        "num": 5,
+        "size": "2 MiB"
+      },
+      {
+        # Stateful on recovery is dynamically resized larger.
+        "num": 1,
+        "size": "2 MiB"
+      }
+    ],
+    # Larger rootfs, suitable for development with symbols, etc.
+    # Cannot apply updates when running from USB (no slot B).
+    "2gb-rootfs": [
+      {
+        # Will be grown to size from base on install.
+        "num": 5,
+        "size": "2 MiB"
+      },
+      {
+        # Will be shrunk to size from base on install.
+        "num": 3,
+        "size": "2048 MiB",
+        "fs_size": "2000 MiB"
+      }
+    ],
+    # Larger rootfs, suitable for development with symbols, etc.
+    # CAN apply updates when running from USB.
+    "2gb-rootfs-updatable": [
+      {
+        # The partition size matches base, so it's installable.
+        "num": 5,
+        "size": "2048 MiB"
+      },
+      {
+        # The partition size matches base, so it's installable.
+        "num": 3,
+        "size": "2048 MiB",
+        "fs_size": "2000 MiB"
+      },
+      {
+        "num": 1,
+        "size": "4096 MiB"
+      }
+    ],
+    # Very large rootfs, suitable for development with symbols,
+    # etc. Cannot apply updates when running from USB (no slot B)
+    "4gb-rootfs": [
+      {
+        "num": 5,
+        "size": "2 MiB"
+      },
+      {
+        # This partition is larger than the base partition, so the
+        # installer will corrupt the disk during installation.
+        "num": 3,
+        "size": "4096 MiB",
+        "fs_size": "4000 MiB"
+      }
+    ],
+    # Huge rootfs, suitable for VM only images, should not be used
+    # for actual hardware devices.
+    "16gb-rootfs": [
+      {
+        "num": 5,
+        "size": "2 MiB"
+      },
+      {
+        "num": 3,
+        "size": "16384 MiB",
+        "fs_size": "16000 MiB"
+      }
+    ]
+  }
+}
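
The ROOT-A comment above gives the formula for how large the file system may be once verity overhead is added. A quick bash sanity check of that arithmetic for the 2048 MiB partition declared in this layout:

  #!/bin/bash
  # Sanity-check of the fs_size headroom formula quoted in the ROOT-A comment,
  # assuming the 2048 MiB partition declared in this layout.
  total_mib=2048
  max_fs_bytes=$(( (total_mib * 1024 * 1024 - 4096) * 16384 / 16513 ))
  echo "max fs size: $(( max_fs_bytes / 1024 / 1024 )) MiB"  # ~2031 MiB with integer math
  # The layout declares fs_size = 2000 MiB, leaving the margin the comment asks for.
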
diff --git a/cos/README.md b/cos/README.md
new file mode 100644
index 0000000..a4895f0
--- /dev/null
+++ b/cos/README.md
@@ -0,0 +1,8 @@
+This folder contains image utilities from the Container-Optimized OS (COS)
+team. They support image format conversion so that images can be used on
+different platforms:
+
+* COS on vSphere
+* COS on AWS
+* COS on Azure
+* COS on Borg
diff --git a/cos/convert_image.sh b/cos/convert_image.sh
new file mode 100755
index 0000000..508b9b1
--- /dev/null
+++ b/cos/convert_image.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+#
+# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# convert_image.sh --board=[board] --image_type=[type] --image_format=[format]
+#
+# This script converts a board's image (base, test, or dev) to a format such
+# as vmdk or vhd, so that the image can be used by platforms other than GCP.
+#
+SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+SCRIPT_ROOT=${SCRIPT_ROOT%cos}
+. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
+
+# Script must be run inside the chroot.
+restart_in_chroot_if_needed "$@"
+
+DEFINE_string board "${DEFAULT_BOARD}" \
+  "The board to build an image for."
+DEFINE_string image_type "base" \
+  "Image type to process, base, test or dev."
+DEFINE_string image_format "" \
+  "Image format to be converted to, vmdk or vhd."
+DEFINE_string image_dir "" "Path to the folder containing the image to convert."
+
+# Parse command line.
+FLAGS "$@" || exit 1
+eval set -- "${FLAGS_ARGV}"
+
+. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
+. "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
+
+switch_to_strict_mode
+
+set -x
+# build_packages artifact output.
+SYSROOT="${GCLIENT_ROOT}/chroot/build/${FLAGS_board}"
+# build_image artifact output.
+
+IMAGE_DIR="${CHROOT_TRUNK_DIR}"/src/build/images/"${FLAGS_board}"/latest
+if [ -n "${FLAGS_image_dir}" ]; then
+  IMAGE_DIR=${FLAGS_image_dir}
+fi
+IMAGE_TYPE=${FLAGS_image_type}
+
+case ${FLAGS_image_format} in
+  "vmdk")
+  qemu-img convert -p -o subformat=streamOptimized -O vmdk \
+    ${IMAGE_DIR}/chromiumos_${IMAGE_TYPE}_image.bin \
+    ${IMAGE_DIR}/chromiumos_${IMAGE_TYPE}_image.vmdk
+  ;;
+
+  "vhd")
+  qemu-img convert -f raw -o subformat=fixed,force_size -O vpc \
+    ${IMAGE_DIR}/chromiumos_${IMAGE_TYPE}_image.bin \
+    ${IMAGE_DIR}/chromiumos_${IMAGE_TYPE}_image.vhd
+  ;;
+
+  *)
+  ;;
+esac
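
For reference, a hypothetical invocation of convert_image.sh (the board name is only an example; any built board works):

  #!/bin/bash
  # Convert the latest test image of a board to a stream-optimized VMDK.
  ./cos/convert_image.sh --board=anthos-amd64-vsphere \
    --image_type=test --image_format=vmdk
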
diff --git a/cos/cos.json b/cos/cos.json
new file mode 100644
index 0000000..a22b87e
--- /dev/null
+++ b/cos/cos.json
@@ -0,0 +1,42 @@
+{
+    "DiskProvisioning": "thin",
+    "IPAllocationPolicy": "dhcpPolicy",
+    "IPProtocol": "IPv4",
+    "InjectOvfEnv": false,
+    "MarkAsTemplate": false,
+    "Name": null,
+    "NetworkMapping": [
+        {
+            "Name": "VM Network",
+            "Network": ""
+        }
+    ],
+    "PowerOn": false,
+    "PropertyMapping": [
+        {
+            "Key": "instance-id",
+            "Value": "id-ovf"
+        },
+        {
+            "Key": "hostname",
+            "Value": ""
+        },
+        {
+            "Key": "seedfrom",
+            "Value": ""
+        },
+        {
+            "Key": "public-keys",
+            "Value": ""
+        },
+        {
+            "Key": "user-data",
+            "Value": ""
+        },
+        {
+            "Key": "password",
+            "Value": ""
+        }
+    ],
+    "WaitForIP": false
+}
diff --git a/cos/make_ova.sh b/cos/make_ova.sh
new file mode 100755
index 0000000..543a890
--- /dev/null
+++ b/cos/make_ova.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# make_ova.sh -b [board] -d [vmdk file] -o [ova file] -p [product-name] \
+#   -n [image-name] -t [template.ovf]
+#
+# This script creates an .ova file from the given disk image and OVF template.
+#
+
+set -o xtrace
+set -o errexit
+set -o nounset
+
+SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+TEMPLATE_PATH=${SCRIPT_ROOT}/template.ovf
+WORKSPACE=${SCRIPT_ROOT%\/src\/scripts\/cos}
+BOARD=anthos-amd64-vsphere
+PRODUCT_NAME="Anthos OnPrem on COS"
+IMAGE_NAME="COS"
+IMAGE_TYPE="test"
+IMAGE_ROOT=${WORKSPACE}/src/build/images/${BOARD}/latest
+DISK_FILE=${IMAGE_ROOT}/chromiumos_${IMAGE_TYPE}_image.vmdk
+OUTPUT_FILE=${IMAGE_ROOT}/chromiumos_${IMAGE_TYPE}_image.ova
+
+usage() {
+  echo "Usage: $0 -b board -d disk.vmdk \
+    -p product-name -n image-name \
+    -o output-file [-t template.ovf]"
+}
+
+while getopts ":b:d:p:n:t:o:h" arg; do
+  case $arg in
+    b) BOARD=$OPTARG ;;
+    d) DISK_FILE=$OPTARG ;;
+    p) PRODUCT_NAME=$OPTARG ;;
+    n) IMAGE_NAME=$OPTARG ;;
+    t) TEMPLATE_PATH=$OPTARG ;;
+    o) OUTPUT_FILE=$OPTARG ;;
+    h)
+      usage
+      exit 0
+      ;;
+    *)
+      usage
+      exit 1
+      ;;
+  esac
+done
+
+: "${BOARD?Missing -d BOARD value}"
+: "${DISK_FILE?Missing -d DISK_FILE value}"
+: "${PRODUCT_NAME?Missing -p PRODUCT_NAME value}"
+: "${IMAGE_NAME?Missing -n IMAGE_NAME value}"
+: "${TEMPLATE_PATH?Missing -t TEMPLATE_PATH value}"
+: "${OUTPUT_FILE?Missing -o OUTPUT_FILE value}"
+
+if [[ ! -f ${TEMPLATE_PATH} ]]; then
+  echo "Cannot find template at ${TEMPLATE_PATH}"
+  exit 1
+fi
+
+XML_NS=(
+  -N 'x=http://schemas.dmtf.org/ovf/envelope/1'
+  -N 'ovf=http://schemas.dmtf.org/ovf/envelope/1'
+  -N 'vssd=http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData'
+)
+
+WORK_DIR=$(mktemp -d)
+trap 'rm -rf "${WORK_DIR}"' EXIT
+
+# xmlstarlet does not support multiple updates at once, and we need to provide
+# namespaces to every invocation, so disable the quoting warning.
+# shellcheck disable=SC2086
+xmlstarlet ed ${XML_NS[*]} \
+  --update '//x:VirtualSystem/@ovf:id' --value "${IMAGE_NAME}" \
+  "${TEMPLATE_PATH}" \
+  | xmlstarlet ed ${XML_NS[*]} \
+    --update '//x:VirtualSystem/x:Name' --value "${IMAGE_NAME}" \
+  | xmlstarlet ed ${XML_NS[*]} \
+    --update '//vssd:VirtualSystemIdentifier' --value "${IMAGE_NAME}" \
+    > "${WORK_DIR}/tmp.ovf"
+
+# Add a disk image to temporary .ovf
+cot --force add-disk "${DISK_FILE}" "${WORK_DIR}/tmp.ovf" \
+  -o "${WORK_DIR}/image.ovf" \
+  -f vmdisk1 -t harddisk -c scsi
+
+# Add product information and convert .ovf to .ova
+cot --force edit-product "${WORK_DIR}/image.ovf" \
+  -o "${OUTPUT_FILE}" \
+  --product "${PRODUCT_NAME}"
+
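A hypothetical invocation of make_ova.sh matching the getopts flags above (file names are examples):

  #!/bin/bash
  # Wrap a converted VMDK into an OVA using the bundled template.ovf.
  ./cos/make_ova.sh -b anthos-amd64-vsphere \
    -d chromiumos_test_image.vmdk \
    -n COS -p "Anthos OnPrem on COS" \
    -t cos/template.ovf \
    -o chromiumos_test_image.ova
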
diff --git a/cos/run_vmtests.sh b/cos/run_vmtests.sh
new file mode 100755
index 0000000..ee25665
--- /dev/null
+++ b/cos/run_vmtests.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+#
+# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# cos/run_vmtests.sh --board=[anthos-amd64-vsphere]
+#
+# This script builds and runs VMTests for a given board.
+
+SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
+SCRIPT_ROOT=${SCRIPT_ROOT%cos}
+. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
+
+# Script must be run inside the chroot.
+restart_in_chroot_if_needed "$@"
+
+DEFINE_string board "${DEFAULT_BOARD}" \
+  "The board to build an image for."
+DEFINE_string image_type "test" \
+  "Image type to process, base, test or dev."
+DEFINE_string image_dir "" "Path to the folder containing the image to test."
+
+# Parse command line.
+FLAGS "$@" || exit 1
+eval set -- "${FLAGS_ARGV}"
+
+. "${SCRIPT_ROOT}/build_library/build_common.sh" || exit 1
+. "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
+
+switch_to_strict_mode
+
+set -x
+# build_packages artifact output.
+SYSROOT="${GCLIENT_ROOT}/chroot/build/${FLAGS_board}"
+# build_image artifact output.
+
+IMAGE_DIR="${CHROOT_TRUNK_DIR}"/src/build/images/"${FLAGS_board}"/latest
+if [ -n "${FLAGS_image_dir}" ]; then
+  IMAGE_DIR=${FLAGS_image_dir}
+fi
+
+BOARD_ARCH=$(portageq-${FLAGS_board} envvar ARCH)
+if [[ ${BOARD_ARCH} == "amd64" ]]; then
+  BOARD_ARCH="x86_64"
+elif [[ ${BOARD_ARCH} == "arm64" ]]; then
+  BOARD_ARCH="aarch64"
+else
+  echo "Unsupported ${BOARD_ARCH}"
+  exit 1
+fi
+
+cros_run_vm_test --board ${BOARD} \
+  --image-path ${IMAGE_DIR}/chromiumos_${FLAGS_image_type}_image.bin \
+  --private-key ${IMAGE_DIR}/id_rsa \
+  --test_that-args=--model=ad_hoc_model \
+  --copy-on-write \
+  --start-vm \
+  --qemu-arch ${BOARD_ARCH} \
+  --autotest 'suite:smoke'
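
A hypothetical invocation of run_vmtests.sh (the board name is an example):

  #!/bin/bash
  # Run the smoke suite against the latest test image of a board.
  ./cos/run_vmtests.sh --board=anthos-amd64-vsphere --image_type=test
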
diff --git a/cos/template.ovf b/cos/template.ovf
new file mode 100644
index 0000000..f8b5220
--- /dev/null
+++ b/cos/template.ovf
@@ -0,0 +1,147 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Envelope xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:cim="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <References>
+  </References>
+  <DiskSection>
+    <Info>Virtual disk information</Info>
+  </DiskSection>
+  <NetworkSection>
+    <Info>The list of logical networks</Info>
+    <Network ovf:name="VM Network">
+      <Description>The VM Network network</Description>
+    </Network>
+  </NetworkSection>
+  <VirtualSystem ovf:id="__NAME__">
+    <Info>A virtual machine</Info>
+    <Name>__NAME__</Name>
+    <OperatingSystemSection ovf:id="94" vmw:osType="genericLinuxGuest">
+      <Info>The kind of installed guest operating system</Info>
+      <Description>Other Linux</Description>
+    </OperatingSystemSection>
+
+    <ProductSection ovf:required="false">
+      <Info>Cloud-Init customization</Info>
+      <Product>__PRODUCT_REPLACED_BY_COT__</Product>
+      <Property ovf:key="instance-id" ovf:type="string" ovf:userConfigurable="true" ovf:value="id-ovf">
+          <Label>A Unique Instance ID for this instance</Label>
+          <Description>Specifies the instance id.  This is required and used to determine if the machine should take "first boot" actions</Description>
+      </Property>
+      <Property ovf:key="hostname" ovf:type="string" ovf:userConfigurable="true" ovf:value="cosguest">
+          <Description>Specifies the hostname for the appliance</Description>
+      </Property>
+      <Property ovf:key="seedfrom" ovf:type="string" ovf:userConfigurable="true">
+          <Label>Url to seed instance data from</Label>
+          <Description>This field is optional, but indicates that the instance should 'seed' user-data and meta-data from the given url.  If set to 'http://tinyurl.com/sm-' is given, meta-data will be pulled from http://tinyurl.com/sm-meta-data and user-data from http://tinyurl.com/sm-user-data.  Leave this empty if you do not want to seed from a url.</Description>
+      </Property>
+      <Property ovf:key="public-keys" ovf:type="string" ovf:userConfigurable="true" ovf:value="">
+          <Label>ssh public keys</Label>
+          <Description>This field is optional, but indicates that the instance should populate the default user's 'authorized_keys' with this value</Description>
+      </Property>
+      <Property ovf:key="user-data" ovf:type="string" ovf:userConfigurable="true" ovf:value="">
+          <Label>Encoded user-data</Label>
+          <Description>In order to fit into a xml attribute, this value is base64 encoded . It will be decoded, and then processed normally as user-data.</Description>
+          <!--  The following represents '#!/bin/sh\necho "hi world"'
+          ovf:value="IyEvYmluL3NoCmVjaG8gImhpIHdvcmxkIgo="
+        -->
+      </Property>
+      <Property ovf:key="password" ovf:type="string" ovf:userConfigurable="true" ovf:value="">
+          <Label>Default User's password</Label>
+          <Description>If set, the default user's password will be set to this value to allow password based login.  The password will be good for only a single login.  If set to the string 'RANDOM' then a random password will be generated, and written to the console.</Description>
+      </Property>
+    </ProductSection>
+
+    <VirtualHardwareSection ovf:transport="com.vmware.guestInfo">
+      <Info>Virtual hardware requirements</Info>
+      <System>
+        <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+        <vssd:InstanceID>0</vssd:InstanceID>
+        <vssd:VirtualSystemIdentifier>__NAME__</vssd:VirtualSystemIdentifier>
+        <vssd:VirtualSystemType>vmx-13</vssd:VirtualSystemType>
+      </System>
+      <Item>
+        <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+        <rasd:Description>Number of Virtual CPUs</rasd:Description>
+        <rasd:ElementName>2 virtual CPU(s)</rasd:ElementName>
+        <rasd:InstanceID>1</rasd:InstanceID>
+        <rasd:ResourceType>3</rasd:ResourceType>
+        <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
+      </Item>
+      <Item>
+        <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+        <rasd:Description>Memory Size</rasd:Description>
+        <rasd:ElementName>1024MB of memory</rasd:ElementName>
+        <rasd:InstanceID>2</rasd:InstanceID>
+        <rasd:ResourceType>4</rasd:ResourceType>
+        <rasd:VirtualQuantity>1024</rasd:VirtualQuantity>
+      </Item>
+      <Item>
+        <rasd:Address>0</rasd:Address>
+        <rasd:Description>SCSI Controller</rasd:Description>
+        <rasd:ElementName>SCSI Controller 0</rasd:ElementName>
+        <rasd:InstanceID>3</rasd:InstanceID>
+        <rasd:ResourceSubType>VirtualSCSI</rasd:ResourceSubType>
+        <rasd:ResourceType>6</rasd:ResourceType>
+      </Item>
+      <Item>
+        <rasd:Address>1</rasd:Address>
+        <rasd:Description>IDE Controller</rasd:Description>
+        <rasd:ElementName>VirtualIDEController 1</rasd:ElementName>
+        <rasd:InstanceID>4</rasd:InstanceID>
+        <rasd:ResourceType>5</rasd:ResourceType>
+      </Item>
+      <Item>
+        <rasd:Address>0</rasd:Address>
+        <rasd:Description>IDE Controller</rasd:Description>
+        <rasd:ElementName>VirtualIDEController 0</rasd:ElementName>
+        <rasd:InstanceID>5</rasd:InstanceID>
+        <rasd:ResourceType>5</rasd:ResourceType>
+      </Item>
+      <Item ovf:required="false">
+        <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+        <rasd:ElementName>VirtualVideoCard</rasd:ElementName>
+        <rasd:InstanceID>6</rasd:InstanceID>
+        <rasd:ResourceType>24</rasd:ResourceType>
+        <vmw:Config ovf:required="false" vmw:key="enable3DSupport" vmw:value="false"/>
+        <vmw:Config ovf:required="false" vmw:key="use3dRenderer" vmw:value="automatic"/>
+        <vmw:Config ovf:required="false" vmw:key="useAutoDetect" vmw:value="false"/>
+        <vmw:Config ovf:required="false" vmw:key="videoRamSizeInKB" vmw:value="4096"/>
+      </Item>
+      <Item ovf:required="false">
+        <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+        <rasd:ElementName>VirtualVMCIDevice</rasd:ElementName>
+        <rasd:InstanceID>7</rasd:InstanceID>
+        <rasd:ResourceSubType>vmware.vmci</rasd:ResourceSubType>
+        <rasd:ResourceType>1</rasd:ResourceType>
+        <vmw:Config ovf:required="false" vmw:key="allowUnrestrictedCommunication" vmw:value="false"/>
+      </Item>
+      <Item>
+        <rasd:AddressOnParent>7</rasd:AddressOnParent>
+        <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+        <rasd:Connection>VM Network</rasd:Connection>
+        <rasd:Description>VMXNET3 ethernet adapter on &quot;VM Network&quot;</rasd:Description>
+        <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
+        <rasd:InstanceID>11</rasd:InstanceID>
+        <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+        <rasd:ResourceType>10</rasd:ResourceType>
+        <vmw:Config ovf:required="false" vmw:key="wakeOnLanEnabled" vmw:value="true"/>
+      </Item>
+      <vmw:Config ovf:required="false" vmw:key="cpuHotAddEnabled" vmw:value="false"/>
+      <vmw:Config ovf:required="false" vmw:key="cpuHotRemoveEnabled" vmw:value="false"/>
+      <vmw:Config ovf:required="false" vmw:key="firmware" vmw:value="bios"/>
+      <vmw:Config ovf:required="false" vmw:key="virtualICH7MPresent" vmw:value="false"/>
+      <vmw:Config ovf:required="false" vmw:key="virtualSMCPresent" vmw:value="false"/>
+      <vmw:Config ovf:required="false" vmw:key="memoryHotAddEnabled" vmw:value="false"/>
+      <vmw:Config ovf:required="false" vmw:key="nestedHVEnabled" vmw:value="false"/>
+      <vmw:Config ovf:required="false" vmw:key="powerOpInfo.powerOffType" vmw:value="preset"/>
+      <vmw:Config ovf:required="false" vmw:key="powerOpInfo.resetType" vmw:value="preset"/>
+      <vmw:Config ovf:required="false" vmw:key="powerOpInfo.standbyAction" vmw:value="checkpoint"/>
+      <vmw:Config ovf:required="false" vmw:key="powerOpInfo.suspendType" vmw:value="preset"/>
+      <vmw:Config ovf:required="false" vmw:key="tools.afterPowerOn" vmw:value="true"/>
+      <vmw:Config ovf:required="false" vmw:key="tools.afterResume" vmw:value="true"/>
+      <vmw:Config ovf:required="false" vmw:key="tools.beforeGuestShutdown" vmw:value="true"/>
+      <vmw:Config ovf:required="false" vmw:key="tools.beforeGuestStandby" vmw:value="true"/>
+      <vmw:Config ovf:required="false" vmw:key="tools.syncTimeWithHost" vmw:value="false"/>
+      <vmw:Config ovf:required="false" vmw:key="tools.toolsUpgradePolicy" vmw:value="manual"/>
+    </VirtualHardwareSection>
+  </VirtualSystem>
+</Envelope>
diff --git a/hooks/install/gen-sbom-package-info.py b/hooks/install/gen-sbom-package-info.py
new file mode 100755
index 0000000..59e5ffd
--- /dev/null
+++ b/hooks/install/gen-sbom-package-info.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+#
+# Copyright 2022 Google LLC
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
+# This script is used to automatically generate package
+# information for the SBOM of dependencies bundled in the COS image.
+
+import os
+import sys
+from sbom_info_lib import download_url
+from sbom_info_lib import go_dep
+from sbom_info_lib import licenses
+
+SBOM_INFO_FILE_NAME = "sbom-pkg-info"
+
+
+class SbomPackageInfo:
+    def __init__(self, url, license, go_dep):
+        self.download_url = url
+        self.licenses = license
+        self.go_dep = go_dep
+
+    def write_to_build_info(self, build_info_dir):
+        with open(f"{build_info_dir}/{SBOM_INFO_FILE_NAME}", "w") as f:
+            f.write(f"download-url:{self.download_url}\n")
+            f.write(f"licenses:{self.licenses}\n")
+            f.write(f"go-dep:{self.go_dep}\n")
+
+
+class SBOMPkgInfoError(Exception):
+    def __init__(self, msg):
+        super().__init__(msg)
+
+
+def main():
+    package_dir = os.getenv("PORTAGE_BUILDDIR")
+    build_info_dir = os.path.join(package_dir, "build-info")
+    package_name = os.path.basename(package_dir)
+    ebuild = os.path.join(build_info_dir, package_name + ".ebuild")
+    url = download_url.get_download_url(build_info_dir, ebuild)
+    sbom_pkg_info = SbomPackageInfo(
+        url,
+        licenses.get_licenses(build_info_dir),
+        go_dep.get_go_dep(url, build_info_dir),
+    )
+    if not sbom_pkg_info.download_url and "private-overlays" not in ebuild:
+        raise SBOMPkgInfoError(f"download url not found")
+    if not sbom_pkg_info.licenses:
+        raise SBOMPkgInfoError(f"license not found")
+    sbom_pkg_info.write_to_build_info(build_info_dir)
+
+
+if __name__ == "__main__":
+    sys.exit(main())
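
The hook reads Portage's build-info directory for the package being installed and writes a colon-delimited sbom-pkg-info file next to it. A sketch of the file's shape, with placeholders rather than real values:

  #!/bin/bash
  # Field layout of the generated build-info/sbom-pkg-info file
  # (field names come from write_to_build_info above; values are placeholders).
  cat <<'EOF'
  download-url:<where the package source was fetched from>
  licenses:<license expression assembled by sbom_info_lib/licenses.py>
  go-dep:<comma-separated go.mod requirements; empty for non-Go packages>
  EOF
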
diff --git a/hooks/install/sbom_info_lib/download_url.py b/hooks/install/sbom_info_lib/download_url.py
new file mode 100644
index 0000000..9df3912
--- /dev/null
+++ b/hooks/install/sbom_info_lib/download_url.py
@@ -0,0 +1,389 @@
+# Copyright 2022 Google LLC
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
+# get_download_url() in this script is used to
+# find the download location for a COS package.
+
+import subprocess
+import re
+import os
+import requests
+
+
+CROS_GCS_MIRRORS = [
+    "gs://chromeos-mirror/gentoo/distfiles/",
+    "gs://chromeos-localmirror/distfiles/",
+]
+# An allow-list for variables parsed in an ebuild file.
+EBUILD_VARS = {
+    "MY_P",
+    "MY_PV",
+    "MY_PN",
+    "PARCH",
+    "SRC_PV",
+    "code_ver",
+    "RE2_VER",
+    "MODULE_VERSION",
+    "GIT_COMMIT",
+    "SRC_URI",
+    "EGIT_REPO_URI",
+    "EGIT_COMMIT",
+    "CROS_WORKON_COMMIT",
+    "CROS_WORKON_PROJECT",
+    "CROS_WORKON_SUBTREE",
+    "HOMEPAGE",
+    "CROS_GO_SOURCE",
+    "GN_X64_SHA1",
+    "LLVM_HASH",
+    "CROS_WORKON_REPO",
+    "GNOME_ORG_MODULE",
+}
+# For packages whose package names are hard to parse or not defined in ebuilds.
+PN_REPLACE_DICT = {
+    "Locale-gettext": lambda x: "gettext",
+    "systemd": lambda x: "systemd-stable" if "." in x else "systemd",
+    "perf": lambda x: "patch",
+}
+SRC_URI_VARS = ["SRC_URI", "EGIT_REPO_URI"]
+COMMIT_VARS = ["GIT_COMMIT", "EGIT_COMMIT", "LLVM_HASH"]
+# REGEX_STRING_VAR finds `var_name=var_value` and `var_name="var_value"` (no new line) in ebuilds.
+REGEX_STRING_VAR = '([^\n]*?)="?([^\n]*?)"?\n'
+# REGEX_ARRAY_VAR finds `var_name=("var_value1" "var_value2" ...)` (allow new lines) in ebuilds.
+REGEX_ARRAY_VAR = "([^\n]*?)=(\(.*?\))"
+# REGEX_SRC_URI finds `SRC_URI="uri1 uri2 ..."` (allow new lines) in ebuilds.
+REGEX_SRC_URI = 'SRC_URI="(.*?)"'
+# REGEX_SRC_URI_PLUS finds `SRC_URI+="uri1 uri2 ..."` (allow new lines) in ebuilds.
+REGEX_SRC_URI_PLUS = 'SRC_URI\+="(.*?)"'
+# REGEX_PKG_REVISION finds package revision like `-r12` in package full name.
+REGEX_PKG_REVISION = "-r[0-9]+$"
+# REGEX_PKG_VERSION finds package version like `-1` or `-1.2.3.4` in package full name.
+REGEX_PKG_VERSION = "-[0-9]+(\.[0-9]*)*"
+# REGEX_FIND_STRING finds string inside double quotes like "string1".
+REGEX_FIND_STRING = '"(.*?)"'
+# REGEX_EBUILD_REPLACE finds ebuild replacement string `(ver_rs 1- some_string)`.
+REGEX_EBUILD_REPLACE = "\$\(ver_rs 1- (.*?)\)"
+GNOME_PN = "GNOME_ORG_MODULE"
+GO_SOURCE = "CROS_GO_SOURCE"
+CROS_REPO = "CROS_WORKON_REPO"
+CROS_COMMIT = "CROS_WORKON_COMMIT"
+CROS_PROJECT = "CROS_WORKON_PROJECT"
+CROS_SUBTREE = "CROS_WORKON_SUBTREE"
+CROS_GIT_HOST_URL = "https://chromium.googlesource.com"
+CROS_GIT_AOSP_URL = "https://android.googlesource.com"
+CROS_HOMEPAGE = "HOMEPAGE"
+GOB_REPO_DICT = {
+    "project-lakitu": "https://cos.googlesource.com/cos/overlays/board-overlays/+/master/project-lakitu/",
+    "chromiumos-overlay": "https://cos.googlesource.com/third_party/overlays/chromiumos-overlay/+/master/",
+    "portage-stable": "https://cos.googlesource.com/third_party/overlays/portage-stable/+/master/",
+    "eclass-overlay": "https://cos.googlesource.com/third_party/overlays/eclass-overlay/+/master/",
+}
+# Packages that use `MODULE_VERSION` as package version.
+KEYWORDS_FOR_MODULE_VERSION = ["dev-perl", "perl-core"]
+PACKAGES_FROM_GOB = {
+    # portage-stable
+    "dev-util/meson-format-array",
+    "sys-devel/autoconf-wrapper",
+    "sys-devel/automake-wrapper",
+    "dev-python/namespace-zope",
+    # project-lakitu
+    "app-admin/cgroup-helper",
+    "app-admin/extensions-manager",
+    "app-admin/kdump-helper",
+    "app-admin/stackdriver",
+    "app-admin/toolbox-config",
+    "app-emulation/cloud-init-config",
+    "chromeos-base/chromeos-auth-config-lakitu",
+    "chromeos-base/chromeos-base",
+    "chromeos-base/chromeos-bsp-lakitu-common",
+    "chromeos-base/chromeos-firewall-init-lakitu",
+    "chromeos-base/chromeos-init-systemd",
+    "chromeos-base/cloud-audit-config",
+    "chromeos-base/cloud-filesystem-init",
+    "chromeos-base/cloud-network-init",
+    "net-misc/chrony-config",
+    "sys-apps/loadpin-trigger",
+    "sys-apps/system-sysdaemons",
+    "sys-libs/lakitu-custom-locales",
+    # chromiumos-overlay
+    "chromeos-base/chromeos-ca-certificates",
+    "chromeos-base/chromeos-sshd-init",
+    "chromeos-base/tty",
+    "chromeos-base/update-policy-embedded",
+    "dev-util/glib-utils",
+    "chromeos-base/openssh-server-init",
+}
+
+
+def is_uri_valid(uri):
+    if not uri.strip().startswith("http"):
+        return False
+    request = requests.get(uri, stream=True)
+    if request.status_code == 200:
+        return True
+    return False
+
+
+def parse_var(s):
+    # avoid downloading packages.
+    parts = s.split("->")
+    if len(parts) > 1:
+        s = parts[0]
+    # do not evaluate commands.
+    if s.startswith("("):
+        s = f"'{s}'"
+    cmd = f"echo {s}"
+    res = subprocess.run(
+        ["bash", "-c", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+    )
+    if res.stderr:
+        return ""
+    return res.stdout.decode("utf-8").rstrip()
+
+
+# Parse an environment variable and return a list.
+def parse_var_from_env(key):
+    val = os.getenv(key)
+    if not val:
+        return []
+    if val.startswith("("):
+        res = []
+        match = re.findall(REGEX_FIND_STRING, val, re.DOTALL)
+        # in some cases, go src version cannot be parsed in array
+        # e.g. chromiumos-overlay/dev-go/protobuf
+        for m in match:
+            res.append(parse_var(m))
+        return res
+    return [val]
+
+
+def find_var_and_set_env(regex, content):
+    env_set = set()
+    match = re.findall(regex, content, re.DOTALL)
+    for m in match:
+        key = m[0].strip()
+        if key not in EBUILD_VARS:
+            continue
+        val = parse_var(m[1]).strip()
+        if val:
+            os.environ[key] = val
+            env_set.add(key)
+    return env_set
+
+
+def parse_vars_in_ebuild(content):
+    env_set = set()
+    # Replace ebuild replacement grammar with bash format.
+    match = re.findall(REGEX_EBUILD_REPLACE, content, re.DOTALL)
+    if match:
+        for m in match:
+            content = content.replace(f"$(ver_rs 1- {m})", f"${{PV//./{m}}}")
+    env_set.update(find_var_and_set_env(REGEX_STRING_VAR, content))
+    env_set.update(find_var_and_set_env(REGEX_ARRAY_VAR, content))
+    return env_set
+
+
+def parse_pkg_name(pf):
+    match = re.search(REGEX_PKG_REVISION, pf)
+    if match:
+        p = pf[: match.start()]
+    else:
+        p = pf
+    match = re.search(REGEX_PKG_VERSION, p)
+    pn = p[: match.start()]
+    p_name = pn
+    pv = p[match.start() + 1 :]
+    if pn in PN_REPLACE_DICT:
+        pn = PN_REPLACE_DICT[pn](pv)
+        p = f"{pn}-{pv}"
+    os.environ["PN"] = pn
+    os.environ["PV"] = pv
+    os.environ["P"] = p
+    # possible package names in CROS GCS mirror buckets.
+    return p_name, {f"{p}.tar.gz", f"{p}.tar.xz", f"{p}.tgz", f"{p}.xz"}
+
+
+def search_pkg_from_gob(repository, category, p_name):
+    pkg = f"{category}/{p_name}"
+    if (
+        pkg in PACKAGES_FROM_GOB
+        or category == "virtual"
+        or repository == "eclass-overlay"
+    ):
+        uri = os.path.join(GOB_REPO_DICT[repository], pkg)
+        if is_uri_valid(uri):
+            return uri
+        return ""
+    return ""
+
+
+def find_cros_uri():
+    res = []
+    cros_repo = parse_var_from_env(CROS_REPO)
+    cros_proj = parse_var_from_env(CROS_PROJECT)
+    cros_subtree = parse_var_from_env(CROS_SUBTREE)
+    cros_commit = parse_var_from_env(CROS_COMMIT)
+    if not cros_repo:
+        cros_repo = [CROS_GIT_HOST_URL] * len(cros_proj)
+    if len(cros_proj) != len(cros_commit):
+        return res
+    for i in range(len(cros_proj)):
+        uri = os.path.join(cros_repo[i], cros_proj[i])
+        if not is_uri_valid(uri):
+            continue
+        if cros_subtree and cros_subtree[i]:
+            subtrees = cros_subtree[i].split(" ")
+            for subtree in subtrees:
+                res.append(f"{uri}@{cros_commit[i]}#{subtree}")
+        else:
+            res.append(f"{uri}@{cros_commit[i]}")
+    return res
+
+
+def get_gcs_name_from_src_uri(regex, content):
+    gcs_names = set()
+    match = re.findall(regex, content, re.DOTALL)
+    if match:
+        for src_uri_group in match:
+            for uri_line in src_uri_group.split("\n"):
+                for uri in uri_line.split(" "):
+                    if uri == "->":
+                        continue
+                    gcs_names.add(os.path.basename(parse_var(uri)))
+    return gcs_names
+
+
+# Parse ebuild and set environment variables.
+# Find possible CROS GCS mirror package names
+# and the CROS download URL.
+def parse_ebuild(ebuild):
+    gcs_names = set()
+    with open(ebuild) as eb:
+        content = eb.read()
+        env_set = parse_vars_in_ebuild(content)
+        cros_uri = find_cros_uri()
+        for keyword in KEYWORDS_FOR_MODULE_VERSION:
+            if keyword in ebuild:
+                gcs_names.add(f'{os.getenv("PN")}-{os.getenv("MODULE_VERSION")}.tar.gz')
+                break
+        gnome_pn = os.getenv(GNOME_PN)
+        if gnome_pn:
+            gcs_names.add(f'{gnome_pn}-{os.getenv("PV")}')
+        gcs_names_src = get_gcs_name_from_src_uri(REGEX_SRC_URI, content)
+        if gcs_names:
+            gcs_names.update(gcs_names_src)
+        else:
+            gcs_names.update(get_gcs_name_from_src_uri(REGEX_SRC_URI_PLUS, content))
+        return env_set, cros_uri, gcs_names
+
+
+def search_mirror_gcs(gcs_names):
+    for name in gcs_names:
+        name = name.replace("?", "%3f")
+        for bucket in CROS_GCS_MIRRORS:
+            link = os.path.join(bucket, name)
+            res = subprocess.run(
+                ["gsutil", "ls", link], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+            )
+            if res.stderr:
+                continue
+            else:
+                return res.stdout.decode("utf-8").rstrip()
+    return ""
+
+
+def search_src_uri():
+    for uri_name in SRC_URI_VARS:
+        uri = os.getenv(uri_name)
+        if uri and is_uri_valid(uri):
+            for commit_name in COMMIT_VARS:
+                commit = os.getenv(commit_name)
+                if commit:
+                    return f"{uri}@{commit}"
+            return uri
+    return ""
+
+
+def search_go_source(category):
+    res = []
+    go_src = parse_var_from_env(GO_SOURCE)
+    for src in go_src:
+        parts = src.split(" ")
+        if len(parts) == 2:
+            version = parts[1]
+            sources = parts[0].split(":")
+            for uri in sources:
+                uri = "https://" + uri
+                if is_uri_valid(uri):
+                    res.append(f"{uri}@{version}")
+                    break
+    return ",".join(res)
+
+
+def search_homepage():
+    homepage = os.getenv(CROS_HOMEPAGE)
+    if "chromium.googlesource.com" in homepage and is_uri_valid(homepage):
+        commit = os.getenv(CROS_COMMIT)
+        if commit:
+            return f"{homepage}@{commit}"
+        return homepage
+    return ""
+
+
+def search_download_location(gcs_names, category, cros_uri):
+    res = search_mirror_gcs(gcs_names)
+    if res:
+        return res
+    res = search_src_uri()
+    if res:
+        return res
+    if cros_uri:
+        return cros_uri
+    res = search_go_source(category)
+    if res:
+        return res
+    res = search_homepage()
+    if res:
+        return res
+    return ""
+
+
+def unset_env(env_set):
+    for var in env_set:
+        os.environ[var] = ""
+
+
+def read_build_info(build_info_dir):
+    with open(os.path.join(build_info_dir, "repository"), "r") as f:
+        repository = f.read().strip()
+    with open(os.path.join(build_info_dir, "CATEGORY"), "r") as f:
+        category = f.read().strip()
+    with open(os.path.join(build_info_dir, "PF"), "r") as f:
+        pf = f.read().strip()
+    return repository, category, pf
+
+
+def get_download_url(build_info_dir, ebuild):
+    repository, category, pf = read_build_info(build_info_dir)
+    if repository == "private-overlays":
+        return ""
+    os.environ["CROS_GIT_HOST_URL"] = CROS_GIT_HOST_URL
+    os.environ["CROS_GIT_AOSP_URL"] = CROS_GIT_AOSP_URL
+    p_name, gcs_names = parse_pkg_name(pf)
+    gob_res = search_pkg_from_gob(repository, category, p_name)
+    if gob_res:
+        return gob_res
+    env_set, cros_uri, gcs_names_ebuild = parse_ebuild(ebuild)
+    gcs_names.update(gcs_names_ebuild)
+    gcs_names.discard("")
+    res = search_download_location(gcs_names, category, cros_uri)
+    unset_env(env_set)
+    return res
diff --git a/hooks/install/sbom_info_lib/go_dep.py b/hooks/install/sbom_info_lib/go_dep.py
new file mode 100644
index 0000000..da05c9c
--- /dev/null
+++ b/hooks/install/sbom_info_lib/go_dep.py
@@ -0,0 +1,70 @@
+# Copyright 2022 Google LLC
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
+# This script is used to find go dependencies
+# of a go package. It reads 'go.mod', 'vendor.mod'
+# or 'vendor.conf' in the source code.
+
+import os
+import re
+import subprocess
+import tarfile
+import requests
+
+# REGEX_GO_MOD_DEP finds
+# `require (
+#   go-pkg1 v1.2.3
+#   go-pkg2 v4.5.6 ...
+# )` in go.mod or other mod file.
+REGEX_GO_MOD_DEP = "require \((.*?)\)"
+GO_MOD_DEP_FILE = ["go.mod", "vendor.mod", "vendor.conf"]
+
+
+def download_src_code(url, build_info_dir):
+    filepath = os.path.join(build_info_dir, os.path.basename(url))
+    if url.startswith("gs://"):
+        subprocess.run(["gsutil", "cp", url, filepath])
+    else:
+        if url.startswith("https://github.com"):
+            url = f'{url.replace("@","/archive/")}.tar.gz'
+        else:
+            url = f'{url.replace("@","/+archive/").replace("#","/")}.tar.gz'
+        response = requests.get(url)
+        open(filepath, "wb").write(response.content)
+    return filepath
+
+
+def get_go_dep(download_url, build_info_dir):
+    res = set()
+    for url in download_url.split(","):
+        if url.endswith(".gn"):
+            continue
+        filepath = download_src_code(url, build_info_dir)
+        try:
+            t = tarfile.open(filepath, "r:gz")
+            for filename in t.getnames():
+                if os.path.basename(filename) not in GO_MOD_DEP_FILE:
+                    continue
+                f = t.extractfile(filename)
+                content = f.read()
+                match = re.findall(REGEX_GO_MOD_DEP, content.decode("utf-8"), re.DOTALL)
+                for req in match:
+                    deps = req.strip().split("\n")
+                    for dep in deps:
+                        # remove comments.
+                        dep = dep.split("//")[0].strip()
+                        if dep:
+                            res.add(dep)
+        except:
+            print(f"{url} is not a .gz file.")
+        os.remove(filepath)
+    return ",".join(res)
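
get_go_dep() downloads the package source and scrapes `require ( ... )` blocks from go.mod, vendor.mod, or vendor.conf. A sketch of the kind of block REGEX_GO_MOD_DEP captures (module names are illustrative):

  #!/bin/bash
  # Example go.mod fragment; each "<module> <version>" line (with // comments
  # stripped) becomes one entry in the go-dep field.
  cat <<'EOF'
  require (
      github.com/spf13/cobra v1.4.0 // indirect
      golang.org/x/sys v0.1.0
  )
  EOF
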
diff --git a/hooks/install/sbom_info_lib/licenses.py b/hooks/install/sbom_info_lib/licenses.py
new file mode 100644
index 0000000..cbf379d
--- /dev/null
+++ b/hooks/install/sbom_info_lib/licenses.py
@@ -0,0 +1,71 @@
+# Copyright 2022 Google LLC
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
+# This script is used to parse licenses of a package.
+
+import re
+import os
+
+# Parse LICENSE in an ebuild.
+def parse_gentoo_license(line):
+    license_set = set()
+    use_or = False
+    res = ""
+    for e in line.strip().split(" "):
+        if e == "||":
+            use_or = True
+        elif e == "(":
+            if res:
+                res += "AND ("
+            else:
+                res += "("
+        elif e == ")":
+            res = f"{res[:-1]}) "
+            use_or = False
+        else:
+            license_set.add(e)
+            if not res or res.endswith("("):
+                res += f"{e} "
+            elif use_or:
+                res += f"OR {e} "
+            else:
+                res += f"AND {e} "
+    return res.strip(), license_set
+
+
+# If a license is in license.yaml but not LICENSE,
+# add it to the result.
+def parse_license_yaml(yaml, res, license_set):
+    lines = yaml.strip().split("\n")
+    idx = lines.index("  - license_names") + 1
+    match = re.findall("\{(.*?)\}", lines[idx], re.DOTALL)
+    if not match:
+        return res
+    found = []
+    for m in match:
+        for part in m.split(","):
+            found.append(part.split(":")[0])
+    for license in found:
+        license = license.strip()
+        if license and not license in license_set:
+            license_set.add(license)
+            res += f" AND {license}"
+    return res
+
+
+def get_licenses(build_info_dir):
+    if not os.path.exists(os.path.join(build_info_dir, "LICENSE")):
+        return ""
+    with open(os.path.join(build_info_dir, "LICENSE"), "r") as f:
+        res, license_set = parse_gentoo_license(f.read())
+        with open(os.path.join(build_info_dir, "license.yaml"), "r") as y:
+            return parse_license_yaml(y.read(), res, license_set)
diff --git a/update_bootloaders.sh b/update_bootloaders.sh
index 8b9f489..371859e 100755
--- a/update_bootloaders.sh
+++ b/update_bootloaders.sh
@@ -80,10 +80,10 @@
     local template_dir="$4"
     local to="$5"
 
-    # Pull out the dm="" values
+    # Pull out the dm-mod.create="" values
     dm_table=
-    if echo "$kernel_cmdline" | grep -q 'dm="'; then
-      dm_table=$(echo "$kernel_cmdline" | sed -s 's/.*dm="\([^"]*\)".*/\1/')
+    if echo "$kernel_cmdline" | grep -q 'dm-mod.create="'; then
+      dm_table=$(echo "$kernel_cmdline" | sed -s 's/.*dm-mod.create="\([^"]*\)".*/\1/')
     fi
 
     # Discover last known partition numbers.
@@ -97,7 +97,7 @@
     # Rewrite grub table
     grub_dm_table_a=${dm_table//${old_root}/${root_a_uuid}}
     grub_dm_table_b=${dm_table//${old_root}/${root_b_uuid}}
-    sed -e "s|DMTABLEA|${grub_dm_table_a}|g" \
+    sudo sed -e "s|DMTABLEA|${grub_dm_table_a}|g" \
         -e "s|DMTABLEB|${grub_dm_table_b}|g" \
         -e "s|/dev/\\\$linuxpartA|${root_a_uuid}|g" \
         -e "s|/dev/\\\$linuxpartB|${root_b_uuid}|g" \
@@ -126,6 +126,40 @@
   }
 fi
 
+if ! type -p update_arm64_bootloaders; then
+  update_arm64_bootloaders() {
+    local old_root="$1"  # e.g., /dev/sd%D%P or %U+1
+    local kernel_cmdline="$2"
+    local esp_fs_dir="$3"
+    local template_dir="$4"
+    local to="$5"
+
+    # Pull out the dm-mod.create="" values
+    dm_table=
+    if echo "$kernel_cmdline" | grep -q 'dm-mod.create="'; then
+      dm_table=$(echo "$kernel_cmdline" | sed -s 's/.*dm-mod.create="\([^"]*\)".*/\1/')
+    fi
+
+    # Discover last known partition numbers.
+    local partition_num_root_a="$(get_layout_partition_number \
+      "${FLAGS_image_type}" ROOT-A)"
+    local partition_num_root_b="$(get_layout_partition_number \
+      "${FLAGS_image_type}" ROOT-B)"
+    root_a_uuid="PARTUUID=$(part_index_to_uuid "$to" ${partition_num_root_a})"
+    root_b_uuid="PARTUUID=$(part_index_to_uuid "$to" ${partition_num_root_b})"
+
+    # Rewrite grub table
+    grub_dm_table_a=${dm_table//${old_root}/${root_a_uuid}}
+    grub_dm_table_b=${dm_table//${old_root}/${root_b_uuid}}
+    sudo sed -e "s|DMTABLEA|${grub_dm_table_a}|g" \
+        -e "s|DMTABLEB|${grub_dm_table_b}|g" \
+        -e "s|/dev/\\\$linuxpartA|${root_a_uuid}|g" \
+        -e "s|/dev/\\\$linuxpartB|${root_b_uuid}|g" \
+        "${template_dir}"/efi/boot/grub.cfg |
+        sudo dd of="${esp_fs_dir}"/efi/boot/grub.cfg status=none
+  }
+fi
+
 ESP_DEV_OURS=
 ESP_DEV=
 if [[ ! -e "${FLAGS_to}" ]]; then
@@ -216,6 +250,26 @@
     # mount again for cleanup to free resource gracefully
     sudo mount -o ro "${ESP_DEV}" "${ESP_FS_DIR}"
   fi
+elif [[ "${FLAGS_arch}" = "arm64" ]]; then
+  set -x
+  # Populate the EFI bootloader configuration
+  sudo mkdir -p "${ESP_FS_DIR}/efi/boot"
+
+  # Extract kernel flags
+  kernel_cfg=
+  old_root='PARTUUID=%U/PARTNROFF=1'
+  if [[ -n "${FLAGS_kernel_cmdline}" ]]; then
+    info "Using supplied kernel_cmdline to update templates."
+    kernel_cfg="${FLAGS_kernel_cmdline}"
+  elif [[ -n "${FLAGS_kernel_partition}" ]]; then
+    info "Extracting the kernel command line from ${FLAGS_kernel_partition}"
+    kernel_cfg=$(dump_kernel_config "${FLAGS_kernel_partition}")
+  fi
+  update_arm64_bootloaders "${old_root}" \
+                         "${kernel_cfg}" \
+                         "${ESP_FS_DIR}" \
+                         "${FLAGS_from}" \
+                         "${FLAGS_to}"
 elif [[ "${FLAGS_arch}" = "arm" || "${FLAGS_arch}" = "mips" ]]; then
   # Copy u-boot script to ESP partition
   if [ -r "${FLAGS_from}/boot-A.scr.uimg" ]; then