Move cos-gpu-installer@019168e30c14c4026de0980f0fe965aa85b9e005 from GitHub

BUG=b/183723779

Change-Id: I2362a60e2b253a824d8a95713e1be4817162c22f
Reviewed-on: https://cos-review.googlesource.com/c/cos/tools/+/16632
Cloud-Build: GCB Service account <228075978874@cloudbuild.gserviceaccount.com>
Reviewed-by: Robert Kolchmeyer <rkolchmeyer@google.com>
Reviewed-by: Ke Wu <mikewu@google.com>
Tested-by: Arnav Kansal <rnv@google.com>
diff --git a/src/cmd/cos_gpu_installer_v1/README.md b/src/cmd/cos_gpu_installer_v1/README.md
new file mode 100644
index 0000000..fcd06fa
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/README.md
@@ -0,0 +1,28 @@
+# GPU Driver Installer containers for Container-Optimized OS from Google
+
+Note: This is not an official Google product.
+
+This repository contains scripts to build Docker containers that can be used to
+download, compile and install GPU drivers on
+[Container-Optimized OS](https://cloud.google.com/container-optimized-os/) images.
+
+## How to use
+
+Example command:
+``` shell
+gcloud compute instances create $USER-cos-gpu-test \
+    --image-family cos-stable \
+    --image-project cos-cloud \
+    --accelerator=type=nvidia-tesla-k80 \
+    --boot-disk-size=25GB \
+    --maintenance-policy=TERMINATE \
+    --metadata-from-file "cos-gpu-installer-env=scripts/gpu-installer-env,user-data=install-test-gpu.cfg,run-installer-script=scripts/run_installer.sh,run-cuda-test-script=scripts/run_cuda_test.sh"
+```
+
+The command above creates a GCE instance based on the cos-stable image. It then
+installs the GPU driver on the instance by running the 'cos-gpu-installer'
+container, which is implemented in this repository.
+
+The GPU driver version and container image version are specified in
+scripts/gpu-installer-env. You can edit the file if you want to install a
+different GPU driver version or use a container image other than the default.
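+
+For example, to pin a specific driver version, you could edit
+scripts/gpu-installer-env before creating the instance (the version shown is
+illustrative):
+``` shell
+# Point the installer at a specific driver version (hypothetical choice).
+sed -i 's/^NVIDIA_DRIVER_VERSION=.*/NVIDIA_DRIVER_VERSION=418.67/' \
+    scripts/gpu-installer-env
+```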
diff --git a/src/cmd/cos_gpu_installer_v1/container_build_request.yaml b/src/cmd/cos_gpu_installer_v1/container_build_request.yaml
new file mode 100644
index 0000000..dc74682
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/container_build_request.yaml
@@ -0,0 +1,29 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# GCP Container Builder build request in YAML format.
+# See https://cloud.google.com/container-builder/docs/ for details.
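+#
+# A build like this could be submitted manually with gcloud from this
+# directory; the project and tag values below are illustrative:
+#   gcloud builds submit --config=container_build_request.yaml \
+#     --substitutions=_PROJECT_ID=my-project,TAG_NAME=v1.0.0 .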
+
+steps:
+- name: 'gcr.io/cloud-builders/docker'
+  args:
+  - 'build'
+  - '-t'
+  - 'gcr.io/${_PROJECT_ID}/cos-gpu-installer:latest'
+  - '-t'
+  - 'gcr.io/${_PROJECT_ID}/cos-gpu-installer:${TAG_NAME}'
+  - 'cos-gpu-installer-docker/'
+images:
+- 'gcr.io/${_PROJECT_ID}/cos-gpu-installer:latest'
+- 'gcr.io/${_PROJECT_ID}/cos-gpu-installer:${TAG_NAME}'
diff --git a/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/Dockerfile b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/Dockerfile
new file mode 100644
index 0000000..aca2170
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/Dockerfile
@@ -0,0 +1,36 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Dockerfile for the COS GPU Installer container.
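+#
+# To build this image locally for testing (the tag is illustrative), run from
+# the cos-gpu-installer-docker/ directory:
+#   docker build -t cos-gpu-installer:dev .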
+
+FROM debian:9
+LABEL maintainer="cos-containers@google.com"
+
+# Install minimal tools needed to build kernel modules.
+RUN apt-get update -qq && \
+    apt-get install -y --no-install-recommends xz-utils python2.7-minimal \
+    libc6-dev libpython2.7-stdlib kmod make curl ca-certificates libssl-dev \
+    gcc libelf-dev keyutils && \
+    rm -rf /var/lib/apt/lists/*
+
+# Create 'python' and 'python2' symlinks to python2.7 for scripts that expect a 'python' binary.
+RUN cd /usr/bin && ln -s python2.7 python && ln -s python2.7 python2
+
+COPY README.container /README
+COPY gpu_installer_url_lib.sh /gpu_installer_url_lib.sh
+COPY driver_signature_lib.sh /driver_signature_lib.sh
+COPY sign_gpu_driver.sh /sign_gpu_driver.sh
+COPY get_metadata_value /get_metadata_value
+COPY entrypoint.sh /entrypoint.sh
+CMD /entrypoint.sh
diff --git a/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/README.container b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/README.container
new file mode 100644
index 0000000..1fee089
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/README.container
@@ -0,0 +1,4 @@
+This image is based on the sources located at
+https://github.com/GoogleCloudPlatform/cos-gpu-installer. For any issues or
+assistance, please file a report at
+https://github.com/GoogleCloudPlatform/cos-gpu-installer.
\ No newline at end of file
diff --git a/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/driver_signature_lib.sh b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/driver_signature_lib.sh
new file mode 100755
index 0000000..acb17b6
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/driver_signature_lib.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+readonly GPU_DRIVER_SIGNATURE="gpu-driver-signature.tar.gz"
+readonly GPU_PRECOMPILED_DRIVER_SIGNATURE="gpu-precompiled-driver-signature.tar.gz"
+readonly GPU_DRIVER_PUBLIC_KEY_PEM="gpu-driver-cert.pem"
+readonly GPU_DRIVER_PUBLIC_KEY_DER="gpu-driver-cert.der"
+readonly GPU_DRIVER_PRIVATE_KEY="dummy-key"
+readonly GPU_DRIVER_SIGNING_DIR="/build/sign-gpu-driver"
+
+download_artifact_from_gcs() {
+  local -r gcs_url_prefix="$1"
+  local -r filename="$2"
+  local -r download_url="${gcs_url_prefix}/${filename}"
+  local -r output_path="${GPU_DRIVER_SIGNING_DIR}/${filename}"
+
+  download_content_from_url "${download_url}" "${output_path}" "${filename}"
+}
+
+download_driver_signature() {
+  local -r gcs_url_prefix="$1"
+
+  mkdir -p "${GPU_DRIVER_SIGNING_DIR}"
+  # Try to download the GPU driver signature. If that fails, return immediately
+  # to reduce latency: in that case the precompiled GPU driver signature cannot
+  # exist either.
+  download_artifact_from_gcs "${gcs_url_prefix}" "${GPU_DRIVER_SIGNATURE}" || return 0
+  # Try to download the precompiled GPU driver signature.
+  download_artifact_from_gcs "${gcs_url_prefix}" "${GPU_PRECOMPILED_DRIVER_SIGNATURE}" || true
+}
+
+has_driver_signature() {
+  [[ -f "${GPU_DRIVER_SIGNING_DIR}/${GPU_DRIVER_SIGNATURE}" ]]
+}
+
+has_precompiled_driver_signature() {
+  [[ -f "${GPU_DRIVER_SIGNING_DIR}/${GPU_PRECOMPILED_DRIVER_SIGNATURE}" ]]
+}
+
+decompress_driver_signature() {
+  if ! has_driver_signature && ! has_precompiled_driver_signature; then
+    return 1
+  fi
+
+  pushd "${GPU_DRIVER_SIGNING_DIR}" || return 1
+  if has_precompiled_driver_signature; then
+    tar xzf "${GPU_PRECOMPILED_DRIVER_SIGNATURE}"
+  elif has_driver_signature; then
+    tar xzf "${GPU_DRIVER_SIGNATURE}"
+  fi
+  popd || return 1
+
+  # Create a dummy private key. We don't need a private key to sign the driver
+  # because we already have the signature.
+  touch "${GPU_DRIVER_SIGNING_DIR}/${GPU_DRIVER_PRIVATE_KEY}"
+}
+
+get_private_key() {
+  echo "${GPU_DRIVER_SIGNING_DIR}/${GPU_DRIVER_PRIVATE_KEY}"
+}
+
+get_public_key_pem() {
+  echo "${GPU_DRIVER_SIGNING_DIR}/${GPU_DRIVER_PUBLIC_KEY_PEM}"
+}
+
+load_public_key() {
+  info "Loading GPU driver public key to system keyring."
+  /bin/keyctl padd asymmetric "gpu_key" \
+    %keyring:.secondary_trusted_keys < \
+    "${GPU_DRIVER_SIGNING_DIR}/${GPU_DRIVER_PUBLIC_KEY_DER}"
+}
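+
+# Illustrative usage sketch for this library (the GCS prefix and module path
+# below are hypothetical; download_content_from_url and info are defined in
+# entrypoint.sh in this image):
+#
+#   download_driver_signature "https://storage.googleapis.com/example-bucket/signatures"
+#   if decompress_driver_signature; then
+#     load_public_key
+#     /sign_gpu_driver.sh sha256 "$(get_private_key)" "$(get_public_key_pem)" \
+#       /path/to/nvidia.ko
+#   fi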
diff --git a/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/entrypoint.sh b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/entrypoint.sh
new file mode 100755
index 0000000..b652c0a
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/entrypoint.sh
@@ -0,0 +1,537 @@
+#!/bin/bash
+#
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -o errexit
+set -o pipefail
+set -u
+
+set -x
+COS_KERNEL_INFO_FILENAME="kernel_info"
+COS_KERNEL_SRC_HEADER="kernel-headers.tgz"
+TOOLCHAIN_URL_FILENAME="toolchain_url"
+TOOLCHAIN_ENV_FILENAME="toolchain_env"
+TOOLCHAIN_PKG_DIR="${TOOLCHAIN_PKG_DIR:-/build/cos-tools}"
+CHROMIUMOS_SDK_GCS="https://storage.googleapis.com/chromiumos-sdk"
+ROOT_OS_RELEASE="${ROOT_OS_RELEASE:-/root/etc/os-release}"
+KERNEL_SRC_HEADER="${KERNEL_SRC_HEADER:-/build/usr/src/linux}"
+NVIDIA_DRIVER_VERSION="${NVIDIA_DRIVER_VERSION:-418.67}"
+NVIDIA_DRIVER_MD5SUM="${NVIDIA_DRIVER_MD5SUM:-}"
+NVIDIA_INSTALL_DIR_HOST="${NVIDIA_INSTALL_DIR_HOST:-/var/lib/nvidia}"
+NVIDIA_INSTALL_DIR_CONTAINER="${NVIDIA_INSTALL_DIR_CONTAINER:-/usr/local/nvidia}"
+ROOT_MOUNT_DIR="${ROOT_MOUNT_DIR:-/root}"
+CACHE_FILE="${NVIDIA_INSTALL_DIR_CONTAINER}/.cache"
+LOCK_FILE="${ROOT_MOUNT_DIR}/tmp/cos_gpu_installer_lock"
+LOCK_FILE_FD=20
+set +x
+
+# TOOLCHAIN_DOWNLOAD_URL, CC and CXX are set by
+# set_compilation_env
+TOOLCHAIN_DOWNLOAD_URL=""
+
+# Compilation environment variables
+CC=""
+CXX=""
+
+# URL prefix to use for data dependencies
+COS_DOWNLOAD_GCS="${COS_DOWNLOAD_GCS:-}"
+
+RETCODE_SUCCESS=0
+RETCODE_ERROR=1
+RETRY_COUNT=${RETRY_COUNT:-5}
+
+# GPU installer filename. Expected to be set by download_nvidia_installer().
+INSTALLER_FILE=""
+
+# Preload driver-independent components. Set in parse_opt().
+PRELOAD="${PRELOAD:-false}"
+
+source gpu_installer_url_lib.sh
+
+_log() {
+  local -r prefix="$1"
+  shift
+  echo "[${prefix}$(date -u "+%Y-%m-%d %H:%M:%S %Z")] ""$*" >&2
+}
+
+info() {
+  _log "INFO    " "$*"
+}
+
+warn() {
+  _log "WARNING " "$*"
+}
+
+error() {
+  _log "ERROR   " "$*"
+}
+
+lock() {
+  info "Checking if this is the only cos-gpu-installer that is running."
+  eval "exec ${LOCK_FILE_FD}>${LOCK_FILE}"
+  if ! flock -ne ${LOCK_FILE_FD}; then
+    error "File ${LOCK_FILE} is locked. Other cos-gpu-installer container might be running."
+    exit ${RETCODE_ERROR}
+  fi
+}
+
+load_etc_os_release() {
+  if [[ ! -f "${ROOT_OS_RELEASE}" ]]; then
+    error "File ${ROOT_OS_RELEASE} not found, /etc/os-release from COS host must be mounted."
+    exit ${RETCODE_ERROR}
+  fi
+  . "${ROOT_OS_RELEASE}"
+  info "Running on COS build id ${BUILD_ID}"
+}
+
+# Set COS_DOWNLOAD_GCS, if unset.
+set_cos_download_gcs() {
+  if [[ -z "${COS_DOWNLOAD_GCS}" ]]; then
+    COS_DOWNLOAD_GCS="https://storage.googleapis.com/cos-tools/${BUILD_ID}"
+  fi
+  info "Data dependencies (e.g. kernel source) will be fetched from ${COS_DOWNLOAD_GCS}"
+}
+
+reboot_machine() {
+  warn "Rebooting"
+  echo b > /proc/sysrq-trigger
+}
+
+is_secure_boot_enabled() {
+  local -r kernel_output="$(dmesg)"
+  echo "${kernel_output}" | grep -q 'Secure boot enabled'
+}
+
+configure_kernel_module_locking() {
+  info "Checking if third party kernel modules can be installed"
+  local -r esp_partition="/dev/sda12"
+  local -r mount_path="/tmp/esp"
+  local -r grub_cfg="efi/boot/grub.cfg"
+  local sed_cmds=()
+
+  mkdir -p "${mount_path}"
+  mount "${esp_partition}" "${mount_path}"
+  pushd "${mount_path}"
+
+  # Disable kernel module signature verification.
+  if grep -q "module.sig_enforce=1" /proc/cmdline; then
+    sed_cmds+=('s/module.sig_enforce=1/module.sig_enforce=0/g')
+  fi
+
+  # Disable loadpin.
+  if grep -q "loadpin.enabled" /proc/cmdline; then
+    if grep -q "loadpin.enabled=1" /proc/cmdline; then
+      sed_cmds+=('s/loadpin.enabled=1/loadpin.enabled=0/g')
+    fi
+  else
+    sed_cmds+=('s/cros_efi/cros_efi loadpin.enabled=0/g')
+  fi
+
+  if [ "${#sed_cmds[@]}" -gt 0 ]; then
+      # Check secure boot before trying to modify the kernel cmdline.
+      if is_secure_boot_enabled; then
+        error "Secure boot is enabled. Can't modify kernel cmdline."
+        exit ${RETCODE_ERROR}
+      fi
+      cp "${grub_cfg}" "${grub_cfg}.orig"
+      for sed_cmd in "${sed_cmds[@]}"; do
+        sed "${sed_cmd}" -i "${grub_cfg}"
+      done
+      # Reboot so that the new kernel cmdline takes effect.
+      trap reboot_machine RETURN
+  fi
+
+  popd
+  sync
+  umount "${mount_path}"
+}
+
+check_cached_version() {
+  info "Checking cached version"
+  if [[ ! -f "${CACHE_FILE}" ]]; then
+    info "Cache file ${CACHE_FILE} not found."
+    return ${RETCODE_ERROR}
+  fi
+
+  # Source the cache file and check if the cached driver matches
+  # currently running image build and driver versions.
+  . "${CACHE_FILE}"
+  if [[ "${BUILD_ID}" == "${CACHE_BUILD_ID}" ]]; then
+    if [[ "${NVIDIA_DRIVER_VERSION}" == \
+          "${CACHE_NVIDIA_DRIVER_VERSION}" ]]; then
+      info "Found existing driver installation for image version ${BUILD_ID} \
+          and driver version ${NVIDIA_DRIVER_VERSION}."
+      return ${RETCODE_SUCCESS}
+    fi
+  fi
+  return ${RETCODE_ERROR}
+}
+
+update_cached_version() {
+  cat >"${CACHE_FILE}"<<__EOF__
+CACHE_BUILD_ID=${BUILD_ID}
+CACHE_NVIDIA_DRIVER_VERSION=${NVIDIA_DRIVER_VERSION}
+__EOF__
+
+  info "Updated cached version as:"
+  cat "${CACHE_FILE}"
+}
+
+update_container_ld_cache() {
+  info "Updating container's ld cache"
+  echo "${NVIDIA_INSTALL_DIR_CONTAINER}/lib64" > /etc/ld.so.conf.d/nvidia.conf
+  ldconfig
+}
+
+download_nvidia_installer() {
+  info "Downloading GPU installer ... "
+  pushd "${NVIDIA_INSTALL_DIR_CONTAINER}"
+  local gpu_installer_download_url
+  gpu_installer_download_url="$(get_gpu_installer_url "${NVIDIA_DRIVER_VERSION}" "${VERSION_ID}" "${BUILD_ID}")"
+  info "Downloading from ${gpu_installer_download_url}"
+  INSTALLER_FILE="$(basename "${gpu_installer_download_url}")"
+  download_content_from_url "${gpu_installer_download_url}" "${INSTALLER_FILE}" "GPU installer"
+  if [ -n "${NVIDIA_DRIVER_MD5SUM}" ]; then
+    echo "${NVIDIA_DRIVER_MD5SUM}" "${INSTALLER_FILE}" | md5sum --check
+  fi
+  popd
+}
+
+is_precompiled_driver() {
+  # Helper function to decide whether the GPU driver is pre-compiled.
+  [[ "${INSTALLER_FILE##*.}" == "cos" ]] || return $?
+}
+
+# Download kernel_info file.
+download_kernel_info_file() {
+  local -r cos_build_gcs_path=${1}
+  local -r kernel_info_file_path="${cos_build_gcs_path}/${COS_KERNEL_INFO_FILENAME}"
+  info "Obtaining kernel_info file from ${kernel_info_file_path}"
+  download_content_from_url "${kernel_info_file_path}" "${COS_KERNEL_INFO_FILENAME}" "kernel_info file"
+}
+
+download_kernel_headers() {
+  if [[ -z "$(ls -A "${KERNEL_SRC_HEADER}")" ]]; then
+    info "Downloading kernel headers"
+    mkdir -p "${KERNEL_SRC_HEADER}"
+    pushd "${KERNEL_SRC_HEADER}"
+    if ! download_content_from_url "${COS_DOWNLOAD_GCS}/${COS_KERNEL_SRC_HEADER}" "${COS_KERNEL_SRC_HEADER}" "kernel headers"; then
+      popd
+      return ${RETCODE_ERROR}
+    fi
+    tar xf "${COS_KERNEL_SRC_HEADER}"
+    rm "${COS_KERNEL_SRC_HEADER}"
+    cp -r "usr/src/linux-headers-$(uname -r)"/* ./
+    rm -r usr
+    popd
+  fi
+}
+
+# Gets the default service account credentials of the VM that cos-gpu-installer
+# runs in. These credentials are needed to access GCS buckets.
+get_default_vm_credentials() {
+  local -r creds="$(/get_metadata_value service-accounts/default/token)"
+  local -r token=$(echo "${creds}" | python -c \
+    'import sys; import json; print(json.loads(sys.stdin.read())["access_token"])')
+  echo "${token}"
+}
+
+# Download content from a given URL to a specific location.
+#
+# Args:
+#   download_url: The URL used to download the archive/file.
+#   output_name: Output name of the downloaded archive/file.
+#   info_str: Describes the archive/file that is downloaded.
+# Returns:
+#   0 if successful; otherwise 1.
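+#
+# A typical call looks like the following (this mirrors the call in
+# download_kernel_headers below):
+#   download_content_from_url \
+#     "${COS_DOWNLOAD_GCS}/kernel-headers.tgz" kernel-headers.tgz "kernel headers"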
+download_content_from_url() {
+  local -r download_url=$1
+  local -r output_name=$2
+  local -r info_str=$3
+  local -r auth_header="Authorization: Bearer $(get_default_vm_credentials)"
+
+  info "Downloading ${info_str} from ${download_url}"
+
+  local args=(
+    -sfS
+    --http1.1
+    "${download_url}"
+    -o "${output_name}"
+  )
+  if [[ "${download_url}" == "https://storage.googleapis.com"* ]]; then
+    args+=(-H "${auth_header}")
+  fi
+
+  local attempts=0
+  until time curl "${args[@]}"; do
+    attempts=$(( attempts + 1 ))
+    if (( "${attempts}" >= "${RETRY_COUNT}" )); then
+      error "Could not download ${info_str} from ${download_url}, giving up."
+      return ${RETCODE_ERROR}
+    fi
+    warn "Error fetching ${info_str} from ${download_url}, retrying"
+    sleep 1
+  done
+  return ${RETCODE_SUCCESS}
+}
+
+# Determine the toolchain download URL. Prefer the toolchain path recorded on
+# the host (under the ChromiumOS SDK bucket); otherwise read the URL from the
+# COS GCS bucket.
+get_cross_toolchain_pkg() {
+  # First, check if the toolchain path is available locally.
+  local -r tc_path_file="${ROOT_MOUNT_DIR}/etc/toolchain-path"
+  if [[ -f "${tc_path_file}" ]]; then
+    info "Found toolchain path file locally"
+    local -r tc_path="$(cat "${tc_path_file}")"
+    local -r download_url="${CHROMIUMOS_SDK_GCS}/${tc_path}"
+  else
+    # Next, check if the toolchain path is available in GCS.
+    local -r tc_path_url="${COS_DOWNLOAD_GCS}/${TOOLCHAIN_URL_FILENAME}"
+    info "Obtaining toolchain download URL from ${tc_path_url}"
+    local -r download_url="$(curl --http1.1 -sfS "${tc_path_url}")"
+  fi
+  echo "${download_url}"
+}
+
+# Download, extract and install the toolchain package.
+install_cross_toolchain_pkg() {
+  info "$TOOLCHAIN_PKG_DIR: $(ls -A "${TOOLCHAIN_PKG_DIR}")"
+  if [[ -n "$(ls -A "${TOOLCHAIN_PKG_DIR}")" ]]; then
+    info "Found existing toolchain package. Skipping download and installation"
+    if mountpoint -q "${TOOLCHAIN_PKG_DIR}"; then
+      info "${TOOLCHAIN_PKG_DIR} is a mountpoint; remounting as exec"
+      mount -o remount,exec "${TOOLCHAIN_PKG_DIR}"
+    fi
+  else
+    mkdir -p "${TOOLCHAIN_PKG_DIR}"
+    pushd "${TOOLCHAIN_PKG_DIR}"
+
+    info "Downloading toolchain from ${TOOLCHAIN_DOWNLOAD_URL}"
+
+    # Download toolchain from download_url to pkg_name
+    local -r pkg_name="$(basename "${TOOLCHAIN_DOWNLOAD_URL}")"
+    if ! download_content_from_url "${TOOLCHAIN_DOWNLOAD_URL}" "${pkg_name}" "toolchain archive"; then
+      # Failed to download the toolchain
+      return ${RETCODE_ERROR}
+    fi
+
+    # Don't unpack Rust toolchain elements because they are not needed and they
+    # use a lot of disk space.
+    tar xf "${pkg_name}" \
+      --exclude='./usr/lib64/rustlib*' \
+      --exclude='./lib/librustc*' \
+      --exclude='./usr/lib64/librustc*'
+    rm "${pkg_name}"
+    popd
+  fi
+
+  info "Creating the ${TOOLCHAIN_PKG_DIR}/bin/ld symlink. \
+The nvidia installer expects an 'ld' executable to be in the PATH. \
+The nvidia installer does not respect the LD environment variable. \
+So we create a symlink to make sure the correct linker is used by \
+the nvidia installer."
+  ln -sf x86_64-cros-linux-gnu-ld "${TOOLCHAIN_PKG_DIR}/bin/ld"
+
+  info "Configuring environment variables for cross-compilation"
+  export PATH="${TOOLCHAIN_PKG_DIR}/bin:${PATH}"
+  export SYSROOT="${TOOLCHAIN_PKG_DIR}/usr/x86_64-cros-linux-gnu"
+}
+
+# Download toolchain_env file.
+download_toolchain_env() {
+  # Get toolchain_env path from COS GCS bucket
+  local -r cos_build_gcs_path=${1}
+  local -r tc_info_file_path="${cos_build_gcs_path}/${TOOLCHAIN_ENV_FILENAME}"
+  info "Obtaining toolchain_env file from ${tc_info_file_path}"
+  download_content_from_url "${tc_info_file_path}" "${TOOLCHAIN_ENV_FILENAME}" "toolchain_env file"
+}
+
+# Set up the compilation environment for compiling GPU drivers, using the
+# same toolchain that was used for kernel compilation.
+set_compilation_env() {
+  info "Setting up compilation environment"
+  # Download toolchain_env if present
+  if ! download_toolchain_env "${COS_DOWNLOAD_GCS}"; then
+    # Required to support COS builds that don't have a toolchain_env file.
+    TOOLCHAIN_DOWNLOAD_URL=$(get_cross_toolchain_pkg)
+    CC="x86_64-cros-linux-gnu-gcc"
+    CXX="x86_64-cros-linux-gnu-g++"
+  else
+    # The toolchain_env file was downloaded successfully. Sourcing it sets the
+    # 'CC' and 'CXX' environment variables based on the toolchain used for
+    # kernel compilation.
+    source "${TOOLCHAIN_ENV_FILENAME}"
+    # Obtain the toolchain download URL.
+    TOOLCHAIN_DOWNLOAD_URL=$(get_cross_toolchain_pkg)
+  fi
+
+  export CC
+  export CXX
+}
+
+configure_nvidia_installation_dirs() {
+  info "Configuring installation directories"
+  mkdir -p "${NVIDIA_INSTALL_DIR_CONTAINER}"
+  pushd "${NVIDIA_INSTALL_DIR_CONTAINER}"
+
+  # nvidia-installer does not provide an option to configure the
+  # installation path of `nvidia-modprobe` utility and always installs it
+  # under /usr/bin. The following workaround ensures that
+  # `nvidia-modprobe` is accessible outside the installer container
+  # filesystem.
+  mkdir -p bin bin-workdir
+  mount -t overlay -o lowerdir=/usr/bin,upperdir=bin,workdir=bin-workdir none /usr/bin
+
+  # nvidia-installer does not provide an option to configure the
+  # installation path of libraries such as libnvidia-ml.so. The following
+  # workaround ensures that the libs are accessible from outside the
+  # installer container filesystem.
+  mkdir -p lib64 lib64-workdir
+  mkdir -p /usr/lib/x86_64-linux-gnu
+  mount -t overlay -o lowerdir=/usr/lib/x86_64-linux-gnu,upperdir=lib64,workdir=lib64-workdir none /usr/lib/x86_64-linux-gnu
+
+  # nvidia-installer does not provide an option to configure the
+  # installation path of driver kernel modules such as nvidia.ko. The following
+  # workaround ensures that the modules are accessible from outside the
+  # installer container filesystem.
+  mkdir -p drivers drivers-workdir
+  mkdir -p /lib/modules/"$(uname -r)"/video
+  mount -t overlay -o lowerdir=/lib/modules/"$(uname -r)"/video,upperdir=drivers,workdir=drivers-workdir none /lib/modules/"$(uname -r)"/video
+
+  # Populate ld.so.conf to avoid warning messages in nvidia-installer logs.
+  update_container_ld_cache
+
+  # Install an exit handler to cleanup the overlayfs mount points.
+  trap "{ umount /lib/modules/\"$(uname -r)\"/video; umount /usr/lib/x86_64-linux-gnu ; umount /usr/bin; }" EXIT
+  popd
+}
+
+run_nvidia_installer() {
+  info "Running Nvidia installer"
+  pushd "${NVIDIA_INSTALL_DIR_CONTAINER}"
+  local installer_args=(
+    "--utility-prefix=${NVIDIA_INSTALL_DIR_CONTAINER}"
+    "--opengl-prefix=${NVIDIA_INSTALL_DIR_CONTAINER}"
+    "--no-install-compat32-libs"
+    "--log-file-name=${NVIDIA_INSTALL_DIR_CONTAINER}/nvidia-installer.log"
+    "--silent"
+    "--accept-license"
+  )
+  if ! is_precompiled_driver; then
+    installer_args+=("--kernel-source-path=${KERNEL_SRC_HEADER}")
+  fi
+
+  local -r dir_to_extract="/tmp/extract"
+  # Extract files to a fixed path first to make sure the md5sums of the
+  # generated GPU drivers are consistent.
+  sh "${INSTALLER_FILE}" -x --target "${dir_to_extract}"
+  "${dir_to_extract}/nvidia-installer" "${installer_args[@]}"
+
+  popd
+}
+
+configure_cached_installation() {
+  info "Configuring cached driver installation"
+  update_container_ld_cache
+  if ! lsmod | grep -q -w 'nvidia'; then
+    insmod "${NVIDIA_INSTALL_DIR_CONTAINER}/drivers/nvidia.ko"
+  fi
+  if ! lsmod | grep -q -w 'nvidia_uvm'; then
+    insmod "${NVIDIA_INSTALL_DIR_CONTAINER}/drivers/nvidia-uvm.ko"
+  fi
+  if ! lsmod | grep -q -w 'nvidia_drm'; then
+    insmod "${NVIDIA_INSTALL_DIR_CONTAINER}/drivers/nvidia-drm.ko"
+  fi
+}
+
+verify_nvidia_installation() {
+  info "Verifying Nvidia installation"
+  export PATH="${NVIDIA_INSTALL_DIR_CONTAINER}/bin:${PATH}"
+  nvidia-smi
+  # Create unified memory device file.
+  nvidia-modprobe -c0 -u
+
+  # TODO: Add support for enabling persistence mode.
+}
+
+update_host_ld_cache() {
+  info "Updating host's ld cache"
+  echo "${NVIDIA_INSTALL_DIR_HOST}/lib64" >> "${ROOT_MOUNT_DIR}/etc/ld.so.conf"
+  ldconfig -r "${ROOT_MOUNT_DIR}"
+}
+
+usage() {
+  echo "usage: $0 [-p]"
+  echo "Default behavior installs all components needed for the Nvidia driver."
+  echo "  -p: Install cross toolchain package and kernel source only."
+}
+
+parse_opt() {
+  while getopts ":ph" opt; do
+  case ${opt} in
+    p)
+      PRELOAD="true"
+      ;;
+    h)
+      usage
+      exit 0
+      ;;
+    \?)
+      echo "Invalid option: -$OPTARG" >&2
+      usage
+      exit 1
+      ;;
+  esac
+  done
+}
+
+main() {
+  parse_opt "$@"
+  info "PRELOAD: ${PRELOAD}"
+  load_etc_os_release
+  set_cos_download_gcs
+  if [[ "$PRELOAD" == "true" ]]; then
+    set_compilation_env
+    install_cross_toolchain_pkg
+    download_kernel_headers
+    info "Finished installing the cross toolchain package and kernel source."
+  else
+    lock
+    configure_kernel_module_locking
+    if check_cached_version; then
+      configure_cached_installation
+      verify_nvidia_installation
+      info "Found cached version, NOT building the drivers."
+    else
+      info "Did not find cached version, building the drivers..."
+      download_nvidia_installer
+      if ! is_precompiled_driver; then
+        info "Did not find pre-compiled driver, need to download kernel sources."
+        download_kernel_headers
+      fi
+      set_compilation_env
+      install_cross_toolchain_pkg
+      configure_nvidia_installation_dirs
+      run_nvidia_installer
+      update_cached_version
+      verify_nvidia_installation
+      info "Finished installing the drivers."
+    fi
+    update_host_ld_cache
+  fi
+}
+
+main "$@"
diff --git a/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/get_metadata_value b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/get_metadata_value
new file mode 100755
index 0000000..1fb2ba6
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/get_metadata_value
@@ -0,0 +1,74 @@
+#! /bin/bash
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Get a metadata value from the metadata server.
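+#
+# Example invocations (both metadata paths are used elsewhere in this change):
+#   ./get_metadata_value "service-accounts/default/token"
+#   ./get_metadata_value "attributes/cos-gpu-installer-env"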
+declare -r VARNAME=$1
+declare -r MDS_PREFIX=http://metadata.google.internal/computeMetadata/v1
+declare -r MDS_TRIES=${MDS_TRIES:-100}
+
+function print_metadata_value() {
+  local -r tmpfile=$(mktemp)
+  http_code=$(curl -f "${1}" -H "Metadata-Flavor: Google" -w "%{http_code}" \
+    -s -o ${tmpfile} 2>/dev/null)
+  local -r return_code=$?
+  # If the command completed successfully, print the metadata value to stdout.
+  if [[ ${return_code} == 0 && ${http_code} == 200 ]]; then
+    cat ${tmpfile}
+  fi
+  rm -f ${tmpfile}
+  return ${return_code}
+}
+
+function print_metadata_value_if_exists() {
+  local return_code=1
+  local -r url=$1
+  print_metadata_value ${url}
+  return_code=$?
+  return ${return_code}
+}
+
+function get_metadata_value() {
+  local -r varname=$1
+  # Print the instance metadata value.
+  print_metadata_value_if_exists ${MDS_PREFIX}/instance/${varname}
+  return_code=$?
+  # If the instance doesn't have the value, try the project.
+  if [[ ${return_code} != 0 ]]; then
+    print_metadata_value_if_exists ${MDS_PREFIX}/project/${varname}
+    return_code=$?
+  fi
+  return ${return_code}
+}
+
+function get_metadata_value_with_retries() {
+  local return_code=1  # General error code.
+  for ((count=0; count <= ${MDS_TRIES}; count++)); do
+    get_metadata_value $VARNAME
+    return_code=$?
+    case $return_code in
+      # No error.  We're done.
+      0) exit ${return_code};;
+      # Failed to resolve host or connect to host.  Retry.
+      6|7) sleep 0.3; continue;;
+      # A genuine error.  Exit.
+      *) exit ${return_code};;
+    esac
+  done
+  # Exit with the last return code we got.
+  exit ${return_code}
+}
+
+get_metadata_value_with_retries
diff --git a/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/gpu_installer_url_lib.sh b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/gpu_installer_url_lib.sh
new file mode 100755
index 0000000..68fbf14
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/gpu_installer_url_lib.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+GPU_INSTALLER_DOWNLOAD_URL="${GPU_INSTALLER_DOWNLOAD_URL:-}"
+
+get_major_version() {
+  echo "$1" | cut -d "." -f 1
+}
+
+get_minor_version() {
+  echo "$1" | cut -d "." -f 2
+}
+
+get_download_location() {
+  # projects/000000000000/zones/us-west1-a -> us
+  local -r instance_location="$(curl --http1.1 -sfS "http://metadata.google.internal/computeMetadata/v1/instance/zone" -H "Metadata-Flavor: Google" | cut -d '/' -f4 | cut -d '-' -f1)"
+  declare -A location_mapping
+  location_mapping=( ["us"]="us" ["asia"]="asia" ["europe"]="eu" )
+  # Use us as default download location.
+  echo "${location_mapping[${instance_location}]:-us}"
+}
+
+precompiled_installer_download_url() {
+  local -r driver_version="$1"
+  local -r milestone="$2"
+  local -r build_id="${3//\./-}"  # 11895.86.0 -> 11895-86-0
+  local -r major_version="$(get_major_version "${driver_version}")"
+  local -r minor_version="$(get_minor_version "${driver_version}")"
+  local -r download_location="$(get_download_location)"
+
+  echo "https://storage.googleapis.com/nvidia-drivers-${download_location}-public/nvidia-cos-project/${milestone}/tesla/${major_version}_00/${driver_version}/NVIDIA-Linux-x86_64-${driver_version}_${milestone}-${build_id}.cos"
+}
+
+default_installer_download_url() {
+  local -r driver_version="$1"
+  local -r major_version="$(get_major_version "${driver_version}")"
+  local -r minor_version="$(get_minor_version "${driver_version}")"
+  local -r download_location="$(get_download_location)"
+
+  if (( "${major_version}" < 390 )); then
+    # Versions prior to 390 are downloaded from the upstream location.
+    echo "https://us.download.nvidia.com/tesla/${driver_version}/NVIDIA-Linux-x86_64-${driver_version}.run"
+  elif (( "${major_version}" == 390 )) && (( "${minor_version}" == 46 )); then
+    # 390.46 is the only version residing in the TESLA/ dir.
+    echo "https://storage.googleapis.com/nvidia-drivers-${download_location}-public/TESLA/NVIDIA-Linux-x86_64-${driver_version}.run"
+  elif (( "${major_version}" == 396 )) && (( "${minor_version}" == 26 )); then
+    # Different naming format for 396.26, including the -diagnostic keyword.
+    echo "https://storage.googleapis.com/nvidia-drivers-${download_location}-public/tesla/${driver_version}/NVIDIA-Linux-x86_64-${driver_version}-diagnostic.run"
+  else
+    # All other versions available in GCS conform to this naming convention.
+    echo "https://storage.googleapis.com/nvidia-drivers-${download_location}-public/tesla/${driver_version}/NVIDIA-Linux-x86_64-${driver_version}.run"
+  fi
+}
+
+get_gpu_installer_url() {
+  if [[ -z "${GPU_INSTALLER_DOWNLOAD_URL}" ]]; then
+    # First try to find the precompiled GPU installer.
+    local -r url="$(precompiled_installer_download_url "$@")"
+    if curl --http1.1 -s -I "${url}" 2>&1 | grep -qE 'HTTP/[0-9.]+ 200'; then
+      GPU_INSTALLER_DOWNLOAD_URL="${url}"
+    else
+      # Fall back to the default GPU installer.
+      GPU_INSTALLER_DOWNLOAD_URL="$(default_installer_download_url "$@")"
+    fi
+  fi
+  echo "${GPU_INSTALLER_DOWNLOAD_URL}"
+}
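+
+# Illustrative usage (the driver version is the installer default; the
+# milestone and build ID are hypothetical):
+#   get_gpu_installer_url "418.67" "73" "11647.182.0"
+# This prints either the precompiled-driver URL, if it exists, or the generic
+# .run installer URL.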
diff --git a/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/sign_gpu_driver.sh b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/sign_gpu_driver.sh
new file mode 100755
index 0000000..0036d18
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/cos-gpu-installer-docker/sign_gpu_driver.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
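+# Usage sketch (the module path is illustrative; the signature and sign-file
+# binary are expected next to the public key, as prepared by
+# driver_signature_lib.sh):
+#   ./sign_gpu_driver.sh sha256 /build/sign-gpu-driver/dummy-key \
+#     /build/sign-gpu-driver/gpu-driver-cert.pem /path/to/nvidia.ko
+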
+assert_file_exists() {
+  if [[ ! -f ${1} ]]; then
+    exit 1
+  fi
+}
+
+sign_gpu_driver() {
+  local -r hash_algo="$1"
+  # The private key is a dummy key. It is not needed in this script because we
+  # already have the signature.
+  #local -r priv_key="$2"
+  local -r pub_key="$3"
+  local -r module="$4"
+  # sign-file is used to attach the driver signature to the GPU driver to
+  # generate a signed driver. It is compiled from scripts/sign-file.c in the
+  # Linux kernel source. The COS team provides it along with the GPU driver
+  # signature to make sure that sign-file matches the kernel of the COS
+  # version.
+  local -r sign_file="$(dirname "${pub_key}")"/sign-file
+  local -r signature="$(dirname "${pub_key}")/$(basename "${module}")".sig
+
+  assert_file_exists "${pub_key}"
+  assert_file_exists "${module}"
+  assert_file_exists "${sign_file}"
+  assert_file_exists "${signature}"
+
+  chmod +x "${sign_file}"
+
+  "${sign_file}" -s "${signature}" "${hash_algo}" "${pub_key}" "${module}"
+}
+
+sign_gpu_driver "$@"
diff --git a/src/cmd/cos_gpu_installer_v1/install-test-gpu.cfg b/src/cmd/cos_gpu_installer_v1/install-test-gpu.cfg
new file mode 100644
index 0000000..4d2f9a8
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/install-test-gpu.cfg
@@ -0,0 +1,62 @@
+#cloud-config
+#
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /etc/systemd/system/cos-gpu-installer.service
+    permissions: 0755
+    owner: root
+    content: |
+      [Unit]
+      Description=Run the GPU driver installer container
+      Requires=network-online.target gcr-online.target
+      After=network-online.target gcr-online.target
+
+      [Service]
+      User=root
+      Type=oneshot
+      RemainAfterExit=true
+      # The default stateful path to store user provided installer script and
+      # provided environment variables.
+      Environment=STATEFUL_PATH=/var/lib/nvidia
+      ExecStartPre=/bin/mkdir -p ${STATEFUL_PATH}
+      ExecStartPre=/bin/bash -c "/usr/share/google/get_metadata_value attributes/run-installer-script > /tmp/run_installer.sh && cp -f /tmp/run_installer.sh ${STATEFUL_PATH}/run_installer.sh || true"
+      ExecStart=/bin/bash ${STATEFUL_PATH}/run_installer.sh
+      StandardOutput=journal+console
+      StandardError=journal+console
+
+  - path: /etc/systemd/system/cuda-vector-add.service
+    permissions: 0755
+    owner: root
+    content: |
+      [Unit]
+      Description=Run a CUDA Vector Addition Workload
+      Requires=cos-gpu-installer.service
+      After=cos-gpu-installer.service
+
+      [Service]
+      User=root
+      Type=oneshot
+      RemainAfterExit=true
+      ExecStartPre=/bin/bash -c "/usr/share/google/get_metadata_value attributes/run-cuda-test-script > /tmp/run_cuda_test.sh"
+      ExecStart=/bin/bash /tmp/run_cuda_test.sh
+      StandardOutput=journal+console
+      StandardError=journal+console
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl enable cos-gpu-installer.service
+  - systemctl enable cuda-vector-add.service
+  - systemctl start cos-gpu-installer.service
+  - systemctl start cuda-vector-add.service
diff --git a/src/cmd/cos_gpu_installer_v1/scripts/gpu-installer-env b/src/cmd/cos_gpu_installer_v1/scripts/gpu-installer-env
new file mode 100644
index 0000000..3e025d0
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/scripts/gpu-installer-env
@@ -0,0 +1,10 @@
+# Change or uncomment the following variables if you need to install
+# a different GPU driver version, install to a different location, etc.
+
+NVIDIA_DRIVER_VERSION=440.64.00
+# NVIDIA_DRIVER_MD5SUM=662865d9a7b5ef1ac3402e098a5fb91f
+# COS_NVIDIA_INSTALLER_CONTAINER=gcr.io/cos-cloud/cos-gpu-installer:latest
+# NVIDIA_INSTALL_DIR_HOST=/var/lib/nvidia
+# NVIDIA_INSTALL_DIR_CONTAINER=/usr/local/nvidia
+# ROOT_MOUNT_DIR=/root
+# CUDA_TEST_CONTAINER=gcr.io/google_containers/cuda-vector-add:v0.1
diff --git a/src/cmd/cos_gpu_installer_v1/scripts/run_cuda_test.sh b/src/cmd/cos_gpu_installer_v1/scripts/run_cuda_test.sh
new file mode 100644
index 0000000..2c6a097
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/scripts/run_cuda_test.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+
+GPU_INSTALLER_ENV_KEY="cos-gpu-installer-env"
+GPU_INSTALLER_ENV_PATH="/etc/gpu-installer-env"
+# The following environment variables may be changed by cos-gpu-installer-env.
+NVIDIA_INSTALL_DIR_HOST="/var/lib/nvidia"
+NVIDIA_INSTALL_DIR_CONTAINER="/usr/local/nvidia"
+CUDA_TEST_CONTAINER="gcr.io/google_containers/cuda-vector-add:v0.1"
+
+setup() {
+  if [ ! -f "${GPU_INSTALLER_ENV_PATH}" ]; then
+    /usr/share/google/get_metadata_value "attributes/${GPU_INSTALLER_ENV_KEY}" \
+      > "${GPU_INSTALLER_ENV_PATH}" || true
+  fi
+  source "${GPU_INSTALLER_ENV_PATH}"
+}
+
+main() {
+  setup
+  docker run \
+    --volume "${NVIDIA_INSTALL_DIR_HOST}"/lib64:"${NVIDIA_INSTALL_DIR_CONTAINER}"/lib64 \
+    --device /dev/nvidia0:/dev/nvidia0 \
+    --device /dev/nvidia-uvm:/dev/nvidia-uvm \
+    --device /dev/nvidiactl:/dev/nvidiactl \
+    "${CUDA_TEST_CONTAINER}"
+}
+
+main
diff --git a/src/cmd/cos_gpu_installer_v1/scripts/run_installer.sh b/src/cmd/cos_gpu_installer_v1/scripts/run_installer.sh
new file mode 100644
index 0000000..69f5ba8
--- /dev/null
+++ b/src/cmd/cos_gpu_installer_v1/scripts/run_installer.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -o errexit
+set -o nounset
+
+_GPU_INSTALLER_ENV_KEY="cos-gpu-installer-env"
+# The environment variable file should be stored in the same directory as
+# this script.
+_GPU_INSTALLER_ENV_PATH="$(dirname "$0")/gpu-installer-env"
+
+# The following environment variables may be changed by cos-gpu-installer-env.
+COS_NVIDIA_INSTALLER_CONTAINER="gcr.io/cos-cloud/cos-gpu-installer:latest"
+NVIDIA_INSTALL_DIR_HOST="/var/lib/nvidia"
+NVIDIA_INSTALL_DIR_CONTAINER="/usr/local/nvidia"
+ROOT_MOUNT_DIR="/root"
+
+setup() {
+  # Always use environment variable from metadata if provided.
+  if /usr/share/google/get_metadata_value "attributes/${_GPU_INSTALLER_ENV_KEY}" \
+    > /tmp/gpu-installer-env; then
+    cp -f /tmp/gpu-installer-env "${_GPU_INSTALLER_ENV_PATH}"
+  fi
+  source "${_GPU_INSTALLER_ENV_PATH}"
+
+  mkdir -p "${NVIDIA_INSTALL_DIR_HOST}"
+  # Make NVIDIA_INSTALL_DIR_HOST executable by bind mounting it.
+  mount --bind "${NVIDIA_INSTALL_DIR_HOST}" "${NVIDIA_INSTALL_DIR_HOST}"
+  mount -o remount,exec "${NVIDIA_INSTALL_DIR_HOST}"
+}
+
+main() {
+  setup
+  docker run \
+    --privileged \
+    --net=host \
+    --pid=host \
+    --volume "${NVIDIA_INSTALL_DIR_HOST}":"${NVIDIA_INSTALL_DIR_CONTAINER}" \
+    --volume /dev:/dev \
+    --volume "/":"${ROOT_MOUNT_DIR}" \
+    --env-file "${_GPU_INSTALLER_ENV_PATH}" \
+    "${COS_NVIDIA_INSTALLER_CONTAINER}"
+  # Verify installation.
+  ${NVIDIA_INSTALL_DIR_HOST}/bin/nvidia-smi
+}
+
+main