Import cos-customizer@c1b59e6c88c59e0634ede3c1fcad57c17f23fd9b

The following files were moved from the cos-customizer root directory to the src/cmd/cos-customizer directory:
1. CONTRIBUTING.md
2. LICENSE
3. README.md
4. cloudbuild.yaml

The following files had merge conflicts:
1. go.mod (go.sum)
2. src/pkg/utils/utils.go

BUG=b/183723779
TEST=go test (utils pkg) and ./run_tests.sh

Change-Id: I29ac81a05f6d397408d0828e25fa6e095a6b663d
Reviewed-on: https://cos-review.googlesource.com/c/cos/tools/+/16473
Cloud-Build: GCB Service account <228075978874@cloudbuild.gserviceaccount.com>
Reviewed-by: Robert Kolchmeyer <rkolchmeyer@google.com>
Tested-by: Arnav Kansal <rnv@google.com>
diff --git a/BUILD.bazel b/BUILD.bazel
new file mode 100644
index 0000000..b0eaff1
--- /dev/null
+++ b/BUILD.bazel
@@ -0,0 +1,102 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@bazel_gazelle//:def.bzl", "gazelle")
+load("@io_bazel_rules_docker//go:image.bzl", "go_image")
+load("@io_bazel_rules_docker//container:container.bzl", "container_image")
+load("@package_bundle//file:packages.bzl", "packages")
+load("@rules_pkg//:pkg.bzl", "pkg_deb", "pkg_tar")
+
+# gazelle:prefix github.com/GoogleCloudPlatform/cos-customizer
+gazelle(name = "gazelle")
+
+exports_files(glob(["src/data/**"]))
+
+genrule(
+    name = "workspace_dir",
+    outs = ["workspace"],
+    cmd = "mkdir $@",
+)
+
+genrule(
+    name = "tmp_dir",
+    outs = ["tmp"],
+    cmd = "mkdir $@",
+)
+
+container_image(
+    name = "veritysetup",
+    debs = [
+        packages["coreutils"],
+        packages["tar"],
+        packages["libacl1"],
+        packages["libattr1"],
+        packages["libc6"],
+        packages["libselinux1"],
+        packages["libpcre3"],
+        packages["cryptsetup-bin"],
+        packages["libcryptsetup4"],
+        packages["libpopt0"],
+        packages["libuuid1"],
+        packages["libdevmapper1.02.1"],
+        packages["libgcrypt20"],
+        packages["libargon2-0"],
+        packages["libjson-c3"],
+        packages["libudev1"],
+        packages["libpthread-stubs0-dev"],
+        packages["libm17n-0"],
+        packages["libgpg-error0"],
+    ],
+    repository = "veritysetup",
+    visibility = ["//visibility:public"],
+)
+
+pkg_tar(
+    name = "data_tar",
+    srcs = glob(["src/data/**"]),
+    strip_prefix = "src/",
+)
+
+container_image(
+    name = "cos_customizer_base",
+    base = "@daisy//image",
+    data_path = ".",
+    debs = [
+        packages["coreutils"],
+        packages["tar"],
+        packages["libacl1"],
+        packages["libattr1"],
+        packages["libc6"],
+        packages["libselinux1"],
+        packages["libpcre3"],
+        packages["mtools"],
+    ],
+    files = [
+        ":tmp_dir",
+        ":workspace_dir",
+    ],
+    tars = [
+        ":data_tar",
+    ],
+)
+
+go_image(
+    name = "cos_customizer",
+    base = ":cos_customizer_base",
+    embed = ["//src/cmd/cos_customizer:cos_customizer_lib"],
+    goarch = "amd64",
+    goos = "linux",
+    pure = "on",
+    visibility = ["//visibility:public"],
+)
diff --git a/WORKSPACE b/WORKSPACE
new file mode 100644
index 0000000..e219a7b
--- /dev/null
+++ b/WORKSPACE
@@ -0,0 +1,192 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
+
+http_archive(
+    name = "io_bazel_rules_go",
+    sha256 = "355d40d12749d843cfd05e14c304ac053ae82be4cd257efaf5ef8ce2caf31f1c",
+    strip_prefix = "rules_go-197699822e081dad064835a09825448a3e4cc2a2",
+    urls = [
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/archive/197699822e081dad064835a09825448a3e4cc2a2.tar.gz",
+        "https://github.com/bazelbuild/rules_go/archive/197699822e081dad064835a09825448a3e4cc2a2.tar.gz",
+    ],
+)
+
+http_archive(
+    name = "bazel_gazelle",
+    sha256 = "222e49f034ca7a1d1231422cdb67066b885819885c356673cb1f72f748a3c9d4",
+    urls = [
+        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.3/bazel-gazelle-v0.22.3.tar.gz",
+        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.3/bazel-gazelle-v0.22.3.tar.gz",
+    ],
+)
+
+http_archive(
+    name = "io_bazel_rules_docker",
+    sha256 = "4521794f0fba2e20f3bf15846ab5e01d5332e587e9ce81629c7f96c793bb7036",
+    strip_prefix = "rules_docker-0.14.4",
+    urls = ["https://github.com/bazelbuild/rules_docker/archive/v0.14.4.tar.gz"],
+)
+
+http_archive(
+    name = "distroless",
+    sha256 = "14834aaf9e005b9175de2cfa2b420c80778880ee4d9f9a9f7f385d3b177abff7",
+    strip_prefix = "distroless-fa0765cc86064801e42a3b35f50ff2242aca9998",
+    urls = ["https://github.com/GoogleContainerTools/distroless/archive/fa0765cc86064801e42a3b35f50ff2242aca9998.tar.gz"],
+)
+
+http_archive(
+    name = "rules_pkg",
+    sha256 = "aeca78988341a2ee1ba097641056d168320ecc51372ef7ff8e64b139516a4937",
+    urls = ["https://github.com/bazelbuild/rules_pkg/releases/download/0.2.6-1/rules_pkg-0.2.6.tar.gz"],
+)
+
+http_archive(
+    name = "rules_foreign_cc",
+    sha256 = "ab805b9e00747ba9b184790cbe2d4d19b672770fcac437f01d8c101ae60df996",
+    strip_prefix = "rules_foreign_cc-c309ec13192f69a46aaaba39587c3d7ff684eb35",
+    urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/c309ec13192f69a46aaaba39587c3d7ff684eb35.zip"],
+)
+
+git_repository(
+    name = "com_google_protobuf",
+    commit = "31ebe2ac71400344a5db91ffc13c4ddfb7589f92",
+    remote = "https://github.com/protocolbuffers/protobuf",
+    shallow_since = "1591135967 -0700",
+)
+
+git_repository(
+    name = "com_github_googlecloudplatform_docker_credential_gcr",
+    commit = "6093d30b51d725877bc6971aa6700153c1a364f1",
+    remote = "https://github.com/GoogleCloudPlatform/docker-credential-gcr",
+    shallow_since = "1613169008 -0800",
+)
+
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+
+protobuf_deps()
+
+load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
+
+go_rules_dependencies()
+
+go_register_toolchains(version="1.16")
+
+load("//:deps.bzl", "go_mod_deps")
+
+# gazelle:repository_macro deps.bzl%go_mod_deps
+go_mod_deps()
+
+rules_pkg_dependencies()
+
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
+
+gazelle_dependencies()
+
+load(
+    "@io_bazel_rules_docker//repositories:repositories.bzl",
+    container_repositories = "repositories",
+)
+
+container_repositories()
+
+load("@io_bazel_rules_docker//repositories:deps.bzl", container_deps = "deps")
+
+container_deps()
+
+load("@io_bazel_rules_docker//repositories:pip_repositories.bzl", "pip_deps")
+
+pip_deps()
+
+load(
+    "@io_bazel_rules_docker//container:container.bzl",
+    "container_pull",
+)
+
+container_pull(
+    name = "daisy",
+    digest = "sha256:a23774074d5941ed9e25f64ee7e02f96d2f8e09a4d7cee7131b49664267c33c7",
+    registry = "gcr.io",
+    repository = "compute-image-tools/daisy",
+)
+
+load(
+    "@io_bazel_rules_docker//go:image.bzl",
+    _go_image_repos = "repositories",
+)
+
+_go_image_repos()
+
+load(
+    "@distroless//package_manager:package_manager.bzl",
+    "package_manager_repositories",
+)
+
+package_manager_repositories()
+
+load(
+    "@distroless//package_manager:dpkg.bzl",
+    "dpkg_src",
+    "dpkg_list",
+)
+
+dpkg_src(
+    name = "debian_stretch",
+    arch = "amd64",
+    distro = "stretch",
+    sha256 = "79a66cd92ba9096fce679e15d0b5feb9effcf618b0a6d065eb32684dbffd0311",
+    snapshot = "20190328T105444Z",
+    url = "http://snapshot.debian.org/archive",
+)
+
+dpkg_list(
+    name = "package_bundle",
+    packages = [
+        "coreutils",
+        "libacl1",
+        "libattr1",
+        "libc6",
+        "libpcre3",
+        "libselinux1",
+        "tar",
+        "cryptsetup-bin",
+        "libcryptsetup4",
+        "libpopt0",
+        "libuuid1",
+        "libdevmapper1.02.1",
+        "libgcrypt20",
+        "libargon2-0",
+        "libjson-c3",
+        "libudev1",
+        "libpthread-stubs0-dev",
+        "libm17n-0",
+        "libgpg-error0",
+        "mtools",
+    ],
+    sources = [
+        "@debian_stretch//file:Packages.json",
+    ],
+)
+
+load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
+rules_foreign_cc_dependencies()
+
+load("//src/third_party/dosfstools:dosfstools_repositories.bzl", "dosfstools_repositories")
+dosfstools_repositories()
+
+load("//src/third_party/mtools:mtools_repositories.bzl", "mtools_repositories")
+mtools_repositories()
diff --git a/deps.bzl b/deps.bzl
new file mode 100644
index 0000000..24a1614
--- /dev/null
+++ b/deps.bzl
@@ -0,0 +1,441 @@
+load("@bazel_gazelle//:deps.bzl", "go_repository")
+
+def go_mod_deps():
+    go_repository(
+        name = "co_honnef_go_tools",
+        importpath = "honnef.co/go/tools",
+        sum = "h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=",
+        version = "v0.0.1-2020.1.4",
+    )
+    go_repository(
+        name = "com_github_burntsushi_toml",
+        importpath = "github.com/BurntSushi/toml",
+        sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
+        version = "v0.3.1",
+    )
+    go_repository(
+        name = "com_github_burntsushi_xgb",
+        importpath = "github.com/BurntSushi/xgb",
+        sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=",
+        version = "v0.0.0-20160522181843-27f122750802",
+    )
+    go_repository(
+        name = "com_github_census_instrumentation_opencensus_proto",
+        importpath = "github.com/census-instrumentation/opencensus-proto",
+        sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
+        version = "v0.2.1",
+    )
+    go_repository(
+        name = "com_github_chzyer_logex",
+        importpath = "github.com/chzyer/logex",
+        sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=",
+        version = "v1.1.10",
+    )
+    go_repository(
+        name = "com_github_chzyer_readline",
+        importpath = "github.com/chzyer/readline",
+        sum = "h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=",
+        version = "v0.0.0-20180603132655-2972be24d48e",
+    )
+    go_repository(
+        name = "com_github_chzyer_test",
+        importpath = "github.com/chzyer/test",
+        sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=",
+        version = "v0.0.0-20180213035817-a1ea475d72b1",
+    )
+    go_repository(
+        name = "com_github_client9_misspell",
+        importpath = "github.com/client9/misspell",
+        sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
+        version = "v0.3.4",
+    )
+    go_repository(
+        name = "com_github_cncf_udpa_go",
+        importpath = "github.com/cncf/udpa/go",
+        sum = "h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=",
+        version = "v0.0.0-20201120205902-5459f2c99403",
+    )
+    go_repository(
+        name = "com_github_davecgh_go_spew",
+        importpath = "github.com/davecgh/go-spew",
+        sum = "h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=",
+        version = "v1.1.0",
+    )
+    go_repository(
+        name = "com_github_envoyproxy_go_control_plane",
+        importpath = "github.com/envoyproxy/go-control-plane",
+        sum = "h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE=",
+        version = "v0.9.9-0.20201210154907-fd9021fe5dad",
+    )
+    go_repository(
+        name = "com_github_envoyproxy_protoc_gen_validate",
+        importpath = "github.com/envoyproxy/protoc-gen-validate",
+        sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
+        version = "v0.1.0",
+    )
+    go_repository(
+        name = "com_github_go_gl_glfw",
+        importpath = "github.com/go-gl/glfw",
+        sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=",
+        version = "v0.0.0-20190409004039-e6da0acd62b1",
+    )
+    go_repository(
+        name = "com_github_go_gl_glfw_v3_3_glfw",
+        importpath = "github.com/go-gl/glfw/v3.3/glfw",
+        sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=",
+        version = "v0.0.0-20200222043503-6f7a984d4dc4",
+    )
+    go_repository(
+        name = "com_github_golang_glog",
+        importpath = "github.com/golang/glog",
+        sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
+        version = "v0.0.0-20160126235308-23def4e6c14b",
+    )
+    go_repository(
+        name = "com_github_golang_groupcache",
+        importpath = "github.com/golang/groupcache",
+        sum = "h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=",
+        version = "v0.0.0-20200121045136-8c9f03a8e57e",
+    )
+    go_repository(
+        name = "com_github_golang_mock",
+        importpath = "github.com/golang/mock",
+        sum = "h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=",
+        version = "v1.4.4",
+    )
+    go_repository(
+        name = "com_github_golang_protobuf",
+        importpath = "github.com/golang/protobuf",
+        sum = "h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=",
+        version = "v1.4.3",
+    )
+    go_repository(
+        name = "com_github_google_btree",
+        importpath = "github.com/google/btree",
+        sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=",
+        version = "v1.0.0",
+    )
+    go_repository(
+        name = "com_github_google_go_cmp",
+        importpath = "github.com/google/go-cmp",
+        sum = "h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=",
+        version = "v0.5.4",
+    )
+    go_repository(
+        name = "com_github_google_martian",
+        importpath = "github.com/google/martian",
+        sum = "h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=",
+        version = "v2.1.0+incompatible",
+    )
+    go_repository(
+        name = "com_github_google_martian_v3",
+        importpath = "github.com/google/martian/v3",
+        sum = "h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=",
+        version = "v3.1.0",
+    )
+    go_repository(
+        name = "com_github_google_pprof",
+        importpath = "github.com/google/pprof",
+        sum = "h1:LR89qFljJ48s990kEKGsk213yIJDPI4205OKOzbURK8=",
+        version = "v0.0.0-20201218002935-b9804c9f04c2",
+    )
+    go_repository(
+        name = "com_github_google_renameio",
+        importpath = "github.com/google/renameio",
+        sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
+        version = "v0.1.0",
+    )
+    go_repository(
+        name = "com_github_google_subcommands",
+        importpath = "github.com/google/subcommands",
+        sum = "h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=",
+        version = "v1.2.0",
+    )
+    go_repository(
+        name = "com_github_google_uuid",
+        importpath = "github.com/google/uuid",
+        sum = "h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=",
+        version = "v1.1.2",
+    )
+    go_repository(
+        name = "com_github_googleapis_gax_go_v2",
+        importpath = "github.com/googleapis/gax-go/v2",
+        sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
+        version = "v2.0.5",
+    )
+    go_repository(
+        name = "com_github_hashicorp_golang_lru",
+        importpath = "github.com/hashicorp/golang-lru",
+        sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=",
+        version = "v0.5.1",
+    )
+    go_repository(
+        name = "com_github_ianlancetaylor_demangle",
+        importpath = "github.com/ianlancetaylor/demangle",
+        sum = "h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI=",
+        version = "v0.0.0-20200824232613-28f6c0f3b639",
+    )
+    go_repository(
+        name = "com_github_jstemmer_go_junit_report",
+        importpath = "github.com/jstemmer/go-junit-report",
+        sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=",
+        version = "v0.9.1",
+    )
+    go_repository(
+        name = "com_github_kisielk_gotool",
+        importpath = "github.com/kisielk/gotool",
+        sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
+        version = "v1.0.0",
+    )
+    go_repository(
+        name = "com_github_kr_pretty",
+        importpath = "github.com/kr/pretty",
+        sum = "h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=",
+        version = "v0.1.0",
+    )
+    go_repository(
+        name = "com_github_kr_pty",
+        importpath = "github.com/kr/pty",
+        sum = "h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=",
+        version = "v1.1.1",
+    )
+    go_repository(
+        name = "com_github_kr_text",
+        importpath = "github.com/kr/text",
+        sum = "h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=",
+        version = "v0.1.0",
+    )
+    go_repository(
+        name = "com_github_pmezard_go_difflib",
+        importpath = "github.com/pmezard/go-difflib",
+        sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
+        version = "v1.0.0",
+    )
+    go_repository(
+        name = "com_github_prometheus_client_model",
+        importpath = "github.com/prometheus/client_model",
+        sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=",
+        version = "v0.0.0-20190812154241-14fe0d1b01d4",
+    )
+    go_repository(
+        name = "com_github_rogpeppe_go_internal",
+        importpath = "github.com/rogpeppe/go-internal",
+        sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=",
+        version = "v1.3.0",
+    )
+    go_repository(
+        name = "com_github_stretchr_objx",
+        importpath = "github.com/stretchr/objx",
+        sum = "h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=",
+        version = "v0.1.0",
+    )
+    go_repository(
+        name = "com_github_stretchr_testify",
+        importpath = "github.com/stretchr/testify",
+        sum = "h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=",
+        version = "v1.5.1",
+    )
+    go_repository(
+        name = "com_github_yuin_goldmark",
+        importpath = "github.com/yuin/goldmark",
+        sum = "h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=",
+        version = "v1.2.1",
+    )
+    go_repository(
+        name = "com_google_cloud_go",
+        importpath = "cloud.google.com/go",
+        sum = "h1:XgtDnVJRCPEUG21gjFiRPz4zI1Mjg16R+NYQjfmU4XY=",
+        version = "v0.75.0",
+    )
+    go_repository(
+        name = "com_google_cloud_go_bigquery",
+        importpath = "cloud.google.com/go/bigquery",
+        sum = "h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=",
+        version = "v1.8.0",
+    )
+    go_repository(
+        name = "com_google_cloud_go_datastore",
+        importpath = "cloud.google.com/go/datastore",
+        sum = "h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=",
+        version = "v1.1.0",
+    )
+    go_repository(
+        name = "com_google_cloud_go_pubsub",
+        importpath = "cloud.google.com/go/pubsub",
+        sum = "h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=",
+        version = "v1.3.1",
+    )
+    go_repository(
+        name = "com_google_cloud_go_storage",
+        importpath = "cloud.google.com/go/storage",
+        sum = "h1:amPvhCOI+Hltp6rPu+62YdwhIrjf+34PKVAL4HwgYwk=",
+        version = "v1.13.0",
+    )
+    go_repository(
+        name = "com_shuralyov_dmitri_gpu_mtl",
+        importpath = "dmitri.shuralyov.com/gpu/mtl",
+        sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=",
+        version = "v0.0.0-20190408044501-666a987793e9",
+    )
+    go_repository(
+        name = "in_gopkg_check_v1",
+        importpath = "gopkg.in/check.v1",
+        sum = "h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=",
+        version = "v1.0.0-20180628173108-788fd7840127",
+    )
+    go_repository(
+        name = "in_gopkg_errgo_v2",
+        importpath = "gopkg.in/errgo.v2",
+        sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
+        version = "v2.1.0",
+    )
+    go_repository(
+        name = "in_gopkg_yaml_v2",
+        importpath = "gopkg.in/yaml.v2",
+        sum = "h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=",
+        version = "v2.2.2",
+    )
+    go_repository(
+        name = "io_opencensus_go",
+        importpath = "go.opencensus.io",
+        sum = "h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=",
+        version = "v0.22.5",
+    )
+    go_repository(
+        name = "io_rsc_binaryregexp",
+        importpath = "rsc.io/binaryregexp",
+        sum = "h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=",
+        version = "v0.2.0",
+    )
+    go_repository(
+        name = "io_rsc_quote_v3",
+        importpath = "rsc.io/quote/v3",
+        sum = "h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=",
+        version = "v3.1.0",
+    )
+    go_repository(
+        name = "io_rsc_sampler",
+        importpath = "rsc.io/sampler",
+        sum = "h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=",
+        version = "v1.3.0",
+    )
+    go_repository(
+        name = "org_golang_google_api",
+        importpath = "google.golang.org/api",
+        sum = "h1:zHCTXf0NeDdKTgcSQpT+ZflWAqHsEp1GmdpxW09f3YM=",
+        version = "v0.39.0",
+    )
+    go_repository(
+        name = "org_golang_google_appengine",
+        importpath = "google.golang.org/appengine",
+        sum = "h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=",
+        version = "v1.6.7",
+    )
+    go_repository(
+        name = "org_golang_google_genproto",
+        importpath = "google.golang.org/genproto",
+        sum = "h1:np3A9jnmE/eMtrOwwvUycmQ1XoLyj5nqZ41bAyYLqJ0=",
+        version = "v0.0.0-20210203152818-3206188e46ba",
+    )
+    go_repository(
+        name = "org_golang_google_grpc",
+        importpath = "google.golang.org/grpc",
+        sum = "h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8=",
+        version = "v1.35.0",
+    )
+    go_repository(
+        name = "org_golang_google_protobuf",
+        importpath = "google.golang.org/protobuf",
+        sum = "h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=",
+        version = "v1.25.0",
+    )
+    go_repository(
+        name = "org_golang_x_crypto",
+        importpath = "golang.org/x/crypto",
+        sum = "h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=",
+        version = "v0.0.0-20200622213623-75b288015ac9",
+    )
+    go_repository(
+        name = "org_golang_x_exp",
+        importpath = "golang.org/x/exp",
+        sum = "h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=",
+        version = "v0.0.0-20200224162631-6cc2880d07d6",
+    )
+    go_repository(
+        name = "org_golang_x_image",
+        importpath = "golang.org/x/image",
+        sum = "h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=",
+        version = "v0.0.0-20190802002840-cff245a6509b",
+    )
+    go_repository(
+        name = "org_golang_x_lint",
+        importpath = "golang.org/x/lint",
+        sum = "h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=",
+        version = "v0.0.0-20201208152925-83fdc39ff7b5",
+    )
+    go_repository(
+        name = "org_golang_x_mobile",
+        importpath = "golang.org/x/mobile",
+        sum = "h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=",
+        version = "v0.0.0-20190719004257-d2bd2a29d028",
+    )
+    go_repository(
+        name = "org_golang_x_mod",
+        importpath = "golang.org/x/mod",
+        sum = "h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=",
+        version = "v0.4.1",
+    )
+    go_repository(
+        name = "org_golang_x_net",
+        importpath = "golang.org/x/net",
+        sum = "h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=",
+        version = "v0.0.0-20201224014010-6772e930b67b",
+    )
+    go_repository(
+        name = "org_golang_x_oauth2",
+        importpath = "golang.org/x/oauth2",
+        sum = "h1:HiAZXo96zOhVhtFHchj/ojzoxCFiPrp9/j0GtS38V3g=",
+        version = "v0.0.0-20210201163806-010130855d6c",
+    )
+    go_repository(
+        name = "org_golang_x_sync",
+        importpath = "golang.org/x/sync",
+        sum = "h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=",
+        version = "v0.0.0-20201207232520-09787c993a3a",
+    )
+    go_repository(
+        name = "org_golang_x_sys",
+        importpath = "golang.org/x/sys",
+        sum = "h1:cdsMqa2nXzqlgs183pHxtvoVwU7CyzaCTAUOg94af4c=",
+        version = "v0.0.0-20210503173754-0981d6026fa6",
+    )
+    go_repository(
+        name = "org_golang_x_term",
+        importpath = "golang.org/x/term",
+        sum = "h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=",
+        version = "v0.0.0-20201126162022-7de9c90e9dd1",
+    )
+    go_repository(
+        name = "org_golang_x_text",
+        importpath = "golang.org/x/text",
+        sum = "h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=",
+        version = "v0.3.4",
+    )
+    go_repository(
+        name = "org_golang_x_time",
+        importpath = "golang.org/x/time",
+        sum = "h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=",
+        version = "v0.0.0-20191024005414-555d28b269f0",
+    )
+    go_repository(
+        name = "org_golang_x_tools",
+        importpath = "golang.org/x/tools",
+        sum = "h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=",
+        version = "v0.1.0",
+    )
+    go_repository(
+        name = "org_golang_x_xerrors",
+        importpath = "golang.org/x/xerrors",
+        sum = "h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=",
+        version = "v0.0.0-20200804184101-5ec99f83aff1",
+    )
diff --git a/go.mod b/go.mod
index 62b8ce0..32fe52f 100644
--- a/go.mod
+++ b/go.mod
@@ -5,6 +5,7 @@
 require (
 	cloud.google.com/go v0.75.0
 	cloud.google.com/go/storage v1.13.0
+	github.com/GoogleCloudPlatform/cos-customizer v0.0.0-20210511200649-c1b59e6c88c5
 	github.com/andygrunwald/go-gerrit v0.0.0-20201231163137-46815e48bfe0
 	github.com/beevik/etree v1.1.0
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
@@ -19,6 +20,7 @@
 	github.com/urfave/cli/v2 v2.2.0
 	go.chromium.org/luci v0.0.0-20200722211809-bab0c30be68b
 	golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c
+	golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6
 	google.golang.org/api v0.39.0
 	google.golang.org/genproto v0.0.0-20210203152818-3206188e46ba
 	google.golang.org/grpc v1.35.0
diff --git a/go.sum b/go.sum
index 079843a..d14edb3 100644
--- a/go.sum
+++ b/go.sum
@@ -43,6 +43,8 @@
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/GoogleCloudPlatform/cos-customizer v0.0.0-20210511200649-c1b59e6c88c5 h1:ORu5XQ8EpfkCEMMPy3/WwWM5+UN/BuUNbC57fhMOr5A=
+github.com/GoogleCloudPlatform/cos-customizer v0.0.0-20210511200649-c1b59e6c88c5/go.mod h1:xZa5PyQCdrikKLf7qlFMbAnLFx7d20xeDYUcz8ozBiQ=
 github.com/andygrunwald/go-gerrit v0.0.0-20201231163137-46815e48bfe0 h1:1IlIh8TmY+eAX17cPIUzT4e5R5bQoEngAO5QFcGHbrA=
 github.com/andygrunwald/go-gerrit v0.0.0-20201231163137-46815e48bfe0/go.mod h1:soxaYLbAFToS0OelBriItCts/mtUZOuLBkCk1Xv4ZSo=
 github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
@@ -165,10 +167,7 @@
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -182,7 +181,6 @@
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
@@ -190,8 +188,6 @@
 github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4=
 github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -329,8 +325,9 @@
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 h1:cdsMqa2nXzqlgs183pHxtvoVwU7CyzaCTAUOg94af4c=
+golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -380,7 +377,6 @@
 golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
@@ -393,8 +389,6 @@
 golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
@@ -491,7 +485,6 @@
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
 google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/run_tests.sh b/run_tests.sh
new file mode 100755
index 0000000..adb961d
--- /dev/null
+++ b/run_tests.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o pipefail
+
+PROJECT=""
+
+usage() {
+  cat <<'EOF'
+Usage: ./run_tests.sh [OPTION]
+run_tests.sh runs cos-customizer integration tests.
+
+-p,--project=<project_name>    GCP project to run tests in. Required.
+EOF
+}
+
+parse_arguments() {
+  local -r long_options="project:,help"
+  parsed_args="$(getopt --options=p:,h --longoptions="${long_options}" --name "$0" -- "$@")"
+  eval set -- "${parsed_args}"
+  while true; do
+    case "$1" in
+      -p|--project)
+        PROJECT="$2"
+        shift 2
+        ;;
+      -h|--help)
+        usage
+        exit
+        ;;
+      --)
+        shift
+        break
+        ;;
+      *)
+        usage
+        exit
+        ;;
+    esac
+  done
+}
+
+get_build_status() {
+  local -r build_id="$1"
+  gcloud builds describe "${build_id}" --project="${PROJECT}" --format='value(status)'
+}
+
+get_log_url() {
+  local -r build_id="$1"
+  gcloud builds describe "${build_id}" --project="${PROJECT}" --format='value(logUrl)'
+}
+
+start_build() {
+  local -r config="$1"
+  gcloud builds submit --config="${config}" --project="${PROJECT}" --async --format='value(ID)' .
+}
+
+wait_for_build() {
+  local -r build_id="$1"
+  local status
+  while true; do
+    status=$(get_build_status "${build_id}")
+    case "${status}" in
+      "SUCCESS"|"FAILURE"|"INTERNAL_ERROR"|"TIMEOUT"|"CANCELLED")
+        echo "${status}"
+        return
+        ;;
+      "QUEUED"|"WORKING")
+        sleep 5
+        ;;
+      "STATUS_UNKNOWN")
+        echo "Received STATUS_UNKNOWN for build ${build_id}" 1>&2
+        sleep 5
+        ;;
+      *)
+        echo "Unknown status for build ${build_id}: ${status}" 1>&2
+        return 1
+        ;;
+    esac
+  done
+}
+
+main() {
+  local -a build_ids
+  local status
+  local log_url
+  local exit_code=0
+  if [[ -z "${PROJECT}" ]]; then
+    usage
+    return 1
+  fi
+  for config in testing/*.yaml; do
+    build_ids+=("$(start_build "${config}")")
+  done
+  for build_id in "${build_ids[@]}"; do
+    status="$(wait_for_build "${build_id}")"
+    if [[ "${status}" == "SUCCESS" ]]; then
+      echo "Build ${build_id} succeeded"
+    else
+      log_url="$(get_log_url "${build_id}")"
+      echo "Build ${build_id} failed"
+      echo "Logs: ${log_url}"
+      exit_code=1
+    fi
+  done
+  return "${exit_code}"
+}
+
+parse_arguments "$@"
+main
diff --git a/src/cmd/cos_customizer/BUILD.bazel b/src/cmd/cos_customizer/BUILD.bazel
new file mode 100644
index 0000000..806d884
--- /dev/null
+++ b/src/cmd/cos_customizer/BUILD.bazel
@@ -0,0 +1,74 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
+
+go_library(
+    name = "cos_customizer_lib",
+    srcs = [
+        "disable_auto_update.go",
+        "finish_image_build.go",
+        "flag_vars.go",
+        "install_gpu.go",
+        "main.go",
+        "run_script.go",
+        "seal_oem.go",
+        "start_image_build.go",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/cmd/cos_customizer",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//src/pkg/config",
+        "//src/pkg/fs",
+        "//src/pkg/gce",
+        "//src/pkg/preloader",
+        "//src/pkg/provisioner",
+        "//src/pkg/tools/partutil",
+        "//src/pkg/utils",
+        "@com_github_google_subcommands//:subcommands",
+        "@com_google_cloud_go_storage//:storage",
+        "@org_golang_google_api//compute/v1:compute",
+        "@org_golang_google_api//iterator",
+        "@org_golang_google_api//option",
+        "@org_golang_x_oauth2//google",
+    ],
+)
+
+go_test(
+    name = "cos_customizer_test",
+    srcs = [
+        "finish_image_build_test.go",
+        "flag_vars_test.go",
+        "install_gpu_test.go",
+        "run_script_test.go",
+        "start_image_build_test.go",
+    ],
+    embed = [":cos_customizer_lib"],
+    deps = [
+        "//src/pkg/config",
+        "//src/pkg/fakes",
+        "//src/pkg/fs",
+        "//src/pkg/provisioner",
+        "@com_github_google_go_cmp//cmp",
+        "@com_github_google_subcommands//:subcommands",
+        "@com_google_cloud_go_storage//:storage",
+        "@org_golang_google_api//compute/v1:compute",
+    ],
+)
+
+go_binary(
+    name = "cos_customizer",
+    embed = [":cos_customizer_lib"],
+    visibility = ["//visibility:public"],
+)
diff --git a/src/cmd/cos_customizer/CONTRIBUTING.md b/src/cmd/cos_customizer/CONTRIBUTING.md
new file mode 100644
index 0000000..db177d4
--- /dev/null
+++ b/src/cmd/cos_customizer/CONTRIBUTING.md
@@ -0,0 +1,28 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## Community Guidelines
+
+This project follows
+[Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
diff --git a/src/cmd/cos_customizer/LICENSE b/src/cmd/cos_customizer/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/src/cmd/cos_customizer/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/src/cmd/cos_customizer/README.md b/src/cmd/cos_customizer/README.md
new file mode 100644
index 0000000..91c42fe
--- /dev/null
+++ b/src/cmd/cos_customizer/README.md
@@ -0,0 +1,391 @@
+# Image Customizer for Container-Optimized OS from Google
+
+Note: This is not an official Google product.
+
+The COS Customizer is a tool for creating customized Container-Optimized OS
+images. It uses
+[Daisy](https://github.com/GoogleCloudPlatform/compute-image-tools/tree/master/daisy)
+to create a COS VM instance, load data onto the instance, and create a disk
+image from the modified instance.
+
+Currently, the COS Customizer is intended to be run as part of a
+[Google Cloud Build](https://cloud.google.com/cloud-build/) workflow as a
+sequence of Google Cloud Build build steps. No other usage mode is currently
+supported.
+
+*   [Accessing the cos-customizer container image](#accessing-the-cos-customizer-container-image)
+*   [Quick Start](#quick-start)
+    *   [Minimal example](#minimal-example)
+*   [Build Steps](#build-steps)
+    *   [Required build steps](#required-build-steps)
+        *   [The start-image-build step](#the-start-image-build-step)
+        *   [The finish-image-build step](#the-finish-image-build-step)
+    *   [Optional build steps](#optional-build-steps)
+        *   [run-script](#run-script)
+        *   [install-gpu](#install-gpu)
+        *   [seal-oem](#seal-oem)
+        *   [disable-auto-update](#disable-auto-update)
+
+## Accessing the cos-customizer container image
+
+The container image is available at `gcr.io/cos-cloud/cos-customizer`.
+Alternatively, it can be built from source using [Bazel](https://bazel.build/).
+To build COS customizer and load the image into Docker, run:
+
+    $ bazel run :cos_customizer -- --norun
+
+The COS Customizer docker image will then be available in Docker as
+`bazel:cos_customizer`.
+
+## Quick Start
+
+The COS Customizer is intended to be run as a sequence of steps in a Google
+Cloud Build workflow. It is implemented and distributed as a Docker container.
+Each subcommand of the COS Customizer implements a Google Cloud Build build
+step. Two of these steps need to be present for every image build, and the rest
+of the steps are optional steps that can be used for customizing a COS image.
+
+The required build steps are the `start-image-build` and `finish-image-build`
+steps. The `start-image-build` step initializes local state for the image build,
+and the `finish-image-build` step performs the image building operation with
+Daisy.
+
+Example optional build steps are `run-script`, `install-gpu`, `seal-oem` and
+`disable-auto-update`.  
+`run-script` allows users to customize an image by running a script.  
+`install-gpu` allows users to install GPU drivers using the
+[COS GPU installer](https://github.com/GoogleCloudPlatform/cos-gpu-installer).  
+`seal-oem` allows users to set up a verified read-only OEM partition. It will be 
+verified when the VM boots and when the data inside is accessed.  
+`disable-auto-update` allows users to disable the auto-update service, and it
+will reclaim the disk space of the unused root partition.
+
+### Minimal example
+
+Here is a minimal Google Cloud Build workflow demonstrating usage of the COS
+Customizer. It customizes the image `cos-stable-68-10718-86-0` by running the
+script `preload.sh`. This results in an image with the custom file
+`/var/lib/hello`.
+
+    $ cat preload.sh
+    echo "Hello, World!" > /var/lib/hello
+    $ cat cloudbuild.yaml
+    steps:
+    - name: 'gcr.io/cos-cloud/cos-customizer'
+      args: ['start-image-build',
+             '-image-name=cos-stable-68-10718-86-0',
+             '-image-project=cos-cloud',
+             '-gcs-bucket=${PROJECT_ID}_cloudbuild',
+             '-gcs-workdir=image-build-$BUILD_ID']
+    - name: 'gcr.io/cos-cloud/cos-customizer'
+      args: ['run-script',
+             '-script=preload.sh']
+    - name: 'gcr.io/cos-cloud/cos-customizer'
+      args: ['finish-image-build',
+             '-zone=us-west1-b',
+             '-project=$PROJECT_ID',
+             '-image-name=my-custom-image',
+             '-image-project=$PROJECT_ID']
+    timeout: '1500s'
+    $ gcloud builds submit --config=cloudbuild.yaml .
+
+## Build Steps
+
+The COS Customizer is different from typical Google Cloud Build build steps.
+Most build steps, like the `gcr.io/cloud-builders/gcloud` build step, are
+single-purpose container images that are capable of being useful when run in
+isolation. The COS Customizer is not one of these build steps.
+
+The COS Customizer is a container image that provides a collection of Google
+Cloud Build build steps that are intended to be used together. When run in
+sequence as part of a Google Cloud Build workflow, these build steps create a
+Compute Engine disk image.
+
+Each build step is invoked as a subcommand of the COS Customizer container
+image; for example, usage of the `run-script` build step works as follows:
+
+    ...
+    - name: 'gcr.io/cos-cloud/cos-customizer'
+      args: ['run-script',
+             '-script=preload.sh']
+    ...
+
+### Required build steps
+
+Two build steps are required for each image build operation; the
+`start-image-build` step and the `finish-image-build` step.
+
+#### The start-image-build step
+
+The primary purpose of this step is to initialize the image build process. It
+only initializes local state in the Google Cloud Build builder. It does not
+create any cloud resources. It must run before all of the other steps in the
+image build process, and it must only be run once. It takes the following flags:
+
+`-build-context`: A path to a file or directory that should be relative to the
+default Google Cloud Build working directory. Defaults to `.`. The contents of
+this path will be copied to the builder VM in a temporary directory. All scripts
+specified by a `run-script` step will execute with this directory as a working
+directory. For example, suppose that the source directory provided to Google
+Cloud Build looked like this:
+
+    .
+    ├── lib
+    │   └── mylib.sh
+    └── main.sh
+
+If `-build-context` is set to `.`, this directory structure will be copied to
+the builder VM and will be the working directory for all specified `run-script`
+steps. If a `run-script` step runs the script `main.sh`, `main.sh` will have
+access to `lib/mylib.sh`. However, suppose `-build-context` is set to `lib`;
+then, a `run-script` step that specifies `main.sh` will fail, since `main.sh`
+won't be included in the working directory on the builder VM. Specifying
+`mylib.sh` in a `run-script` step would be valid in this case though.
+
+`-gcs-bucket`: A GCS bucket to use for scratch space. Optional build steps are
+free to use this bucket for scratch space. Normally, it's expected that only
+`finish-image-build` will use this GCS bucket. `finish-image-build` uses this
+GCS bucket for transferring binary blobs to the builder VM.
+
+`-gcs-workdir`: A directory in the aforementioned GCS bucket that will be used
+for scratch space.
+
+`-image-project`: The Google Cloud Platform (GCP) project that contains the
+source image; that is, the image to customize.
+
+`-image-name`: The name of the source image. Mutually exclusive with
+`-image-milestone` and `-image-family`.
+
+`-image-milestone`: The milestone of the source image. If `-image-milestone` is
+specified and `-image-project` is set to `cos-cloud`, the `start-image-build`
+step will resolve the source image by finding the latest image in `cos-cloud` on
+the specified milestone. An example value for this field is `69`. Mutually
+exclusive with `-image-name` and `-image-family`.
+
+`-image-family`: The family of the source image. If `-image-family` is
+specified, the `start-image-build` step will resolve the source image by finding
+the latest active image in the specified image family. This is done using Google
+Compute Engine's `getFromFamily` API. Mutually exclusive with `-image-name` and
+`-image-milestone`.
+
+An example `start-image-build` step looks like the following:
+
+    - name: 'gcr.io/cos-cloud/cos-customizer'
+      args: ['start-image-build',
+             '-image-name=cos-stable-68-10718-86-0',
+             '-image-project=cos-cloud',
+             '-gcs-bucket=${PROJECT_ID}_cloudbuild',
+             '-gcs-workdir=image-build-$BUILD_ID']
+
+#### The finish-image-build step
+
+The primary purpose of this step is to execute the steps specified in the image
+build process. This step creates a builder VM, runs configured scripts on it,
+and creates a disk image from the VM. It must run after all of the other steps
+in an image build process. This step will clean up the local state stored by
+previous COS Customizer steps; a new image build process can be started after a
+`finish-image-build` step. It takes the following flags:
+
+`-image-project`: The GCP project that should contain the output image.
+
+`-image-name`: The name of the output image. Mutually exclusive with
+`-image-suffix`.
+
+`-image-suffix`: Construct the name of the output image by appending the
+specified suffix to the name of the input image. Mutually exclusive with
+`-image-name`.
+
+`-image-family`: An image family to assign the output image to.
+
+`-deprecate-old-images`: If present, the image build process will deprecate all
+of the old images in the output image's image family. Can only be specified if
+`-image-family` is specified.
+
+`-old-image-ttl`: Time-to-live in seconds to apply to images deprecated by
+`-deprecate-old-images`. Configures the "deleted" field of the image's
+deprecation status to be this many seconds after the image is deprecated. Can
+only be used if `-deprecate-old-images` is also given.
+
+`-zone`: The GCE zone in which to perform the image building operation. This is
+an important consideration when installing GPU drivers on the image, since
+installing GPU drivers requires that GPU quota is available in this zone.
+
+`-project`: The GCP project to use for the image building operation.
+
+`-labels`: Key-value pairs to apply to the output image as image labels.
+Example: `-labels=cos_image=true,milestone=65`
+
+`-licenses`: A list of licenses to apply to the output image. License names must
+be formatted as `projects/{project}/global/licenses/{license}`. Example:
+`-licenses=projects/cos-cloud/global/licenses/cos`
+
+`-inherit-labels`: If present, the output image will be assigned the exact same
+image labels present on the source image. The labels specified by the `-labels`
+flag take precedence over labels assigned with this flag.
+
+`-disk-size-gb`: The disk size in GB to use when creating the image.
+This value should never be smaller than 10 (the default size of a COS image).
+If `-oem-size` is set, the lower limit of `-disk-size-gb` is as shown in the 
+following table. The larger one of the value in the table and 10 is 
+effective. See section `-oem-size`,
+[seal-oem](#seal-oem) and [disable-auto-update](#disable-auto-update) for details.
+
+| disk-size-gb-lower-limit |        no seal-oem       |           seal-oem           |
+|:------------------------:|:------------------------:|:----------------------------:|
+|  no disable-auto-update  |      10GB + oem-size     | 10GB + oem-size x 2 - 2046MB |
+|    disable-auto-update   | 10GB + oem-size - 2046MB | 10GB + oem-size x 2 - 2046MB | 
+
+Note that if `seal-oem` is run without specifying `-oem-size`, the lower limit of
+`-disk-size-gb` will be 10.
+
+`-oem-size`: The file system size of the extended OEM partition with unit 
+`G`,`M`,`K` or `B`. 
+If no unit is provided, it will be parsed as the number of sectors of 512 Bytes.
+Since the default size of the OEM partition in a COS image is assumed to be 16MB, 
+this value must be no smaller than 16MB, otherwise the build will fail. 
+Make sure the disk size is large enough if this flag is used to extend the OEM partition.
+If the `seal-oem` or `disable-auto-update` step is run, the OEM partition will
+use the reclaimed space first.
+See section `-disk-size-gb` for the limits of the disk size value.
+Example: `-oem-size=500M`
+
+Note that this feature is supported on COS milestone 73 and later.
+
+`-timeout`: Timeout value of this step. Must be formatted according to Golang's
+time.Duration string format. Defaults to "1h0m0s". Keep in mind that this timeout
+value is different from the overall Cloud Build workflow timeout value, which is
+set at the Cloud Build workflow level. If this timeout value expires, resources
+created during the image build process will be properly cleaned up. If the
+overall Cloud Build workflow timeout expires, the task will be cancelled without
+any opportunity to clean up resources.
+
+An example `finish-image-build` step looks like the following:
+
+    - name: 'gcr.io/cos-cloud/cos-customizer'
+      args: ['finish-image-build',
+             '-zone=us-west1-b',
+             '-project=$PROJECT_ID',
+             '-image-name=my-custom-image',
+             '-image-project=$PROJECT_ID']
+
+### Optional build steps
+
+The rest of the build steps provided by COS Customizer are optional; if they are
+not included, the image build will run successfully, but will generate an image
+that is identical to the source image. Optional build steps are used to make
+meaningful changes to an image.
+
+#### run-script
+
+The `run-script` build step configures the image build to run a script on the
+builder VM. If multiple `run-script` steps are given, the scripts specified by
+each step will run in the same order in which the `run-script` steps were given.
+It takes the following flags:
+
+`-script`: A path to the script to run. The path should be relative to the root
+of the build context provided in `start-image-build`.
+
+`-env`: Key-value pairs indicating environment variables to provide to the
+script when it is run. Example: `-env=RELEASE=1,FOO=bar`
+
+An example `run-script` step looks like the following:
+
+    - name: 'gcr.io/cos-cloud/cos-customizer'
+      args: ['run-script',
+             '-script=preload.sh']
+
+#### install-gpu
+
+The `install-gpu` build step configures the image build to install GPU drivers
+on the builder VM. GPU drivers are installed using the
+[COS GPU installer](https://github.com/GoogleCloudPlatform/cos-gpu-installer).
+In addition to installing GPU drivers, the `install-gpu` step installs a script
+named `setup_gpu.sh` in the GPU driver install directory. _In order to use the
+installed GPU drivers, this script must be run every time the system boots_. It
+should be executed as part of a startup script or cloud config. `install-gpu`
+takes the following flags:
+
+`-version`: The GPU driver version to install. Currently, we only support
+installing Tesla drivers that are present in the
+[nvidia-drivers-us-public GCS bucket](https://console.cloud.google.com/storage/browser/nvidia-drivers-us-public).
+The set of supported drivers can be found by running the `install-gpu` step
+independently on your local machine with the `-get-driver-version` flag.
+Example: `-version=396.26`
+
+`-get-driver-version`: Prints out the list of supported driver versions to
+stdout and exits. If this flag is provided, the build step doesn't do anything
+meaningful; it only prints the list of supported driver versions. It is not
+intended to be used in a Google Cloud Build workflow; it is meant to be run
+independently for users to easily see the set of supported driver versions.
+
+`-md5sum`: If you have the md5sum of the driver you want to install, you can
+provide it here and the COS GPU installer will verify the driver with this
+md5sum.
+
+`-install-dir`: The directory on the image to install GPU drivers to. The
+`setup_gpu.sh` script will also be installed in this directory. Make sure to
+choose a directory that will persist across reboots; for the most part, this
+means a subdirectory of `/var` or `/home`.
+
+`-gpu-type`: The type of GPU to use to verify correct installation of GPU
+drivers. The valid values here are nvidia-tesla-k80, nvidia-tesla-p100, and
+nvidia-tesla-v100. This value has no impact on the drivers that are installed on
+the image; it is only used when verifying that the driver installation
+succeeded. Make sure that the zone you are running the image build in has quota
+for a GPU of this type.
+
+An example `install-gpu` step looks like the following:
+
+    - name: 'gcr.io/cos-cloud/cos-customizer'
+      args: ['install-gpu',
+             '-version=396.26']
+
+Note that when using an image customized with `install-gpu`, the hosted docker
+container should be set to run in privileged mode so that it has access to the
+GPU device on the host machine.
+
+#### seal-oem
+
+The `seal-oem` build step utilizes `dm-verity` to verify the data in the OEM
+partition when the system boots and when data are accessed. 
+If the verification fails, the system will refuse to boot or will panic. 
+This step takes no flags and needs to be run after any step 
+that makes changes to the OEM partition (`/dev/sda8` or `/usr/share/oem`).
+
+If this step is run, the size of the OEM partition will be doubled to store
+the hash tree for verification in the second half of the partition.
+If `-oem-size` in `finish-image-build` step is not set, the file system 
+size of the OEM partition will be assumed to be the same as the default size, 
+16MB. And the size of the OEM partition will be doubled to 32MB.
+
+The auto-update service is automatically disabled in this step. So it is not 
+necessary to run the `disable-auto-update` step explicitly. This will reclaim
+the unused space, and the OEM partition will use the reclaimed space first.
+See section `-disk-size-gb` for the limits of the disk size value. If the 
+disk size is not large enough, the build will fail.
+
+After running this build step, the OEM partition will not be automatically
+mounted when the system boots.   
+`sudo mount /dev/dm-1 /usr/share/oem` should be added to
+`startup script` or `cloud init` to mount the OEM partition.
+
+Note that this feature is supported on COS milestone 73 and later.
+
+#### disable-auto-update
+
+The `disable-auto-update` build step modifies the kernel commandline to disable
+the auto-update service. This step takes no flags.
+
+The root partition that is used by auto-update service will not be needed anymore,
+so the disk space (2046MB) of that partition will be reclaimed. The reclaimed
+space will be used by the OEM partition if extended and the stateful partition.
+
+Note that this feature is supported on COS milestone 73 and later.
+
+# Contributor Docs
+
+## Releasing
+
+To release a new version of COS Customizer, tag the commit you want to release
+with the date in the form of `vYYYYMMDD`. This will trigger a Cloud Build job to
+build and release the container image.
diff --git a/src/cmd/cos_customizer/cloudbuild.yaml b/src/cmd/cos_customizer/cloudbuild.yaml
new file mode 100644
index 0000000..b23c64a
--- /dev/null
+++ b/src/cmd/cos_customizer/cloudbuild.yaml
@@ -0,0 +1,38 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+steps:
+# Build a local "bazel" builder image: the stock cloud-builders/bazel image
+# with mtools installed on top.
+- name: 'gcr.io/cloud-builders/docker'
+  entrypoint: 'bash'
+  args:
+  - '-c'
+  - |
+    cat <<EOF | docker build -t bazel -
+    FROM gcr.io/cloud-builders/bazel
+    RUN apt-get update && apt-get install -y mtools
+    EOF
+# Run all tests except those under //src/pkg/tools.
+- name: 'bazel'
+  args: ['test', '--spawn_strategy=standalone','--','...','-//src/pkg/tools/...']
+# Build the cos_customizer container image without running it.
+- name: 'bazel'
+  args: ['run', '--spawn_strategy=standalone', ':cos_customizer', '--', '--norun']
+# Tag the built image for the output project, both with the release tag and as latest.
+- name: 'gcr.io/cloud-builders/docker'
+  args: ['tag', 'bazel:cos_customizer', 'gcr.io/${_OUTPUT_PROJECT}/cos-customizer:${TAG_NAME}']
+- name: 'gcr.io/cloud-builders/docker'
+  args: ['tag', 'bazel:cos_customizer', 'gcr.io/${_OUTPUT_PROJECT}/cos-customizer:latest']
+options:
+  machineType: 'N1_HIGHCPU_8'
+  substitutionOption: 'MUST_MATCH'
+images:
+- 'gcr.io/${_OUTPUT_PROJECT}/cos-customizer:${TAG_NAME}'
+- 'gcr.io/${_OUTPUT_PROJECT}/cos-customizer:latest'
diff --git a/src/cmd/cos_customizer/disable_auto_update.go b/src/cmd/cos_customizer/disable_auto_update.go
new file mode 100644
index 0000000..2ec5a55
--- /dev/null
+++ b/src/cmd/cos_customizer/disable_auto_update.go
@@ -0,0 +1,76 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"flag"
+	"log"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+
+	"github.com/google/subcommands"
+)
+
+// DisableAutoUpdate implements subcommands.Command for the "disable-auto-update" command.
+// It updates the saved provisioner config so that the image build disables the
+// auto-update service and reclaims the space of the now-unused root partition (sda3).
+type DisableAutoUpdate struct{}
+
+// Name implements subcommands.Command.Name.
+func (d *DisableAutoUpdate) Name() string {
+	return "disable-auto-update"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (d *DisableAutoUpdate) Synopsis() string {
+	return "Disable auto-update service."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (d *DisableAutoUpdate) Usage() string {
+	return `disable-auto-update
+`
+}
+
+// SetFlags implements subcommands.Command.SetFlags. This command takes no flags.
+func (d *DisableAutoUpdate) SetFlags(f *flag.FlagSet) {}
+
+// updateProvConfig loads the provisioner config stored at configPath, marks
+// sda3 for reclamation, appends a "DisableAutoUpdate" provisioning step, and
+// writes the config back to the same path.
+func (d *DisableAutoUpdate) updateProvConfig(configPath string) error {
+	var provConfig provisioner.Config
+	if err := config.LoadFromFile(configPath, &provConfig); err != nil {
+		return err
+	}
+	provConfig.BootDisk.ReclaimSDA3 = true
+	provConfig.Steps = append(provConfig.Steps, provisioner.StepConfig{
+		Type: "DisableAutoUpdate",
+	})
+	return config.SaveConfigToPath(configPath, &provConfig)
+}
+
+// Execute implements subcommands.Command.Execute. It disables the auto-update systemd service.
+// args[0] must be the *fs.Files holding the persistent state file paths for this build.
+func (d *DisableAutoUpdate) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+	if f.NArg() != 0 {
+		f.Usage()
+		return subcommands.ExitUsageError
+	}
+	files := args[0].(*fs.Files)
+	if err := d.updateProvConfig(files.ProvConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	return subcommands.ExitSuccess
+}
diff --git a/src/cmd/cos_customizer/finish_image_build.go b/src/cmd/cos_customizer/finish_image_build.go
new file mode 100644
index 0000000..d37d9af
--- /dev/null
+++ b/src/cmd/cos_customizer/finish_image_build.go
@@ -0,0 +1,341 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"log"
+	"os/exec"
+	"strconv"
+	"time"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/gce"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/preloader"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil"
+
+	"github.com/google/subcommands"
+)
+
+// FinishImageBuild implements subcommands.Command for the "finish-image-build" command.
+// This command finishes an image build by converting saved image configurations into
+// an actual GCE image.
+type FinishImageBuild struct {
+	// All fields below are populated from command-line flags in SetFlags,
+	// except oemFSSize4K.
+	// NOTE(review): oemFSSize4K is not referenced in this file; the analogous
+	// provisioner value is computed in validateOEM — confirm this field is
+	// still needed.
+	imageProject   string
+	zone           string
+	project        string
+	imageName      string
+	imageSuffix    string
+	imageFamily    string
+	deprecateOld   bool
+	oldImageTTLSec int
+	labels         *mapVar
+	licenses       *listVar
+	inheritLabels  bool
+	oemSize        string
+	oemFSSize4K    uint64
+	diskSize       int
+	timeout        time.Duration
+}
+
+// Name implements subcommands.Command.Name.
+func (f *FinishImageBuild) Name() string {
+	return "finish-image-build"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (f *FinishImageBuild) Synopsis() string {
+	return "Complete the COS image build and generate a GCE image."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (f *FinishImageBuild) Usage() string {
+	return `finish-image-build [flags]
+`
+}
+
+// SetFlags implements subcommands.Command.SetFlags.
+// Cross-flag constraints (mutual exclusion, dependencies) are not enforced
+// here; they are checked in validate().
+func (f *FinishImageBuild) SetFlags(flags *flag.FlagSet) {
+	flags.StringVar(&f.imageProject, "image-project", "", "Output image project.")
+	flags.StringVar(&f.imageName, "image-name", "", "Output image name. Mutually exclusive with 'image-suffix'.")
+	flags.StringVar(&f.imageSuffix, "image-suffix", "", "Construct the output image name from the input image "+
+		"name and this suffix. Mutually exclusive with 'image-name'.")
+	flags.StringVar(&f.imageFamily, "image-family", "", "Output image family.")
+	flags.BoolVar(&f.deprecateOld, "deprecate-old-images", false, "Deprecate old images in the output image "+
+		"family. Can only be used if 'image-family' is set.")
+	flags.IntVar(&f.oldImageTTLSec, "old-image-ttl", 0, "Time-to-live in seconds for old images that are "+
+		"deprecated. After this period of time, old images will enter the deleted state. Can only be used if "+
+		"'deprecate-old-images' is set. '0' indicates no time-to-live (images won't be configured to enter "+
+		"the deleted state).")
+	flags.StringVar(&f.zone, "zone", "", "Zone to make GCE resources in.")
+	flags.StringVar(&f.project, "project", "", "Project to make GCE resources in.")
+	// labels/licenses may already be non-nil when SetFlags is called more than
+	// once on the same command value.
+	if f.labels == nil {
+		f.labels = newMapVar()
+	}
+	flags.Var(f.labels, "labels", "Image labels to apply to the result image. Format is "+
+		"'key1=value1,key2=value2,...'. Example: -labels=hello=world,foo=bar")
+	if f.licenses == nil {
+		f.licenses = &listVar{}
+	}
+	flags.Var(f.licenses, "licenses", "Image licenses to apply to the result image. Format is "+
+		"'license1,license2,...' or '-licenses=license1 -licenses=license2'.")
+	flags.BoolVar(&f.inheritLabels, "inherit-labels", false, "Indicates if the result image should inherit labels "+
+		"from the source image. Labels specified through the '-labels' flag take precedence over inherited "+
+		"labels.")
+	flags.StringVar(&f.oemSize, "oem-size", "", "Size of the new OEM partition, "+
+		"can be a number with unit like 10G, 10M, 10K or 10B, "+
+		"or without unit indicating the number of 512B sectors.")
+	flags.IntVar(&f.diskSize, "disk-size-gb", 0, "The disk size to use when creating the image in GB. Value of '0' "+
+		"indicates the default size.")
+	flags.DurationVar(&f.timeout, "timeout", time.Hour, "Timeout value of the image build process. Must be formatted "+
+		"according to Golang's time.Duration string format.")
+}
+
+// validate checks flag consistency: oem-size (if set) must be at least the
+// 16MB default OEM size, exactly one of image-name/image-suffix must be set,
+// deprecate-old-images requires image-family, old-image-ttl requires
+// deprecate-old-images, and zone and project are required.
+func (f *FinishImageBuild) validate() error {
+	// The default size of the OEM partition in a COS image is assumed to be 16MB.
+	const defaultOEMSizeMB = 16
+	if f.oemSize != "" {
+		oemSizeBytes, err := partutil.ConvertSizeToBytes(f.oemSize)
+		if err != nil {
+			return fmt.Errorf("invalid format of oem-size: %q, error msg:(%v)", f.oemSize, err)
+		}
+		if oemSizeBytes < (defaultOEMSizeMB << 20) {
+			return fmt.Errorf("oem-size must be at least %dM", defaultOEMSizeMB)
+		}
+	}
+	switch {
+	case f.imageName == "" && f.imageSuffix == "":
+		return fmt.Errorf("one of 'image-name' or 'image-suffix' must be set")
+	case f.imageName != "" && f.imageSuffix != "":
+		return fmt.Errorf("'image-name' and 'image-suffix' are mutually exclusive")
+	case f.deprecateOld && f.imageFamily == "":
+		return fmt.Errorf("'deprecate-old-images' can only be used if 'image-family' is set")
+	case f.oldImageTTLSec != 0 && !f.deprecateOld:
+		return fmt.Errorf("'old-image-ttl' can only be used if 'deprecate-old-images' is set")
+	case f.zone == "":
+		return fmt.Errorf("'zone' must be set")
+	case f.project == "":
+		return fmt.Errorf("'project' must be set")
+	default:
+		return nil
+	}
+}
+
+// loadConfigs reads the source image, build, and provisioner configs from the
+// state files, applies the command-line overrides from f, and constructs the
+// output image config. It returns, in order: the source image config, the
+// build config, the output image config, and the provisioner config.
+func (f *FinishImageBuild) loadConfigs(files *fs.Files) (*config.Image, *config.Build, *config.Image, *provisioner.Config, error) {
+	sourceImageConfig := &config.Image{}
+	if err := config.LoadFromFile(files.SourceImageConfig, sourceImageConfig); err != nil {
+		return nil, nil, nil, nil, err
+	}
+	// -image-suffix derives the output name from the source image name.
+	imageName := f.imageName
+	if f.imageSuffix != "" {
+		imageName = sourceImageConfig.Name + f.imageSuffix
+	}
+	buildConfig := &config.Build{}
+	if err := config.LoadFromFile(files.BuildConfig, buildConfig); err != nil {
+		return nil, nil, nil, nil, err
+	}
+	buildConfig.Project = f.project
+	buildConfig.Zone = f.zone
+	buildConfig.DiskSize = f.diskSize
+	buildConfig.Timeout = f.timeout.String()
+	provConfig := &provisioner.Config{}
+	if err := config.LoadFromFile(files.ProvConfig, provConfig); err != nil {
+		return nil, nil, nil, nil, err
+	}
+	provConfig.BootDisk.OEMSize = f.oemSize
+	outputImageConfig := config.NewImage(imageName, f.imageProject)
+	outputImageConfig.Labels = f.labels.m
+	outputImageConfig.Licenses = f.licenses.l
+	outputImageConfig.Family = f.imageFamily
+	return sourceImageConfig, buildConfig, outputImageConfig, provConfig, nil
+}
+
+// hasSealOEM reports whether the provisioner config contains a "SealOEM" step.
+func hasSealOEM(provConfig *provisioner.Config) bool {
+	for _, s := range provConfig.Steps {
+		if s.Type == "SealOEM" {
+			return true
+		}
+	}
+	return false
+}
+
+// validateOEM checks that the configured disk size can accommodate the
+// requested OEM partition — doubled when a SealOEM step is present (to hold
+// the dm-verity hash tree), and reduced by the 2046MB reclaimed from sda3
+// when auto-update is disabled — and normalizes provConfig.BootDisk.OEMSize
+// and OEMFSSize4K for the provisioner. It returns an error when the OEM size
+// is malformed or the disk is too small.
+func validateOEM(buildConfig *config.Build, provConfig *provisioner.Config) error {
+	// The default size of a COS image (imgSize) is assumed to be 10GB.
+	const imgSize uint64 = 10
+	// If auto-update is disabled, 2046MB will be reclaimed.
+	// The size of sda3 is 2GB. We don't want to delete the partition,
+	// so we need to leave some space in the sda3. And `sfdisk --move-data`
+	// in some situations requires 1MB free space in the moving direction.
+	// Therefore, leaving 2MB after the start of sda3 is a safe choice.
+	// Also, this will make sure the start point of the next partition is
+	// 4K aligned.
+	const reclaimedMB uint64 = 2046
+	const reclaimedBytes uint64 = reclaimedMB << 20
+	var sizeError error
+	var oemSizeBytes uint64
+	var err error
+	if !hasSealOEM(provConfig) {
+		if provConfig.BootDisk.OEMSize == "" {
+			return nil
+		}
+		// no need to seal the OEM partition.
+		// If the OEM partition is to be extended, the following must be true:
+		// disk-size >= imgSize + oem-size - reclaimed-size.
+		if provConfig.BootDisk.ReclaimSDA3 {
+			// NOTE(review): this message is missing a space after 'oem-size'.
+			sizeError = fmt.Errorf("'disk-size-gb' must be at least 'oem-size'- reclaimed space "+
+				"(%dMB) + image size (%dGB)", reclaimedMB, imgSize)
+		} else {
+			sizeError = fmt.Errorf("'disk-size-gb' must be at least 'oem-size' + image size (%dGB)", imgSize)
+		}
+		oemSizeBytes, err = partutil.ConvertSizeToBytes(provConfig.BootDisk.OEMSize)
+		if err != nil {
+			return fmt.Errorf("invalid format of oem-size: %q, error msg:(%v)", provConfig.BootDisk.OEMSize, err)
+		}
+	} else {
+		// `seal-oem` will automatically disable auto-update and reclaim sda3.
+		if provConfig.BootDisk.OEMSize == "" {
+			// If need to seal OEM partition and the oem-size is not set,
+			// assume the OEM fs size is 16M as it is in a COS image,
+			// and the OEM partition size is doubled to 32M.
+			// It will use space reclaimed from sda3.
+			provConfig.BootDisk.OEMSize = "32M"
+			provConfig.BootDisk.OEMFSSize4K = 4096
+			return nil
+		}
+		// need extra space to seal the OEM partition.
+		// The OEM partition size should be doubled to store the
+		// hash tree of dm-verity. The following must be true:
+		// disk-size >= imgSize + oem-size x 2 - reclaimed-size.
+		sizeError = fmt.Errorf("'disk-size-gb' must be at least 'oem-size' x 2 - reclaimed space "+
+			"(%dMB) + image size (%dGB)", reclaimedMB, imgSize)
+
+		oemSizeBytes, err = partutil.ConvertSizeToBytes(provConfig.BootDisk.OEMSize)
+		if err != nil {
+			return fmt.Errorf("invalid format of oem-size: %q, error msg:(%v)", provConfig.BootDisk.OEMSize, err)
+		}
+		// Record the file system size (in 4K blocks) before doubling.
+		provConfig.BootDisk.OEMFSSize4K = oemSizeBytes >> 12
+		// double the oem size.
+		oemSizeBytes <<= 1
+	}
+	// Since we allow user input like "500M", and the "resize-disk" API can only take GB as input,
+	// the oem-size is rounded up to GB to make sure there is enough space.
+	// If the auto-update is disabled, space in sda3 will be reclaimed and used.
+	// Extra space will be taken by the stateful partition.
+	oemSizeReclaimBytes := oemSizeBytes
+	if provConfig.BootDisk.ReclaimSDA3 {
+		if oemSizeReclaimBytes <= reclaimedBytes {
+			oemSizeReclaimBytes = 0
+		} else {
+			oemSizeReclaimBytes -= reclaimedBytes
+		}
+	}
+	oemSizeGB, err := partutil.ConvertSizeToGBRoundUp(strconv.FormatUint(oemSizeReclaimBytes, 10) + "B")
+	if err != nil {
+		return fmt.Errorf("invalid format of oem-size: %q, error msg:(%v)", provConfig.BootDisk.OEMSize, err)
+	}
+	// If no disk-size-gb input, assume the default image size to be 10GB.
+	var diskSize uint64 = imgSize
+	if buildConfig.DiskSize != 0 {
+		diskSize = (uint64)(buildConfig.DiskSize)
+	}
+	if diskSize < imgSize+oemSizeGB {
+		return sizeError
+	}
+	// Shrink OEM size input (rounded down) by 1MB to deal with cases
+	// where disk size is 1MB smaller than needed.
+	// This will take 1MB from the hash tree part (the second half)
+	// of the OEM partition if seal-oem is set. Otherwise, it will
+	// take 1MB from user data space of the OEM partition.
+	// For example oem-size=1G, disk-size-gb=11, seal-oem not set.
+	// Or oem-size=1G, disk-size-gb=11, seal-oem set.
+	// In those cases the disk size is not large enough without shrinking
+	// the OEM partition size by 1MB.
+	provConfig.BootDisk.OEMSize = strconv.FormatUint((oemSizeBytes>>20)-1, 10) + "M"
+	return nil
+}
+
+// update copies each entry of src into dst unless dst already has the key;
+// existing entries in dst take precedence.
+func update(dst, src map[string]string) {
+	for k, v := range src {
+		if _, ok := dst[k]; !ok {
+			dst[k] = v
+		}
+	}
+}
+
+// Execute implements subcommands.Command.Execute. It gathers image configuration parameters
+// and creates a GCE image. args[0] must be a *fs.Files with the build state
+// file paths; args[1] must be a ServiceClients factory producing the GCE
+// service and GCS client.
+func (f *FinishImageBuild) Execute(ctx context.Context, flags *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+	if flags.NArg() != 0 {
+		flags.Usage()
+		return subcommands.ExitUsageError
+	}
+	files := args[0].(*fs.Files)
+	defer files.CleanupAllPersistent()
+	svc, gcsClient, err := args[1].(ServiceClients)(ctx, false)
+	if err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	defer gcsClient.Close()
+	if err := f.validate(); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	sourceImage, buildConfig, outputImage, provConfig, err := f.loadConfigs(files)
+	if err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := validateOEM(buildConfig, provConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	exists, err := gce.ImageExists(svc, outputImage.Project, outputImage.Name)
+	if err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if exists {
+		// A pre-existing output image is treated as success so that reruns
+		// of the build are idempotent.
+		log.Printf("Result image %s already exists in project %s. Exiting.\n", outputImage.Name, outputImage.Project)
+		return subcommands.ExitSuccess
+	}
+	if f.inheritLabels {
+		image, err := svc.Images.Get(sourceImage.Project, sourceImage.Name).Do()
+		if err != nil {
+			log.Println(err)
+			return subcommands.ExitFailure
+		}
+		// Labels from -labels win over inherited source-image labels.
+		update(outputImage.Labels, image.Labels)
+	}
+	if err := preloader.BuildImage(ctx, gcsClient, files, sourceImage, outputImage, buildConfig, provConfig); err != nil {
+		if _, ok := err.(*exec.ExitError); ok {
+			log.Printf("command failed: %s. See stdout logs for details", err)
+			return subcommands.ExitFailure
+		}
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if f.deprecateOld {
+		if err := gce.DeprecateInFamily(ctx, svc, outputImage, f.oldImageTTLSec); err != nil {
+			log.Printf("deprecating images failed: %s", err)
+			return subcommands.ExitFailure
+		}
+	}
+	return subcommands.ExitSuccess
+}
diff --git a/src/cmd/cos_customizer/finish_image_build_test.go b/src/cmd/cos_customizer/finish_image_build_test.go
new file mode 100644
index 0000000..082afdb
--- /dev/null
+++ b/src/cmd/cos_customizer/finish_image_build_test.go
@@ -0,0 +1,193 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fakes"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+
+	"cloud.google.com/go/storage"
+	"github.com/google/subcommands"
+	compute "google.golang.org/api/compute/v1"
+)
+
+// executeFinishBuild runs the finish-image-build command with the given flags,
+// wiring in the provided (fake) GCE service and GCS client. It returns a
+// non-nil error if flag parsing fails or the command does not exit successfully.
+func executeFinishBuild(files *fs.Files, svc *compute.Service, gcs *storage.Client, flags ...string) (subcommands.ExitStatus, error) {
+	clients := ServiceClients(func(_ context.Context, _ bool) (*compute.Service, *storage.Client, error) {
+		return svc, gcs, nil
+	})
+	flagSet := &flag.FlagSet{}
+	finishBuild := &FinishImageBuild{}
+	finishBuild.SetFlags(flagSet)
+	if err := flagSet.Parse(flags); err != nil {
+		return 0, err
+	}
+	ret := finishBuild.Execute(context.Background(), flagSet, files, clients)
+	if ret != subcommands.ExitSuccess {
+		return ret, fmt.Errorf("FinishImageBuild failed; input: %v", flags)
+	}
+	return ret, nil
+}
+
+// setupFinishBuildFiles creates a temp directory populated with the minimal
+// state files finish-image-build needs: a daisy workflow stub, a build config,
+// an empty provisioner config, a source image config, and a user build context
+// archive. DaisyBin is /bin/true so the daisy invocation is a no-op.
+// The caller is responsible for removing the returned directory.
+func setupFinishBuildFiles() (string, *fs.Files, error) {
+	tmpDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return "", nil, err
+	}
+	files := &fs.Files{}
+	files.DaisyWorkflow, err = createTempFile(tmpDir)
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	buildConfigFile, err := ioutil.TempFile(tmpDir, "")
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	if err := config.SaveConfigToFile(buildConfigFile, &config.Build{GCSBucket: "b", GCSDir: "d"}); err != nil {
+		buildConfigFile.Close()
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	if err := buildConfigFile.Close(); err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	files.BuildConfig = buildConfigFile.Name()
+	files.ProvConfig, err = createTempFile(tmpDir)
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	// An empty JSON object is a valid, empty provisioner config.
+	if err := ioutil.WriteFile(files.ProvConfig, []byte("{}"), 0644); err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	sourceImageFile, err := ioutil.TempFile(tmpDir, "")
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	if err := config.Save(sourceImageFile, config.NewImage("in", "p")); err != nil {
+		sourceImageFile.Close()
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	if err := sourceImageFile.Close(); err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	files.SourceImageConfig = sourceImageFile.Name()
+	files.DaisyBin = "/bin/true"
+	files.UserBuildContextArchive, err = createTempFile(tmpDir)
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	return tmpDir, files, nil
+}
+
+// TestOutputImageExists verifies that the build exits successfully without
+// invoking daisy (DaisyBin is /bin/false, which would fail the build) when an
+// image with the requested -image-name already exists.
+func TestOutputImageExists(t *testing.T) {
+	tmpDir, files, err := setupFinishBuildFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gcs := fakes.GCSForTest(t)
+	gce, svc := fakes.GCEForTest(t, "p")
+	gce.Images = &compute.ImageList{Items: []*compute.Image{{Name: "out"}}}
+	files.DaisyBin = "/bin/false"
+	if _, err := executeFinishBuild(files, svc, gcs.Client, "-project=p", "-zone=z", "-image-name=out", "-image-project=p"); err != nil {
+		t.Logf("images: %v", gce.Images)
+		t.Errorf("FinishImageBuild.Execute(-image-name=out -image-project=p); daisy shouldn't execute if image exists; err: %q", err)
+	}
+}
+
+// TestOutputImageSuffixExists is the -image-suffix variant of
+// TestOutputImageExists: the output name is source name + suffix ("in-out").
+func TestOutputImageSuffixExists(t *testing.T) {
+	tmpDir, files, err := setupFinishBuildFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gcs := fakes.GCSForTest(t)
+	gce, svc := fakes.GCEForTest(t, "p")
+	gce.Images = &compute.ImageList{Items: []*compute.Image{{Name: "in-out"}}}
+	files.DaisyBin = "/bin/false"
+	if _, err := executeFinishBuild(files, svc, gcs.Client, "-project=p", "-zone=z", "-image-suffix=-out", "-image-project=p"); err != nil {
+		t.Logf("images: %v", gce.Images)
+		t.Errorf("FinishImageBuild.Execute(-image-suffix=-out -image-project=p); daisy shouldn't execute if image exists; err: %q", err)
+	}
+}
+
+// TestDeprecateImages verifies that older images in the target family are
+// deprecated when -deprecate-old-images is set.
+func TestDeprecateImages(t *testing.T) {
+	tmpDir, files, err := setupFinishBuildFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gcs := fakes.GCSForTest(t)
+	gce, svc := fakes.GCEForTest(t, "p")
+	gce.Images = &compute.ImageList{Items: []*compute.Image{{Name: "old", Family: "f"}}}
+	gce.Operations = []*compute.Operation{{Status: "DONE"}}
+	if _, err := executeFinishBuild(files, svc, gcs.Client, "-project=p", "-zone=z", "-image-name=out", "-image-project=p", "-image-family=f", "-deprecate-old-images"); err != nil {
+		t.Fatal(err)
+	}
+	if _, ok := gce.Deprecated["old"]; !ok {
+		t.Errorf("Image 'old' is not deprecated; deprecated images: %v", gce.Deprecated)
+	}
+}
+
+// TestValidateFailure verifies that invalid flag combinations (malformed
+// -timeout, disk too small for the requested -oem-size) make
+// finish-image-build fail.
+func TestValidateFailure(t *testing.T) {
+	tests := []struct {
+		name      string
+		flags     []string
+		expectErr bool
+		msg       string
+	}{
+		{
+			name:      "Timeout",
+			flags:     []string{"-project=p", "-zone=z", "-image-name=out", "-image-project=p", "-image-family=f", "-timeout=t"},
+			expectErr: true,
+			msg:       "'timeout' value should be invalid",
+		}, {
+			name:      "SmallDiskSize",
+			flags:     []string{"-project=p", "-zone=z", "-image-name=out", "-image-project=p", "-image-family=f", "-disk-size-gb=11", "-oem-size=3072M"},
+			expectErr: true,
+			msg:       "disk size should be invalid",
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			tmpDir, files, err := setupFinishBuildFiles()
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tmpDir)
+			gcs := fakes.GCSForTest(t)
+			_, svc := fakes.GCEForTest(t, "p")
+			if _, err := executeFinishBuild(files, svc, gcs.Client, test.flags...); test.expectErr && err == nil {
+				t.Errorf("Got nil, want error; %s", test.msg)
+			}
+		})
+	}
+}
diff --git a/src/cmd/cos_customizer/flag_vars.go b/src/cmd/cos_customizer/flag_vars.go
new file mode 100644
index 0000000..15bedf5
--- /dev/null
+++ b/src/cmd/cos_customizer/flag_vars.go
@@ -0,0 +1,70 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
// mapVar implements flag.Value for a map flag variable. Example:
// "-my-map a=A,b=B,c=C" results in {"a": "A", "b": "B", "c": "C"}
type mapVar struct {
	m map[string]string
}

// newMapVar returns an empty mapVar.
func newMapVar() *mapVar {
	return &mapVar{m: map[string]string{}}
}

// String implements flag.Value.String. It renders the accumulated map as
// JSON (keys are emitted in sorted order by encoding/json).
func (mv *mapVar) String() string {
	encoded, _ := json.Marshal(mv.m)
	return string(encoded)
}

// Set implements flag.Value.Set. It parses the given string and adds the
// encoded map values to the mapVar. Repeated calls accumulate entries; only
// the first '=' in each item separates key from value.
func (mv *mapVar) Set(s string) error {
	for _, item := range strings.Split(s, ",") {
		kv := strings.SplitN(item, "=", 2)
		if len(kv) != 2 {
			return fmt.Errorf("item %q is improperly formatted; does it have an '=' character?", item)
		}
		mv.m[kv[0]] = kv[1]
	}
	return nil
}
+
// listVar implements flag.Value for a list flag variable. Example:
// "-my-list a,b,c,d" results in {"a", "b", "c", "d"}
type listVar struct {
	l []string
}

// String implements flag.Value.String. It renders the accumulated list as
// JSON.
func (lv *listVar) String() string {
	encoded, _ := json.Marshal(lv.l)
	return string(encoded)
}

// Set implements flag.Value.Set. It parses the given string and adds the
// encoded list values to the listVar. Repeated calls append to the list.
func (lv *listVar) Set(s string) error {
	lv.l = append(lv.l, strings.Split(s, ",")...)
	return nil
}
diff --git a/src/cmd/cos_customizer/flag_vars_test.go b/src/cmd/cos_customizer/flag_vars_test.go
new file mode 100644
index 0000000..6f27ae8
--- /dev/null
+++ b/src/cmd/cos_customizer/flag_vars_test.go
@@ -0,0 +1,90 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+// TestMapVar verifies mapVar.Set across single flags, repeated flags
+// (entries accumulate), and values containing '=' (only the first '='
+// separates key from value).
+func TestMapVar(t *testing.T) {
+	var testData = []struct {
+		testName string
+		flags    []string
+		want     map[string]string
+	}{
+		{
+			"OneFlag",
+			[]string{"a=A,b=B"},
+			map[string]string{"a": "A", "b": "B"},
+		},
+		{
+			"TwoFlag",
+			[]string{"a=A,b=B", "c=C"},
+			map[string]string{"a": "A", "b": "B", "c": "C"},
+		},
+		{
+			"MultipleEquals",
+			[]string{"a=A=B=C,d=D"},
+			map[string]string{"a": "A=B=C", "d": "D"},
+		},
+	}
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			mv := newMapVar()
+			for _, flag := range input.flags {
+				if err := mv.Set(flag); err != nil {
+					t.Fatalf("mapVar.Set(%s) = %s; want nil", flag, err)
+				}
+			}
+			if diff := cmp.Diff(mv.m, input.want); diff != "" {
+				t.Errorf("mapVar: got unexpected result with flags %v: diff (-got, +want):\n%v", input.flags, diff)
+			}
+		})
+	}
+}
+
+// TestListVar verifies listVar.Set for a single comma-separated flag and for
+// repeated flags (elements append in order).
+func TestListVar(t *testing.T) {
+	var testData = []struct {
+		testName string
+		flags    []string
+		want     []string
+	}{
+		{
+			"OneFlag",
+			[]string{"a,b"},
+			[]string{"a", "b"},
+		},
+		{
+			"TwoFlag",
+			[]string{"a", "b"},
+			[]string{"a", "b"},
+		},
+	}
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			lv := &listVar{}
+			for _, flag := range input.flags {
+				if err := lv.Set(flag); err != nil {
+					t.Fatalf("listVar.Set(%s) = %s; want nil", flag, err)
+				}
+			}
+			if got := lv.l; !cmp.Equal(got, input.want) {
+				t.Errorf("listVar: got unexpected result with flags %v: got %v, want %v", input.flags, got, input.want)
+			}
+		})
+	}
+}
diff --git a/src/cmd/cos_customizer/install_gpu.go b/src/cmd/cos_customizer/install_gpu.go
new file mode 100644
index 0000000..34c817f
--- /dev/null
+++ b/src/cmd/cos_customizer/install_gpu.go
@@ -0,0 +1,307 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+	"text/template"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+
+	"cloud.google.com/go/storage"
+	"github.com/google/subcommands"
+	"google.golang.org/api/iterator"
+)
+
+const (
+	// gpuScript names the GPU setup script; it is not referenced in this
+	// file's visible code — presumably used elsewhere in the package.
+	gpuScript          = "install_gpu.sh"
+	// installerContainer is the pinned cos-gpu-installer image recorded in
+	// the generated InstallGPU provisioner step.
+	installerContainer = "gcr.io/cos-cloud/cos-gpu-installer:v20210319"
+)
+
+// TODO(b/121332360): Move most GPU functionality to cos-gpu-installer
+var (
+	// validGPUs is the closed set of values accepted by -gpu-type.
+	validGPUs = []string{"nvidia-tesla-k80", "nvidia-tesla-p100", "nvidia-tesla-v100"}
+)
+
+// InstallGPU implements subcommands.Command for the "install-gpu" command.
+// This command configures the current image build process to customize the result image
+// with GPU drivers.
+type InstallGPU struct {
+	// NvidiaDriverVersion is either a plain driver version (e.g. "390.46")
+	// or the file name of an nvidia installer (*.run) under gpuDataDir.
+	NvidiaDriverVersion  string
+	// NvidiaDriverMd5sum is the optional md5sum of the driver to install.
+	NvidiaDriverMd5sum   string
+	// NvidiaInstallDirHost is where drivers are installed on the image.
+	NvidiaInstallDirHost string
+	// gpuType is the GPU type to verify drivers for; must be in validGPUs.
+	gpuType              string
+	// getValidDrivers, when set, makes Execute only print supported versions.
+	getValidDrivers      bool
+	// gpuDataDir is the optional local dir of cos-gpu-installer data deps.
+	gpuDataDir           string
+}
+
+// Name implements subcommands.Command.Name.
+func (*InstallGPU) Name() string {
+	return "install-gpu"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (*InstallGPU) Synopsis() string {
+	return "Configure the image build with GPU drivers."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (*InstallGPU) Usage() string {
+	return `install-gpu [flags]
+`
+}
+
+// SetFlags implements subcommands.Command.SetFlags. Defaults: install dir
+// "/var/lib/nvidia", GPU type "nvidia-tesla-p100".
+func (i *InstallGPU) SetFlags(f *flag.FlagSet) {
+	f.StringVar(&i.NvidiaDriverVersion, "version", "", "Driver version to install. Can also be the name of an nvidia installer present in the "+
+		"directory specified by '-deps-dir'; e.g., NVIDIA-Linux-x86_64-450.51.06.run.")
+	f.StringVar(&i.NvidiaDriverMd5sum, "md5sum", "", "Md5sum of the driver to install.")
+	f.StringVar(&i.NvidiaInstallDirHost, "install-dir", "/var/lib/nvidia",
+		"Location to install drivers on the image.")
+	f.StringVar(
+		&i.gpuType, "gpu-type", "nvidia-tesla-p100",
+		fmt.Sprintf("The type of GPU to verify drivers for. Must be one of: %v", validGPUs))
+	f.BoolVar(
+		&i.getValidDrivers, "get-valid-drivers", false,
+		"Print the list of supported GPU driver versions. If this flag is given, no other actions will be taken.")
+	f.StringVar(&i.gpuDataDir, "deps-dir", "", "If provided, the local directory to search for cos-gpu-installer data dependencies. "+
+		"The exact data dependencies that must be present in this directory depends on the version of cos-gpu-installer "+
+		"used by cos-customizer. Do not expect this flag to be stable; it exists for compatibility with pre-release COS images.")
+}
+
+// validDriverVersions returns the set of supported driver versions (map
+// value is always true), computed by listing the public
+// nvidia-drivers-us-public GCS bucket. Any listing error is returned as-is.
+func validDriverVersions(ctx context.Context, gcsClient *storage.Client) (map[string]bool, error) {
+	// We gather the set of valid drivers from the set of drivers provided by Nvidia in their GCS bucket.
+	// Nominally, paths we care about in this bucket look like 'tesla/<version>/<binaries>'. Version 390.46 has
+	// a deprecated path structure, and since it's supported by cos-gpu-installer, we special case that here.
+	validDrivers := map[string]bool{"390.46": true}
+	query := &storage.Query{Prefix: "tesla/"}
+	it := gcsClient.Bucket("nvidia-drivers-us-public").Objects(ctx, query)
+	for {
+		objAttrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		// Example object: tesla/396.26/NVIDIA-Linux-x86_64-396.26-diagnostic.run
+		// The second path component is the version string.
+		if splitPath := strings.SplitN(objAttrs.Name, "/", 3); len(splitPath) > 1 {
+			validDrivers[splitPath[1]] = true
+		}
+	}
+	return validDrivers, nil
+}
+
+func (i *InstallGPU) validate(ctx context.Context, gcsClient *storage.Client, files *fs.Files, provConfig *provisioner.Config) error {
+	isValidGPU := false
+	for _, g := range validGPUs {
+		if i.gpuType == g {
+			isValidGPU = true
+			break
+		}
+	}
+	if !isValidGPU {
+		return fmt.Errorf("%q is an invalid GPU type. Must be one of: %v", i.gpuType, validGPUs)
+	}
+	if i.NvidiaDriverVersion == "" {
+		return fmt.Errorf("version must be set")
+	}
+	var gpuAlreadyConf bool
+	for _, s := range provConfig.Steps {
+		if s.Type == "InstallGPU" {
+			gpuAlreadyConf = true
+			break
+		}
+	}
+	if gpuAlreadyConf {
+		return fmt.Errorf("install-gpu can only be invoked once in an image build process. Only one driver version can be installed on the image")
+	}
+	if strings.HasSuffix(i.NvidiaDriverVersion, ".run") {
+		log.Printf("driver version is set to %q, which looks like an nvidia installer file", i.NvidiaDriverVersion)
+		if i.gpuDataDir == "" {
+			return errors.New(`"-deps-dir" must be set when the version is specified as an nvidia installer file`)
+		}
+		fileName := filepath.Join(i.gpuDataDir, i.NvidiaDriverVersion)
+		info, err := os.Stat(fileName)
+		if os.IsNotExist(err) {
+			return fmt.Errorf("nvidia installer file at %q does not exist", fileName)
+		}
+		if info.IsDir() {
+			return fmt.Errorf("nvidia installer file at %q is a directory", fileName)
+		}
+	} else {
+		validDrivers, err := validDriverVersions(ctx, gcsClient)
+		if err != nil {
+			return err
+		}
+		if !validDrivers[i.NvidiaDriverVersion] {
+			var drivers []string
+			for d := range validDrivers {
+				drivers = append(drivers, d)
+			}
+			return fmt.Errorf("driver version %s is not valid; valid driver versions are: %v", i.NvidiaDriverVersion, drivers)
+		}
+	}
+	return nil
+}
+
+// templateScript expands the text/template at scriptPath in place. Template
+// data: the shell-quoted driver version, md5sum, host install dir, and
+// SetCOSDownloadGCS ("true" when -deps-dir was provided, else empty).
+func (i *InstallGPU) templateScript(scriptPath string) error {
+	setCOSDownloadGCS := ""
+	if i.gpuDataDir != "" {
+		setCOSDownloadGCS = "true"
+	}
+	data := struct {
+		NvidiaDriverVersion  string
+		NvidiaDriverMd5sum   string
+		NvidiaInstallDirHost string
+		SetCOSDownloadGCS    string
+	}{
+		NvidiaDriverVersion:  utils.QuoteForShell(i.NvidiaDriverVersion),
+		NvidiaDriverMd5sum:   utils.QuoteForShell(i.NvidiaDriverMd5sum),
+		NvidiaInstallDirHost: utils.QuoteForShell(i.NvidiaInstallDirHost),
+		SetCOSDownloadGCS:    utils.QuoteForShell(setCOSDownloadGCS),
+	}
+	// The template is fully parsed into memory before os.Create truncates
+	// the file, so rewriting scriptPath in place is safe.
+	tmpl, err := template.New(filepath.Base(scriptPath)).ParseFiles(scriptPath)
+	if err != nil {
+		return err
+	}
+	w, err := os.Create(scriptPath)
+	if err != nil {
+		return err
+	}
+	// NOTE(review): any error from the deferred Close is discarded; the
+	// Execute error below is the only signal callers see.
+	defer w.Close()
+	return tmpl.Execute(w, data)
+}
+
+// updateBuildConfig records the GPU type in the build config at configPath
+// and, when -deps-dir was given, registers every regular file in that
+// directory in buildConfig.GCSFiles. The config file is rewritten in place.
+func (i *InstallGPU) updateBuildConfig(configPath string) error {
+	buildConfig := &config.Build{}
+	configFile, err := os.OpenFile(configPath, os.O_RDWR, 0666)
+	if err != nil {
+		return err
+	}
+	defer configFile.Close()
+	if err := config.Load(configFile, buildConfig); err != nil {
+		return err
+	}
+	buildConfig.GPUType = i.gpuType
+	if i.gpuDataDir != "" {
+		files, err := ioutil.ReadDir(i.gpuDataDir)
+		if err != nil {
+			return fmt.Errorf("error reading dir %q: %v", i.gpuDataDir, err)
+		}
+		for _, f := range files {
+			// Only regular files are uploaded; subdirectories are skipped.
+			if f.Mode().IsRegular() {
+				buildConfig.GCSFiles = append(buildConfig.GCSFiles, filepath.Join(i.gpuDataDir, f.Name()))
+			}
+		}
+	}
+	// Rewind so the save overwrites from the start of the file.
+	// NOTE(review): the file is rewound but not truncated here; assuming
+	// config.SaveConfigToFile truncates or the new content is never shorter
+	// — verify against the config package.
+	if _, err := configFile.Seek(0, 0); err != nil {
+		return err
+	}
+	return config.SaveConfigToFile(configFile, buildConfig)
+}
+
+func (i *InstallGPU) updateProvConfig(provConfig *provisioner.Config) error {
+	buf, err := json.Marshal(&provisioner.InstallGPUStep{
+		NvidiaDriverVersion:      i.NvidiaDriverVersion,
+		NvidiaDriverMD5Sum:       i.NvidiaDriverMd5sum,
+		NvidiaInstallDirHost:     i.NvidiaInstallDirHost,
+		NvidiaInstallerContainer: installerContainer,
+		// GCSDepsPrefix will be converted into a gs:// address by the preloader
+		// package.
+		GCSDepsPrefix: i.gpuDataDir,
+	})
+	if err != nil {
+		return err
+	}
+	provConfig.Steps = append(provConfig.Steps, provisioner.StepConfig{
+		Type: "InstallGPU",
+		Args: json.RawMessage(buf),
+	})
+	return nil
+}
+
+// Execute implements subcommands.Command.Execute. It configures the current image build process to
+// customize the result image with GPU drivers.
+//
+// args must be (*fs.Files, ServiceClients). With -get-valid-drivers the
+// command only prints the supported driver versions; otherwise it validates
+// the flags, records the GPU type (and any -deps-dir files) in the build
+// config, and appends an InstallGPU step to the provisioner config.
+func (i *InstallGPU) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+	if f.NArg() != 0 {
+		f.Usage()
+		return subcommands.ExitUsageError
+	}
+	if len(args) < 2 {
+		log.Panic("InstallGPU expects two arguments; *fs.Files and ServiceClients")
+	}
+	files, ok := args[0].(*fs.Files)
+	if !ok {
+		log.Panic("InstallGPU expects two arguments; *fs.Files and ServiceClients")
+	}
+	serviceClients, ok := args[1].(ServiceClients)
+	if !ok {
+		log.Panic("InstallGPU expects two arguments; *fs.Files and ServiceClients")
+	}
+	// Anonymous credentials (second argument true) suffice here: GCS is only
+	// used to read the public nvidia drivers bucket.
+	_, gcsClient, err := serviceClients(ctx, true)
+	if err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	defer gcsClient.Close()
+	if i.getValidDrivers {
+		// -get-valid-drivers short-circuits before any state files are read,
+		// so a nil *fs.Files is tolerated in this mode.
+		validDrivers, err := validDriverVersions(ctx, gcsClient)
+		if err != nil {
+			log.Println(err)
+			return subcommands.ExitFailure
+		}
+		var drivers []string
+		for d := range validDrivers {
+			drivers = append(drivers, d)
+		}
+		log.Printf("Valid driver versions are: %v\n", drivers)
+		return subcommands.ExitSuccess
+	}
+	var provConfig provisioner.Config
+	if err := config.LoadFromFile(files.ProvConfig, &provConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := i.validate(ctx, gcsClient, files, &provConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := i.updateBuildConfig(files.BuildConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := i.updateProvConfig(&provConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := config.SaveConfigToPath(files.ProvConfig, &provConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	return subcommands.ExitSuccess
+}
diff --git a/src/cmd/cos_customizer/install_gpu_test.go b/src/cmd/cos_customizer/install_gpu_test.go
new file mode 100644
index 0000000..982f029
--- /dev/null
+++ b/src/cmd/cos_customizer/install_gpu_test.go
@@ -0,0 +1,276 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fakes"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+
+	"cloud.google.com/go/storage"
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/subcommands"
+	compute "google.golang.org/api/compute/v1"
+)
+
+// executeInstallGPU runs the install-gpu subcommand with the given flags,
+// wiring in the provided state files and GCS client through a stubbed
+// ServiceClients (the compute service is nil). It returns an error if flag
+// parsing fails or the command does not exit successfully.
+func executeInstallGPU(ctx context.Context, files *fs.Files, gcs *storage.Client, flgs ...string) (subcommands.ExitStatus, error) {
+	clients := ServiceClients(func(context.Context, bool) (*compute.Service, *storage.Client, error) {
+		return nil, gcs, nil
+	})
+	fs := &flag.FlagSet{}
+	installGPU := &InstallGPU{}
+	installGPU.SetFlags(fs)
+	if err := fs.Parse(flgs); err != nil {
+		return 0, err
+	}
+	ret := installGPU.Execute(ctx, fs, files, clients)
+	if ret != subcommands.ExitSuccess {
+		return ret, fmt.Errorf("InstallGPU failed. input: %v", flgs)
+	}
+	return ret, nil
+}
+
+// TestGetValidDriverVersions verifies that validDriverVersions derives
+// version strings from tesla/<version>/... object paths and always includes
+// the special-cased 390.46, even for an empty bucket.
+func TestGetValidDriverVersions(t *testing.T) {
+	testData := []struct {
+		testName string
+		objects  map[string][]byte
+		want     map[string]bool
+	}{
+		{
+			"NonEmpty",
+			map[string][]byte{
+				"/nvidia-drivers-us-public/tesla/396.26/obj-1": nil,
+				"/nvidia-drivers-us-public/tesla/396.44/obj-2": nil,
+			},
+			map[string]bool{"390.46": true, "396.26": true, "396.44": true},
+		},
+		{
+			"Empty",
+			nil,
+			map[string]bool{"390.46": true},
+		},
+	}
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			gcs.Objects = input.objects
+			got, err := validDriverVersions(context.Background(), gcs.Client)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !cmp.Equal(got, input.want) {
+				t.Errorf("validDriverVersions; got %v, want %v; objects:\n%v", got, input.want, input.objects)
+			}
+		})
+	}
+}
+
+// TestGetValidDriverVersionsNoOp verifies that -get-valid-drivers
+// short-circuits before any state files are needed: passing a nil *fs.Files
+// must not crash the command.
+func TestGetValidDriverVersionsNoOp(t *testing.T) {
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	if _, err := executeInstallGPU(context.Background(), nil, gcs.Client, "-get-valid-drivers"); err != nil {
+		t.Fatalf("install-gpu(-get-valid-drivers); failed with nil files input; err %q; should succeed", err)
+	}
+}
+
+// setupInstallGPUFiles creates a temp dir containing an empty provisioner
+// config ("{}") and an empty build config, and returns the dir (the caller
+// is responsible for removing it) plus the fs.Files pointing at both. On any
+// error the temp dir is cleaned up before returning.
+func setupInstallGPUFiles() (string, *fs.Files, error) {
+	tmpDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return "", nil, err
+	}
+	files := &fs.Files{}
+	files.ProvConfig, err = createTempFile(tmpDir)
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	if err := ioutil.WriteFile(files.ProvConfig, []byte("{}"), 0644); err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	buildConfigFile, err := ioutil.TempFile(tmpDir, "")
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	if err := config.Save(buildConfigFile, struct{}{}); err != nil {
+		buildConfigFile.Close()
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	if err := buildConfigFile.Close(); err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	files.BuildConfig = buildConfigFile.Name()
+	return tmpDir, files, nil
+}
+
+// TestInstallGPUBuildConfig verifies that install-gpu records the -gpu-type
+// value in the build config.
+func TestInstallGPUBuildConfig(t *testing.T) {
+	tmpDir, files, err := setupInstallGPUFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	if _, err := executeInstallGPU(context.Background(), files, gcs.Client, "-version=390.46", "-gpu-type=nvidia-tesla-k80"); err != nil {
+		t.Fatal(err)
+	}
+	buildConfig := &config.Build{}
+	if err := config.LoadFromFile(files.BuildConfig, buildConfig); err != nil {
+		t.Fatal(err)
+	}
+	if got := buildConfig.GPUType; got != "nvidia-tesla-k80" {
+		t.Errorf("install-gpu(-version=390.46 -gpu-type=nvidia-tesla-k80); GPU; got %s, want nvidia-tesla-k80", buildConfig.GPUType)
+	}
+}
+
+// TestInstallGPUBuildConfigGCSFiles verifies that install-gpu registers each
+// regular file under -deps-dir in buildConfig.GCSFiles.
+func TestInstallGPUBuildConfigGCSFiles(t *testing.T) {
+	tmpDir, files, err := setupInstallGPUFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	depsDir := filepath.Join(tmpDir, "deps")
+	if err := os.Mkdir(depsDir, 0755); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(filepath.Join(depsDir, "test-file"), []byte("test-file"), 0644); err != nil {
+		t.Fatal(err)
+	}
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	if _, err := executeInstallGPU(context.Background(), files, gcs.Client, "-version=390.46", "-deps-dir="+depsDir); err != nil {
+		t.Fatal(err)
+	}
+	buildConfig := &config.Build{}
+	if err := config.LoadFromFile(files.BuildConfig, buildConfig); err != nil {
+		t.Fatal(err)
+	}
+	want := filepath.Join(depsDir, "test-file")
+	foundWant := false
+	for _, got := range buildConfig.GCSFiles {
+		if got == want {
+			foundWant = true
+			break
+		}
+	}
+	if !foundWant {
+		t.Errorf("install-gpu(-version=390.46 -deps-dir=%q); buildConfig.GCSFiles; got %v, must include %q", depsDir, buildConfig.GCSFiles, want)
+	}
+}
+
+// TestInstallGPUInvalidVersion verifies that a -version not present in the
+// (fake, empty) drivers bucket is rejected.
+func TestInstallGPUInvalidVersion(t *testing.T) {
+	tmpDir, files, err := setupInstallGPUFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	if _, err := executeInstallGPU(context.Background(), files, gcs.Client, "-version=bad"); err == nil {
+		t.Error("install-gpu(-version=bad); got nil, want error")
+	}
+}
+
+// TestInstallGPUInvalidGPUType verifies that a -gpu-type outside validGPUs
+// is rejected.
+func TestInstallGPUInvalidGPUType(t *testing.T) {
+	tmpDir, files, err := setupInstallGPUFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	if _, err := executeInstallGPU(context.Background(), files, gcs.Client, "-version=390.46", "-gpu-type=bad"); err == nil {
+		t.Error("install-gpu(-version=390.46 -gpu-type=bad); got nil, want error")
+	}
+}
+
+// TestInstallGPURunTwice verifies that a second install-gpu invocation in
+// the same build fails: only one driver version may be installed per image.
+func TestInstallGPURunTwice(t *testing.T) {
+	tmpDir, files, err := setupInstallGPUFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	if _, err := executeInstallGPU(context.Background(), files, gcs.Client, "-version=390.46"); err != nil {
+		t.Fatal(err)
+	}
+	if _, err = executeInstallGPU(context.Background(), files, gcs.Client, "-version=390.46"); err == nil {
+		t.Error("install-gpu(_); run twice; got nil, want error")
+	}
+}
+
+// TestInstallGPUProvisionerConfig verifies that install-gpu appends exactly
+// one InstallGPU step with the requested driver version, the default install
+// dir, and the pinned installer container.
+func TestInstallGPUProvisionerConfig(t *testing.T) {
+	tmpDir, files, err := setupInstallGPUFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	if _, err := executeInstallGPU(context.Background(), files, gcs.Client, "-version=390.46"); err != nil {
+		t.Fatal(err)
+	}
+	want := provisioner.Config{
+		Steps: []provisioner.StepConfig{
+			{
+				Type: "InstallGPU",
+				Args: mustMarshalJSON(t, &provisioner.InstallGPUStep{
+					NvidiaDriverVersion:      "390.46",
+					NvidiaInstallDirHost:     "/var/lib/nvidia",
+					NvidiaInstallerContainer: installerContainer,
+				}),
+			},
+		},
+	}
+	var got provisioner.Config
+	data, err := ioutil.ReadFile(files.ProvConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := json.Unmarshal(data, &got); err != nil {
+		t.Fatal(err)
+	}
+	if diff := cmp.Diff(got, want); diff != "" {
+		t.Errorf("install-gpu(-version=390.46); provisioner config mismatch; diff (-got, +want): %s", diff)
+	}
+}
+
+// TestInstallGPUInstallerWithoutDepsDir verifies that a *.run installer
+// version is rejected when -deps-dir is not provided.
+func TestInstallGPUInstallerWithoutDepsDir(t *testing.T) {
+	tmpDir, files, err := setupInstallGPUFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	if _, err := executeInstallGPU(context.Background(), files, gcs.Client, "-version=NVIDIA-Linux-x86_64-450.51.06.run"); err == nil {
+		t.Error("install-gpu(-version=NVIDIA-Linux-x86_64-450.51.06.run); got nil, want error")
+	}
+}
diff --git a/src/cmd/cos_customizer/main.go b/src/cmd/cos_customizer/main.go
new file mode 100644
index 0000000..b730a51
--- /dev/null
+++ b/src/cmd/cos_customizer/main.go
@@ -0,0 +1,75 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// cos_customizer is a Cloud Build builder for building custom COS images.
+package main
+
+import (
+	"context"
+	"flag"
+	"log"
+	"net/http"
+	"os"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+
+	"golang.org/x/oauth2/google"
+
+	"cloud.google.com/go/storage"
+	"github.com/google/subcommands"
+	compute "google.golang.org/api/compute/v1"
+	"google.golang.org/api/option"
+)
+
+// persistentDir names the directory under $HOME used for local state.
+var persistentDir = flag.String("local-state-workdir", ".cos-customizer-workdir",
+	"Name of the directory in $HOME to use for storing local state.")
+
+// clients builds the GCE and GCS API clients shared by all subcommands.
+// When anonymousCreds is true an unauthenticated HTTP client is used;
+// otherwise Google Application Default Credentials are used.
+func clients(ctx context.Context, anonymousCreds bool) (*compute.Service, *storage.Client, error) {
+	var httpClient *http.Client
+	var err error
+	if anonymousCreds {
+		httpClient = &http.Client{}
+	} else {
+		httpClient, err = google.DefaultClient(ctx)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+	// NOTE(review): compute.New is deprecated in newer google.golang.org/api
+	// releases in favor of compute.NewService — consider migrating when the
+	// dependency is upgraded.
+	svc, err := compute.New(httpClient)
+	if err != nil {
+		return nil, nil, err
+	}
+	gcsClient, err := storage.NewClient(ctx, option.WithHTTPClient(httpClient))
+	if err != nil {
+		return nil, nil, err
+	}
+	return svc, gcsClient, nil
+}
+
+// main registers every cos-customizer subcommand and dispatches on the
+// command line, passing each subcommand the shared state files and the
+// client factory. The process exits with the subcommand's exit status.
+func main() {
+	log.SetFlags(log.LstdFlags | log.Lshortfile)
+	subcommands.Register(subcommands.HelpCommand(), "")
+	subcommands.Register(subcommands.FlagsCommand(), "")
+	subcommands.Register(new(StartImageBuild), "")
+	subcommands.Register(new(RunScript), "")
+	subcommands.Register(new(InstallGPU), "")
+	subcommands.Register(new(SealOEM), "")
+	subcommands.Register(new(DisableAutoUpdate), "")
+	subcommands.Register(new(FinishImageBuild), "")
+	flag.Parse()
+	ctx := context.Background()
+	files := fs.DefaultFiles(*persistentDir)
+	ret := int(subcommands.Execute(ctx, files, ServiceClients(clients)))
+	os.Exit(ret)
+}
diff --git a/src/cmd/cos_customizer/run_script.go b/src/cmd/cos_customizer/run_script.go
new file mode 100644
index 0000000..e139973
--- /dev/null
+++ b/src/cmd/cos_customizer/run_script.go
@@ -0,0 +1,121 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"flag"
+	"log"
+	"sort"
+	"strings"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+
+	"github.com/google/subcommands"
+)
+
+// RunScript implements subcommands.Command for the "run-script" command.
+// This command configures the current image build process to customize the result image
+// with a shell script.
+type RunScript struct {
+	// script is the name of the script inside the user build context archive.
+	script string
+	// env holds KEY=VALUE pairs to set before running the script.
+	env    *mapVar
+}
+
+// Name implements subcommands.Command.Name.
+func (r *RunScript) Name() string {
+	return "run-script"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (r *RunScript) Synopsis() string {
+	return "Configure the image build with a script to run."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (r *RunScript) Usage() string {
+	return `run-script [flags]
+`
+}
+
+// SetFlags implements subcommands.Command.SetFlags.
+func (r *RunScript) SetFlags(f *flag.FlagSet) {
+	f.StringVar(&r.script, "script", "", "Name of script to run.")
+	// Lazily initialize env so SetFlags works on a zero-value RunScript.
+	if r.env == nil {
+		r.env = newMapVar()
+	}
+	f.Var(r.env, "env", "Env vars to set before running the script.")
+}
+
// createEnvString creates an environment variable string used by the
// provisioner tool. The format is the same as the format used by exec.Command.
// Elements are sorted for predictable output.
func createEnvString(m map[string]string) string {
	pairs := make([]string, 0, len(m))
	for key, value := range m {
		pairs = append(pairs, key+"="+value)
	}
	// Sort the joined "key=value" strings (not the keys) to match the
	// original ordering exactly.
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}
+
+// Execute implements subcommands.Command.Execute. It configures the current image build process to
+// customize the result image with a shell script.
+func (r *RunScript) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+	if f.NArg() != 0 {
+		f.Usage()
+		return subcommands.ExitUsageError
+	}
+	files := args[0].(*fs.Files)
+	if r.script == "" {
+		log.Printf("script not provided for %s step; script is required\n", r.Name())
+		return subcommands.ExitFailure
+	}
+	isValid, err := fs.ArchiveHasObject(files.UserBuildContextArchive, r.script)
+	if err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if !isValid {
+		log.Printf("could not find script %s in build context", r.script)
+		return subcommands.ExitFailure
+	}
+	var provConfig provisioner.Config
+	if err := config.LoadFromFile(files.ProvConfig, &provConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	buf, err := json.Marshal(&provisioner.RunScriptStep{
+		BuildContext: "user",
+		Path:         r.script,
+		Env:          createEnvString(r.env.m),
+	})
+	if err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	provConfig.Steps = append(provConfig.Steps, provisioner.StepConfig{
+		Type: "RunScript",
+		Args: json.RawMessage(buf),
+	})
+	if err := config.SaveConfigToPath(files.ProvConfig, &provConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	return subcommands.ExitSuccess
+}
diff --git a/src/cmd/cos_customizer/run_script_test.go b/src/cmd/cos_customizer/run_script_test.go
new file mode 100644
index 0000000..5d1f4bc
--- /dev/null
+++ b/src/cmd/cos_customizer/run_script_test.go
@@ -0,0 +1,202 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/subcommands"
+)
+
+// createTempFile makes an empty temporary file in dir and returns its path.
+// The file is removed again if it cannot be closed cleanly.
+func createTempFile(dir string) (string, error) {
+	f, err := ioutil.TempFile(dir, "")
+	if err != nil {
+		return "", err
+	}
+	name := f.Name()
+	if err := f.Close(); err != nil {
+		os.Remove(name)
+		return "", err
+	}
+	return name, nil
+}
+
+// setupRunScriptFiles creates a temp directory holding an empty provisioner
+// config ("{}") and an empty user build context archive, returning the
+// directory (which the caller must remove) and the populated fs.Files.
+func setupRunScriptFiles() (string, *fs.Files, error) {
+	tmpDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return "", nil, err
+	}
+	files := &fs.Files{}
+	files.ProvConfig, err = createTempFile(tmpDir)
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	// "{}" is a valid, empty provisioner config.
+	if err := ioutil.WriteFile(files.ProvConfig, []byte("{}"), 0644); err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	files.UserBuildContextArchive, err = createTempFile(tmpDir)
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	return tmpDir, files, nil
+}
+
+// createNonEmptyUserCtxArchive replaces files.UserBuildContextArchive with an
+// archive of a build context containing one empty file named fileName.
+func createNonEmptyUserCtxArchive(files *fs.Files, fileName string) error {
+	// Stage the build context in a scratch directory removed on return.
+	tmpDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpDir)
+	newFile, err := os.Create(filepath.Join(tmpDir, fileName))
+	if err != nil {
+		return err
+	}
+	if err := newFile.Close(); err != nil {
+		return err
+	}
+	// Remove the placeholder archive so a fresh one can be written at the
+	// same path.
+	if err := os.Remove(files.UserBuildContextArchive); err != nil {
+		return err
+	}
+	return fs.CreateBuildContextArchive(newFile.Name(), files.UserBuildContextArchive)
+}
+
+// executeRunScript runs the run-script command against files with the given
+// flags, returning an error when the command does not exit successfully.
+func executeRunScript(files *fs.Files, flags ...string) (subcommands.ExitStatus, error) {
+	// Use a distinct local name so the imported fs package is not shadowed.
+	flagSet := &flag.FlagSet{}
+	cmd := &RunScript{}
+	cmd.SetFlags(flagSet)
+	if err := flagSet.Parse(flags); err != nil {
+		return 0, err
+	}
+	status := cmd.Execute(nil, flagSet, files)
+	if status != subcommands.ExitSuccess {
+		return status, fmt.Errorf("RunScript failed. input: %v", flags)
+	}
+	return status, nil
+}
+
+// mustMarshalJSON marshals v to JSON, failing the test immediately on error.
+func mustMarshalJSON(t *testing.T, v interface{}) []byte {
+	t.Helper()
+	encoded, err := json.Marshal(v)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return encoded
+}
+
+// TestRunScript verifies that the run-script command appends a RunScript step
+// to the provisioner config, both with and without the -env flag.
+func TestRunScript(t *testing.T) {
+	var testData = []struct {
+		testName       string
+		flags          []string
+		wantProvConfig provisioner.Config
+	}{
+		{
+			testName: "NoEnv",
+			flags:    nil,
+			wantProvConfig: provisioner.Config{
+				Steps: []provisioner.StepConfig{
+					{
+						Type: "RunScript",
+						Args: mustMarshalJSON(t, &provisioner.RunScriptStep{
+							BuildContext: "user",
+							Path:         "script",
+						}),
+					},
+				},
+			},
+		},
+		{
+			testName: "Env",
+			flags:    []string{"-env=HELLO1=world1,HELLO2=world2"},
+			wantProvConfig: provisioner.Config{
+				Steps: []provisioner.StepConfig{
+					{
+						Type: "RunScript",
+						Args: mustMarshalJSON(t, &provisioner.RunScriptStep{
+							BuildContext: "user",
+							Path:         "script",
+							Env:          "HELLO1=world1,HELLO2=world2",
+						}),
+					},
+				},
+			},
+		},
+	}
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			tmpDir, files, err := setupRunScriptFiles()
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tmpDir)
+			// The script must exist in the archive for run-script to accept it.
+			if err := createNonEmptyUserCtxArchive(files, "script"); err != nil {
+				t.Fatal(err)
+			}
+			if _, err := executeRunScript(files, append(input.flags, "-script=script")...); err != nil {
+				t.Fatal(err)
+			}
+			// Read back the persisted provisioner config and compare.
+			var provConfig provisioner.Config
+			got, err := ioutil.ReadFile(files.ProvConfig)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if err := json.Unmarshal(got, &provConfig); err != nil {
+				t.Fatal(err)
+			}
+			if diff := cmp.Diff(provConfig, input.wantProvConfig); diff != "" {
+				t.Errorf("run-script(%v): provisioner config mismatch: diff (-got, +want): %s", input.flags, diff)
+			}
+		})
+	}
+}
+
+// TestRunScriptBadScript verifies that run-script fails when the script is
+// absent from the build context or the -script flag is omitted entirely.
+func TestRunScriptBadScript(t *testing.T) {
+	var testData = []struct {
+		testName string
+		flags    []string
+	}{
+		{
+			"BadScript",
+			[]string{"-script=script"},
+		},
+		{
+			"NoScript",
+			nil,
+		},
+	}
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			tmpDir, files, err := setupRunScriptFiles()
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tmpDir)
+			// The user build context archive is left empty, so any -script
+			// value cannot be found in it.
+			if got, _ := executeRunScript(files, input.flags...); got == subcommands.ExitSuccess {
+				t.Errorf("run-script(%v); got subcommands.ExitSuccess, want failure", input.flags)
+			}
+		})
+	}
+}
diff --git a/src/cmd/cos_customizer/seal_oem.go b/src/cmd/cos_customizer/seal_oem.go
new file mode 100644
index 0000000..747209c
--- /dev/null
+++ b/src/cmd/cos_customizer/seal_oem.go
@@ -0,0 +1,79 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"flag"
+	"log"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+
+	"github.com/google/subcommands"
+)
+
+// SealOEM implements subcommands.Command for the "seal-oem" command.
+// It builds a hash tree of the OEM partition and modifies the kernel
+// command line to verify the OEM partition at boot time.
+type SealOEM struct{}
+
+// Name implements subcommands.Command.Name.
+func (s *SealOEM) Name() string {
+	return "seal-oem"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (s *SealOEM) Synopsis() string {
+	return "Seal the OEM partition."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (s *SealOEM) Usage() string {
+	return `seal-oem
+`
+}
+
+// SetFlags implements subcommands.Command.SetFlags. seal-oem takes no flags.
+func (s *SealOEM) SetFlags(f *flag.FlagSet) {}
+
+// updateProvConfig appends a SealOEM step to the provisioner config at
+// configPath and persists the result.
+func (s *SealOEM) updateProvConfig(configPath string) error {
+	var provConfig provisioner.Config
+	if err := config.LoadFromFile(configPath, &provConfig); err != nil {
+		return err
+	}
+	// NOTE(review): assumed from the field name that this requests reclaiming
+	// /dev/sda3 during provisioning — confirm against the provisioner package.
+	provConfig.BootDisk.ReclaimSDA3 = true
+	provConfig.Steps = append(provConfig.Steps, provisioner.StepConfig{
+		Type: "SealOEM",
+	})
+	return config.SaveConfigToPath(configPath, &provConfig)
+}
+
+// Execute implements subcommands.Command.Execute. It modifies the kernel command line
+// to enable dm-verity check on /dev/sda8 and disables update-engine (auto-update) and
+// usr-share-oem-mount systemd services.
+//
+// args[0] must be a *fs.Files. Only the provisioner config is updated here;
+// the appended SealOEM step is carried out later by the provisioner.
+func (s *SealOEM) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+	if f.NArg() != 0 {
+		f.Usage()
+		return subcommands.ExitUsageError
+	}
+	files := args[0].(*fs.Files)
+	if err := s.updateProvConfig(files.ProvConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	return subcommands.ExitSuccess
+}
diff --git a/src/cmd/cos_customizer/start_image_build.go b/src/cmd/cos_customizer/start_image_build.go
new file mode 100644
index 0000000..670b095
--- /dev/null
+++ b/src/cmd/cos_customizer/start_image_build.go
@@ -0,0 +1,214 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cmd contains cos-customizer subcommand implementations.
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/gce"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+
+	"cloud.google.com/go/storage"
+	"github.com/google/subcommands"
+	compute "google.golang.org/api/compute/v1"
+)
+
+// ServiceClients gets the GCE and GCS clients to use.
+type ServiceClients func(ctx context.Context, anonymousCreds bool) (*compute.Service, *storage.Client, error)
+
+// StartImageBuild implements subcommands.Command for the 'start-image-build' command.
+// This command initializes a new image customization process.
+type StartImageBuild struct {
+	buildContext string // path to the user's build context directory
+	gcsBucket    string // GCS bucket for scratch space
+	gcsWorkdir   string // GCS directory for scratch space
+	imageProject string // project containing the source image
+	imageName    string // source image name; exclusive with milestone/family
+	milestone    int    // source image milestone; cos-cloud projects only
+	imageFamily  string // source image family; exclusive with name/milestone
+}
+
+// Name implements subcommands.Command.Name.
+func (*StartImageBuild) Name() string {
+	return "start-image-build"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (*StartImageBuild) Synopsis() string {
+	return "Start a COS image build."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (*StartImageBuild) Usage() string {
+	return `start-image-build [flags]
+`
+}
+
+// SetFlags implements subcommands.Command.SetFlags.
+func (s *StartImageBuild) SetFlags(f *flag.FlagSet) {
+	f.StringVar(&s.buildContext, "build-context", ".", "Path to the build context")
+	f.StringVar(&s.gcsBucket, "gcs-bucket", "", "GCS bucket to use for scratch space")
+	f.StringVar(&s.gcsWorkdir, "gcs-workdir", "", "GCS directory to use for scratch space")
+	f.StringVar(&s.imageProject, "image-project", "", "Source image project")
+	f.StringVar(&s.imageName, "image-name", "", "Source image name. Mutually exclusive with 'image-milestone' and "+
+		"'image-family'.")
+	f.IntVar(&s.milestone, "image-milestone", 0, "Source image milestone. Mutually exclusive with 'image-name' "+
+		"and 'image-family'. Can only be used if 'image-project' is cos-cloud.")
+	f.StringVar(&s.imageFamily, "image-family", "", "Source image family. Mutually exclusive with 'image-name' "+
+		"and 'image-milestone'.")
+}
+
+// validate checks flag consistency before any work is done: exactly one image
+// selector, milestone restricted to cos-cloud, and all required flags present.
+func (s *StartImageBuild) validate() error {
+	selectors := 0
+	if s.imageName != "" {
+		selectors++
+	}
+	if s.milestone != 0 {
+		selectors++
+	}
+	if s.imageFamily != "" {
+		selectors++
+	}
+	if selectors != 1 {
+		return fmt.Errorf("exactly one of image-name, image-milestone, image-family must be set")
+	}
+	if s.milestone != 0 && s.imageProject != "cos-cloud" {
+		return fmt.Errorf("image-milestone can only be used if image-project is set to cos-cloud. "+
+			"image-milestone: %d image-project: %s", s.milestone, s.imageProject)
+	}
+	if s.gcsBucket == "" {
+		return fmt.Errorf("gcs-bucket must be set")
+	}
+	if s.gcsWorkdir == "" {
+		return fmt.Errorf("gcs-workdir must be set")
+	}
+	if s.imageProject == "" {
+		return fmt.Errorf("image-project must be set")
+	}
+	return nil
+}
+
+// resolveImageName resolves the image-milestone or image-family selector to a
+// concrete image name stored in s.imageName, or verifies that an explicitly
+// named image exists in s.imageProject. validate() has already guaranteed
+// that exactly one selector is set.
+func (s *StartImageBuild) resolveImageName(ctx context.Context, svc *compute.Service) error {
+	switch {
+	case s.milestone != 0:
+		var err error
+		s.imageName, err = gce.ResolveMilestone(ctx, svc, s.milestone)
+		if err != nil {
+			if err == gce.ErrImageNotFound {
+				return fmt.Errorf("no image found on milestone %d", s.milestone)
+			}
+			return err
+		}
+		log.Printf("Using image %s from milestone %d\n", s.imageName, s.milestone)
+	case s.imageFamily != "":
+		image, err := svc.Images.GetFromFamily(s.imageProject, s.imageFamily).Do()
+		if err != nil {
+			return err
+		}
+		s.imageName = image.Name
+		log.Printf("Using image %s from family %s\n", s.imageName, s.imageFamily)
+	default:
+		// image-name was given directly; just verify it exists.
+		exists, err := gce.ImageExists(svc, s.imageProject, s.imageName)
+		if err != nil {
+			return err
+		}
+		if !exists {
+			return fmt.Errorf("could not find source image %s in project %s", s.imageName, s.imageProject)
+		}
+	}
+	return nil
+}
+
+// saveImage writes the source image configuration (name and project) to dst,
+// creating parent directories as needed.
+//
+// The file is closed via utils.CheckClose with a named error return so that a
+// failed Close (which can hide a lost write) is reported instead of silently
+// discarded, matching saveProvConfig below.
+func saveImage(imageName, imageProject, dst string) (err error) {
+	image := config.NewImage(imageName, imageProject)
+	if err := os.MkdirAll(filepath.Dir(dst), 0774); err != nil {
+		return err
+	}
+	outFile, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(outFile, "error closing source image config", &err)
+	return config.Save(outFile, image)
+}
+
+// saveBuildConfig writes the GCS scratch-space configuration to dst, creating
+// parent directories as needed.
+//
+// The file is closed via utils.CheckClose with a named error return so that a
+// failed Close (which can hide a lost write) is reported instead of silently
+// discarded, matching saveProvConfig below.
+func saveBuildConfig(gcsBucket, gcsWorkdir, dst string) (err error) {
+	buildConfig := &config.Build{GCSBucket: gcsBucket, GCSDir: gcsWorkdir}
+	if err := os.MkdirAll(filepath.Dir(dst), 0774); err != nil {
+		return err
+	}
+	outFile, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(outFile, "error closing build config", &err)
+	return config.SaveConfigToFile(outFile, buildConfig)
+}
+
+// saveProvConfig writes an empty provisioner config to dst, creating parent
+// directories as needed. The named error return lets CheckClose surface a
+// failed Close as the function's error.
+func saveProvConfig(dst string) (err error) {
+	provConfig := &provisioner.Config{}
+	if err := os.MkdirAll(filepath.Dir(dst), 0774); err != nil {
+		return err
+	}
+	outFile, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(outFile, "error closing provisioner config", &err)
+	return config.SaveConfigToFile(outFile, provConfig)
+}
+
+// Execute implements subcommands.Command.Execute. It initializes persistent state for a new
+// image customization process.
+//
+// args[0] must be a *fs.Files and args[1] a ServiceClients factory. On success
+// the user build context archive, source image config, build config, and an
+// empty provisioner config have all been written under files.
+func (s *StartImageBuild) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+	if f.NArg() != 0 {
+		f.Usage()
+		return subcommands.ExitUsageError
+	}
+	files := args[0].(*fs.Files)
+	svc, _, err := args[1].(ServiceClients)(ctx, false)
+	if err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := s.validate(); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	// Resolve milestone/family selectors to a concrete image name before
+	// anything is saved.
+	if err := s.resolveImageName(ctx, svc); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := fs.CreateBuildContextArchive(s.buildContext, files.UserBuildContextArchive); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := saveImage(s.imageName, s.imageProject, files.SourceImageConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := saveBuildConfig(s.gcsBucket, s.gcsWorkdir, files.BuildConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	if err := saveProvConfig(files.ProvConfig); err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	return subcommands.ExitSuccess
+}
diff --git a/src/cmd/cos_customizer/start_image_build_test.go b/src/cmd/cos_customizer/start_image_build_test.go
new file mode 100644
index 0000000..6647270
--- /dev/null
+++ b/src/cmd/cos_customizer/start_image_build_test.go
@@ -0,0 +1,191 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fakes"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+
+	"cloud.google.com/go/storage"
+	"github.com/google/subcommands"
+	compute "google.golang.org/api/compute/v1"
+)
+
+// executeStartBuild runs start-image-build against files with the given flags.
+// svc backs the ServiceClients factory; the GCS client is left nil here since
+// this command path does not use it.
+func executeStartBuild(files *fs.Files, svc *compute.Service, flags ...string) (subcommands.ExitStatus, error) {
+	clients := ServiceClients(func(_ context.Context, _ bool) (*compute.Service, *storage.Client, error) {
+		return svc, nil, nil
+	})
+	flagSet := &flag.FlagSet{}
+	startBuild := &StartImageBuild{}
+	startBuild.SetFlags(flagSet)
+	if err := flagSet.Parse(flags); err != nil {
+		return 0, err
+	}
+	ret := startBuild.Execute(context.Background(), flagSet, files, clients)
+	if ret != subcommands.ExitSuccess {
+		return ret, fmt.Errorf("StartImageBuild failed; input: %v", flags)
+	}
+	return ret, nil
+}
+
+// setupStartBuildFiles creates a scratch directory and returns an fs.Files
+// whose config paths all live inside it. The caller removes the directory.
+func setupStartBuildFiles() (*fs.Files, string, error) {
+	tmpDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return nil, "", err
+	}
+	files := &fs.Files{
+		BuildConfig:             filepath.Join(tmpDir, "build_config"),
+		SourceImageConfig:       filepath.Join(tmpDir, "source_image"),
+		ProvConfig:              filepath.Join(tmpDir, "provisioner_config"),
+		UserBuildContextArchive: filepath.Join(tmpDir, "user_archive"),
+	}
+	return files, tmpDir, nil
+}
+
+// TestNoImageName verifies that start-image-build fails when no image
+// selector (-image-name/-image-milestone/-image-family) is given.
+func TestNoImageName(t *testing.T) {
+	files, tmpDir, err := setupStartBuildFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gce, client := fakes.GCEForTest(t, "p")
+	defer gce.Close()
+	if _, err := executeStartBuild(files, client, "-gcs-bucket=b", "-gcs-workdir=w", "-image-project=p"); err == nil {
+		t.Errorf("start-image-build should fail with no image name")
+	}
+}
+
+// TestDuplicateImageName verifies that the mutually exclusive image selectors
+// are rejected when more than one is given.
+func TestDuplicateImageName(t *testing.T) {
+	files, tmpDir, err := setupStartBuildFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gce, client := fakes.GCEForTest(t, "p")
+	defer gce.Close()
+	if _, err := executeStartBuild(files, client, "-image-name=n", "-image-family=f", "-gcs-bucket=b", "-gcs-workdir=w",
+		"-image-project=p"); err == nil {
+		t.Errorf("start-image-build should fail with duplicate image names")
+	}
+}
+
+// TestResolveMilestoneNoCosCloud verifies that -image-milestone is rejected
+// for image projects other than cos-cloud.
+func TestResolveMilestoneNoCosCloud(t *testing.T) {
+	files, tmpDir, err := setupStartBuildFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gce, client := fakes.GCEForTest(t, "p")
+	defer gce.Close()
+	if _, err := executeStartBuild(files, client, "-image-milestone=65", "-image-project=p", "-gcs-bucket=b",
+		"-gcs-workdir=w"); err == nil {
+		t.Errorf("start-image-build should fail when using -image-milestone without cos-cloud")
+	}
+}
+
+// TestSourceImage verifies that -image-milestone and -image-name resolve to
+// the expected image in the persisted source image config.
+func TestSourceImage(t *testing.T) {
+	testData := []struct {
+		testName string
+		images   []*compute.Image
+		flag     string
+		want     string
+	}{
+		{
+			"MilestoneDifferentImages",
+			[]*compute.Image{
+				{Name: "cos-beta-65-10032-9-0"},
+				{Name: "cos-stable-65-10032-10-0"}},
+			"-image-milestone=65",
+			"cos-stable-65-10032-10-0",
+		},
+		{
+			"MilestoneSameImage",
+			[]*compute.Image{
+				{Name: "cos-stable-65-10032-10-0"},
+				{Name: "cos-65-10032-10-0"}},
+			"-image-milestone=65",
+			"cos-stable-65-10032-10-0",
+		},
+		{
+			"ProvideImageName",
+			[]*compute.Image{
+				{Name: "cos-beta-65-10032-9-0"},
+				{Name: "cos-stable-65-10032-10-0"}},
+			"-image-name=cos-beta-65-10032-9-0",
+			"cos-beta-65-10032-9-0",
+		},
+	}
+	gce, client := fakes.GCEForTest(t, "cos-cloud")
+	defer gce.Close()
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			files, tmpDir, err := setupStartBuildFiles()
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tmpDir)
+			// Seed the fake GCE service with this case's image list.
+			gce.Images.Items = input.images
+			if _, err := executeStartBuild(files, client, input.flag, "-image-project=cos-cloud", "-gcs-bucket=b",
+				"-gcs-workdir=w"); err != nil {
+				t.Fatal(err)
+			}
+			// Read back the saved source image config and check name/project.
+			sourceImage := config.NewImage("", "")
+			if err := config.LoadFromFile(files.SourceImageConfig, sourceImage); err != nil {
+				t.Fatal(err)
+			}
+			if got := sourceImage.Name; got != input.want {
+				t.Errorf("StartImageBuild.Execute(%s); source image is %s, want %s", input.flag, got, input.want)
+			}
+			if got := sourceImage.Project; got != "cos-cloud" {
+				t.Errorf("StartImageBuild.Execute(%s); source image project is %s, want cos-cloud", input.flag, got)
+			}
+		})
+	}
+}
+
+// TestProvisionerConfigCreated verifies that start-image-build writes a
+// provisioner config file that exists and parses as JSON.
+func TestProvisionerConfigCreated(t *testing.T) {
+	files, tmpDir, err := setupStartBuildFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	gce, client := fakes.GCEForTest(t, "p")
+	defer gce.Close()
+	gce.Images.Items = []*compute.Image{{Name: "n"}}
+	if _, err := executeStartBuild(files, client, "-image-name=n", "-image-project=p", "-gcs-bucket=b", "-gcs-workdir=w"); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := os.Stat(files.ProvConfig); os.IsNotExist(err) {
+		t.Errorf("provisioner config does not exist: should exist")
+	}
+	var got provisioner.Config
+	data, err := ioutil.ReadFile(files.ProvConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := json.Unmarshal(data, &got); err != nil {
+		t.Errorf("cannot unmarshal provisioner config %q: got %v", string(data), err)
+	}
+}
diff --git a/src/cmd/handle_disk_layout/BUILD.bazel b/src/cmd/handle_disk_layout/BUILD.bazel
new file mode 100644
index 0000000..7ce7f1e
--- /dev/null
+++ b/src/cmd/handle_disk_layout/BUILD.bazel
@@ -0,0 +1,29 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+# Library wrapping the disk-layout entry point; embedded by the binary below.
+go_library(
+    name = "handle_disk_layout_lib",
+    srcs = ["handle_disk_layout_bin.go"],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/cmd/handle_disk_layout",
+    visibility = ["//visibility:private"],
+    deps = ["//src/pkg/tools"],
+)
+
+# Publicly visible binary target built from the library above.
+go_binary(
+    name = "handle_disk_layout_bin",
+    embed = [":handle_disk_layout_lib"],
+    visibility = ["//visibility:public"],
+)
diff --git a/src/cmd/handle_disk_layout/handle_disk_layout_bin.go b/src/cmd/handle_disk_layout/handle_disk_layout_bin.go
new file mode 100644
index 0000000..146c737
--- /dev/null
+++ b/src/cmd/handle_disk_layout/handle_disk_layout_bin.go
@@ -0,0 +1,48 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the License);
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an AS IS BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"log"
+	"os"
+	"strconv"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools"
+)
+
+// main generates binary file to extend the OEM partition.
+// Built by Bazel. The binary will be in data/builtin_build_context/.
+//
+// Expected positional arguments (after the program name):
+//   1: disk (string), 2: statePartNum (int), 3: oemPartNum (int),
+//   4: oemSize (string), 5: reclaimSDA3 (bool)
+func main() {
+	// Route log output to stdout.
+	log.SetOutput(os.Stdout)
+	args := os.Args
+	// os.Args includes the program name, so 5 user arguments means len == 6.
+	if len(args) != 6 {
+		log.Fatalln("error: must have 5 arguments: disk string, statePartNum, oemPartNum int, oemSize string, reclaimSDA3 bool")
+	}
+	statePartNum, err := strconv.Atoi(args[2])
+	if err != nil {
+		log.Fatalln("error: the 2nd argument statePartNum must be an int")
+	}
+	oemPartNum, err := strconv.Atoi(args[3])
+	if err != nil {
+		log.Fatalln("error: the 3rd argument oemPartNum must be an int")
+	}
+	reclaimSDA3, err := strconv.ParseBool(args[5])
+	if err != nil {
+		log.Fatalln("error: the 5th argument reclaimSDA3 must be a bool")
+	}
+	if err := tools.HandleDiskLayout(args[1], statePartNum, oemPartNum, args[4], reclaimSDA3); err != nil {
+		log.Fatalln(err)
+	}
+}
diff --git a/src/cmd/metadata_watcher/BUILD.bazel b/src/cmd/metadata_watcher/BUILD.bazel
new file mode 100644
index 0000000..41bdffd
--- /dev/null
+++ b/src/cmd/metadata_watcher/BUILD.bazel
@@ -0,0 +1,29 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+# Library wrapping the metadata watcher; embedded by the binary below.
+go_library(
+    name = "metadata_watcher_lib",
+    srcs = ["main.go"],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/cmd/metadata_watcher",
+    visibility = ["//visibility:private"],
+    deps = ["@com_google_cloud_go//compute/metadata"],
+)
+
+# Publicly visible binary target built from the library above.
+go_binary(
+    name = "metadata_watcher",
+    embed = [":metadata_watcher_lib"],
+    visibility = ["//visibility:public"],
+)
diff --git a/src/cmd/metadata_watcher/main.go b/src/cmd/metadata_watcher/main.go
new file mode 100644
index 0000000..738f8cf
--- /dev/null
+++ b/src/cmd/metadata_watcher/main.go
@@ -0,0 +1,43 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// metadata_watcher is a program that waits for 5 minutes for a specific GCE
+// instance metadata key to be present.
+package main
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"cloud.google.com/go/compute/metadata"
+)
+
+// main waits up to 5 minutes for the instance metadata key named by the first
+// command-line argument to appear. It exits 0 as soon as the key is found, and
+// exits 1 on bad usage or timeout. (Previously the timeout path fell off the
+// end of main and exited 0, making a timeout indistinguishable from success
+// for callers that check the exit status.)
+func main() {
+	if len(os.Args) < 2 {
+		os.Exit(1)
+	}
+	key := os.Args[1]
+	fmt.Printf("Waiting for metadata key %q...\n", key)
+	end := time.Now().Add(5 * time.Minute)
+	for time.Now().Before(end) {
+		_, err := metadata.InstanceAttributeValue(key)
+		if err == nil {
+			fmt.Printf("Found metadata key %q\n", key)
+			os.Exit(0)
+		}
+		// Poll once per second until the deadline.
+		fmt.Printf("Could not fetch metadata key %q: %v\n", key, err)
+		time.Sleep(time.Second)
+	}
+	fmt.Printf("Timed out waiting for metadata key %q\n", key)
+	os.Exit(1)
+}
diff --git a/src/cmd/provisioner/BUILD.bazel b/src/cmd/provisioner/BUILD.bazel
new file mode 100644
index 0000000..8f93478
--- /dev/null
+++ b/src/cmd/provisioner/BUILD.bazel
@@ -0,0 +1,37 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+# Library with the provisioner CLI (main plus run/resume subcommands).
+go_library(
+    name = "provisioner_lib",
+    srcs = [
+        "main.go",
+        "resume.go",
+        "run.go",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/cmd/provisioner",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//src/pkg/provisioner",
+        "@com_github_google_subcommands//:subcommands",
+        "@com_google_cloud_go_storage//:storage",
+    ],
+)
+
+# Publicly visible provisioner binary built from the library above.
+go_binary(
+    name = "provisioner",
+    embed = [":provisioner_lib"],
+    visibility = ["//visibility:public"],
+)
diff --git a/src/cmd/provisioner/main.go b/src/cmd/provisioner/main.go
new file mode 100644
index 0000000..d042a8d
--- /dev/null
+++ b/src/cmd/provisioner/main.go
@@ -0,0 +1,66 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// provisioner is a tool for provisioning COS instances. The tool is intended to
+// run on a running COS machine.
+package main
+
+import (
+	"context"
+	"flag"
+	"log"
+	"os"
+
+	"cloud.google.com/go/storage"
+	"github.com/google/subcommands"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+)
+
+var (
+	// stateDir is a package-level flag read by the subcommands (see resume.go).
+	stateDir = flag.String("state-dir", "/var/lib/.cos-customizer", "Absolute path to the directory to use for provisioner state. "+
+		"This directory is used for persisting internal state across reboots, unpacking inputs, and running provisioning scripts. "+
+		"The size of the directory scales with the size of the inputs.")
+)
+
+// main registers the provisioner subcommands, builds their external
+// dependencies (GCS client and host tool names), and exits with either the
+// subcommand's ExitStatus or, on success, the exit code the subcommand set.
+func main() {
+	log.SetFlags(log.LstdFlags | log.Lshortfile)
+	subcommands.Register(subcommands.HelpCommand(), "")
+	subcommands.Register(subcommands.FlagsCommand(), "")
+	subcommands.Register(&Run{}, "")
+	subcommands.Register(&Resume{}, "")
+	flag.Parse()
+	ctx := context.Background()
+	gcsClient, err := storage.NewClient(ctx)
+	if err != nil {
+		log.Println(err)
+		os.Exit(int(subcommands.ExitFailure))
+	}
+	// Deps names the host commands and filesystem root provisioning operates on.
+	deps := provisioner.Deps{
+		GCSClient:    gcsClient,
+		TarCmd:       "tar",
+		SystemctlCmd: "systemctl",
+		RootdevCmd:   "rootdev",
+		CgptCmd:      "cgpt",
+		Resize2fsCmd: "resize2fs",
+		E2fsckCmd:    "e2fsck",
+		RootDir:      "/",
+	}
+	// exitCode lets a successful subcommand request a specific process exit
+	// status (Resume sets 3 when a reboot is required; see resume.go).
+	var exitCode int
+	ret := subcommands.Execute(ctx, deps, &exitCode)
+	if ret != subcommands.ExitSuccess {
+		os.Exit(int(ret))
+	}
+	os.Exit(exitCode)
+}
diff --git a/src/cmd/provisioner/resume.go b/src/cmd/provisioner/resume.go
new file mode 100644
index 0000000..eda3bda
--- /dev/null
+++ b/src/cmd/provisioner/resume.go
@@ -0,0 +1,65 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"log"
+
+	"github.com/google/subcommands"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+)
+
+// Resume implements subcommands.Command for the "resume" command.
+// This command resumes provisioning from given provisioning state.
+type Resume struct{}
+
+// Name implements subcommands.Command.Name.
+func (r *Resume) Name() string {
+	return "resume"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (r *Resume) Synopsis() string {
+	return "Resume provisioning from provided state. Has an exit code of 3 if a reboot is required after execution."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (r *Resume) Usage() string {
+	return `resume
+`
+}
+
+// SetFlags implements subcommands.Command.SetFlags.
+func (r *Resume) SetFlags(f *flag.FlagSet) {}
+
+// Execute implements subcommands.Command.Execute.
+func (r *Resume) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+	deps := args[0].(provisioner.Deps)
+	exitCode := args[1].(*int)
+	if err := provisioner.Resume(ctx, deps, *stateDir); err != nil {
+		if errors.Is(err, provisioner.ErrRebootRequired) {
+			log.Println(rebootMsg)
+			*exitCode = 3
+			return subcommands.ExitSuccess
+		}
+		log.Printf("Provisioning error: %v", err)
+		return subcommands.ExitFailure
+	}
+	return subcommands.ExitSuccess
+}
diff --git a/src/cmd/provisioner/run.go b/src/cmd/provisioner/run.go
new file mode 100644
index 0000000..5ab8e2f
--- /dev/null
+++ b/src/cmd/provisioner/run.go
@@ -0,0 +1,94 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"flag"
+	"io/ioutil"
+	"log"
+
+	"github.com/google/subcommands"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+)
+
+const rebootMsg = "Reboot is required to continue provisioning. Please reboot the system and resume provisioning with the `resume` subcommand."
+
+// Run implements subcommands.Command for the "run" command.
+// This command runs the provisioner from a provided configuration file.
+type Run struct {
+	configPath string
+}
+
+// Name implements subcommands.Command.Name.
+func (r *Run) Name() string {
+	return "run"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (r *Run) Synopsis() string {
+	return "Provision a COS instance from the provided configuration file. Has an exit code of 3 if a reboot is required after execution."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (r *Run) Usage() string {
+	return `run [flags]
+`
+}
+
+// SetFlags implements subcommands.Command.SetFlags.
+func (r *Run) SetFlags(f *flag.FlagSet) {
+	f.StringVar(&r.configPath, "config", "", "Path to a configuration file to use for provisioning.")
+}
+
+func (r *Run) validate() error {
+	if r.configPath == "" {
+		return errors.New("-config must be provided")
+	}
+	return nil
+}
+
+// Execute implements subcommands.Command.Execute.
+func (r *Run) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+	deps := args[0].(provisioner.Deps)
+	exitCode := args[1].(*int)
+	if err := r.validate(); err != nil {
+		log.Printf("Error in flags: %v", err)
+		return subcommands.ExitUsageError
+	}
+	data, err := ioutil.ReadFile(r.configPath)
+	if err != nil {
+		log.Println(err)
+		return subcommands.ExitFailure
+	}
+	var c provisioner.Config
+	if err := json.Unmarshal(data, &c); err != nil {
+		log.Printf("JSON parsing error in %q: %v", r.configPath, err)
+		return subcommands.ExitFailure
+	}
+	if err := provisioner.Run(ctx, deps, *stateDir, c); err != nil {
+		if errors.Is(err, provisioner.ErrRebootRequired) {
+			log.Println(rebootMsg)
+			*exitCode = 3
+			return subcommands.ExitSuccess
+		}
+		log.Printf("Provisioning error: %v", err)
+		return subcommands.ExitFailure
+	}
+	return subcommands.ExitSuccess
+}
diff --git a/src/data/build_image.wf.json b/src/data/build_image.wf.json
new file mode 100644
index 0000000..7329148
--- /dev/null
+++ b/src/data/build_image.wf.json
@@ -0,0 +1,134 @@
+{
+  "Name": "build-image",
+  "Vars": {
+    "source_image": {"Required": true, "Description": "URL of the source image to preload."},
+    "output_image_name": {"Required": true, "Description": "Name of output image."},
+    "output_image_family": {"Value": "", "Description": "Family of output image."},
+    "output_image_project": {"Required": true, "Description": "Project of output image."},
+    "cidata_img": {"Required": true, "Description": "Path to CIDATA vfat image containing cloud-init user-data and the provisioner program. Must be in .tar.gz format."},
+    "disk_size_gb": {"Value": "10", "Description": "The disk size to use for preloading."},
+    "host_maintenance": {"Value": "MIGRATE", "Description": "VM behavior when there is maintenance."}
+  },
+  "Sources": {
+    "cloud-config": "/data/startup.yaml",
+    "cidata.tar.gz_": "${cidata_img}"
+  },
+  "Steps": {
+    "copy-gcs": {
+      "CopyGCSObjects": [
+        {
+          "Source": "${SOURCESPATH}/cidata.tar.gz_",
+          "Destination": "${SOURCESPATH}/cidata.tar.gz"
+        }
+      ]
+    },
+    "create-cidata": {
+      "CreateImages": [
+        {
+          "Name": "cidata",
+          "RawDisk": {
+            "Source": "${SOURCESPATH}/cidata.tar.gz"
+          }
+        }
+      ]
+    },
+    "setup": {
+      "CreateDisks": [
+        {
+          "Name": "boot-disk",
+          "SourceImage": "${source_image}",
+          "SizeGb": "${disk_size_gb}"
+        },
+        {
+          "Name": "cidata-disk",
+          "SourceImage": "cidata"
+        }
+      ]
+    },
+    "run": {
+      "CreateInstances": [
+        {
+          "Name": "preload-vm",
+          "Disks": [{"Source": "boot-disk"}, {"Source": "cidata-disk"}],
+          "guestAccelerators": {{.Accelerators}},
+          "scheduling": {
+            "onHostMaintenance": "${host_maintenance}"
+          },
+          "Metadata": {
+            "user-data": "${SOURCE:cloud-config}",
+            "block-project-ssh-keys": "TRUE",
+            "cos-update-strategy": "update_disabled"
+          },
+          "Scopes": [
+            "https://www.googleapis.com/auth/devstorage.read_write",
+            "https://www.googleapis.com/auth/cloud-platform"
+          ]
+        }
+      ]
+    },
+    "wait-preload-finished": {
+      "WaitForInstancesSignal": [
+        {
+          "Name": "preload-vm",
+          "Interval": "30s",
+          "SerialOutput": {
+            "Port": 3,
+            "FailureMatch": "BuildFailed:",
+            "SuccessMatch": "BuildSucceeded:",
+            "StatusMatch": "BuildStatus:"
+          }
+        }
+      ]
+    },
+    "send-logging-end-msg": {
+      "UpdateInstancesMetadata": [
+        {
+          "Instance": "preload-vm",
+          "Metadata": {
+            "DaisyEnd": "ack"
+          }
+        }
+      ]
+    },
+    "wait-vm-shutdown": {
+      "WaitForInstancesSignal": [
+        {
+          "Name": "preload-vm",
+          "Interval": "2s",
+          "Stopped": true
+        }
+      ]
+    },
+    "wait-for-resize": {
+      {{.WaitResize}}
+    },
+    "resize-disk": {
+      {{.ResizeDisks}}
+    },
+    "image": {
+      "CreateImages": [
+        {
+          "RealName": "${output_image_name}",
+          "Project": "${output_image_project}",
+          "NoCleanup": true,
+          "SourceDisk": "boot-disk",
+          "labels": {{.Labels}},
+          "description": "Derivative of ${source_image}.",
+          "family": "${output_image_family}",
+          "licenses": {{.Licenses}}
+        }
+      ]
+    }
+  },
+  "Dependencies": {
+    "create-cidata": ["copy-gcs"],
+    "setup": ["create-cidata"],
+    "run": ["setup"],
+    "wait-preload-finished": ["run"],
+    "wait-for-resize": ["run"],
+    "resize-disk": ["wait-for-resize"],
+    "send-logging-end-msg": ["wait-preload-finished", "resize-disk"],
+    "wait-vm-shutdown": ["send-logging-end-msg"],
+    "image": ["wait-vm-shutdown"]
+  }
+}
diff --git a/src/data/startup.yaml b/src/data/startup.yaml
new file mode 100644
index 0000000..a2851be
--- /dev/null
+++ b/src/data/startup.yaml
@@ -0,0 +1,94 @@
+#cloud-config
+#
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This script runs customization scripts on a COS VM instance. It pulls
+# source from GCS and executes it.
+
+write_files:
+- path: /tmp/startup.sh
+  permissions: 0644
+  content: |
+    set -o errexit
+    set -o pipefail
+    set -o nounset
+
+    status() {
+      $@ 2>&1 | sed "s/^/BuildStatus: /"
+      return "${PIPESTATUS[0]}"
+    }
+
+    run_provisioner() {
+      status $@ && ret=$? || ret=$?
+      if [[ "${ret}" != 0 ]]; then
+        if [[ "${ret}" == 3 ]]; then
+          status echo "Rebooting..."
+          sleep 15 || :
+          reboot
+          while true; do sleep 1; done
+        fi
+        echo "BuildFailed: exiting due to errors"
+        # Under normal circumstances, Daisy will delete the VM once it sees
+        # "BuildFailed". But sometimes Daisy will die unexpectedly, so we want
+        # to shutdown ourselves to conserve resources. Let's give Daisy 5
+        # minutes to capture logs and delete the VM. If Daisy doesn't do that in
+        # 5 minutes, let's shut ourselves down.
+        sleep 300 || :
+        shutdown -h now
+        while true; do sleep 1; done
+      else
+        echo "BuildSucceeded: Build completed with no errors. Shutting down..."
+        # Once we shut down, the serial logs will be gone. We need to give Daisy
+        # time to capture the serial logs. Once Daisy is done capturing the
+        # serial logs, it will add the "DaisyEnd" metadata key. Let's wait for
+        # that key to appear (and shutdown anyway after 5 minutes).
+        /mnt/disks/cidata/metadata_watcher DaisyEnd
+        shutdown -h now
+        while true; do sleep 1; done
+      fi
+    }
+
+    main() {
+      status mkdir -p /mnt/disks/cidata
+      status mount /dev/disk/by-label/CIDATA /mnt/disks/cidata
+      if [[ ! -d /var/lib/.cos-customizer ]]; then
+        run_provisioner /mnt/disks/cidata/provisioner run --config=/mnt/disks/cidata/config.json
+      else
+        run_provisioner /mnt/disks/cidata/provisioner resume
+      fi
+    }
+
+    main
+- path: /etc/systemd/system/customizer.service
+  permissions: 0644
+  content: |
+    [Unit]
+    Description=Container-Optimized OS Customization Service
+    Wants=network-online.target gcr-online.target docker.service
+    After=network-online.target gcr-online.target docker.service
+
+    [Service]
+    Type=oneshot
+    RemainAfterExit=yes
+    User=root
+    ExecStart=/bin/bash /tmp/startup.sh
+    StandardOutput=tty
+    StandardError=tty
+    TTYPath=/dev/ttyS2
+
+runcmd:
+- echo "Starting startup service..."
+- systemctl daemon-reload
+- systemctl --no-block start customizer.service
diff --git a/src/pkg/config/BUILD.bazel b/src/pkg/config/BUILD.bazel
new file mode 100644
index 0000000..3b468c8
--- /dev/null
+++ b/src/pkg/config/BUILD.bazel
@@ -0,0 +1,37 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "config",
+    srcs = ["config.go"],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//src/pkg/utils",
+        "@org_golang_google_api//compute/v1:compute",
+    ],
+)
+
+go_test(
+    name = "config_test",
+    srcs = ["config_test.go"],
+    data = glob(["testdata/**"]),
+    embed = [":config"],
+    deps = [
+        "@com_github_google_go_cmp//cmp",
+        "@org_golang_google_api//compute/v1:compute",
+    ],
+)
diff --git a/src/pkg/config/config.go b/src/pkg/config/config.go
new file mode 100644
index 0000000..778f2a4
--- /dev/null
+++ b/src/pkg/config/config.go
@@ -0,0 +1,128 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package config exports functionality for storing/retrieving build step configuration on/from
+// the local disk.
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+	compute "google.golang.org/api/compute/v1"
+)
+
+// Image stores GCE image configuration.
+type Image struct {
+	*compute.Image
+	Project string
+}
+
+// NewImage builds an Image with some initialized defaults.
+func NewImage(name string, project string) *Image {
+	return &Image{
+		&compute.Image{Name: name, Labels: make(map[string]string)},
+		project,
+	}
+}
+
+// URL gets the partial GCE URL of the image.
+func (i *Image) URL() string {
+	return fmt.Sprintf("projects/%s/global/images/%s", i.Project, i.Name)
+}
+
+// MarshalJSON marshals the image configuration into JSON.
+func (i *Image) MarshalJSON() ([]byte, error) {
+	computeImData, err := json.Marshal(i.Image)
+	if err != nil {
+		return nil, err
+	}
+	computeIm := make(map[string]interface{})
+	if err := json.Unmarshal(computeImData, &computeIm); err != nil {
+		return nil, err
+	}
+	computeIm["Project"] = i.Project
+	return json.Marshal(computeIm)
+}
+
+// Build stores configuration data associated with the image build session.
+type Build struct {
+	GCSBucket string
+	GCSDir    string
+	Project   string
+	Zone      string
+	DiskSize  int
+	GPUType   string
+	Timeout   string
+	GCSFiles  []string
+}
+
+// SaveConfigToFile clears the target config file and then saves the new config
+// data.
+func SaveConfigToFile(configFile *os.File, v interface{}) error {
+	if _, err := configFile.Seek(0, 0); err != nil {
+		return fmt.Errorf("cannot seek config file, error msg:(%v)", err)
+	}
+	if err := configFile.Truncate(0); err != nil {
+		return fmt.Errorf("cannot truncate config file, error msg:(%v)", err)
+	}
+	if err := Save(configFile, v); err != nil {
+		return fmt.Errorf("cannot save config file, error msg:(%v)", err)
+	}
+	return nil
+}
+
+// SaveConfigToPath does the same thing as SaveConfigToFile, but updates a file
+// system path.
+func SaveConfigToPath(configPath string, config interface{}) (err error) {
+	configFile, err := os.OpenFile(configPath, os.O_RDWR, 0666)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(configFile, "error writing to "+configPath, &err)
+	return SaveConfigToFile(configFile, config)
+}
+
+// Save serializes the given struct as JSON and writes it out.
+func Save(w io.Writer, v interface{}) error {
+	data, err := json.Marshal(v)
+	if err != nil {
+		return err
+	}
+	_, err = io.WriteString(w, string(data))
+	return err
+}
+
+// Load deserializes JSON formatted data into the given struct.
+func Load(r io.Reader, v interface{}) error {
+	data, err := ioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, v)
+}
+
+// LoadFromFile loads JSON data from a file into the given struct.
+func LoadFromFile(path string, v interface{}) (err error) {
+	r, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(r, fmt.Sprintf("error closing %q", path), &err)
+	return Load(r, v)
+}
diff --git a/src/pkg/config/config_test.go b/src/pkg/config/config_test.go
new file mode 100644
index 0000000..b34ba7b
--- /dev/null
+++ b/src/pkg/config/config_test.go
@@ -0,0 +1,76 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"encoding/json"
+	"strings"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+
+	compute "google.golang.org/api/compute/v1"
+)
+
+func TestSave(t *testing.T) {
+	data := &struct{ Test string }{"test"}
+	expected := "{\"Test\":\"test\"}"
+	actual := new(strings.Builder)
+	if err := Save(actual, data); err != nil {
+		t.Fatal(err)
+	}
+	if got := actual.String(); got != expected {
+		t.Errorf("actual: %s expected: %s", got, expected)
+	}
+}
+
+func TestLoad(t *testing.T) {
+	data := strings.NewReader("{\"Test\":\"test\"}")
+	expected := &struct{ Test string }{"test"}
+	actual := new(struct{ Test string })
+	if err := Load(data, actual); err != nil {
+		t.Fatal(err)
+	}
+	if *actual != *expected {
+		t.Errorf("actual: %s expected: %s", actual, expected)
+	}
+}
+
+func TestLoadFromFile(t *testing.T) {
+	file := "testdata/test_1"
+	expected := &Image{&compute.Image{Name: "test-name", Licenses: []string{}}, "test-project"}
+	actual := new(Image)
+	if err := LoadFromFile(file, actual); err != nil {
+		t.Fatal(err)
+	}
+	if !cmp.Equal(actual, expected) {
+		t.Errorf("actual: %v expected: %v", actual, expected)
+	}
+}
+
+func TestImageMarshalJSON(t *testing.T) {
+	image := NewImage("name", "project")
+	bytes, err := json.Marshal(image)
+	if err != nil {
+		t.Fatal(err)
+	}
+	got := NewImage("", "")
+	if err := json.Unmarshal(bytes, got); err != nil {
+		t.Fatal(err)
+	}
+	if !cmp.Equal(got, image) {
+		t.Errorf("actual: %v expected: %v", got, image)
+	}
+}
diff --git a/src/pkg/config/testdata/test_1 b/src/pkg/config/testdata/test_1
new file mode 100644
index 0000000..a7cc9e6
--- /dev/null
+++ b/src/pkg/config/testdata/test_1
@@ -0,0 +1,5 @@
+{
+	"Name": "test-name",
+	"Project": "test-project",
+	"Licenses": []
+}
diff --git a/src/pkg/fakes/BUILD.bazel b/src/pkg/fakes/BUILD.bazel
new file mode 100644
index 0000000..d58fca3
--- /dev/null
+++ b/src/pkg/fakes/BUILD.bazel
@@ -0,0 +1,50 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "fakes",
+    testonly = True,
+    srcs = [
+        "gce.go",
+        "gcs.go",
+        "time.go",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fakes",
+    visibility = ["//visibility:public"],
+    deps = [
+        "@com_google_cloud_go_storage//:storage",
+        "@org_golang_google_api//compute/v1:compute",
+        "@org_golang_google_api//googleapi",
+        "@org_golang_google_api//option",
+    ],
+)
+
+go_test(
+    name = "fakes_test",
+    srcs = [
+        "gce_test.go",
+        "gcs_test.go",
+    ],
+    embed = [":fakes"],
+    deps = [
+        "@com_github_google_go_cmp//cmp",
+        "@com_github_google_go_cmp//cmp/cmpopts",
+        "@com_google_cloud_go_storage//:storage",
+        "@org_golang_google_api//compute/v1:compute",
+        "@org_golang_google_api//googleapi",
+        "@org_golang_google_api//iterator",
+    ],
+)
diff --git a/src/pkg/fakes/gce.go b/src/pkg/fakes/gce.go
new file mode 100644
index 0000000..66a9b6e
--- /dev/null
+++ b/src/pkg/fakes/gce.go
@@ -0,0 +1,189 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fakes contains fake implementations to be used in unit tests.
+package fakes
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+
+	compute "google.golang.org/api/compute/v1"
+	"google.golang.org/api/googleapi"
+)
+
+// writeError writes the given error code to the given http.ResponseWriter.
+func writeError(w http.ResponseWriter, r *http.Request, code int) {
+	w.WriteHeader(code)
+	resp := &googleapi.Error{Code: code}
+	bytes, err := json.Marshal(resp)
+	if err != nil {
+		log.Printf("error: %s URL: %s Response: %v", err, r.URL.Path, resp)
+		return
+	}
+	w.Write(bytes)
+}
+
+// GCE is a fake GCE implementation. It is intended to be constructed with NewGCEServer.
+//
+// The GCE struct represents the state of the fake GCE instance. Fields on this struct can be modified to influence the
+// return values of GCE API calls.
+//
+// GCE should not be considered concurrency-safe. Do not use this struct in a concurrent way.
+type GCE struct {
+	// Images represents the images present in the project.
+	Images *compute.ImageList
+	// Deprecated represents the set of deprecated images in the project.
+	Deprecated map[string]*compute.DeprecationStatus
+	// Operations is the sequence of operations that the fake GCE server should return.
+	Operations []*compute.Operation
+	// server is an HTTP server that serves fake GCE requests. Requests are served using the state stored in
+	// the other struct fields.
+	server  *httptest.Server
+	project string
+}
+
+// NewGCEServer constructs a fake GCE implementation for a given GCE project.
+func NewGCEServer(project string) *GCE {
+	gce := &GCE{
+		Images:     &compute.ImageList{},
+		Deprecated: make(map[string]*compute.DeprecationStatus),
+		project:    project,
+	}
+	mux := http.NewServeMux()
+	mux.HandleFunc(fmt.Sprintf("/projects/%s/global/images", project), gce.imagesListHandler)
+	mux.HandleFunc(fmt.Sprintf("/projects/%s/global/images/", project), gce.imageHandler)
+	mux.HandleFunc(fmt.Sprintf("/projects/%s/global/operations/", project), gce.operationsHandler)
+	gce.server = httptest.NewServer(mux)
+	return gce
+}
+
+// Client gets a GCE client to use for accessing the fake GCE server.
+func (g *GCE) Client() (*compute.Service, error) {
+	client, err := compute.New(g.server.Client())
+	if err != nil {
+		return nil, err
+	}
+	client.BasePath = g.server.URL
+	return client, nil
+}
+
+func (g *GCE) operation() *compute.Operation {
+	op := g.Operations[0]
+	g.Operations = g.Operations[1:]
+	return op
+}
+
+func (g *GCE) deprecate(name string, status *compute.DeprecationStatus) *compute.Operation {
+	g.Deprecated[name] = status
+	return g.operation()
+}
+
+func (g *GCE) image(name string) *compute.Image {
+	for _, image := range g.Images.Items {
+		if image.Name == name {
+			return image
+		}
+	}
+	return nil
+}
+
+func (g *GCE) imagesListHandler(w http.ResponseWriter, r *http.Request) {
+	bytes, err := json.Marshal(g.Images)
+	if err != nil {
+		writeError(w, r, http.StatusInternalServerError)
+		return
+	}
+	w.Write(bytes)
+}
+
+func (g *GCE) imageHandler(w http.ResponseWriter, r *http.Request) {
+	// Path starts with /project/<project>/global/images/<name>
+	splitPath := strings.Split(r.URL.Path, "/")
+	splitPath = splitPath[1:]
+	switch {
+	case len(splitPath) == 5:
+		image := g.image(splitPath[4])
+		if image == nil {
+			writeError(w, r, http.StatusNotFound)
+			return
+		}
+		bytes, err := json.Marshal(image)
+		if err != nil {
+			writeError(w, r, http.StatusInternalServerError)
+			return
+		}
+		w.Write(bytes)
+	case len(splitPath) == 6 && splitPath[5] == "deprecate":
+		if g.image(splitPath[4]) == nil {
+			writeError(w, r, http.StatusNotFound)
+			return
+		}
+		body, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			log.Println("failed to read body")
+			writeError(w, r, http.StatusInternalServerError)
+			return
+		}
+		status := &compute.DeprecationStatus{}
+		err = json.Unmarshal(body, status)
+		if err != nil {
+			log.Printf("failed to parse body: %s", string(body))
+			writeError(w, r, http.StatusInternalServerError)
+			return
+		}
+		op := g.deprecate(splitPath[4], status)
+		bytes, err := json.Marshal(op)
+		if err != nil {
+			log.Printf("failed to marshal operation: %v", op)
+			writeError(w, r, http.StatusInternalServerError)
+			return
+		}
+		w.Write(bytes)
+	default:
+		log.Printf("unrecognized path: %s", r.URL.Path)
+		writeError(w, r, http.StatusNotFound)
+	}
+}
+
+func (g *GCE) operationsHandler(w http.ResponseWriter, r *http.Request) {
+	bytes, err := json.Marshal(g.operation())
+	if err != nil {
+		writeError(w, r, http.StatusInternalServerError)
+		return
+	}
+	w.Write(bytes)
+}
+
+// Close closes the fake GCE server.
+func (g *GCE) Close() {
+	g.server.Close()
+}
+
+// GCEForTest encapsulates boilerplate needed for many test cases.
+func GCEForTest(t *testing.T, project string) (*GCE, *compute.Service) {
+	t.Helper()
+	gce := NewGCEServer(project)
+	client, err := gce.Client()
+	if err != nil {
+		t.Fatal(err)
+	}
+	return gce, client
+}
diff --git a/src/pkg/fakes/gce_test.go b/src/pkg/fakes/gce_test.go
new file mode 100644
index 0000000..ecff8b8
--- /dev/null
+++ b/src/pkg/fakes/gce_test.go
@@ -0,0 +1,199 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fakes
+
+import (
+	"net/http"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	compute "google.golang.org/api/compute/v1"
+	"google.golang.org/api/googleapi"
+)
+
+func TestImageList(t *testing.T) {
+	testImageListData := []struct {
+		testName string
+		images   *compute.ImageList
+	}{
+		{
+			"Two images",
+			&compute.ImageList{Items: []*compute.Image{{Name: "test-1"}, {Name: "test-2"}}},
+		},
+		{
+			"No images",
+			&compute.ImageList{},
+		},
+		{
+			"Image with family",
+			&compute.ImageList{Items: []*compute.Image{{Name: "test-1", Family: "test-family"}}},
+		},
+	}
+	fakeGCE, client := GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	for _, input := range testImageListData {
+		t.Run(input.testName, func(t *testing.T) {
+			fakeGCE.Images = input.images
+			actual, err := client.Images.List("test-project").Do()
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !cmp.Equal(actual.Items, input.images.Items) {
+				t.Errorf("actual: %v expected: %v", actual.Items, input.images.Items)
+			}
+		})
+	}
+}
+
+func TestImageGet(t *testing.T) {
+	testImageGetData := []struct {
+		testName string
+		images   []*compute.Image
+		name     string
+		httpCode int
+	}{
+		{
+			"ImageExists",
+			[]*compute.Image{{Name: "im-1"}},
+			"im-1",
+			http.StatusOK,
+		},
+		{
+			"ImageDoesntExist",
+			nil,
+			"im-2",
+			http.StatusNotFound,
+		},
+	}
+	fakeGCE, client := GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	for _, input := range testImageGetData {
+		t.Run(input.testName, func(t *testing.T) {
+			fakeGCE.Images.Items = input.images
+			actualIm, err := client.Images.Get("test-project", input.name).Do()
+			if apiErr, ok := err.(*googleapi.Error); ok {
+				if apiErr.Code != input.httpCode {
+					t.Errorf("actual: %d expected: %d", apiErr.Code, input.httpCode)
+				}
+				return
+			}
+			if err != nil {
+				t.Fatal(err)
+			}
+			if actualIm.Name != input.name {
+				t.Errorf("actual: %s expected: %s", actualIm.Name, input.name)
+			}
+		})
+	}
+}
+
+func TestDeprecate(t *testing.T) {
+	testDeprecateData := []struct {
+		testName  string
+		images    []*compute.Image
+		name      string
+		status    *compute.DeprecationStatus
+		operation *compute.Operation
+		httpCode  int
+	}{
+		{
+			"SetStatusReturnDone",
+			[]*compute.Image{{Name: "test-1"}},
+			"test-1",
+			&compute.DeprecationStatus{State: "DEPRECATED"},
+			&compute.Operation{Name: "op-1", Status: "DONE"},
+			http.StatusOK,
+		},
+		{
+			"ClearStatusReturnRunning",
+			[]*compute.Image{{Name: "test-2"}},
+			"test-2",
+			&compute.DeprecationStatus{},
+			&compute.Operation{Name: "op-1", Status: "RUNNING"},
+			http.StatusOK,
+		},
+		{
+			"ImageNotFound",
+			nil,
+			"test-3",
+			&compute.DeprecationStatus{},
+			nil,
+			http.StatusNotFound,
+		},
+	}
+	fakeGCE, client := GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	for _, input := range testDeprecateData {
+		t.Run(input.testName, func(t *testing.T) {
+			fakeGCE.Images.Items = input.images
+			fakeGCE.Operations = []*compute.Operation{input.operation}
+			actualOp, err := client.Images.Deprecate("test-project", input.name, input.status).Do()
+			if apiErr, ok := err.(*googleapi.Error); ok {
+				if apiErr.Code != input.httpCode {
+					t.Errorf("actual: %d expected: %d", apiErr.Code, input.httpCode)
+				}
+				return
+			}
+			if err != nil {
+				t.Fatal(err)
+			}
+			if actualOp.Name != input.operation.Name {
+				t.Errorf("actual: %s expected: %s", actualOp.Name, input.operation.Name)
+			}
+			if actualOp.Status != input.operation.Status {
+				t.Errorf("actual: %s expected: %s", actualOp.Status, input.operation.Status)
+			}
+			if actualStatus, ok := fakeGCE.Deprecated[input.name]; !ok {
+				t.Errorf("deprecated images: %v expected element: %s", fakeGCE.Deprecated, input.name)
+			} else if !cmp.Equal(actualStatus, input.status) {
+				t.Errorf("actual: %v expected: %v", actualStatus, input.status)
+			}
+		})
+	}
+}
+
+func TestGetOperation(t *testing.T) {
+	testGetOperationData := []struct {
+		testName   string
+		operations []*compute.Operation
+	}{
+		{
+			"OneOperation",
+			[]*compute.Operation{{Name: "op-1"}},
+		},
+		{
+			"TwoOperations",
+			[]*compute.Operation{{Name: "op-2"}, {Name: "op-3"}},
+		},
+	}
+	fakeGCE, client := GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	for _, input := range testGetOperationData {
+		t.Run(input.testName, func(t *testing.T) {
+			fakeGCE.Operations = make([]*compute.Operation, len(input.operations))
+			copy(fakeGCE.Operations, input.operations)
+			for _, expectedOp := range input.operations {
+				actualOp, err := client.GlobalOperations.Get("test-project", "").Do()
+				if err != nil {
+					t.Error(err)
+					continue
+				}
+				if actualOp.Name != expectedOp.Name {
+					t.Errorf("actual: %s expected: %s", actualOp.Name, expectedOp.Name)
+				}
+			}
+		})
+	}
+}
diff --git a/src/pkg/fakes/gcs.go b/src/pkg/fakes/gcs.go
new file mode 100644
index 0000000..55a34bc
--- /dev/null
+++ b/src/pkg/fakes/gcs.go
@@ -0,0 +1,232 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fakes
+
+import (
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"mime"
+	"mime/multipart"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+
+	"cloud.google.com/go/storage"
+	"google.golang.org/api/option"
+)
+
+// gcsObject mirrors the subset of the GCS object resource the fake needs
+// (object name and owning bucket).
+type gcsObject struct{ Name, Bucket string }
+// gcsObjects mirrors the `list` response envelope.
+type gcsObjects struct{ Items []gcsObject }
+
+// setTransportAddr redirects every TLS dial on the transport to addr, so that
+// requests addressed to real GCS endpoints land on the local test server.
+// NOTE(review): Transport.DialTLS is deprecated in newer net/http in favor of
+// DialTLSContext; fine for a test fake, but worth migrating eventually.
+func setTransportAddr(transport *http.Transport, addr string) {
+	transport.DialTLS = func(_, _ string) (net.Conn, error) {
+		return tls.Dial("tcp", addr, transport.TLSClientConfig)
+	}
+}
+
+// GCS contains data and functionality for a fake GCS server. It is intended to be constructed with NewGCSServer.
+//
+// The GCS struct represents the state of the fake GCS instance. Fields on this struct can be modified to influence the
+// return values of GCS API calls.
+//
+// The fake GCS server implements a small part of the API discussed here:
+// https://godoc.org/cloud.google.com/go/storage. Only the parts that we need for testing
+// are implemented here. Documentation for the GCS JSON API is here:
+// https://cloud.google.com/storage/docs/json_api/v1/
+//
+// This struct should not be considered concurrency safe.
+type GCS struct {
+	// Objects represents the collection of objects that exist in the fake GCS server.
+	// Keys are strings of the form "/<bucket>/<object path>". Values are data that belong
+	// in each object.
+	// The HTTP handlers read and write this map directly with no locking, so
+	// tests must not mutate it while a request may be in flight.
+	Objects map[string][]byte
+	// Client is the client to use when accessing the fake GCS server.
+	Client *storage.Client
+	// Server is the fake GCS server. It uses state from this struct for serving requests.
+	Server *httptest.Server
+}
+
+// NewGCSServer constructs a fake GCS implementation.
+// It starts a TLS httptest server and builds a storage.Client whose transport
+// is redirected to the local listener, so all client calls hit the fake.
+func NewGCSServer(ctx context.Context) (*GCS, error) {
+	var err error
+	gcs := &GCS{make(map[string][]byte), nil, nil}
+	mux := http.NewServeMux()
+	// Media downloads ("/<bucket>/<object>") fall through to the default route.
+	mux.HandleFunc("/", gcs.objectHandler)
+	mux.HandleFunc("/storage/v1/b/", gcs.bucketHandler)
+	mux.HandleFunc("/upload/storage/v1/b/", gcs.uploadHandler)
+	gcs.Server = httptest.NewTLSServer(mux)
+	httpClient := gcs.Server.Client()
+	setTransportAddr(httpClient.Transport.(*http.Transport), gcs.Server.Listener.Addr().String())
+	gcs.Client, err = storage.NewClient(ctx, option.WithHTTPClient(httpClient), option.WithoutAuthentication())
+	if err != nil {
+		// Avoid leaking the test server if client construction fails.
+		gcs.Server.Close()
+		return nil, err
+	}
+	return gcs, nil
+}
+
+// objectHandler serves object media downloads. The request path is used
+// directly as the key into g.Objects ("/<bucket>/<object>"); unknown keys
+// produce a 404 via writeError (defined elsewhere in this package).
+func (g *GCS) objectHandler(w http.ResponseWriter, r *http.Request) {
+	data, ok := g.Objects[r.URL.Path]
+	if !ok {
+		writeError(w, r, http.StatusNotFound)
+		return
+	}
+	if _, err := w.Write(data); err != nil {
+		log.Printf("write %q failed: %v", r.URL.Path, err)
+	}
+}
+
+// list handles a `list` request.
+// See: https://cloud.google.com/storage/docs/json_api/v1/#Objects, `list` method.
+// Only handles the 'prefix' optional parameter.
+func (g *GCS) list(w http.ResponseWriter, r *http.Request, bucket string) {
+	if err := r.ParseForm(); err != nil {
+		// NOTE(review): on a ParseForm failure this returns an empty 200
+		// response rather than an error status — intentional? confirm.
+		log.Printf("failed to parse form %q: %v", r.URL.Path, err)
+		return
+	}
+	// Keys in g.Objects are "/<bucket>/<object>", so the effective filter is
+	// bucket membership plus the client-supplied object-name prefix.
+	bucketPrefix := fmt.Sprintf("/%s/", bucket)
+	prefix := bucketPrefix + r.Form.Get("prefix")
+	var all gcsObjects
+	for k := range g.Objects {
+		if strings.HasPrefix(k, prefix) {
+			all.Items = append(all.Items, gcsObject{strings.TrimPrefix(k, bucketPrefix), bucket})
+		}
+	}
+	bytes, err := json.Marshal(all)
+	if err != nil {
+		writeError(w, r, http.StatusInternalServerError)
+		return
+	}
+	if _, err := w.Write(bytes); err != nil {
+		log.Printf("write %q failed: %v", r.URL.Path, err)
+	}
+}
+
+// del handles a `delete` request.
+// See: https://cloud.google.com/storage/docs/json_api/v1/#Objects, `delete` method.
+// Doesn't handle any optional parameters. Deleting an object that does not
+// exist reports 404 through writeError.
+func (g *GCS) del(w http.ResponseWriter, r *http.Request, bucket, objectPath string) {
+	key := fmt.Sprintf("/%s/%s", bucket, objectPath)
+	_, exists := g.Objects[key]
+	if !exists {
+		log.Printf("delete failed: item %s does not exist", key)
+		writeError(w, r, http.StatusNotFound)
+		return
+	}
+	delete(g.Objects, key)
+}
+
+// bucketHandler routes non-upload bucket API calls: DELETE on a specific
+// object goes to del, a bare "/o" path goes to list, and everything else
+// (e.g. GET on a specific object via this API surface) is a 404.
+func (g *GCS) bucketHandler(w http.ResponseWriter, r *http.Request) {
+	// Path looks like:
+	// - /storage/v1/b/<bucket>/o
+	// - /storage/v1/b/<bucket>/o/<object>
+	splitPath := strings.SplitN(r.URL.Path, "/", 7)
+	if len(splitPath) < 6 || splitPath[5] != "o" {
+		log.Printf("unrecognized path: %s", r.URL.Path)
+		writeError(w, r, http.StatusNotFound)
+		return
+	}
+	objectPath := ""
+	if len(splitPath) == 7 {
+		objectPath = splitPath[6]
+	}
+	bucket := splitPath[4]
+	switch {
+	case objectPath != "" && r.Method == "DELETE":
+		g.del(w, r, bucket, objectPath)
+	case objectPath == "":
+		g.list(w, r, bucket)
+	default:
+		log.Printf("unrecognized path: %s", r.URL.Path)
+		writeError(w, r, http.StatusNotFound)
+		return
+	}
+}
+
+// uploadHandler implements the GCS JSON API `insert` method for objects.
+//
+// Path looks like /upload/storage/v1/b/<bucket>/o
+// (see: https://cloud.google.com/storage/docs/json_api/v1/#Objects, `insert` method)
+// This implementation only accepts the path parameter 'bucket'. It does not accept any optional parameters.
+//
+// GCS uses multipart HTTP messages to upload data. The first part contains object metadata (name, bucket, etc)
+// in JSON format, and the second part contains the object data. Here, we extract the object metadata and data
+// from the multipart message and store it.
+func (g *GCS) uploadHandler(w http.ResponseWriter, r *http.Request) {
+	splitPath := strings.Split(r.URL.Path, "/")
+	if len(splitPath) != 7 || splitPath[6] != "o" {
+		log.Printf("unrecognized path: %s", r.URL.Path)
+		writeError(w, r, http.StatusNotFound)
+		return
+	}
+	_, params, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
+	if err != nil {
+		log.Printf("failed to parse Content-Type: %s", r.Header.Get("Content-Type"))
+		writeError(w, r, http.StatusInternalServerError)
+		return
+	}
+	var parts [][]byte
+	mr := multipart.NewReader(r.Body, params["boundary"])
+	for {
+		part, err := mr.NextPart()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			log.Printf("failed to parse request: %v", r)
+			writeError(w, r, http.StatusInternalServerError)
+			return
+		}
+		partData, err := ioutil.ReadAll(part)
+		if err != nil {
+			log.Printf("failed to parse request: %v", r)
+			writeError(w, r, http.StatusInternalServerError)
+			return
+		}
+		parts = append(parts, partData)
+	}
+	// Guard against malformed uploads: indexing parts[0]/parts[1] below would
+	// panic (index out of range) if the request body did not contain exactly
+	// two multipart sections (metadata + data).
+	if len(parts) != 2 {
+		log.Printf("expected 2 multipart sections (metadata, data), got %d", len(parts))
+		writeError(w, r, http.StatusBadRequest)
+		return
+	}
+	objectMetadata := parts[0]
+	objectData := parts[1]
+	object := &gcsObject{}
+	if err := json.Unmarshal(objectMetadata, object); err != nil {
+		log.Printf("failed to parse object: %s", string(objectMetadata))
+		writeError(w, r, http.StatusInternalServerError)
+		return
+	}
+	g.Objects[fmt.Sprintf("/%s/%s", object.Bucket, object.Name)] = objectData
+	// Echo the metadata back; the real API responds with the object resource.
+	if _, err := w.Write(objectMetadata); err != nil {
+		log.Printf("write %q failed: %v", r.URL.Path, err)
+	}
+}
+
+// Close closes the fake GCS server and its client. The server shutdown is
+// deferred so it runs even if closing the client fails; the client's close
+// error is the one returned.
+func (g *GCS) Close() error {
+	defer g.Server.Close()
+	return g.Client.Close()
+}
+
+// GCSForTest encapsulates boilerplate for getting a GCS object in tests.
+// It fails the test immediately if the fake server cannot be constructed.
+func GCSForTest(t *testing.T) *GCS {
+	t.Helper()
+	gcs, err := NewGCSServer(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	return gcs
+}
diff --git a/src/pkg/fakes/gcs_test.go b/src/pkg/fakes/gcs_test.go
new file mode 100644
index 0000000..33b8c15
--- /dev/null
+++ b/src/pkg/fakes/gcs_test.go
@@ -0,0 +1,235 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fakes
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"testing"
+
+	"cloud.google.com/go/storage"
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"google.golang.org/api/iterator"
+)
+
+// TestReadObject verifies that data seeded directly into the fake's Objects
+// map is readable through the storage client.
+func TestReadObject(t *testing.T) {
+	gcs := GCSForTest(t)
+	defer gcs.Close()
+	gcs.Objects["/test-bucket/test-object"] = []byte("data")
+	r, err := gcs.Client.Bucket("test-bucket").Object("test-object").NewReader(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	got, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !cmp.Equal(got, []byte("data")) {
+		t.Errorf("bucket 'test-bucket', object 'test-object' has %s; want 'data'", string(got))
+	}
+}
+
+// TestReadObjectNotFound verifies that reading a missing object surfaces the
+// canonical storage.ErrObjectNotExist sentinel (i.e. the fake's 404 is mapped
+// correctly by the client library).
+func TestReadObjectNotFound(t *testing.T) {
+	gcs := GCSForTest(t)
+	defer gcs.Close()
+	_, err := gcs.Client.Bucket("test-bucket").Object("test-object").NewReader(context.Background())
+	if err != storage.ErrObjectNotExist {
+		t.Errorf("bucket 'test-bucket', object 'test-object' has %s; want %s", err, storage.ErrObjectNotExist)
+	}
+}
+
+// TestIterate verifies object listing through the fake, including prefix
+// filtering and scoping to a single bucket.
+func TestIterate(t *testing.T) {
+	testData := []struct {
+		testName        string
+		prefix          string
+		objects         map[string][]byte
+		bucket          string
+		expectedObjects []string
+	}{
+		{
+			"HasItems",
+			"",
+			map[string][]byte{
+				"/test-bucket/obj-1": []byte(""),
+				"/test-bucket/obj-2": []byte(""),
+				"/test-bucket/obj-3": []byte(""),
+				"/bucket/obj-4":      []byte(""),
+			},
+			"test-bucket",
+			[]string{
+				"/test-bucket/obj-1",
+				"/test-bucket/obj-2",
+				"/test-bucket/obj-3",
+			},
+		},
+		{
+			"NoItems",
+			"",
+			make(map[string][]byte),
+			"test-bucket",
+			nil,
+		},
+		{
+			"HasPrefix",
+			"pre",
+			map[string][]byte{
+				"/test-bucket/pre-1": []byte(""),
+				"/test-bucket/pre-2": []byte(""),
+				"/test-bucket/obj-3": []byte(""),
+			},
+			"test-bucket",
+			[]string{
+				"/test-bucket/pre-1",
+				"/test-bucket/pre-2",
+			},
+		},
+	}
+	gcs := GCSForTest(t)
+	defer gcs.Close()
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			gcs.Objects = input.objects
+			q := &storage.Query{
+				Delimiter: "",
+				Prefix:    input.prefix,
+				Versions:  false,
+			}
+			it := gcs.Client.Bucket(input.bucket).Objects(context.Background(), q)
+			var actualObjects []string
+			for {
+				objAttrs, err := it.Next()
+				if err == iterator.Done {
+					break
+				}
+				if err != nil {
+					t.Fatal(err)
+				}
+				actualObjects = append(actualObjects, fmt.Sprintf("/%s/%s", objAttrs.Bucket, objAttrs.Name))
+			}
+			// The fake lists in map-iteration order, so compare
+			// order-insensitively.
+			sortStrSlices := cmpopts.SortSlices(func(a, b string) bool { return a < b })
+			if !cmp.Equal(actualObjects, input.expectedObjects, sortStrSlices) {
+				t.Errorf("bucket %s has %v, want %v", input.bucket, actualObjects, input.expectedObjects)
+			}
+		})
+	}
+}
+
+// TestWrite verifies that data written through the client lands in the fake's
+// Objects map under "/<bucket>/<object>".
+func TestWrite(t *testing.T) {
+	testData := []struct {
+		testName string
+		object   string
+		bucket   string
+		data     []byte
+	}{
+		{
+			"NonEmptyWrite",
+			"test-object",
+			"test-bucket",
+			[]byte("data"),
+		},
+		{
+			"EmptyWrite",
+			"test-object",
+			"test-bucket",
+			nil,
+		},
+	}
+	gcs := GCSForTest(t)
+	defer gcs.Close()
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			gcs.Objects = make(map[string][]byte)
+			w := gcs.Client.Bucket(input.bucket).Object(input.object).NewWriter(context.Background())
+			// Check both errors: for storage.Writer, Close is what commits
+			// the upload, so an ignored error here could hide a failed write
+			// and make the assertions below misleading.
+			if _, err := w.Write(input.data); err != nil {
+				t.Fatalf("Write() failed - got err: %v, want: nil", err)
+			}
+			if err := w.Close(); err != nil {
+				t.Fatalf("Close() failed - got err: %v, want: nil", err)
+			}
+			if got, ok := gcs.Objects[fmt.Sprintf("/%s/%s", input.bucket, input.object)]; !ok {
+				t.Errorf("bucket %s, object %s does not exist", input.bucket, input.object)
+			} else if !cmp.Equal(got, input.data, cmpopts.EquateEmpty()) {
+				t.Errorf("bucket %s, object %s has %v; want %v", input.bucket, input.object, got, input.data)
+			}
+		})
+	}
+}
+
+// TestDelete verifies object deletion through the client, then lists the
+// bucket to confirm exactly the expected leftovers remain.
+func TestDelete(t *testing.T) {
+	testData := []struct {
+		testName string
+		objects  []string
+		toDelete []string
+		want     []string
+	}{
+		{
+			"OneObject",
+			[]string{"/bucket/obj1"},
+			[]string{"obj1"},
+			nil,
+		},
+		{
+			"HasLeftovers",
+			[]string{"/bucket/obj1", "/bucket/obj2"},
+			[]string{"obj1"},
+			[]string{"/bucket/obj2"},
+		},
+		{
+			"MultipleObjects",
+			[]string{"/bucket/obj1", "/bucket/obj2", "/bucket/obj3"},
+			[]string{"obj1", "obj2"},
+			[]string{"obj1", "obj2"},
+		},
+	}
+	gcs := GCSForTest(t)
+	defer gcs.Close()
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			// Note: Objects is not reset between subtests; each case's seed
+			// objects are added on top of whatever earlier cases left behind.
+			for _, k := range input.objects {
+				gcs.Objects[k] = []byte("data")
+			}
+			for _, object := range input.toDelete {
+				if err := gcs.Client.Bucket("bucket").Object(object).Delete(context.Background()); err != nil {
+					t.Fatalf("Delete() failed - got err: %v, want: nil", err)
+				}
+			}
+			var got []string
+			it := gcs.Client.Bucket("bucket").Objects(context.Background(), &storage.Query{})
+			for {
+				objAttrs, err := it.Next()
+				if err == iterator.Done {
+					break
+				}
+				if err != nil {
+					t.Fatalf("Iterate failed - got err: %v, want: nil", err)
+				}
+				got = append(got, fmt.Sprintf("/%s/%s", objAttrs.Bucket, objAttrs.Name))
+			}
+			sortStrSlices := cmpopts.SortSlices(func(a, b string) bool { return a < b })
+			if !cmp.Equal(got, input.want, cmpopts.EquateEmpty(), sortStrSlices) {
+				t.Errorf("Tried to delete %v: got %v, want %v", input.toDelete, got, input.want)
+			}
+		})
+	}
+}
+
+// TestDeleteObjectDoesNotExist verifies that deleting a missing object maps
+// the fake's 404 to storage.ErrObjectNotExist.
+func TestDeleteObjectDoesNotExist(t *testing.T) {
+	gcs := GCSForTest(t)
+	defer gcs.Close()
+	if err := gcs.Client.Bucket("bucket").Object("object").Delete(context.Background()); err != storage.ErrObjectNotExist {
+		t.Logf("objects: %v", gcs.Objects)
+		t.Errorf("delete object 'object' in bucket 'bucket' has %s; want %s", err, storage.ErrObjectNotExist)
+	}
+}
diff --git a/src/pkg/fakes/time.go b/src/pkg/fakes/time.go
new file mode 100644
index 0000000..506a616
--- /dev/null
+++ b/src/pkg/fakes/time.go
@@ -0,0 +1,37 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fakes
+
+import "time"
+
+// Time is a fake implementation of the time package. It holds a manually
+// advanced clock so tests can control the passage of time deterministically.
+type Time struct {
+	// current is the fake "now"; advanced only via Sleep.
+	current time.Time
+}
+
+// NewTime gets a Time instance initialized with the given time.
+func NewTime(t time.Time) *Time {
+	return &Time{t}
+}
+
+// Now gets the current time.
+func (t *Time) Now() time.Time {
+	return t.current
+}
+
+// Sleep increments the current time by the given duration.
+// Unlike time.Sleep, it does not block — it only advances the fake clock.
+func (t *Time) Sleep(s time.Duration) {
+	t.current = t.current.Add(s)
+}
diff --git a/src/pkg/fs/BUILD.bazel b/src/pkg/fs/BUILD.bazel
new file mode 100644
index 0000000..33fda78
--- /dev/null
+++ b/src/pkg/fs/BUILD.bazel
@@ -0,0 +1,43 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "fs",
+    srcs = [
+        "build_context.go",
+        "copy.go",
+        "file_system.go",
+        "gzip.go",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//src/pkg/utils",
+    ],
+)
+
+go_test(
+    name = "fs_test",
+    srcs = [
+        "build_context_test.go",
+        "gzip_test.go",
+    ],
+    data = glob(
+        ["testdata/**"],
+        exclude_directories = 0,
+    ),
+    embed = [":fs"],
+)
diff --git a/src/pkg/fs/build_context.go b/src/pkg/fs/build_context.go
new file mode 100644
index 0000000..44e0269
--- /dev/null
+++ b/src/pkg/fs/build_context.go
@@ -0,0 +1,102 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+// tarFile archives the single file src into the tar archive dst, storing it
+// under its base name (the external `tar` binary is invoked with -C to strip
+// the directory).
+func tarFile(src, dst string) error {
+	dirPath := filepath.Dir(src)
+	baseName := filepath.Base(src)
+	cmd := exec.Command("tar", "cf", dst, "-C", dirPath, baseName)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+// tarDir archives the contents of directory root into the tar archive dst.
+// Entries are added relative to root (via -C), so the archive does not embed
+// the root directory's own path.
+func tarDir(root, dst string) error {
+	args := []string{"cf", dst, "-C", root}
+	inputFiles, err := filepath.Glob(filepath.Join(root, "*"))
+	if err != nil {
+		return err
+	}
+	var relInputFiles []string
+	for _, path := range inputFiles {
+		relPath, err := filepath.Rel(root, path)
+		if err != nil {
+			return err
+		}
+		relInputFiles = append(relInputFiles, relPath)
+	}
+	// An empty directory yields no glob matches; fall back to "." so tar
+	// still produces a valid (empty-content) archive instead of erroring.
+	if relInputFiles == nil {
+		relInputFiles = []string{"."}
+	}
+	args = append(args, relInputFiles...)
+	cmd := exec.Command("tar", args...)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+// CreateBuildContextArchive creates a tar archive of the given build context.
+// src may be a directory (archived by contents) or a regular file (archived
+// under its base name). It refuses to overwrite an existing dst.
+func CreateBuildContextArchive(src, dst string) error {
+	// NOTE(review): any Stat error other than not-exist (e.g. permission
+	// denied) is also reported as "dst path already exists" — confirm this
+	// conflation is acceptable.
+	if _, err := os.Stat(dst); !os.IsNotExist(err) {
+		return fmt.Errorf("dst path already exists: %s", dst)
+	}
+	if err := os.MkdirAll(filepath.Dir(dst), 0774); err != nil {
+		return err
+	}
+	info, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	switch {
+	case info.IsDir():
+		return tarDir(src, dst)
+	case info.Mode().IsRegular():
+		return tarFile(src, dst)
+	default:
+		return fmt.Errorf("input path %s is neither a directory nor a regular file", src)
+	}
+}
+
+// ArchiveHasObject determines if the given tar archive contains the given object.
+// The match is an exact comparison against each entry's header name, so
+// directory entries need their trailing slash (e.g. "a/" not "a").
+func ArchiveHasObject(archive string, path string) (bool, error) {
+	reader, err := os.Open(archive)
+	if err != nil {
+		return false, err
+	}
+	defer reader.Close()
+	tarReader := tar.NewReader(reader)
+	for {
+		hdr, err := tarReader.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return false, err
+		}
+		if hdr.Name == path {
+			return true, nil
+		}
+	}
+	return false, nil
+}
diff --git a/src/pkg/fs/build_context_test.go b/src/pkg/fs/build_context_test.go
new file mode 100644
index 0000000..4dde5ce
--- /dev/null
+++ b/src/pkg/fs/build_context_test.go
@@ -0,0 +1,210 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"testing"
+)
+
+// diffDirs recursively compares two directories with the external `diff`
+// tool.
+// NOTE(review): `diff` exits non-zero when the trees differ, so differences
+// come back through the error path (with the diff text logged) and the
+// success-path string is always empty — callers' `len(diff) > 0` checks
+// appear to be unreachable. Confirm this is the intended contract.
+func diffDirs(got, want string) (string, error) {
+	cmd := exec.Command("diff", "-r", "-q", got, want)
+	diff, err := cmd.CombinedOutput()
+	if err != nil {
+		log.Println(string(diff))
+		return "", err
+	}
+	return string(diff), nil
+}
+
+// archiveMatchesPath extracts the tar archive into a temp directory and
+// compares the result against path. For a directory path the comparison is
+// direct; for a regular file the file is staged into its own temp directory
+// so both sides of the diff are directories.
+func archiveMatchesPath(archive, path string) (string, error) {
+	got, err := ioutil.TempDir("", "")
+	if err != nil {
+		return "", err
+	}
+	defer os.RemoveAll(got)
+	cmd := exec.Command("tar", "xvf", archive, "-C", got)
+	if err := cmd.Run(); err != nil {
+		return "", err
+	}
+	info, err := os.Stat(path)
+	if err != nil {
+		return "", err
+	}
+	if info.IsDir() {
+		return diffDirs(got, path)
+	}
+	if info.Mode().IsRegular() {
+		want, err := ioutil.TempDir("", "")
+		if err != nil {
+			return "", err
+		}
+		defer os.RemoveAll(want)
+		cmd := exec.Command("cp", path, want)
+		if err := cmd.Run(); err != nil {
+			return "", err
+		}
+		return diffDirs(got, want)
+	}
+	return "", fmt.Errorf("path %s is not a directory or regular file", path)
+}
+
+// TestCreateBuildContextArchiveEmptyDir verifies that archiving an empty
+// directory produces an archive that extracts back to an empty directory
+// (exercising tarDir's "." fallback).
+func TestCreateBuildContextArchiveEmptyDir(t *testing.T) {
+	outputDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(outputDir)
+	emptyDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(emptyDir)
+	if err := CreateBuildContextArchive(emptyDir, filepath.Join(outputDir, "archive")); err != nil {
+		t.Log("CreateBuildContextArchive(emptyDir, _)")
+		t.Fatal(err)
+	}
+	diff, err := archiveMatchesPath(filepath.Join(outputDir, "archive"), emptyDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(diff) > 0 {
+		t.Errorf("CreateBuildContextArchive(emptyDir, _), diff: %s, want: emptyDir", diff)
+	}
+}
+
+// TestCreateBuildContextArchiveSelfReferential verifies archiving a directory
+// when the archive is written inside that same directory. The archive is then
+// moved out and the original removed before comparing, so the comparison sees
+// the directory as it was meant to be captured.
+func TestCreateBuildContextArchiveSelfReferential(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	testdata := filepath.Join(tmpDir, "test_1")
+	if err := CopyRecursive("testdata/test_1", testdata); err != nil {
+		t.Fatal(err)
+	}
+	if err := CreateBuildContextArchive(testdata, filepath.Join(testdata, "archive")); err != nil {
+		t.Logf("CreateBuildContextArchive(%s, _)", testdata)
+		t.Fatal(err)
+	}
+	archive, err := ioutil.ReadFile(filepath.Join(testdata, "archive"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Remove(filepath.Join(testdata, "archive")); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(filepath.Join(tmpDir, "archive"), archive, 0664); err != nil {
+		t.Fatal(err)
+	}
+	diff, err := archiveMatchesPath(filepath.Join(tmpDir, "archive"), testdata)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(diff) > 0 {
+		t.Errorf("CreateBuildContextArchive(%s, _), diff: %s, want: %s", testdata, diff, testdata)
+	}
+}
+
+// TestCreateBuildContextArchive verifies round-tripping several testdata
+// layouts (flat files, nested dirs, a single regular file) through
+// CreateBuildContextArchive and archiveMatchesPath.
+func TestCreateBuildContextArchive(t *testing.T) {
+	testData := []struct {
+		testName string
+		path     string
+	}{
+		{"RegularFiles", "testdata/test_1"},
+		{"RegFilesAndDirs", "testdata/test_2"},
+		{"RegularFile", "testdata/test_3"},
+	}
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			tmpDir, err := ioutil.TempDir("", "")
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tmpDir)
+			if err := CreateBuildContextArchive(input.path, filepath.Join(tmpDir, "archive")); err != nil {
+				t.Logf("CreateBuildContextArchive(%s, _)", input.path)
+				t.Fatal(err)
+			}
+			diff, err := archiveMatchesPath(filepath.Join(tmpDir, "archive"), input.path)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if len(diff) > 0 {
+				t.Errorf("CreateBuildContextArchive(%s, _), diff: %s, want: %s", input.path, diff, input.path)
+			}
+		})
+	}
+}
+
+// TestArchiveHasObjectEmptyDir verifies that querying an archive built from
+// an empty directory reports the object as absent.
+func TestArchiveHasObjectEmptyDir(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	if err := CreateBuildContextArchive(tmpDir, filepath.Join(tmpDir, "archive")); err != nil {
+		t.Fatal(err)
+	}
+	actual, err := ArchiveHasObject(filepath.Join(tmpDir, "archive"), "a")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual != false {
+		t.Errorf("ArchiveHasObject(emptyArchive, a) = %t, want: false", actual)
+	}
+}
+
+// TestArchiveHasObject verifies exact-name matching of archive entries,
+// including the trailing-slash convention for directories ("a/" matches a
+// directory entry; "a" does not).
+func TestArchiveHasObject(t *testing.T) {
+	testData := []struct {
+		testName string
+		path     string
+		object   string
+		expected bool
+	}{
+		{"DirWithoutFile", "testdata/test_1", "d", false},
+		{"DirWithFile", "testdata/test_1", "c", true},
+		{"DirWithDir", "testdata/test_2", "a/", true},
+		{"DirWithDirInvalid", "testdata/test_2", "a", false},
+		{"DirWithNestedFile", "testdata/test_2", "a/a", true},
+		{"RegularFile", "testdata/test_3", "test_3", true},
+		{"EmptyQuery", "testdata/test_3", "", false},
+	}
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			tmpDir, err := ioutil.TempDir("", "")
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tmpDir)
+			if err := CreateBuildContextArchive(input.path, filepath.Join(tmpDir, "archive")); err != nil {
+				t.Fatal(err)
+			}
+			actual, err := ArchiveHasObject(filepath.Join(tmpDir, "archive"), input.object)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if actual != input.expected {
+				t.Errorf("ArchiveHasObject(%s, %s) = %t, want: %t", input.path, input.object, actual,
+					input.expected)
+			}
+		})
+	}
+}
diff --git a/src/pkg/fs/copy.go b/src/pkg/fs/copy.go
new file mode 100644
index 0000000..bdd26f2
--- /dev/null
+++ b/src/pkg/fs/copy.go
@@ -0,0 +1,28 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"os"
+	"os/exec"
+)
+
+// CopyRecursive calls 'cp -r <src> <dst>'.
+// Semantics follow the external cp tool: if dst is an existing directory,
+// src is copied into it rather than onto it.
+func CopyRecursive(src string, dst string) error {
+	cmd := exec.Command("cp", "-r", src, dst)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
diff --git a/src/pkg/fs/file_system.go b/src/pkg/fs/file_system.go
new file mode 100644
index 0000000..e9139b7
--- /dev/null
+++ b/src/pkg/fs/file_system.go
@@ -0,0 +1,83 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fs exports functionality related to all of the cos-customizer
+// state stored on the file system.
+package fs
+
+import (
+	"os"
+	"path/filepath"
+)
+
+const (
+	// ScratchDir is used for temp files and the like.
+	ScratchDir = "/tmp"
+
+	// daisyBin is the location of the Daisy binary.
+	daisyBin = "/daisy"
+
+	// Directory whose contents do not persist across build steps.
+	// This directory is used for building files into the container image.
+	volatileDir = "/data"
+
+	// Persistent files. These paths need to be created before they are used.
+	// Changes to these files persist across build steps.
+	// They are relative paths, joined under the persistent dir by DefaultFiles.
+	userBuildContextArchive = "user_build_context.tar"
+	sourceImageConfig       = "config/source_image"
+	buildConfig             = "config/build"
+	provConfig              = "config/provisioner"
+
+	// Volatile files. These paths exist in the volatileDir at container start time.
+	// Changes to these files do not persist across build steps.
+	daisyWorkflow = "build_image.wf.json"
+)
+
+// Files stores important file paths.
+type Files struct {
+	// persistentDir is the root of all persistent state; unexported, it is
+	// only retained so CleanupAllPersistent can remove everything at once.
+	persistentDir string
+	// UserBuildContextArchive points to the tar archive of the user build context.
+	// The user build context contains user provided scripts and files that users can use during preloading.
+	UserBuildContextArchive string
+	// SourceImageConfig points to the source image configuration.
+	SourceImageConfig string
+	// BuildConfig points to the image build process configuration.
+	BuildConfig string
+	// ProvConfig points to the provisioner configuration that runs on the preload
+	// VM.
+	ProvConfig string
+	// DaisyWorkflow points to the Daisy workflow to template and use for preloading.
+	DaisyWorkflow string
+	// DaisyBin points to the Daisy binary.
+	DaisyBin string
+}
+
+// DefaultFiles builds a Files struct with a default file layout.
+// The given persistentDir is resolved relative to $HOME; volatile paths are
+// rooted at volatileDir and the Daisy binary at its fixed location.
+func DefaultFiles(persistentDir string) *Files {
+	persistentDir = filepath.Join(os.Getenv("HOME"), persistentDir)
+	return &Files{
+		persistentDir:           persistentDir,
+		UserBuildContextArchive: filepath.Join(persistentDir, userBuildContextArchive),
+		SourceImageConfig:       filepath.Join(persistentDir, sourceImageConfig),
+		BuildConfig:             filepath.Join(persistentDir, buildConfig),
+		ProvConfig:              filepath.Join(persistentDir, provConfig),
+		DaisyWorkflow:           filepath.Join(volatileDir, daisyWorkflow),
+		DaisyBin:                daisyBin,
+	}
+}
+
+// CleanupAllPersistent deletes everything in the persistent directory.
+func (f *Files) CleanupAllPersistent() error {
+	return os.RemoveAll(f.persistentDir)
+}
diff --git a/src/pkg/fs/gzip.go b/src/pkg/fs/gzip.go
new file mode 100644
index 0000000..537db5c
--- /dev/null
+++ b/src/pkg/fs/gzip.go
@@ -0,0 +1,45 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"compress/gzip"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+)
+
+// GzipFile compresses the file at the input path and saves the result at the
+// output path.
+// err is a named return so the deferred CheckClose calls can surface close
+// errors; defers run LIFO, so the gzip writer is closed (flushing compressed
+// data) before the underlying output file.
+func GzipFile(inPath, outPath string) (err error) {
+	in, err := os.Open(inPath)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(in, fmt.Sprintf("error closing %q", inPath), &err)
+	out, err := os.Create(outPath)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(out, fmt.Sprintf("error closing %q", outPath), &err)
+	gzOut := gzip.NewWriter(out)
+	defer utils.CheckClose(gzOut, fmt.Sprintf("error closing gzip writer for %q", outPath), &err)
+	if _, err := io.Copy(gzOut, in); err != nil {
+		return fmt.Errorf("error gzipping %q to %q: %v", inPath, outPath, err)
+	}
+	return nil
+}
diff --git a/src/pkg/fs/gzip_test.go b/src/pkg/fs/gzip_test.go
new file mode 100644
index 0000000..b0820a6
--- /dev/null
+++ b/src/pkg/fs/gzip_test.go
@@ -0,0 +1,68 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+func TestGzipFile(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "test-gzip-file-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	data, err := os.Create(filepath.Join(tmpDir, "data"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer data.Close()
+	if _, err := io.Copy(data, bytes.NewReader([]byte("aaaaa"))); err != nil {
+		t.Fatal(err)
+	}
+	if err := data.Sync(); err != nil {
+		t.Fatal(err)
+	}
+	outPath := filepath.Join(tmpDir, "out")
+	signature := fmt.Sprintf("GzipFile(%q, %q)", data.Name(), outPath)
+	if err := GzipFile(data.Name(), outPath); err != nil {
+		t.Fatalf("%s = %v; want nil", signature, err)
+	}
+	outFile, err := os.Open(outPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer outFile.Close()
+	uncompressed, err := gzip.NewReader(outFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer uncompressed.Close()
+	gotBytes, err := ioutil.ReadAll(uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+	got := string(gotBytes)
+	if want := "aaaaa"; got != want {
+		t.Errorf("%s = %q; want %q", signature, got, want)
+	}
+}
diff --git a/src/pkg/fs/testdata/test_1/a b/src/pkg/fs/testdata/test_1/a
new file mode 100644
index 0000000..7898192
--- /dev/null
+++ b/src/pkg/fs/testdata/test_1/a
@@ -0,0 +1 @@
+a
diff --git a/src/pkg/fs/testdata/test_1/b b/src/pkg/fs/testdata/test_1/b
new file mode 100644
index 0000000..6178079
--- /dev/null
+++ b/src/pkg/fs/testdata/test_1/b
@@ -0,0 +1 @@
+b
diff --git a/src/pkg/fs/testdata/test_1/c b/src/pkg/fs/testdata/test_1/c
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/pkg/fs/testdata/test_1/c
diff --git a/src/pkg/fs/testdata/test_2/a/a b/src/pkg/fs/testdata/test_2/a/a
new file mode 100644
index 0000000..7898192
--- /dev/null
+++ b/src/pkg/fs/testdata/test_2/a/a
@@ -0,0 +1 @@
+a
diff --git a/src/pkg/fs/testdata/test_2/b b/src/pkg/fs/testdata/test_2/b
new file mode 100644
index 0000000..6178079
--- /dev/null
+++ b/src/pkg/fs/testdata/test_2/b
@@ -0,0 +1 @@
+b
diff --git a/src/pkg/fs/testdata/test_3 b/src/pkg/fs/testdata/test_3
new file mode 100644
index 0000000..d279263
--- /dev/null
+++ b/src/pkg/fs/testdata/test_3
@@ -0,0 +1 @@
+test_3
diff --git a/src/pkg/gce/BUILD.bazel b/src/pkg/gce/BUILD.bazel
new file mode 100644
index 0000000..b9cefec
--- /dev/null
+++ b/src/pkg/gce/BUILD.bazel
@@ -0,0 +1,38 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "gce",
+    srcs = ["gce.go"],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/gce",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//src/pkg/config",
+        "@org_golang_google_api//compute/v1:compute",
+        "@org_golang_google_api//googleapi",
+    ],
+)
+
+go_test(
+    name = "gce_test",
+    srcs = ["gce_test.go"],
+    embed = [":gce"],
+    deps = [
+        "//src/pkg/config",
+        "//src/pkg/fakes",
+        "@org_golang_google_api//compute/v1:compute",
+    ],
+)
diff --git a/src/pkg/gce/gce.go b/src/pkg/gce/gce.go
new file mode 100644
index 0000000..08214f9
--- /dev/null
+++ b/src/pkg/gce/gce.go
@@ -0,0 +1,219 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gce contains high-level functionality for manipulating GCE resources.
+package gce
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+
+	compute "google.golang.org/api/compute/v1"
+	"google.golang.org/api/googleapi"
+)
+
+const (
+	defaultOperationTimeout = time.Duration(600) * time.Second
+	defaultRetryInterval    = time.Duration(5) * time.Second
+)
+
+type timePkg struct {
+	Now   func() time.Time
+	Sleep func(time.Duration)
+}
+
+var (
+	// ErrTimeout indicates that an operation timed out.
+	ErrTimeout = errors.New("operation timed out")
+
+	// ErrImageNotFound indicates that a GCE image could not be found
+	ErrImageNotFound = errors.New("image not found")
+
+	realTime = &timePkg{time.Now, time.Sleep}
+
+	// This should match <prefix>-<channel>-<milestone>-<buildnumber>.
+	// This is the format of images in cos-cloud.
+	// Example: cos-dev-72-11172-0-0
+	imageNameRegex = regexp.MustCompile("[a-z0-9-]+-[a-z]+-([0-9]+)-([0-9]+-[0-9]+-[0-9]+)")
+)
+
+// buildDeprecationStatus constructs a *compute.DeprecationStatus struct used in a Deprecate GCE API
+// call. It fills in the structure with the "DEPRECATED" state, the given replacement, and the given
+// delete time, if provided.
+func buildDeprecationStatus(replacement string, deleteTime time.Time) *compute.DeprecationStatus {
+	status := &compute.DeprecationStatus{State: "DEPRECATED", Replacement: replacement}
+	if !deleteTime.IsZero() {
+		status.Deleted = deleteTime.Format(time.RFC3339)
+	}
+	return status
+}
+
+func waitForOp(svc *compute.Service, project string, op *compute.Operation, deadline time.Time, t *timePkg) error {
+	if op.Error != nil {
+		return fmt.Errorf("error with operation. name: %s error: %v", op.Name, op.Error)
+	}
+	if op.Status == "DONE" {
+		return nil
+	}
+	for {
+		t.Sleep(defaultRetryInterval)
+		op, err := svc.GlobalOperations.Get(project, op.Name).Do()
+		if err != nil {
+			return err
+		}
+		if op.Error != nil {
+			return fmt.Errorf("error with operation. name: %s error: %v", op.Name, op.Error)
+		}
+		if op.Status == "DONE" {
+			return nil
+		}
+		if t.Now().After(deadline) {
+			return ErrTimeout
+		}
+	}
+}
+
+func waitForOps(svc *compute.Service, project string, ops []*compute.Operation, t *timePkg) error {
+	deadline := t.Now().Add(defaultOperationTimeout)
+	for _, op := range ops {
+		if err := waitForOp(svc, project, op, deadline, t); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func deprecateInFamily(ctx context.Context, svc *compute.Service, newImage *config.Image, ttl int, t *timePkg) error {
+	if newImage.Family == "" {
+		return fmt.Errorf("input image does not have a family for deprecateInFamily. image: %v", newImage)
+	}
+	filter := fmt.Sprintf("(family = %s) (name != %s)", newImage.Family, newImage.Name)
+	images := []*compute.Image{}
+	err := svc.Images.List(newImage.Project).Filter(filter).Pages(ctx, func(imageList *compute.ImageList) error {
+		images = append(images, imageList.Items...)
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	ops := []*compute.Operation{}
+	for _, image := range images {
+		if image.Deprecated != nil {
+			continue
+		}
+		deleteTime := time.Time{}
+		if ttl > 0 {
+			deleteTime = t.Now().Add(time.Duration(ttl) * time.Second)
+		}
+		status := buildDeprecationStatus(newImage.URL(), deleteTime)
+		op, err := svc.Images.Deprecate(newImage.Project, image.Name, status).Do()
+		if err != nil {
+			return err
+		}
+		ops = append(ops, op)
+	}
+	return waitForOps(svc, newImage.Project, ops, t)
+}
+
+// DeprecateInFamily deprecates all of the old images in an image family.
+// Allows for assigning TTLs (in seconds) to deprecated images.
+func DeprecateInFamily(ctx context.Context, svc *compute.Service, newImage *config.Image, ttl int) error {
+	return deprecateInFamily(ctx, svc, newImage, ttl, realTime)
+}
+
+// ImageExists checks to see if the given image exists in the given project.
+func ImageExists(svc *compute.Service, project, name string) (bool, error) {
+	if _, err := svc.Images.Get(project, name).Do(); err != nil {
+		if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+type decodedImageName struct {
+	name        string
+	milestone   int
+	buildNumber string
+}
+
+// newDecodedImageName decodes an image name from cos-cloud and returns
+// image information encoded in that image name.
+func newDecodedImageName(name string) (*decodedImageName, error) {
+	match := imageNameRegex.FindStringSubmatch(name)
+	if match == nil {
+		return nil, fmt.Errorf("could not parse name %s", name)
+	}
+	milestone, err := strconv.Atoi(match[1])
+	if err != nil {
+		return nil, fmt.Errorf("could not convert %s to a milestone: %s", match[1], err)
+	}
+	return &decodedImageName{name, milestone, match[2]}, nil
+}
+
+func imageCompare(first, second *decodedImageName) bool {
+	if first.milestone != second.milestone {
+		return first.milestone < second.milestone
+	}
+	for i := 0; i < 3; i++ {
+		// Because of how decodedImageNames are created (see newDecodedImageName),
+		// these atoi operations are guaranteed to work.
+		firstNum, _ := strconv.Atoi(strings.Split(first.buildNumber, "-")[i])
+		secondNum, _ := strconv.Atoi(strings.Split(second.buildNumber, "-")[i])
+		if firstNum != secondNum {
+			return firstNum < secondNum
+		}
+	}
+	return false
+}
+
+// ResolveMilestone gets the name of the latest COS image on the given milestone.
+// This resolution is done by looking at the image names in cos-cloud.
+func ResolveMilestone(ctx context.Context, svc *compute.Service, milestone int) (string, error) {
+	var images []*compute.Image
+	err := svc.Images.List("cos-cloud").Pages(ctx, func(imageList *compute.ImageList) error {
+		images = append(images, imageList.Items...)
+		return nil
+	})
+	if err != nil {
+		return "", err
+	}
+	var inMilestone []*decodedImageName
+	for _, image := range images {
+		decoded, err := newDecodedImageName(image.Name)
+		if err != nil {
+			continue
+		}
+		if decoded.milestone == milestone {
+			inMilestone = append(inMilestone, decoded)
+		}
+	}
+	if len(inMilestone) == 0 {
+		return "", ErrImageNotFound
+	}
+	sort.Slice(inMilestone, func(i, j int) bool {
+		return imageCompare(inMilestone[i], inMilestone[j])
+	})
+	return inMilestone[len(inMilestone)-1].name, nil
+}
diff --git a/src/pkg/gce/gce_test.go b/src/pkg/gce/gce_test.go
new file mode 100644
index 0000000..621dd12
--- /dev/null
+++ b/src/pkg/gce/gce_test.go
@@ -0,0 +1,310 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gce
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fakes"
+
+	compute "google.golang.org/api/compute/v1"
+)
+
+func fakeTime(current time.Time) *timePkg {
+	fake := fakes.NewTime(current)
+	return &timePkg{fake.Now, fake.Sleep}
+}
+
+func TestDeprecateInFamilyNoFamily(t *testing.T) {
+	ctx := context.Background()
+	newImage := &config.Image{&compute.Image{Name: "test-name"}, "test-project"}
+	if err := DeprecateInFamily(ctx, nil, newImage, 0); err == nil {
+		t.Error("DeprecateInFamily: did not fail when input image had no family")
+	}
+}
+
+func TestDeprecateInFamilyNoItems(t *testing.T) {
+	fakeGCE, client := fakes.GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	ctx := context.Background()
+	newImage := &config.Image{&compute.Image{Name: "test-name", Family: "test-family"}, "test-project"}
+	if err := DeprecateInFamily(ctx, client, newImage, 0); err != nil {
+		t.Logf("DeprecateInFamily(_, _, %v, 0)", newImage)
+		t.Fatal(err)
+	}
+	if len(fakeGCE.Deprecated) != 0 {
+		t.Errorf("an image was deprecated. Map: %v", fakeGCE.Deprecated)
+	}
+}
+
+func TestDeprecateInFamilyIgnoreDeprecated(t *testing.T) {
+	fakeGCE, client := fakes.GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	ctx := context.Background()
+	fakeGCE.Images.Items = []*compute.Image{{Deprecated: &compute.DeprecationStatus{}}}
+	newImage := &config.Image{&compute.Image{Name: "test-name", Family: "test-family"}, "test-project"}
+	if err := DeprecateInFamily(ctx, client, newImage, 0); err != nil {
+		t.Logf("fakeGCE.Images.Items: %v", fakeGCE.Images.Items)
+		t.Logf("DeprecateInFamily(_, _, %v, 0)", newImage)
+		t.Fatal(err)
+	}
+	if len(fakeGCE.Deprecated) != 0 {
+		t.Errorf("an image was deprecated. Map: %v", fakeGCE.Deprecated)
+	}
+}
+
+func TestDeprecateInFamily(t *testing.T) {
+	testDeprecateInFamilyData := []struct {
+		testName string
+		images   []*compute.Image
+		ops      []*compute.Operation
+	}{
+		{
+			"DoneInstantly",
+			[]*compute.Image{
+				{Name: "dep-1"},
+			},
+			[]*compute.Operation{
+				{Name: "op-1", Status: "DONE"},
+			},
+		},
+		{
+			"RunningOpBeforeDone",
+			[]*compute.Image{
+				{Name: "dep-1"},
+			},
+			[]*compute.Operation{
+				{Name: "op-1", Status: "RUNNING"},
+				{Name: "op-2", Status: "DONE"},
+			},
+		},
+		{
+			"PendingOpBeforeDone",
+			[]*compute.Image{
+				{Name: "dep-1"},
+			},
+			[]*compute.Operation{
+				{Name: "op-1", Status: "PENDING"},
+				{Name: "op-2", Status: "DONE"},
+			},
+		},
+		{
+			"TwoRunningBeforeDone",
+			[]*compute.Image{
+				{Name: "dep-1"},
+			},
+			[]*compute.Operation{
+				{Name: "op-1", Status: "RUNNING"},
+				{Name: "op-2", Status: "RUNNING"},
+				{Name: "op-3", Status: "DONE"},
+			},
+		},
+		{
+			"TwoImages",
+			[]*compute.Image{
+				{Name: "dep-1"},
+				{Name: "dep-2"},
+			},
+			[]*compute.Operation{
+				{Name: "op-1", Status: "DONE"},
+				{Name: "op-2", Status: "DONE"},
+			},
+		},
+		{
+			"TwoImagesRunningBeforeDone",
+			[]*compute.Image{
+				{Name: "dep-1"},
+				{Name: "dep-2"},
+			},
+			[]*compute.Operation{
+				{Name: "op-1", Status: "RUNNING"},
+				{Name: "op-1", Status: "DONE"},
+				{Name: "op-2", Status: "RUNNING"},
+				{Name: "op-2", Status: "DONE"},
+			},
+		},
+	}
+	fakeGCE, client := fakes.GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	for _, input := range testDeprecateInFamilyData {
+		t.Run(input.testName, func(t *testing.T) {
+			date := time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)
+			ctx := context.Background()
+			fakeGCE.Images.Items = input.images
+			fakeGCE.Operations = input.ops
+			newImage := &config.Image{&compute.Image{Name: "test-name", Family: "test-family"}, "test-project"}
+			if err := deprecateInFamily(ctx, client, newImage, 0, fakeTime(date)); err != nil {
+				t.Logf("input: %v", input)
+				t.Logf("deprecateInFamily(_, _, %v, 0, _)", newImage)
+				t.Fatal(err)
+			}
+			if len(fakeGCE.Deprecated) != len(input.images) {
+				t.Fatalf("deprecated: %v actual: %d expected: %d", fakeGCE.Deprecated, len(fakeGCE.Deprecated),
+					len(input.images))
+			}
+			for _, image := range input.images {
+				status := fakeGCE.Deprecated[image.Name]
+				if status.State != "DEPRECATED" {
+					t.Errorf("image: %v actual: %s expected: DEPRECATED", image, status.State)
+				}
+				if want := newImage.URL(); status.Replacement != want {
+					t.Errorf("image: %v actual: %s expected: %s", image, status.Replacement, want)
+				}
+			}
+		})
+	}
+}
+
+func TestDeprecateInFamilyTimeout(t *testing.T) {
+	date := time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)
+	fakeGCE, client := fakes.GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	ctx := context.Background()
+	fakeGCE.Images.Items = []*compute.Image{{Name: "dep-1"}}
+	fakeGCE.Operations = nil
+	for i := 0; i < 1000; i++ {
+		fakeGCE.Operations = append(fakeGCE.Operations, &compute.Operation{Name: "", Status: "RUNNING"})
+	}
+	newImage := &config.Image{&compute.Image{Name: "test-name", Family: "test-family"}, "test-project"}
+	if err := deprecateInFamily(ctx, client, newImage, 0, fakeTime(date)); err != ErrTimeout {
+		t.Errorf("operation did not timeout. err: %s", err)
+	}
+}
+
+func TestDeprecateInFamilyTTL(t *testing.T) {
+	date := time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)
+	fakeGCE, client := fakes.GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	ctx := context.Background()
+	fakeGCE.Images.Items = []*compute.Image{{Name: "dep-1"}}
+	fakeGCE.Operations = []*compute.Operation{{Name: "op-1", Status: "DONE"}}
+	newImage := &config.Image{&compute.Image{Name: "test-name", Family: "test-family"}, "test-project"}
+	if err := deprecateInFamily(ctx, client, newImage, 30, fakeTime(date)); err != nil {
+		t.Logf("fakeGCE.Images.Items: %v", fakeGCE.Images.Items)
+		t.Logf("fakeGCE.Operations: %v", fakeGCE.Operations)
+		t.Logf("deprecateInFamily(_, _, %v, 30, _)", newImage)
+		t.Fatal(err)
+	}
+	status := fakeGCE.Deprecated["dep-1"]
+	expected := date.Add(time.Duration(30) * time.Second).Format(time.RFC3339)
+	if status.Deleted != expected {
+		t.Errorf("actual: %s expected: %s", status.Deleted, expected)
+	}
+}
+
+func TestImageExists(t *testing.T) {
+	testImageExistsData := []struct {
+		testName string
+		images   []*compute.Image
+		name     string
+		expected bool
+	}{
+		{
+			"DoesntExist",
+			[]*compute.Image{{Name: "im-1"}},
+			"im-2",
+			false,
+		},
+		{
+			"Exists",
+			[]*compute.Image{{Name: "im-1"}},
+			"im-1",
+			true,
+		},
+	}
+	fakeGCE, client := fakes.GCEForTest(t, "test-project")
+	defer fakeGCE.Close()
+	for _, input := range testImageExistsData {
+		t.Run(input.testName, func(t *testing.T) {
+			fakeGCE.Images.Items = input.images
+			actual, err := ImageExists(client, "test-project", input.name)
+			if err != nil {
+				t.Logf("ImageExists(_, _, %s): %t", input.name, actual)
+				t.Logf("fakeGCE.Images.Items: %v", fakeGCE.Images.Items)
+				t.Fatal(err)
+			}
+			if actual != input.expected {
+				t.Logf("ImageExists(_, _, %s): %t", input.name, actual)
+				t.Logf("fakeGCE.Images.Items: %v", fakeGCE.Images.Items)
+				t.Errorf("actual: %v expected: %v", actual, input.expected)
+			}
+		})
+	}
+}
+
+func buildImageList(names []string) []*compute.Image {
+	var images []*compute.Image
+	for _, name := range names {
+		images = append(images, &compute.Image{Name: name})
+	}
+	return images
+}
+
+func TestResolveMilestone(t *testing.T) {
+	testResolveMilestoneData := []struct {
+		testName      string
+		names         []string
+		milestone     int
+		expected      string
+		expectedError error
+	}{
+		{
+			"OneCandidate",
+			[]string{"cos-dev-68-10718-0-0", "cos-beta-67-10525-0-0"},
+			68,
+			"cos-dev-68-10718-0-0",
+			nil,
+		},
+		{
+			"TwoCandidates",
+			[]string{"cos-dev-68-10718-11-0", "cos-dev-68-10718-0-0", "bad-image"},
+			68,
+			"cos-dev-68-10718-11-0",
+			nil,
+		},
+		{
+			"NoImages",
+			nil,
+			68,
+			"",
+			ErrImageNotFound,
+		},
+		{
+			"NoCandidates",
+			[]string{"bad-image"},
+			68,
+			"",
+			ErrImageNotFound,
+		},
+	}
+	fakeGCE, client := fakes.GCEForTest(t, "cos-cloud")
+	defer fakeGCE.Close()
+	for _, input := range testResolveMilestoneData {
+		t.Run(input.testName, func(t *testing.T) {
+			ctx := context.Background()
+			fakeGCE.Images.Items = buildImageList(input.names)
+			actual, err := ResolveMilestone(ctx, client, input.milestone)
+			if err != input.expectedError {
+				t.Errorf("ResolveMilestone(_, _, %v) = %s, want: %s", input.milestone, err, input.expectedError)
+			}
+			if actual != input.expected {
+				t.Errorf("ResolveMilestone(_, _, %v) = %s, want: %s", input.milestone, actual, input.expected)
+			}
+		})
+	}
+}
diff --git a/src/pkg/preloader/BUILD.bazel b/src/pkg/preloader/BUILD.bazel
new file mode 100644
index 0000000..83b1cd1
--- /dev/null
+++ b/src/pkg/preloader/BUILD.bazel
@@ -0,0 +1,75 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+genrule(
+    name = "cidata",
+    srcs = [
+        "//:src/data/startup.yaml",
+        "//src/cmd/provisioner",
+        "//src/cmd/metadata_watcher",
+    ],
+    outs = ["cidata.img"],
+    cmd = "\
+$(location @dosfstools//:mkfs.fat) -n CIDATA -S 512 -s 8 -C $@ 65536;\
+touch meta-data;\
+$(location @mtools//:mcopy) -i $@ $(location //:src/data/startup.yaml) ::/user-data;\
+$(location @mtools//:mcopy) -i $@ meta-data ::/meta-data;\
+$(location @mtools//:mcopy) -i $@ $(location //src/cmd/provisioner:provisioner) ::/provisioner;\
+$(location @mtools//:mcopy) -i $@ $(location //src/cmd/metadata_watcher:metadata_watcher) ::/metadata_watcher;",
+    tools = [
+        "@dosfstools//:mkfs.fat",
+        "@mtools//:mcopy",
+    ],
+)
+
+go_library(
+    name = "preloader",
+    srcs = [
+        "gcs.go",
+        "preload.go",
+    ],
+    embedsrcs = [
+        ":cidata",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/preloader",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//src/pkg/config",
+        "//src/pkg/fs",
+        "//src/pkg/provisioner",
+        "//src/pkg/utils",
+        "@com_google_cloud_go_storage//:storage",
+        "@org_golang_google_api//iterator",
+    ],
+)
+
+go_test(
+    name = "preloader_test",
+    srcs = [
+        "gcs_test.go",
+        "preload_test.go",
+    ],
+    embed = [":preloader"],
+    deps = [
+        "//src/pkg/config",
+        "//src/pkg/fakes",
+        "//src/pkg/fs",
+        "//src/pkg/provisioner",
+        "@com_github_google_go_cmp//cmp",
+        "@com_github_google_go_cmp//cmp/cmpopts",
+        "@org_golang_google_api//compute/v1:compute",
+    ],
+)
diff --git a/src/pkg/preloader/gcs.go b/src/pkg/preloader/gcs.go
new file mode 100644
index 0000000..4742cee
--- /dev/null
+++ b/src/pkg/preloader/gcs.go
@@ -0,0 +1,90 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package preloader
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"path/filepath"
+
+	"cloud.google.com/go/storage"
+	"google.golang.org/api/iterator"
+)
+
+const (
+	managedDir = "cos-customizer"
+)
+
+// gcsManager provides a simple key/value interface to a GCS directory, where keys are object paths
+// and values are object data.
+type gcsManager struct {
+	gcsClient         *storage.Client
+	gcsBucket, gcsDir string
+}
+
+func (m *gcsManager) managedDir() string {
+	return filepath.Join(m.gcsDir, managedDir)
+}
+
+// managedDirURL gets the GCS URL of the directory being managed by the GCSManager.
+func (m *gcsManager) managedDirURL() string {
+	return fmt.Sprintf("gs://%s/%s", m.gcsBucket, m.managedDir())
+}
+
+func (m *gcsManager) objectPath(name string) string {
+	return filepath.Join(m.managedDir(), name)
+}
+
+// store stores the given data in the given file. The file should be given as a path
+// relative to the managed directory.
+func (m *gcsManager) store(ctx context.Context, r io.Reader, name string) error {
+	object := m.objectPath(name)
+	w := m.gcsClient.Bucket(m.gcsBucket).Object(object).NewWriter(ctx)
+	if _, err := io.Copy(w, r); err != nil {
+		return err
+	}
+	return w.Close()
+}
+
+// url gets the GCS URL of the given file. The file should be given as a path
+// relative to the managed directory.
+func (m *gcsManager) url(name string) string {
+	object := m.objectPath(name)
+	return fmt.Sprintf("gs://%s/%s", m.gcsBucket, object)
+}
+
+// cleanup cleans up the managed directory.
+func (m *gcsManager) cleanup(ctx context.Context) error {
+	q := &storage.Query{Prefix: m.managedDir()}
+	it := m.gcsClient.Bucket(m.gcsBucket).Objects(ctx, q)
+	var objects []string
+	for {
+		objAttrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		objects = append(objects, objAttrs.Name)
+	}
+	for _, object := range objects {
+		if err := m.gcsClient.Bucket(m.gcsBucket).Object(object).Delete(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/src/pkg/preloader/gcs_test.go b/src/pkg/preloader/gcs_test.go
new file mode 100644
index 0000000..028e4d0
--- /dev/null
+++ b/src/pkg/preloader/gcs_test.go
@@ -0,0 +1,89 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package preloader
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fakes"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+)
+
+func TestStore(t *testing.T) {
+	var testData = []struct {
+		testName string
+		data     []byte
+		object   string
+	}{
+		{"Empty", nil, "test-object"},
+		{"NonEmpty", []byte("test-data"), "test-object"},
+	}
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	storageManager := gcsManager{gcs.Client, "bucket", "dir"}
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			gcs.Objects = make(map[string][]byte)
+			storageManager.store(context.Background(), bytes.NewReader(input.data), input.object)
+			got, ok := gcs.Objects[fmt.Sprintf("/bucket/dir/cos-customizer/%s", input.object)]
+			if !ok {
+				t.Fatalf("gcsManager{}.store(_, %s, %s): could not find object", string(input.data), input.object)
+			}
+			if !cmp.Equal(got, input.data, cmpopts.EquateEmpty()) {
+				t.Errorf("gcsManager{}.store(_, %s, %s) = %s, want %s", string(input.data), input.object, got, string(input.data))
+			}
+
+		})
+	}
+}
+
+func TestManagedDirURL(t *testing.T) {
+	storageManager := gcsManager{nil, "bucket", "dir"}
+	if got := storageManager.managedDirURL(); got != "gs://bucket/dir/cos-customizer" {
+		t.Errorf("gcsManager{}.managedDirURL() = %s, want gs://bucket/dir/cos-customizer", got)
+	}
+}
+
+func TestURL(t *testing.T) {
+	storageManager := gcsManager{nil, "bucket", "dir"}
+	if got := storageManager.url("object"); got != "gs://bucket/dir/cos-customizer/object" {
+		t.Errorf("gcsManager{}.url(object) = %s, want gs://bucket/dir/cos-customizer/object", got)
+	}
+}
+
+func TestCleanup(t *testing.T) {
+	ctx := context.Background()
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	storageManager := gcsManager{gcs.Client, "bucket", "dir"}
+	storageManager.store(ctx, bytes.NewReader(nil), "obj1")
+	storageManager.store(ctx, bytes.NewReader(nil), "obj2")
+	gcs.Objects["/bucket/obj3"] = nil
+	storageManager.cleanup(ctx)
+	if _, ok := gcs.Objects["/bucket/dir/cos-customizer/obj1"]; ok {
+		t.Errorf("storageManager.cleanup(_): object /bucket/dir/cos-customizer/obj1 not deleted")
+	}
+	if _, ok := gcs.Objects["/bucket/dir/cos-customizer/obj2"]; ok {
+		t.Errorf("storageManager.cleanup(_): object /bucket/dir/cos-customizer/obj2 not deleted")
+	}
+	if _, ok := gcs.Objects["/bucket/obj3"]; !ok {
+		t.Errorf("storageManager.cleanup(_): object /bucket/obj3 was deleted")
+	}
+}
diff --git a/src/pkg/preloader/preload.go b/src/pkg/preloader/preload.go
new file mode 100644
index 0000000..57e6a1c
--- /dev/null
+++ b/src/pkg/preloader/preload.go
@@ -0,0 +1,326 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package preloader contains functionality for preloading a COS image from
+// provided configuration.
+package preloader
+
+import (
+	"context"
+	_ "embed"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"text/template"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+
+	"cloud.google.com/go/storage"
+)
+
+//go:embed cidata.img
+var ciDataImg []byte
+
+// storeInGCS stores the given files in GCS using the given gcsManager.
+// Files to store are provided in a map where each key is a file on the local
+// file system and each value is the relative path in GCS at which to store the
+// corresponding key. The provided relative paths in GCS must be unique.
+func storeInGCS(ctx context.Context, gcs *gcsManager, files map[string]string) error {
+	gcsRelPaths := make(map[string]bool)
+	for _, gcsRelPath := range files {
+		if gcsRelPaths[gcsRelPath] {
+			return fmt.Errorf("storeInGCS: collision in relative path %q", gcsRelPath)
+		}
+		gcsRelPaths[gcsRelPath] = true
+	}
+	for file, gcsRelPath := range files {
+		r, err := os.Open(file)
+		if err != nil {
+			return fmt.Errorf("error opening %q: %v", file, err)
+		}
+		defer r.Close()
+		if err := gcs.store(ctx, r, gcsRelPath); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func needDiskResize(provConfig *provisioner.Config, buildSpec *config.Build) bool {
+	// We need to resize the disk during provisioning if:
+	// 1. The requested disk size is larger than default, and
+	// 2. Partitions need to be relocated, i.e. we are enlarging the OEM partition
+	// or reclaiming /dev/sda3
+	return buildSpec.DiskSize > 10 && (provConfig.BootDisk.OEMSize != "" || provConfig.BootDisk.ReclaimSDA3)
+}
+
+// writeDaisyWorkflow templates the given Daisy workflow and writes the result to a temporary file.
+// The given workflow should be the one at //data/build_image.wf.json.
+func writeDaisyWorkflow(inputWorkflow string, outputImage *config.Image, buildSpec *config.Build, provConfig *provisioner.Config) (string, error) {
+	tmplContents, err := ioutil.ReadFile(inputWorkflow)
+	if err != nil {
+		return "", err
+	}
+	labelsJSON, err := json.Marshal(outputImage.Labels)
+	if err != nil {
+		return "", err
+	}
+	acceleratorsJSON, err := json.Marshal([]map[string]interface{}{})
+	if err != nil {
+		return "", err
+	}
+	if buildSpec.GPUType != "" {
+		acceleratorType := fmt.Sprintf("projects/%s/zones/%s/acceleratorTypes/%s",
+			buildSpec.Project, buildSpec.Zone, buildSpec.GPUType)
+		acceleratorsJSON, err = json.Marshal([]map[string]interface{}{
+			{"acceleratorType": acceleratorType, "acceleratorCount": 1}})
+		if err != nil {
+			return "", err
+		}
+	}
+	licensesJSON, err := json.Marshal(outputImage.Licenses)
+	if err != nil {
+		return "", err
+	}
+
+	// template content for the step resize-disk.
+	// If the oem-size is set, or need to reclaim sda3 (with disk-size-gb set),
+	// create the disk with the default size, and then resize the disk.
+	// Otherwise, a placeholder is used. The disk is created with provided disk-size-gb or
+	// the default size. And the disk will not be resized.
+	// The placeholder is needed because ResizeDisk API requires a larger size than the original disk.
+	var resizeDiskJSON string
+	var waitResizeJSON string
+	if needDiskResize(provConfig, buildSpec) {
+		// actual disk size
+		resizeDiskJSON = fmt.Sprintf(`"ResizeDisks": [{"Name": "boot-disk","SizeGb": "%d"}]`, buildSpec.DiskSize)
+		waitResizeJSON = `
+      "WaitForInstancesSignal": [
+        {
+          "Name": "preload-vm",
+          "Interval": "10s",
+          "SerialOutput": {
+            "Port": 3,
+            "SuccessMatch": "waiting for the boot disk size to change",
+            "FailureMatch": "BuildFailed:"
+          }
+        }
+      ]`
+	} else {
+		// placeholder
+		resizeDiskJSON = `"WaitForInstancesSignal": [{"Name": "preload-vm","Interval": "10s","SerialOutput": {"Port": 3,"SuccessMatch": "BuildStatus:"}}]`
+		waitResizeJSON = `"WaitForInstancesSignal": [{"Name": "preload-vm","Interval": "10s","SerialOutput": {"Port": 3,"SuccessMatch": "BuildStatus:"}}]`
+	}
+	tmpl, err := template.New("workflow").Parse(string(tmplContents))
+	if err != nil {
+		return "", err
+	}
+	w, err := ioutil.TempFile(fs.ScratchDir, "daisy-")
+	if err != nil {
+		return "", err
+	}
+	if err := tmpl.Execute(w, struct {
+		Labels       string
+		Accelerators string
+		Licenses     string
+		ResizeDisks  string
+		WaitResize   string
+	}{
+		string(labelsJSON),
+		string(acceleratorsJSON),
+		string(licensesJSON),
+		resizeDiskJSON,
+		waitResizeJSON,
+	}); err != nil {
+		w.Close()
+		os.Remove(w.Name())
+		return "", err
+	}
+	if err := w.Close(); err != nil {
+		os.Remove(w.Name())
+		return "", err
+	}
+	return w.Name(), nil
+}
+
+func writeCIDataImage(files *fs.Files) (path string, err error) {
+	img, err := ioutil.TempFile(fs.ScratchDir, "cidata-")
+	if err != nil {
+		return "", err
+	}
+	_, writeErr := img.Write(ciDataImg)
+	closeErr := img.Close()
+	if writeErr != nil {
+		return "", writeErr
+	}
+	if closeErr != nil {
+		return "", closeErr
+	}
+	if err := utils.RunCommand([]string{"mcopy", "-i", img.Name(), files.ProvConfig, "::/config.json"}, "", nil); err != nil {
+		return "", err
+	}
+	out, err := ioutil.TempFile(fs.ScratchDir, "cidata-tar-")
+	if err != nil {
+		return "", err
+	}
+	if err := out.Close(); err != nil {
+		return "", err
+	}
+	// tar with the "z" option requires a shell to be installed in the container.
+	// To avoid the shell dependency, gzip the tar ourselves.
+	if err := utils.RunCommand([]string{
+		"tar",
+		"cf", out.Name(),
+		"--transform", fmt.Sprintf("s|%s|disk.raw|g", strings.TrimLeft(img.Name(), "/")),
+		img.Name(),
+	}, "", nil); err != nil {
+		return "", err
+	}
+	if err := fs.GzipFile(out.Name(), out.Name()+".gz"); err != nil {
+		return "", err
+	}
+	return out.Name() + ".gz", err
+}
+
+func updateProvConfig(provConfig *provisioner.Config, buildSpec *config.Build, buildContexts map[string]string, gcs *gcsManager, files *fs.Files) error {
+	if needDiskResize(provConfig, buildSpec) {
+		provConfig.BootDisk.WaitForDiskResize = true
+	}
+	provConfig.BuildContexts = buildContexts
+	for idx := range provConfig.Steps {
+		if provConfig.Steps[idx].Type == "InstallGPU" {
+			var step provisioner.InstallGPUStep
+			if err := json.Unmarshal(provConfig.Steps[idx].Args, &step); err != nil {
+				return err
+			}
+			if step.GCSDepsPrefix != "" {
+				step.GCSDepsPrefix = gcs.managedDirURL() + "/gcs_files"
+			}
+			buf, err := json.Marshal(&step)
+			if err != nil {
+				return err
+			}
+			provConfig.Steps[idx].Args = json.RawMessage(buf)
+		}
+	}
+	buf, err := json.Marshal(provConfig)
+	if err != nil {
+		return err
+	}
+	log.Printf("Using provisioner config: %s", string(buf))
+	return config.SaveConfigToPath(files.ProvConfig, provConfig)
+}
+
+func sanitize(output *config.Image) {
+	var licenses []string
+	for _, l := range output.Licenses {
+		if l != "" {
+			licenses = append(licenses, strings.TrimPrefix(l, "https://www.googleapis.com/compute/v1/"))
+		}
+	}
+	output.Licenses = licenses
+}
+
+// daisyArgs computes the parameters to the cos-customizer Daisy workflow (//data/build_image.wf.json)
+// and uploads dependencies to GCS.
+func daisyArgs(ctx context.Context, gcs *gcsManager, files *fs.Files, input *config.Image, output *config.Image, buildSpec *config.Build, provConfig *provisioner.Config) ([]string, error) {
+	sanitize(output)
+	buildContexts := map[string]string{
+		"user": gcs.managedDirURL() + "/" + filepath.Base(files.UserBuildContextArchive),
+	}
+	toUpload := map[string]string{
+		files.UserBuildContextArchive: filepath.Base(files.UserBuildContextArchive),
+	}
+	for _, gcsFile := range buildSpec.GCSFiles {
+		toUpload[gcsFile] = path.Join("gcs_files", filepath.Base(gcsFile))
+	}
+	if err := storeInGCS(ctx, gcs, toUpload); err != nil {
+		return nil, err
+	}
+	daisyWorkflow, err := writeDaisyWorkflow(files.DaisyWorkflow, output, buildSpec, provConfig)
+	if err != nil {
+		return nil, err
+	}
+	if err := updateProvConfig(provConfig, buildSpec, buildContexts, gcs, files); err != nil {
+		return nil, err
+	}
+	ciDataFile, err := writeCIDataImage(files)
+	if err != nil {
+		return nil, err
+	}
+	var args []string
+	if provConfig.BootDisk.OEMSize == "" && buildSpec.DiskSize > 10 && !provConfig.BootDisk.ReclaimSDA3 {
+		// If the oem-size is set, or need to reclaim sda3,
+		// create the disk with default size,
+		// and then resize the disk in the template step "resize-disk".
+		// Otherwise, create the disk with the provided disk-size-gb.
+		args = append(args, "-var:disk_size_gb", strconv.Itoa(buildSpec.DiskSize))
+	}
+	if output.Family != "" {
+		args = append(args, "-var:output_image_family", output.Family)
+	}
+	hostMaintenance := "MIGRATE"
+	if buildSpec.GPUType != "" {
+		hostMaintenance = "TERMINATE"
+	}
+	args = append(
+		args,
+		"-var:source_image",
+		input.URL(),
+		"-var:output_image_name",
+		output.Name,
+		"-var:output_image_project",
+		output.Project,
+		"-var:cidata_img",
+		ciDataFile,
+		"-var:host_maintenance",
+		hostMaintenance,
+		"-gcs_path",
+		gcs.managedDirURL(),
+		"-project",
+		buildSpec.Project,
+		"-zone",
+		buildSpec.Zone,
+		"-default_timeout",
+		buildSpec.Timeout,
+		"-disable_gcs_logging",
+		daisyWorkflow,
+	)
+	return args, nil
+}
+
+// BuildImage builds a customized image using Daisy.
+func BuildImage(ctx context.Context, gcsClient *storage.Client, files *fs.Files, input, output *config.Image,
+	buildSpec *config.Build, provConfig *provisioner.Config) error {
+	gcs := &gcsManager{gcsClient, buildSpec.GCSBucket, buildSpec.GCSDir}
+	defer gcs.cleanup(ctx)
+	args, err := daisyArgs(ctx, gcs, files, input, output, buildSpec, provConfig)
+	if err != nil {
+		return err
+	}
+	cmd := exec.Command(files.DaisyBin, args...)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stdout
+	return cmd.Run()
+}
diff --git a/src/pkg/preloader/preload_test.go b/src/pkg/preloader/preload_test.go
new file mode 100644
index 0000000..db9178a
--- /dev/null
+++ b/src/pkg/preloader/preload_test.go
@@ -0,0 +1,460 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package preloader
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/config"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fakes"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fs"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner"
+
+	"github.com/google/go-cmp/cmp"
+	compute "google.golang.org/api/compute/v1"
+)
+
+func createTempFile(dir string) (string, error) {
+	file, err := ioutil.TempFile(dir, "")
+	if err != nil {
+		return "", err
+	}
+	if err := file.Close(); err != nil {
+		return "", err
+	}
+	return file.Name(), nil
+}
+
+func setupFiles() (string, *fs.Files, error) {
+	tmpDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return "", nil, err
+	}
+	files := &fs.Files{}
+	files.UserBuildContextArchive, err = createTempFile(tmpDir)
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	files.ProvConfig, err = createTempFile(tmpDir)
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	files.DaisyWorkflow, err = createTempFile(tmpDir)
+	if err != nil {
+		os.RemoveAll(tmpDir)
+		return "", nil, err
+	}
+	return tmpDir, files, nil
+}
+
+func TestDaisyArgsGCSUpload(t *testing.T) {
+	tmpDir, files, err := setupFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	if err := ioutil.WriteFile(filepath.Join(tmpDir, "test-file"), []byte("test-file"), 0644); err != nil {
+		t.Fatal(err)
+	}
+	var testData = []struct {
+		testName string
+		file     string
+		object   string
+		contents []byte
+	}{
+		{
+			testName: "UserBuildContextArchive",
+			file:     files.UserBuildContextArchive,
+			object:   filepath.Base(files.UserBuildContextArchive),
+			contents: []byte("abc"),
+		},
+		{
+			testName: "ArbitraryFileUpload",
+			file:     filepath.Join(tmpDir, "test-file"),
+			object:   "gcs_files/test-file",
+			contents: []byte("test-file"),
+		},
+	}
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			gcs.Objects = make(map[string][]byte)
+			gm := &gcsManager{gcsClient: gcs.Client, gcsBucket: "bucket"}
+			if err := ioutil.WriteFile(input.file, input.contents, 0744); err != nil {
+				t.Fatal(err)
+			}
+			buildSpec := &config.Build{
+				GCSFiles: []string{filepath.Join(tmpDir, "test-file")},
+			}
+			if _, err := daisyArgs(context.Background(), gm, files, config.NewImage("", ""), config.NewImage("", ""), buildSpec, &provisioner.Config{}); err != nil {
+				t.Fatalf("daisyArgs: %v", err)
+			}
+			got, ok := gcs.Objects[fmt.Sprintf("/bucket/cos-customizer/%s", input.object)]
+			if !ok {
+				t.Fatalf("daisyArgs: write /bucket/cos-customizer/%s: not found", input.object)
+			}
+			if !cmp.Equal(got, input.contents) {
+				t.Errorf("daisyArgs: write /bucket/cos-customizer/%s: got %s, want %s", input.object, string(got), string(input.contents))
+			}
+		})
+	}
+}
+
+func getDaisyVarValue(variable string, args []string) (string, bool) {
+	for i, arg := range args {
+		if arg == fmt.Sprintf("-var:%s", variable) {
+			return args[i+1], true
+		}
+	}
+	return "", false
+}
+
+func TestDaisyArgsWorkflowTemplate(t *testing.T) {
+	var testData = []struct {
+		testName    string
+		outputImage *config.Image
+		buildConfig *config.Build
+		workflow    []byte
+		want        []byte
+	}{
+		{
+			testName:    "Empty",
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket"},
+			workflow:    []byte("{{.Licenses}} {{.Labels}} {{.Accelerators}}"),
+			want:        []byte("null {} []"),
+		},
+		{
+			testName:    "OneLicense",
+			outputImage: &config.Image{Image: &compute.Image{Licenses: []string{"my-license"}}, Project: ""},
+			buildConfig: &config.Build{GCSBucket: "bucket"},
+			workflow:    []byte("{{.Licenses}}"),
+			want:        []byte("[\"my-license\"]"),
+		},
+		{
+			testName:    "TwoLicenses",
+			outputImage: &config.Image{Image: &compute.Image{Licenses: []string{"license-1", "license-2"}}, Project: ""},
+			buildConfig: &config.Build{GCSBucket: "bucket"},
+			workflow:    []byte("{{.Licenses}}"),
+			want:        []byte("[\"license-1\",\"license-2\"]"),
+		},
+		{
+			testName:    "EmptyStringLicense",
+			outputImage: &config.Image{Image: &compute.Image{Licenses: []string{""}}, Project: ""},
+			buildConfig: &config.Build{GCSBucket: "bucket"},
+			workflow:    []byte("{{.Licenses}}"),
+			want:        []byte("null"),
+		},
+		{
+			testName:    "OneEmptyLicense",
+			outputImage: &config.Image{Image: &compute.Image{Licenses: []string{"license-1", ""}}, Project: ""},
+			buildConfig: &config.Build{GCSBucket: "bucket"},
+			workflow:    []byte("{{.Licenses}}"),
+			want:        []byte("[\"license-1\"]"),
+		},
+		{
+			testName:    "URLLicense",
+			outputImage: &config.Image{Image: &compute.Image{Licenses: []string{"https://www.googleapis.com/compute/v1/projects/my-proj/global/licenses/my-license"}}, Project: ""},
+			buildConfig: &config.Build{GCSBucket: "bucket"},
+			workflow:    []byte("{{.Licenses}}"),
+			want:        []byte("[\"projects/my-proj/global/licenses/my-license\"]"),
+		},
+		{
+			testName:    "Labels",
+			outputImage: &config.Image{Image: &compute.Image{Labels: map[string]string{"key": "value"}}, Project: ""},
+			buildConfig: &config.Build{GCSBucket: "bucket"},
+			workflow:    []byte("{{.Labels}}"),
+			want:        []byte("{\"key\":\"value\"}"),
+		},
+		{
+			testName:    "Accelerators",
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GPUType: "nvidia-tesla-k80", Project: "p", Zone: "z"},
+			workflow:    []byte("{{.Accelerators}}"),
+			want:        []byte("[{\"acceleratorCount\":1,\"acceleratorType\":\"projects/p/zones/z/acceleratorTypes/nvidia-tesla-k80\"}]"),
+		},
+	}
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			tmpDir, files, err := setupFiles()
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tmpDir)
+			gcs.Objects = make(map[string][]byte)
+			gm := &gcsManager{gcs.Client, input.buildConfig.GCSBucket, input.buildConfig.GCSDir}
+			if err := ioutil.WriteFile(files.DaisyWorkflow, input.workflow, 0744); err != nil {
+				t.Fatal(err)
+			}
+			args, err := daisyArgs(context.Background(), gm, files, config.NewImage("", ""), input.outputImage, input.buildConfig, &provisioner.Config{})
+			if err != nil {
+				t.Fatalf("daisyArgs: %v", err)
+			}
+			got, err := ioutil.ReadFile(args[len(args)-1])
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !cmp.Equal(got, input.want) {
+				t.Errorf("daisyArgs: template Daisy: got %s, want %s", string(got), string(input.want))
+			}
+		})
+	}
+}
+
+func isSubSlice(a, b []string) bool {
+	switch {
+	case a == nil || len(a) == 0:
+		return true
+	case b == nil || len(a) > len(b):
+		return false
+	}
+	for i := len(a); i <= len(b); i++ {
+		subslice := b[i-len(a) : i]
+		if cmp.Equal(a, subslice) {
+			return true
+		}
+	}
+	return false
+}
+
+func mustMarshalJSON(t *testing.T, v interface{}) []byte {
+	t.Helper()
+	data, err := json.Marshal(v)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return data
+}
+
+func TestDaisyArgs(t *testing.T) {
+	tmpDir, files, err := setupFiles()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	var testData = []struct {
+		testName          string
+		inputImage        *config.Image
+		outputImage       *config.Image
+		buildConfig       *config.Build
+		provConfig        *provisioner.Config
+		want              []string
+		wantBuildContexts map[string]string
+		wantSteps         []provisioner.StepConfig
+		wantBootDisk      *provisioner.BootDiskConfig
+	}{
+		{
+			testName:    "GPU",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GPUType: "nvidia-tesla-k80", GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-var:host_maintenance", "TERMINATE"},
+		},
+		{
+			testName:    "NoGPU",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-var:host_maintenance", "MIGRATE"},
+		},
+		{
+			testName:    "SourceImage",
+			inputImage:  config.NewImage("im", "proj"),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-var:source_image", "projects/proj/global/images/im"},
+		},
+		{
+			testName:    "OutputImageName",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("im", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-var:output_image_name", "im"},
+		},
+		{
+			testName:    "OutputImageProject",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", "proj"),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-var:output_image_project", "proj"},
+		},
+		{
+			testName:    "OutputImageFamily",
+			inputImage:  config.NewImage("", ""),
+			outputImage: &config.Image{Image: &compute.Image{Family: "family"}, Project: ""},
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-var:output_image_family", "family"},
+		},
+		{
+			testName:    "CIData",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-var:cidata_img"},
+		},
+		{
+			testName:    "DiskSize",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{DiskSize: 50, GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-var:disk_size_gb", "50"},
+		},
+		{
+			testName:    "GCSPath",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-gcs_path", "gs://bucket/dir/cos-customizer"},
+		},
+		{
+			testName:    "Project",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{Project: "proj", GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-project", "proj"},
+		},
+		{
+			testName:    "Zone",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{Zone: "zone", GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-zone", "zone"},
+		},
+		{
+			testName:    "Timeout",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{Timeout: "60m", GCSBucket: "bucket", GCSDir: "dir"},
+			want:        []string{"-default_timeout", "60m"},
+		},
+		{
+			testName:    "ProvisionerConfigBuildContexts",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir"},
+			provConfig:  &provisioner.Config{},
+			wantBuildContexts: map[string]string{
+				"user": fmt.Sprintf("gs://bucket/dir/cos-customizer/%s", filepath.Base(files.UserBuildContextArchive)),
+			},
+		},
+		{
+			testName:    "ProvisionerConfigSteps",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir"},
+			provConfig: &provisioner.Config{
+				Steps: []provisioner.StepConfig{
+					{
+						Type: "InstallGPU",
+						Args: mustMarshalJSON(t, &provisioner.InstallGPUStep{
+							GCSDepsPrefix: "gcs_deps",
+						}),
+					},
+				},
+			},
+			wantSteps: []provisioner.StepConfig{
+				{
+					Type: "InstallGPU",
+					Args: mustMarshalJSON(t, &provisioner.InstallGPUStep{
+						GCSDepsPrefix: "gs://bucket/dir/cos-customizer/gcs_files",
+					}),
+				},
+			},
+		},
+		{
+			testName:    "ProvisionerConfigBootDiskReclaimSDA3",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir", DiskSize: 20},
+			provConfig: &provisioner.Config{
+				BootDisk: provisioner.BootDiskConfig{
+					ReclaimSDA3: true,
+				},
+			},
+			wantBootDisk: &provisioner.BootDiskConfig{
+				ReclaimSDA3:       true,
+				WaitForDiskResize: true,
+			},
+		},
+		{
+			testName:    "ProvisionerConfigBootDiskOEMSize",
+			inputImage:  config.NewImage("", ""),
+			outputImage: config.NewImage("", ""),
+			buildConfig: &config.Build{GCSBucket: "bucket", GCSDir: "dir", DiskSize: 20},
+			provConfig: &provisioner.Config{
+				BootDisk: provisioner.BootDiskConfig{
+					OEMSize: "5G",
+				},
+			},
+			wantBootDisk: &provisioner.BootDiskConfig{
+				OEMSize:           "5G",
+				WaitForDiskResize: true,
+			},
+		},
+	}
+	gcs := fakes.GCSForTest(t)
+	defer gcs.Close()
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			gcs.Objects = make(map[string][]byte)
+			gm := &gcsManager{gcs.Client, input.buildConfig.GCSBucket, input.buildConfig.GCSDir}
+			if input.provConfig == nil {
+				input.provConfig = &provisioner.Config{}
+			}
+			funcCall := fmt.Sprintf("daisyArgs(_, _, _, %v, %v, %v, %v)", input.inputImage, input.outputImage, input.buildConfig, input.provConfig)
+			got, err := daisyArgs(context.Background(), gm, files, input.inputImage, input.outputImage, input.buildConfig, input.provConfig)
+			if err != nil {
+				t.Fatalf("daisyArgs: %v", err)
+			}
+			if !isSubSlice(input.want, got) {
+				t.Errorf("%s = %v; want subslice %v)", funcCall, got, input.want)
+			}
+			var provConfig provisioner.Config
+			data, err := ioutil.ReadFile(files.ProvConfig)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if err := json.Unmarshal(data, &provConfig); err != nil {
+				t.Fatal(err)
+			}
+			if input.wantBuildContexts != nil {
+				if diff := cmp.Diff(provConfig.BuildContexts, input.wantBuildContexts); diff != "" {
+					t.Errorf("%s: build contexts mismatch: diff (-got, +want): %s", funcCall, diff)
+				}
+			}
+			if input.wantSteps != nil {
+				if diff := cmp.Diff(provConfig.Steps, input.wantSteps); diff != "" {
+					t.Errorf("%s: steps mismatch: diff (-got, +want): %s", funcCall, diff)
+				}
+			}
+			if input.wantBootDisk != nil {
+				if diff := cmp.Diff(&provConfig.BootDisk, input.wantBootDisk); diff != "" {
+					t.Errorf("%s: steps mismatch: diff (-got, +want): %s", funcCall, diff)
+				}
+			}
+		})
+	}
+}
diff --git a/src/pkg/provisioner/BUILD.bazel b/src/pkg/provisioner/BUILD.bazel
new file mode 100644
index 0000000..cdcbee1
--- /dev/null
+++ b/src/pkg/provisioner/BUILD.bazel
@@ -0,0 +1,82 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@io_bazel_rules_go//extras:embed_data.bzl", "go_embed_data")
+
+# Our goal is for this program to be embedded into this Go package. Go embed
+# only allows files in the same package directory to be embedded. So we need to
+# use a "no-op" genrule to place this binary in the same directory as the
+# package source.
+genrule(
+    name = "handle_disk_layout.bin",
+    srcs = ["//src/cmd/handle_disk_layout:handle_disk_layout_bin"],
+    outs = ["_handle_disk_layout.bin"],
+    cmd = "cp $< $@",
+)
+
+genrule(
+    name = "veritysetup.img",
+    srcs = ["//:veritysetup.tar"],
+    outs = ["_veritysetup.img"],
+    cmd = "cp $< $@",
+)
+
+genrule(
+    name = "docker_credential_gcr",
+    srcs = ["@com_github_googlecloudplatform_docker_credential_gcr//:docker-credential-gcr"],
+    outs = ["docker-credential-gcr"],
+    cmd = "cp $< $@",
+)
+
+go_library(
+    name = "provisioner",
+    srcs = [
+        "config.go",
+        "disable_auto_update_step.go",
+        "disk_layout.go",
+        "gpu_setup_script.go",
+        "install_gpu_step.go",
+        "provisioner.go",
+        "run_script_step.go",
+        "seal_oem_step.go",
+        "state.go",
+        "systemd.go",
+    ],
+    embedsrcs = [
+        ":handle_disk_layout.bin",
+        ":veritysetup.img",
+        ":docker_credential_gcr",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/provisioner",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//src/pkg/tools",
+        "//src/pkg/tools/partutil",
+        "//src/pkg/utils",
+        "@com_google_cloud_go_storage//:storage",
+        "@org_golang_x_sys//unix",
+    ],
+)
+
+go_test(
+    name = "provisioner_test",
+    srcs = ["provisioner_test.go"],
+    data = glob(["testdata/**"]),
+    embed = [":provisioner"],
+    deps = [
+        "//src/pkg/fakes",
+        "@org_golang_x_sys//unix",
+    ],
+)
diff --git a/src/pkg/provisioner/config.go b/src/pkg/provisioner/config.go
new file mode 100644
index 0000000..fb15da6
--- /dev/null
+++ b/src/pkg/provisioner/config.go
@@ -0,0 +1,104 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type StepConfig struct {
+	Type string
+	Args json.RawMessage
+}
+
+type BootDiskConfig struct {
+	OEMSize           string
+	OEMFSSize4K       uint64
+	ReclaimSDA3       bool
+	WaitForDiskResize bool
+}
+
+// Config defines a provisioning flow.
+type Config struct {
+	// BuildContexts identifies the build contexts that should be used during
+	// provisioning. A build context means the same thing here as it does
+	// elsewhere in cos-customizer. The keys are build context identifiers, and
+	// the values are addresses to fetch the build contexts from. Currently, only
+	// gs:// addresses are supported.
+	BuildContexts map[string]string
+	// BootDisk defines how the boot disk should be configured.
+	BootDisk BootDiskConfig
+	// Steps are provisioning behaviors that can be run.
+	// The supported provisioning behaviors are:
+	//
+	// Type: RunScript
+	// Args:
+	// - BuildContext: the name of the build context to run the script in
+	// - Path: the path to the script in the build context
+	// - Env: Environment variables to pass to the script, in the format
+	//   A=B,C=D
+	//
+	// Type: InstallGPU
+	// Args:
+	// - NvidiaDriverVersion: The nvidia driver version to install. Can also be
+	//   the name of an nvidia installer .run file. If a .run file is provided and
+	//   a GCSDepsPrefix is provided, the .run file will be fetched from the
+	//   GCSDepsPrefix location.
+	// - NvidiaDriverMD5Sum: An optional md5 hash to use to verify the downloaded nvidia
+	//   installer.
+	// - NvidiaInstallDirHost: An absolute path specifying where nvidia drivers
+	//   should be installed. Defaults to /var/lib/nvidia.
+	// - NvidiaInstallerContainer: The cos-gpu-installer container image to use
+	//   for installing nvidia drivers.
+	// - GCSDepsPrefix: An optional gs:// URI that will be used as a prefix
+	//   for downloading cos-gpu-installer dependencies.
+	//
+	// Type: DisableAutoUpdate
+	// Args: This step takes no arguments.
+	//
+	// Type: SealOEM
+	// Args: This step takes no arguments.
+	Steps []StepConfig
+}
+
+type step interface {
+	run(*state) error
+}
+
+func parseStep(stepType string, stepArgs json.RawMessage) (step, error) {
+	switch stepType {
+	case "RunScript":
+		var s step
+		s = &RunScriptStep{}
+		if err := json.Unmarshal(stepArgs, s); err != nil {
+			return nil, err
+		}
+		return s, nil
+	case "InstallGPU":
+		var s step
+		s = &InstallGPUStep{}
+		if err := json.Unmarshal(stepArgs, s); err != nil {
+			return nil, err
+		}
+		return s, nil
+	case "DisableAutoUpdate":
+		return &DisableAutoUpdateStep{}, nil
+	case "SealOEM":
+		return &SealOEMStep{}, nil
+	default:
+		return nil, fmt.Errorf("unknown step type: %q", stepType)
+	}
+}
diff --git a/src/pkg/provisioner/disable_auto_update_step.go b/src/pkg/provisioner/disable_auto_update_step.go
new file mode 100644
index 0000000..e622e37
--- /dev/null
+++ b/src/pkg/provisioner/disable_auto_update_step.go
@@ -0,0 +1,32 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+import (
+	"log"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools"
+)
+
+type DisableAutoUpdateStep struct{}
+
+func (s *DisableAutoUpdateStep) run(runState *state) error {
+	log.Println("Disabling auto updates")
+	if err := tools.DisableSystemdService("update-engine.service"); err != nil {
+		return err
+	}
+	log.Println("Done disabling auto updates")
+	return nil
+}
diff --git a/src/pkg/provisioner/disk_layout.go b/src/pkg/provisioner/disk_layout.go
new file mode 100644
index 0000000..16ef556
--- /dev/null
+++ b/src/pkg/provisioner/disk_layout.go
@@ -0,0 +1,305 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+import (
+	_ "embed"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+)
+
+//go:embed _handle_disk_layout.bin
+var handleDiskLayoutBin []byte
+
+func switchRoot(deps Deps, runState *state) (err error) {
+	if !runState.data.Config.BootDisk.ReclaimSDA3 {
+		log.Println("ReclaimSDA3 is not set, not switching root device")
+		return nil
+	}
+	sda3Device := filepath.Join(deps.RootDir, "dev", "sda3")
+	sda5Device := filepath.Join(deps.RootDir, "dev", "sda5")
+	rootDev, err := exec.Command(deps.RootdevCmd, "-s").Output()
+	if err != nil {
+		if exitErr, ok := err.(*exec.ExitError); ok {
+			return fmt.Errorf("error running rootdev: %v: stderr = %q", exitErr, string(exitErr.Stderr))
+		}
+		return fmt.Errorf("error running rootdev: %v", err)
+	}
+	if strings.TrimSpace(string(rootDev)) == sda5Device {
+		log.Println("Current root device is /dev/sda5, not switching root device")
+		return nil
+	}
+	log.Println("Need to switch root device")
+	log.Println("Copying sda3 to sda5...")
+	in, err := os.Open(sda3Device)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(in, "error closing /dev/sda3", &err)
+	out, err := os.Create(sda5Device)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(out, "error closing /dev/sda5", &err)
+	if _, err := io.Copy(out, in); err != nil {
+		return fmt.Errorf("error copying from sda3 to sda5: %v", err)
+	}
+	log.Println("Setting GPT priority...")
+	device := filepath.Join(deps.RootDir, "dev", "sda")
+	if err := utils.RunCommand([]string{deps.CgptCmd, "prioritize", "-P", "5", "-i", "4", device}, "", nil); err != nil {
+		return err
+	}
+	log.Println("Reboot required to switch root device")
+	return ErrRebootRequired
+}
+
+func shrinkSDA3(deps Deps, runState *state) error {
+	if !runState.data.Config.BootDisk.ReclaimSDA3 {
+		log.Println("ReclaimSDA3 is not set, not shrinking sda3")
+		return nil
+	}
+	device := filepath.Join(deps.RootDir, "dev", "sda")
+	minimal, err := partutil.IsPartitionMinimal(device, 3)
+	if err != nil {
+		return fmt.Errorf("error checking /dev/sda3 size: %v", err)
+	}
+	if minimal {
+		log.Println("/dev/sda3 is minimally sized, not shrinking sda3")
+		return nil
+	}
+	log.Println("ReclaimSDA3 is set, and /dev/sda3 is not minimal; now shrinking sda3")
+	if _, err := partutil.MinimizePartition(device, 3); err != nil {
+		return fmt.Errorf("error minimizing /dev/sda3: %v", err)
+	}
+	log.Println("Reboot required to reload partition table changes")
+	return ErrRebootRequired
+}
+
+func setupOnShutdownUnit(deps Deps, runState *state) (err error) {
+	if err := mountFunc("", filepath.Join(deps.RootDir, "tmp"), "", unix.MS_REMOUNT|unix.MS_NOSUID|unix.MS_NODEV, ""); err != nil {
+		return fmt.Errorf("error remounting /tmp as exec: %v", err)
+	}
+	if err := ioutil.WriteFile(filepath.Join(deps.RootDir, "tmp", "handle_disk_layout.bin"), handleDiskLayoutBin, 0744); err != nil {
+		return err
+	}
+	data := fmt.Sprintf(`[Unit]
+Description=Run after everything unmounted
+DefaultDependencies=false
+Conflicts=shutdown.target
+Before=mnt-stateful_partition.mount usr-share-oem.mount
+After=tmp.mount
+
+[Service]
+Type=oneshot
+RemainAfterExit=true
+ExecStart=/bin/true
+ExecStop=/bin/bash -c '/tmp/handle_disk_layout.bin /dev/sda 1 8 "%s" "%t" 2>&1 | sed "s/^/BuildStatus: /"'
+TimeoutStopSec=600
+StandardOutput=tty
+StandardError=tty
+TTYPath=/dev/ttyS2
+`, runState.data.Config.BootDisk.OEMSize, runState.data.Config.BootDisk.ReclaimSDA3)
+	if err := ioutil.WriteFile(filepath.Join(deps.RootDir, "etc/systemd/system/last-run.service"), []byte(data), 0664); err != nil {
+		return err
+	}
+	systemd := systemdClient{systemctl: deps.SystemctlCmd}
+	if err := systemd.start("last-run.service", []string{"--no-block"}); err != nil {
+		return err
+	}
+	// journald needs to be stopped in order for the stateful partition to be
+	// unmounted at shutdown. We need the stateful partition to be unmounted so
+	// that disk repartitioning can occur.
+	if err := systemd.stopJournald(deps.RootDir); err != nil {
+		return err
+	}
+	return nil
+}
+
+func calcSDA3End(device string) (uint64, error) {
+	sda3Start, err := partutil.ReadPartitionStart(device, 3)
+	if err != nil {
+		return 0, err
+	}
+	sda3Size, err := partutil.ReadPartitionSize(device, 3)
+	if err != nil {
+		return 0, err
+	}
+	sda3End := sda3Start + sda3Size - 1
+	return sda3End, nil
+}
+
+func waitForDiskResize(deps Deps, runState *state) error {
+	if !runState.data.Config.BootDisk.WaitForDiskResize {
+		log.Println("WaitForDiskResize is not set, not waiting for a boot disk resize")
+		return nil
+	}
+	if runState.data.DiskResizeComplete {
+		log.Println("Already finished waiting for disk resize, not waiting again")
+		return nil
+	}
+	startSize, err := ioutil.ReadFile(filepath.Join(deps.RootDir, "sys/class/block/sda/size"))
+	if err != nil {
+		return err
+	}
+	log.Println("WaitForDiskResize is set; waiting for the boot disk size to change. Timeout is 3 minutes")
+	start := time.Now()
+	end := start.Add(3 * time.Minute)
+	for time.Now().Before(end) {
+		curSize, err := ioutil.ReadFile(filepath.Join(deps.RootDir, "sys/class/block/sda/size"))
+		if err != nil {
+			return err
+		}
+		if string(curSize) != string(startSize) {
+			log.Printf("Boot disk size has changed: start %q, end %q", strings.TrimSpace(string(startSize)), strings.TrimSpace(string(curSize)))
+			runState.data.DiskResizeComplete = true
+			return runState.write()
+		}
+		time.Sleep(time.Second)
+	}
+	return errors.New("timed out waiting for disk resize")
+}
+
+func relocatePartitions(deps Deps, runState *state) error {
+	if !runState.data.Config.BootDisk.ReclaimSDA3 && runState.data.Config.BootDisk.OEMSize == "" {
+		log.Println("ReclaimSDA3 is not set, OEM resize not requested, not relocating partitions")
+		return nil
+	}
+	device := filepath.Join(deps.RootDir, "dev", "sda")
+	if runState.data.Config.BootDisk.OEMSize != "" {
+		// Check if OEM partition is after sda3; if so, then we're done
+		oemStart, err := partutil.ReadPartitionStart(device, 8)
+		if err != nil {
+			return err
+		}
+		sda3End, err := calcSDA3End(device)
+		if err != nil {
+			return err
+		}
+		if oemStart > sda3End {
+			log.Println("OEM resize requested, OEM appears to be relocated after sda3. Partition relocation is complete")
+			return nil
+		}
+	} else {
+		// Check two things:
+		// 1. sda3 is minimal
+		// 2. Stateful partition is located immediately after sda3
+		//
+		// If both are true, we are done.
+		minimal, err := partutil.IsPartitionMinimal(device, 3)
+		if err != nil {
+			return err
+		}
+		statefulStart, err := partutil.ReadPartitionStart(device, 1)
+		if err != nil {
+			return err
+		}
+		sda3Start, err := partutil.ReadPartitionStart(device, 3)
+		// The stateful partition is relocated 4096 sectors after the start of sda3.
+		// See src/pkg/tools/handle_disk_layout.go for details.
+		if minimal && statefulStart == sda3Start+4096 {
+			log.Println("ReclaimSDA3 is set, sda3 appears to have been reclaimed. Partition relocation is complete")
+			return nil
+		}
+	}
+	// Partition relocation must be done. Prepare for disk relocation to happen on
+	// the next reboot
+	log.Println("Partition relocation is required. Preparing for partition relocation to occur on the next reboot")
+	if err := setupOnShutdownUnit(deps, runState); err != nil {
+		return err
+	}
+	log.Println("Reboot required to relocate partitions")
+	return ErrRebootRequired
+}
+
+func resizeOEMFileSystem(deps Deps, runState *state) error {
+	if !runState.data.Config.BootDisk.ReclaimSDA3 && runState.data.Config.BootDisk.OEMSize == "" {
+		log.Println("ReclaimSDA3 is not set, OEM resize not requested, partition relocation did not occur, FS resize unnecessary")
+		return nil
+	}
+	// Check if OEM partition is after sda3; if so, then relocation occurred and
+	// we need to resize the file system.
+	device := filepath.Join(deps.RootDir, "dev", "sda")
+	sda3End, err := calcSDA3End(device)
+	if err != nil {
+		return err
+	}
+	oemStart, err := partutil.ReadPartitionStart(device, 8)
+	if err != nil {
+		return err
+	}
+	if oemStart < sda3End {
+		log.Println("OEM partition is before sda3; relocation did not occur, FS resize unnecessary")
+		return nil
+	}
+	log.Println("Partition relocation appears to have occurred, resizing the OEM file system")
+	systemd := systemdClient{systemctl: deps.SystemctlCmd}
+	if err := systemd.stop("usr-share-oem.mount"); err != nil {
+		return err
+	}
+	sda8 := filepath.Join(deps.RootDir, "dev", "sda8")
+	if err := utils.RunCommand([]string{deps.E2fsckCmd, "-fp", sda8}, "", nil); err != nil {
+		return err
+	}
+	resizeArgs := []string{deps.Resize2fsCmd, sda8}
+	if runState.data.Config.BootDisk.OEMFSSize4K != 0 {
+		resizeArgs = append(resizeArgs, strconv.FormatUint(runState.data.Config.BootDisk.OEMFSSize4K, 10))
+	}
+	if err := utils.RunCommand(resizeArgs, "", nil); err != nil {
+		return err
+	}
+	if err := systemd.start("usr-share-oem.mount", nil); err != nil {
+		return err
+	}
+	log.Println("OEM file system resized to account for available space")
+	return nil
+}
+
+// repartitionBootDisk executes all behaviors related to repartitioning the boot
+// disk. Most of these behaviors require a reboot. To keep reboots simple (e.g.
+// we don't want to initiate a reboot when deferred statements are unresolved),
+// we handle reboots by returning ErrRebootRequired and asking the caller to
+// initiate the reboot.
+func repartitionBootDisk(deps Deps, runState *state) error {
+	if err := switchRoot(deps, runState); err != nil {
+		return err
+	}
+	if err := shrinkSDA3(deps, runState); err != nil {
+		return err
+	}
+	if err := waitForDiskResize(deps, runState); err != nil {
+		return err
+	}
+	if err := relocatePartitions(deps, runState); err != nil {
+		return err
+	}
+	if err := resizeOEMFileSystem(deps, runState); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/src/pkg/provisioner/gpu_setup_script.go b/src/pkg/provisioner/gpu_setup_script.go
new file mode 100644
index 0000000..4c88229
--- /dev/null
+++ b/src/pkg/provisioner/gpu_setup_script.go
@@ -0,0 +1,88 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+const gpuSetupScriptTemplate = `#!/bin/bash
+
+set -o errexit
+
+export NVIDIA_DRIVER_VERSION={{.NvidiaDriverVersion}}
+export NVIDIA_DRIVER_MD5SUM={{.NvidiaDriverMD5Sum}}
+export NVIDIA_INSTALL_DIR_HOST={{.NvidiaInstallDirHost}}
+export COS_NVIDIA_INSTALLER_CONTAINER={{.NvidiaInstallerContainer}}
+export NVIDIA_INSTALL_DIR_CONTAINER=/usr/local/nvidia
+export ROOT_MOUNT_DIR=/root
+
+pull_installer() {
+  local docker_code
+  local i=1
+  while [[ $i -le 10 ]]; do
+    echo "Pulling cos-gpu-installer container image... [${i}/10]"
+    docker pull "${COS_NVIDIA_INSTALLER_CONTAINER}" && break || docker_code="$?"
+    i=$((i+1))
+    sleep 2
+  done
+  if [[ $i -eq 11 ]]; then
+    echo "Pulling cos-gpu-installer failed."
+    echo "Docker journal logs:"
+    journalctl -u docker.service --no-pager
+    exit "${docker_code}"
+  fi
+  echo "Successfully pulled cos-gpu-installer container image."
+}
+
+main() {
+  mkdir -p "${NVIDIA_INSTALL_DIR_HOST}"
+  mount --bind "${NVIDIA_INSTALL_DIR_HOST}" "${NVIDIA_INSTALL_DIR_HOST}"
+  mount -o remount,exec "${NVIDIA_INSTALL_DIR_HOST}"
+  pull_installer
+  docker_run_cmd="docker run \
+    --rm \
+    --privileged \
+    --net=host \
+    --pid=host \
+    --volume ${NVIDIA_INSTALL_DIR_HOST}:${NVIDIA_INSTALL_DIR_CONTAINER} \
+    --volume /dev:/dev \
+    --volume /:${ROOT_MOUNT_DIR} \
+    -e NVIDIA_DRIVER_VERSION \
+    -e NVIDIA_DRIVER_MD5SUM \
+    -e NVIDIA_INSTALL_DIR_HOST \
+    -e COS_NVIDIA_INSTALLER_CONTAINER \
+    -e NVIDIA_INSTALL_DIR_CONTAINER \
+    -e ROOT_MOUNT_DIR \
+    -e COS_DOWNLOAD_GCS \
+    -e GPU_INSTALLER_DOWNLOAD_URL \
+    ${COS_NVIDIA_INSTALLER_CONTAINER}"
+  if ! ${docker_run_cmd}; then
+    echo "GPU install failed."
+    if [[ -f /var/lib/nvidia/nvidia-installer.log ]]; then
+      echo "Nvidia installer debug logs:"
+      cat /var/lib/nvidia/nvidia-installer.log
+    fi
+    return 1
+  fi
+  ${NVIDIA_INSTALL_DIR_HOST}/bin/nvidia-smi
+
+  # Start nvidia-persistenced
+  if ! pgrep -f nvidia-persistenced > /dev/null; then
+    "${NVIDIA_INSTALL_DIR_HOST}/bin/nvidia-persistenced" --verbose
+  fi
+
+  # Set softlockup_panic
+  echo 1 > /proc/sys/kernel/softlockup_panic
+}
+
+main
+`
diff --git a/src/pkg/provisioner/install_gpu_step.go b/src/pkg/provisioner/install_gpu_step.go
new file mode 100644
index 0000000..e2bd484
--- /dev/null
+++ b/src/pkg/provisioner/install_gpu_step.go
@@ -0,0 +1,122 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+	"text/template"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+)
+
+type InstallGPUStep struct {
+	NvidiaDriverVersion      string
+	NvidiaDriverMD5Sum       string
+	NvidiaInstallDirHost     string
+	NvidiaInstallerContainer string
+	GCSDepsPrefix            string
+}
+
+func (s *InstallGPUStep) validate() error {
+	if s.NvidiaDriverVersion == "" {
+		return errors.New("invalid args: NvidiaDriverVersion is required in InstallGPU")
+	}
+	if s.NvidiaInstallerContainer == "" {
+		return errors.New("invalid args: NvidiaInstallerContainer is required in InstallGPU")
+	}
+	return nil
+}
+
+func (s *InstallGPUStep) setDefaults() {
+	if s.NvidiaInstallDirHost == "" {
+		s.NvidiaInstallDirHost = "/var/lib/nvidia"
+	}
+}
+
+func (s *InstallGPUStep) installScript(path, driverVersion string) (err error) {
+	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+		return err
+	}
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0744)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(f, fmt.Sprintf("error closing %q", path), &err)
+	t, err := template.New("gpu-script").Parse(gpuSetupScriptTemplate)
+	if err != nil {
+		return err
+	}
+	if err := t.Execute(f, &InstallGPUStep{
+		NvidiaDriverVersion:      utils.QuoteForShell(driverVersion),
+		NvidiaDriverMD5Sum:       utils.QuoteForShell(s.NvidiaDriverMD5Sum),
+		NvidiaInstallDirHost:     utils.QuoteForShell(s.NvidiaInstallDirHost),
+		NvidiaInstallerContainer: utils.QuoteForShell(s.NvidiaInstallerContainer),
+	}); err != nil {
+		return fmt.Errorf("error installing %q: %v", path, err)
+	}
+	return nil
+}
+
+func (s *InstallGPUStep) runInstaller(path string) error {
+	var downloadURL string
+	if s.GCSDepsPrefix != "" {
+		downloadURL = "https://storage.googleapis.com/" + strings.TrimPrefix(s.GCSDepsPrefix, "gs://")
+	}
+	var gpuInstallerDownloadURL string
+	if strings.HasSuffix(s.NvidiaDriverVersion, ".run") && downloadURL != "" {
+		gpuInstallerDownloadURL = downloadURL + "/" + s.NvidiaDriverVersion
+	}
+	if err := utils.RunCommand([]string{"/bin/bash", path}, "", append(os.Environ(), []string{
+		"COS_DOWNLOAD_GCS=" + downloadURL,
+		"GPU_INSTALLER_DOWNLOAD_URL=" + gpuInstallerDownloadURL,
+	}...)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s *InstallGPUStep) run(runState *state) error {
+	if err := s.validate(); err != nil {
+		return err
+	}
+	s.setDefaults()
+	var driverVersion string
+	if strings.HasSuffix(s.NvidiaDriverVersion, ".run") {
+		// NVIDIA-Linux-x86_64-450.51.06.run -> 450.51.06
+		fields := strings.FieldsFunc(strings.TrimSuffix(s.NvidiaDriverVersion, ".run"), func(r rune) bool { return r == '-' })
+		if len(fields) != 4 {
+			return fmt.Errorf("malformed nvidia installer: %q", s.NvidiaDriverVersion)
+		}
+		driverVersion = fields[3]
+	} else {
+		driverVersion = s.NvidiaDriverVersion
+	}
+	log.Println("Installing GPU drivers...")
+	scriptPath := filepath.Join(s.NvidiaInstallDirHost, "setup_gpu.sh")
+	if err := s.installScript(scriptPath, driverVersion); err != nil {
+		return err
+	}
+	if err := s.runInstaller(scriptPath); err != nil {
+		log.Println("Installing GPU drivers failed")
+		return err
+	}
+	log.Println("Done installing GPU drivers")
+	return nil
+}
diff --git a/src/pkg/provisioner/provisioner.go b/src/pkg/provisioner/provisioner.go
new file mode 100644
index 0000000..006b80e
--- /dev/null
+++ b/src/pkg/provisioner/provisioner.go
@@ -0,0 +1,334 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package provisioner exports behaviors for provisioning COS systems
+// end-to-end. These behaviors are intended to run on a COS system.
+package provisioner
+
+import (
+	"bufio"
+	"context"
+	_ "embed"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"cloud.google.com/go/storage"
+	"golang.org/x/sys/unix"
+)
+
+//go:embed docker-credential-gcr
+var dockerCredentialGCR []byte
+
+// ErrRebootRequired indicates that a reboot is necessary for provisioning to
+// continue.
+var ErrRebootRequired = errors.New("reboot required to continue provisioning")
+
+// I typically do not like this style of mocking, but I think it's the best
+// option in this case. These functions cannot execute at all in a normal test
+// environment because they require root privileges. Even if the address to
+// mount is owned by the caller, these functions will fail. To take them out of
+// the test codepath, we can mock them here.
+//
+// I have considered an alternative involving writing a unixPkg interface and
+// passing it through the Deps struct. But it doesn't give us much for its
+// additional verbosity.
+var mountFunc = unix.Mount
+var unmountFunc = unix.Unmount
+
+func mountOptions(rootDir, mountPoint string) (uintptr, error) {
+	mountInfoFile, err := os.Open(filepath.Join(rootDir, "proc/self/mountinfo"))
+	if err != nil {
+		return 0, err
+	}
+	defer mountInfoFile.Close()
+	scanner := bufio.NewScanner(mountInfoFile)
+	var options string
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		if len(fields) < 6 {
+			return 0, fmt.Errorf("invalid line in mountinfo: %q", scanner.Text())
+		}
+		if fields[4] == mountPoint {
+			options = fields[5]
+			break
+		}
+	}
+	if options == "" {
+		return 0, fmt.Errorf("mountpoint %q not found", mountPoint)
+	}
+	var parsedOptions uintptr
+	for _, opt := range strings.FieldsFunc(options, func(r rune) bool { return r == ',' }) {
+		// String representations of mount options are viewable here:
+		// https://github.com/torvalds/linux/blob/8404c9fbc84b741f66cff7d4934a25dd2c344452/fs/proc_namespace.c#L66
+		//
+		// "ro" vs "rw" is special cased:
+		// https://github.com/torvalds/linux/blob/8404c9fbc84b741f66cff7d4934a25dd2c344452/fs/proc_namespace.c#L159
+		switch opt {
+		case "nosuid":
+			parsedOptions |= unix.MS_NOSUID
+		case "nodev":
+			parsedOptions |= unix.MS_NODEV
+		case "noexec":
+			parsedOptions |= unix.MS_NOEXEC
+		case "noatime":
+			parsedOptions |= unix.MS_NOATIME
+		case "nodiratime":
+			parsedOptions |= unix.MS_NODIRATIME
+		case "relatime":
+			parsedOptions |= unix.MS_RELATIME
+		case "nosymfollow":
+			parsedOptions |= unix.MS_NOSYMFOLLOW
+		case "ro":
+			parsedOptions |= unix.MS_RDONLY
+		}
+	}
+	return parsedOptions, nil
+}
+
+func setup(runState *state, rootDir string, systemd *systemdClient) error {
+	log.Println("Setting up environment...")
+	if err := systemd.stop("update-engine.service"); err != nil {
+		return err
+	}
+	if err := mountFunc("tmpfs", filepath.Join(rootDir, "root"), "tmpfs", 0, ""); err != nil {
+		return fmt.Errorf("error mounting tmpfs at /root: %v", err)
+	}
+	binPath := filepath.Join(runState.dir, "bin")
+	dockerCredentialGCRPath := filepath.Join(binPath, "docker-credential-gcr")
+	if _, err := os.Stat(binPath); os.IsNotExist(err) {
+		if err := os.Mkdir(binPath, 0744); err != nil {
+			return err
+		}
+	}
+	if _, err := os.Stat(dockerCredentialGCRPath); os.IsNotExist(err) {
+		if err := ioutil.WriteFile(dockerCredentialGCRPath, dockerCredentialGCR, 0744); err != nil {
+			return err
+		}
+	}
+	// docker-credential-gcr will complain if docker-credential-gcr is not in the
+	// PATH
+	pathVar := os.Getenv("PATH")
+	if err := os.Setenv("PATH", binPath+":"+pathVar); err != nil {
+		return fmt.Errorf("could not update PATH environment variable: %v", err)
+	}
+	// Ensure that docker-credential-gcr is on an executable mount
+	if err := mountFunc(binPath, binPath, "ext4", unix.MS_BIND, ""); err != nil {
+		return fmt.Errorf("error bind mounting %q: %v", dockerCredentialGCRPath, err)
+	}
+	opts, err := mountOptions(rootDir, binPath)
+	if err != nil {
+		return err
+	}
+	if err := mountFunc("", binPath, "", unix.MS_REMOUNT|unix.MS_BIND|opts&^unix.MS_NOEXEC, ""); err != nil {
+		return fmt.Errorf("error remounting %q as executable: %v", dockerCredentialGCRPath, err)
+	}
+	// Run docker-credential-gcr
+	cmd := exec.Command(dockerCredentialGCRPath, "configure-docker")
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("error in cmd docker-credential-gcr, see stderr for details: %v", err)
+	}
+	log.Println("Done setting up the environment")
+	return nil
+}
+
+func stopServices(systemd *systemdClient) error {
+	log.Println("Stopping services...")
+	for _, s := range []string{
+		"crash-reporter.service",
+		"crash-sender.service",
+		"device_policy_manager.service",
+		"metrics-daemon.service",
+		"update-engine.service",
+	} {
+		if err := systemd.stop(s); err != nil {
+			return err
+		}
+	}
+	log.Println("Done stopping services.")
+	return nil
+}
+
+func zeroAllFiles(dir string) error {
+	if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) {
+		return nil
+	}
+	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return fmt.Errorf("error accessing path %q: %v", path, err)
+		}
+		if info.IsDir() {
+			return nil
+		}
+		// Truncate the file
+		f, err := os.Create(path)
+		if err != nil {
+			return err
+		}
+		if err := f.Close(); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+func cleanupDir(dir string) error {
+	fileInfos, err := ioutil.ReadDir(dir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		} else {
+			return err
+		}
+	}
+	for _, fi := range fileInfos {
+		if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func cleanup(rootDir, stateDir string) error {
+	log.Println("Cleaning up machine state...")
+	binPath := filepath.Join(stateDir, "bin")
+	if err := unmountFunc(binPath, 0); err != nil {
+		return fmt.Errorf("error unmounting %q: %v", binPath, err)
+	}
+	if err := os.RemoveAll(stateDir); err != nil {
+		return err
+	}
+	if err := unmountFunc(filepath.Join(rootDir, "root"), 0); err != nil {
+		// This error can be non-fatal because this is cleaning up a tmpfs mount,
+		// which doesn't impact the final image output in any way
+		log.Printf("Non-fatal error unmounting tmpfs at /root: %v", err)
+	}
+	if err := os.RemoveAll(filepath.Join(rootDir, "mnt", "stateful_partition", "etc")); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	for _, d := range []string{
+		filepath.Join(rootDir, "var", "cache"),
+		filepath.Join(rootDir, "var", "tmp"),
+		filepath.Join(rootDir, "var", "lib", "crash_reporter"),
+		filepath.Join(rootDir, "var", "lib", "metrics"),
+		filepath.Join(rootDir, "var", "lib", "systemd"),
+		filepath.Join(rootDir, "var", "lib", "update_engine"),
+		filepath.Join(rootDir, "var", "lib", "whitelist"),
+	} {
+		if err := cleanupDir(d); err != nil {
+			return err
+		}
+	}
+	// There are a few files in /var/log that need to exist for daemons to work.
+	// The best way to clear logs is to zero them out instead of deleting them.
+	if err := zeroAllFiles(filepath.Join(rootDir, "var", "log")); err != nil {
+		return err
+	}
+	log.Println("Done cleaning up machine state")
+	return nil
+}
+
+func executeSteps(s *state) error {
+	for i, step := range s.data.Config.Steps {
+		// In the case where executeSteps runs after a reboot, we need to skip
+		// through all the steps that have already been completed.
+		if i < s.data.CurrentStep {
+			continue
+		}
+		abstractStep, err := parseStep(step.Type, step.Args)
+		if err != nil {
+			return fmt.Errorf("error parsing step %d: %v", i, err)
+		}
+		if err := abstractStep.run(s); err != nil {
+			return fmt.Errorf("error in step %d: %v", i, err)
+		}
+		// Persist our most recent completed step to disk, so we can resume after a reboot.
+		s.data.CurrentStep++
+		if err := s.write(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Deps contains provisioner service dependencies.
+type Deps struct {
+	// GCSClient is used to access Google Cloud Storage.
+	GCSClient *storage.Client
+	// TarCmd is used for tar.
+	TarCmd string
+	// SystemctlCmd is used to access systemd.
+	SystemctlCmd string
+	// RootdevCmd is the path to the rootdev binary.
+	RootdevCmd string
+	// CgptCmd is the path to the cgpt binary.
+	CgptCmd string
+	// Resize2fsCmd is the path to the resize2fs binary.
+	Resize2fsCmd string
+	// E2fsckCmd is the path to the e2fsck binary.
+	E2fsckCmd string
+	// RootDir is the path to the root file system. Should be "/" in all real
+	// runtime situations.
+	RootDir string
+}
+
+func run(deps Deps, runState *state) (err error) {
+	systemd := &systemdClient{systemctl: deps.SystemctlCmd}
+	if err := repartitionBootDisk(deps, runState); err != nil {
+		return err
+	}
+	if err := setup(runState, deps.RootDir, systemd); err != nil {
+		return err
+	}
+	if err := executeSteps(runState); err != nil {
+		return err
+	}
+	if err := stopServices(systemd); err != nil {
+		return fmt.Errorf("error stopping services: %v", err)
+	}
+	if err := cleanup(deps.RootDir, runState.dir); err != nil {
+		return fmt.Errorf("error in cleanup: %v", err)
+	}
+	log.Println("Done provisioning machine")
+	return nil
+}
+
+// Run runs a full provisioning flow based on the provided config. The stateDir
+// is used for persisting data used as part of provisioning. The stateDir allows
+// the provisioning flow to be interrupted (e.g. by a reboot) and resumed.
+func Run(ctx context.Context, deps Deps, stateDir string, c Config) error {
+	log.Println("Provisioning machine...")
+	runState, err := initState(ctx, deps, stateDir, c)
+	if err != nil {
+		return err
+	}
+	return run(deps, runState)
+}
+
+// Resume resumes provisioning from the state provided at stateDir.
+func Resume(ctx context.Context, deps Deps, stateDir string) (err error) {
+	log.Println("Resuming provisioning...")
+	runState, err := loadState(stateDir)
+	if err != nil {
+		return err
+	}
+	return run(deps, runState)
+}
diff --git a/src/pkg/provisioner/provisioner_test.go b/src/pkg/provisioner/provisioner_test.go
new file mode 100644
index 0000000..cdddc3e
--- /dev/null
+++ b/src/pkg/provisioner/provisioner_test.go
@@ -0,0 +1,279 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/fakes"
+	"golang.org/x/sys/unix"
+)
+
+func testDataDir(t *testing.T) string { // absolute path to this package's testdata directory
+	t.Helper()
+	path, err := filepath.Abs("testdata")
+	if err != nil {
+		t.Fatal(err)
+	}
+	return path
+}
+
+func stubMount() { // replace unix.Mount/Unmount hooks with no-ops so tests need no privileges
+	mountFunc = func(a1, a2, a3 string, a4 uintptr, a5 string) error {
+		return nil
+	}
+	unmountFunc = func(a1 string, a2 int) error {
+		return nil
+	}
+}
+
+func restoreMount() { // undo stubMount: point the hooks back at the real syscall wrappers
+	mountFunc = unix.Mount
+	unmountFunc = unix.Unmount
+}
+
+func stubMountInfo(filePath, mountPoint string) error { // write a minimal mountinfo-style file listing mountPoint (with an "ro" field)
+	if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil {
+		return err
+	}
+	return ioutil.WriteFile(filePath, []byte(fmt.Sprintf("0 0 0 / %s ro\n", mountPoint)), 0644)
+}
+
+func TestStateExists(t *testing.T) { // Run must refuse to start when state.json already exists
+	ctx := context.Background()
+	dir, err := ioutil.TempDir("", "provisioner-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Cleanup(func() { os.RemoveAll(dir) })
+	if err := ioutil.WriteFile(filepath.Join(dir, "state.json"), []byte("{}"), 0660); err != nil { // simulate an in-progress run
+		t.Fatal(err)
+	}
+	deps := Deps{
+		GCSClient:    nil,
+		TarCmd:       "",
+		SystemctlCmd: "",
+	}
+	config := Config{}
+	if err := Run(ctx, deps, dir, config); err != errStateAlreadyExists {
+		t.Fatalf("Run(ctx, %+v, %q, %+v) = %v; want %v", deps, dir, config, err, errStateAlreadyExists)
+	}
+}
+
+func TestRunInvalidArgs(t *testing.T) { // steps with invalid args must make Run fail
+	stubMount()
+	t.Cleanup(restoreMount)
+	tests := []struct {
+		name   string
+		config Config
+	}{
+		{
+			name: "RunScript",
+			config: Config{
+				Steps: []StepConfig{
+					{
+						Type: "RunScript",
+						Args: []byte("{}"), // missing required BuildContext and Path fields
+					},
+				},
+			},
+		},
+	}
+	for _, test := range tests {
+		test := test // capture range variable for the parallel subtest
+		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
+			ctx := context.Background()
+			tempDir, err := ioutil.TempDir("", "provisioner-test-")
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tempDir)
+			gcs := fakes.GCSForTest(t)
+			deps := Deps{
+				GCSClient:    gcs.Client,
+				TarCmd:       "tar",
+				SystemctlCmd: "/bin/true",
+				RootDir:      tempDir,
+			}
+			stateDir := filepath.Join(tempDir, "var", "lib", ".cos-customizer")
+			if err := stubMountInfo(filepath.Join(tempDir, "proc", "self", "mountinfo"), filepath.Join(stateDir, "bin")); err != nil {
+				t.Fatal(err)
+			}
+			funcCall := fmt.Sprintf("Run(ctx, %+v, %q, %+v)", deps, stateDir, test.config)
+			if err := Run(ctx, deps, stateDir, test.config); err == nil {
+				t.Fatalf("%s = nil; want invalid args", funcCall)
+			}
+		})
+	}
+}
+
+func TestRunFailure(t *testing.T) { // a failing script must surface as a Run error
+	stubMount()
+	t.Cleanup(restoreMount)
+	testData := testDataDir(t)
+	buildCtxDir, err := ioutil.TempDir("", "provisioner-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Cleanup(func() { os.RemoveAll(buildCtxDir) })
+	buildCtx := filepath.Join(buildCtxDir, "test.tar")
+	if err := exec.Command("tar", "cf", buildCtx, "-C", filepath.Join(testData, "test_ctx"), ".").Run(); err != nil { // pack testdata/test_ctx as a build context tarball
+		t.Fatal(err)
+	}
+	tests := []struct {
+		name       string
+		gcsObjects map[string]string
+		config     Config
+	}{
+		{
+			name: "RunScript",
+			gcsObjects: map[string]string{
+				"/test/test.tar": buildCtx,
+			},
+			config: Config{
+				BuildContexts: map[string]string{
+					"bc": "gs://test/test.tar",
+				},
+				Steps: []StepConfig{
+					{
+						Type: "RunScript",
+						Args: []byte(`{"BuildContext": "bc", "Path": "run_env.sh"}`), // run_env.sh exits 1: TEST is not provided in Env
+					},
+				},
+			},
+		},
+	}
+	for _, test := range tests {
+		test := test // capture range variable for the parallel subtest
+		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
+			ctx := context.Background()
+			tempDir, err := ioutil.TempDir("", "provisioner-test-")
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tempDir)
+			gcs := fakes.GCSForTest(t)
+			for name, path := range test.gcsObjects { // seed the fake GCS bucket
+				data, err := ioutil.ReadFile(path)
+				if err != nil {
+					t.Fatal(err)
+				}
+				gcs.Objects[name] = data
+			}
+			deps := Deps{
+				GCSClient:    gcs.Client,
+				TarCmd:       "tar",
+				SystemctlCmd: "/bin/true",
+				RootDir:      tempDir,
+			}
+			stateDir := filepath.Join(tempDir, "var", "lib", ".cos-customizer")
+			if err := stubMountInfo(filepath.Join(tempDir, "proc", "self", "mountinfo"), filepath.Join(stateDir, "bin")); err != nil {
+				t.Fatal(err)
+			}
+			funcCall := fmt.Sprintf("Run(ctx, %+v, %q, %+v)", deps, stateDir, test.config)
+			if err := Run(ctx, deps, stateDir, test.config); err == nil {
+				t.Fatalf("%s = nil; want err", funcCall)
+			}
+		})
+	}
+}
+
+func TestRunSuccess(t *testing.T) { // full happy-path provisioning flows must succeed end to end
+	stubMount()
+	t.Cleanup(restoreMount)
+	testData := testDataDir(t)
+	buildCtxDir, err := ioutil.TempDir("", "provisioner-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Cleanup(func() { os.RemoveAll(buildCtxDir) })
+	buildCtx := filepath.Join(buildCtxDir, "test.tar")
+	if err := exec.Command("tar", "cf", buildCtx, "-C", filepath.Join(testData, "test_ctx"), ".").Run(); err != nil { // pack testdata/test_ctx as a build context tarball
+		t.Fatal(err)
+	}
+	tests := []struct {
+		name       string
+		gcsObjects map[string]string
+		config     Config
+	}{
+		{
+			name:   "EmptyConfig", // no steps at all is a valid config
+			config: Config{},
+		},
+		{
+			name: "RunScript",
+			gcsObjects: map[string]string{
+				"/test/test.tar": buildCtx,
+			},
+			config: Config{
+				BuildContexts: map[string]string{
+					"bc": "gs://test/test.tar",
+				},
+				Steps: []StepConfig{
+					{
+						Type: "RunScript",
+						Args: []byte(`{"BuildContext": "bc", "Path": "run.sh"}`), // checks PWD is the build context
+					},
+					{
+						Type: "RunScript",
+						Args: []byte(`{"BuildContext": "bc", "Path": "run_env.sh", "Env": "TEST=t"}`), // checks env propagation
+					},
+				},
+			},
+		},
+	}
+	for _, test := range tests {
+		test := test // capture range variable for the parallel subtest
+		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
+			ctx := context.Background()
+			tempDir, err := ioutil.TempDir("", "provisioner-test-")
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(tempDir)
+			gcs := fakes.GCSForTest(t)
+			for name, path := range test.gcsObjects { // seed the fake GCS bucket
+				data, err := ioutil.ReadFile(path)
+				if err != nil {
+					t.Fatal(err)
+				}
+				gcs.Objects[name] = data
+			}
+			deps := Deps{
+				GCSClient:    gcs.Client,
+				TarCmd:       "tar",
+				SystemctlCmd: "/bin/true",
+				RootDir:      tempDir,
+			}
+			stateDir := filepath.Join(tempDir, "var", "lib", ".cos-customizer")
+			if err := stubMountInfo(filepath.Join(tempDir, "proc", "self", "mountinfo"), filepath.Join(stateDir, "bin")); err != nil {
+				t.Fatal(err)
+			}
+			funcCall := fmt.Sprintf("Run(ctx, %+v, %q, %+v)", deps, stateDir, test.config)
+			if err := Run(ctx, deps, stateDir, test.config); err != nil {
+				t.Fatalf("%s = %v; want nil", funcCall, err)
+			}
+		})
+	}
+}
diff --git a/src/pkg/provisioner/run_script_step.go b/src/pkg/provisioner/run_script_step.go
new file mode 100644
index 0000000..3e23ed4
--- /dev/null
+++ b/src/pkg/provisioner/run_script_step.go
@@ -0,0 +1,55 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+import (
+	"errors"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+)
+
+type RunScriptStep struct { // RunScriptStep is the provisioner step that executes a user script with /bin/bash.
+	BuildContext string // name of the build context (key in Config.BuildContexts) the script runs in; required
+	Path         string // script path relative to the build context root; required
+	Env          string // comma-separated KEY=VALUE pairs appended to the inherited environment; optional
+}
+
+func (s *RunScriptStep) validate() error { // reject configs missing the required RunScript fields
+	if s.BuildContext == "" {
+		return errors.New("invalid args: BuildContext is required in RunScript")
+	}
+	if s.Path == "" {
+		return errors.New("invalid args: Path is required in RunScript")
+	}
+	return nil
+}
+
+func (s *RunScriptStep) run(runState *state) error { // execute the configured script inside its build context
+	if err := s.validate(); err != nil {
+		return err
+	}
+	log.Printf("Executing script %q...", s.Path)
+	buildContext := filepath.Join(runState.dir, s.BuildContext)
+	script := filepath.Join(buildContext, s.Path)
+	if err := utils.RunCommand([]string{"/bin/bash", script}, buildContext, append(os.Environ(), strings.FieldsFunc(s.Env, func(r rune) bool { return r == ',' })...)); err != nil { // FieldsFunc, unlike strings.Split, yields nothing for an empty Env, so no empty "" env entry is passed to the child
+		return err
+	}
+	log.Printf("Done executing script %q", s.Path)
+	return nil
+}
diff --git a/src/pkg/provisioner/seal_oem_step.go b/src/pkg/provisioner/seal_oem_step.go
new file mode 100644
index 0000000..d4f8575
--- /dev/null
+++ b/src/pkg/provisioner/seal_oem_step.go
@@ -0,0 +1,51 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+import (
+	_ "embed"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools"
+)
+
+//go:embed _veritysetup.img
+var veritysetupImg []byte // raw contents of _veritysetup.img, embedded at build time
+
+type SealOEMStep struct{} // SealOEMStep seals the OEM partition with dm-verity; it takes no arguments.
+
+func (s *SealOEMStep) run(runState *state) error {
+	log.Println("Sealing the OEM partition with dm-verity")
+	veritysetupImgPath := filepath.Join(runState.dir, "veritysetup.img")
+	if _, err := os.Stat(veritysetupImgPath); os.IsNotExist(err) { // only materialize the embedded image once (idempotent across resumes)
+		if err := ioutil.WriteFile(veritysetupImgPath, veritysetupImg, 0644); err != nil {
+			return err
+		}
+	}
+	if err := tools.SealOEMPartition(veritysetupImgPath, runState.data.Config.BootDisk.OEMFSSize4K); err != nil {
+		return err
+	}
+	if err := tools.DisableSystemdService("update-engine.service"); err != nil { // NOTE(review): presumably so auto-updates cannot touch the sealed partition — confirm
+		return err
+	}
+	if err := tools.DisableSystemdService("usr-share-oem.mount"); err != nil {
+		return err
+	}
+	log.Println("Done sealing the OEM partition with dm-verity")
+	return nil
+}
diff --git a/src/pkg/provisioner/state.go b/src/pkg/provisioner/state.go
new file mode 100644
index 0000000..f905869
--- /dev/null
+++ b/src/pkg/provisioner/state.go
@@ -0,0 +1,149 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"cloud.google.com/go/storage"
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+)
+
+var (
+	errStateAlreadyExists = errors.New("state already exists") // returned by initState when state.json is already present in the state dir
+)
+
+type stateData struct { // JSON-persisted portion of provisioning state
+	Config             Config
+	CurrentStep        int // assumed to be the index of the step to execute next — TODO confirm against executeSteps
+	DiskResizeComplete bool
+}
+
+type state struct { // in-memory handle: the state directory plus its persisted data
+	dir  string
+	data stateData
+}
+
+func (s *state) dataPath() string { // location of the persisted state file inside the state dir
+	return filepath.Join(s.dir, "state.json")
+}
+
+func (s *state) read() error { // load s.data from state.json, overwriting any in-memory value
+	data, err := ioutil.ReadFile(s.dataPath())
+	if err != nil {
+		return fmt.Errorf("error reading %q: %v", s.dataPath(), err)
+	}
+	if err := json.Unmarshal(data, &s.data); err != nil {
+		return fmt.Errorf("error parsing JSON file %q: %v", s.dataPath(), err)
+	}
+	return nil
+}
+
+func (s *state) write() error { // persist s.data to state.json (not atomic; a crash mid-write can corrupt it)
+	data, err := json.Marshal(&s.data)
+	if err != nil {
+		return fmt.Errorf("error marshalling JSON: %v", err)
+	}
+	if err := ioutil.WriteFile(s.dataPath(), data, 0660); err != nil {
+		return fmt.Errorf("error writing %q: %v", s.dataPath(), err)
+	}
+	return nil
+}
+
+func downloadGCSObject(ctx context.Context, gcsClient *storage.Client, bucket, object, localPath string) (err error) { // named result: CheckClose reports Close errors through &err, which an unnamed result would silently drop
+	address := fmt.Sprintf("gs://%s/%s", bucket, object)
+	gcsObj, err := gcsClient.Bucket(bucket).Object(object).NewReader(ctx)
+	if err != nil {
+		return fmt.Errorf("error reading %q: %v", address, err)
+	}
+	defer utils.CheckClose(gcsObj, fmt.Sprintf("error closing GCS reader %q", address), &err)
+	localFile, err := os.Create(localPath)
+	if err != nil {
+		return err
+	}
+	defer utils.CheckClose(localFile, "", &err)
+	if _, err := io.Copy(localFile, gcsObj); err != nil {
+		return fmt.Errorf("error copying %q to %q: %v", address, localFile.Name(), err)
+	}
+	return nil
+}
+
+func (s *state) unpackBuildContexts(ctx context.Context, deps Deps) (err error) { // download each gs:// build context and extract it into the state dir
+	for name, address := range s.data.Config.BuildContexts {
+		log.Printf("Unpacking build context %q from %q", name, address)
+		if !strings.HasPrefix(address, "gs://") { // HasPrefix also handles addresses shorter than "gs://" (slicing would panic)
+			return fmt.Errorf("cannot use address %q, only gs:// addresses are supported", address)
+		}
+		splitAddr := strings.SplitN(address[len("gs://"):], "/", 2)
+		if len(splitAddr) != 2 || splitAddr[0] == "" || splitAddr[1] == "" {
+			return fmt.Errorf("address %q is malformed", address)
+		}
+		bucket, object := splitAddr[0], splitAddr[1]
+		tarPath := filepath.Join(s.dir, name+".tar")
+		if err := downloadGCSObject(ctx, deps.GCSClient, bucket, object, tarPath); err != nil {
+			return fmt.Errorf("error downloading %q to %q: %v", address, tarPath, err)
+		}
+		tarDir := filepath.Join(s.dir, name)
+		if err := os.Mkdir(tarDir, 0770); err != nil {
+			return err
+		}
+		args := []string{"xf", tarPath, "-C", tarDir}
+		cmd := exec.Command(deps.TarCmd, args...)
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+		if err := cmd.Run(); err != nil {
+			return fmt.Errorf(`error in cmd "%s %v", see stderr for details: %v`, deps.TarCmd, args, err)
+		}
+		if err := os.Remove(tarPath); err != nil { // drop the tarball after extraction to reclaim space
+			return err
+		}
+	}
+	return nil
+}
+
+func initState(ctx context.Context, deps Deps, dir string, c Config) (*state, error) { // create fresh provisioning state in dir; fails if state already exists
+	s := &state{dir: dir, data: stateData{Config: c, CurrentStep: 0}}
+	if _, err := os.Stat(s.dataPath()); err == nil { // an existing state.json means a previous run already started here
+		return nil, errStateAlreadyExists
+	}
+	if err := os.MkdirAll(dir, 0770); err != nil {
+		return nil, fmt.Errorf("error creating directory %q: %v", dir, err)
+	}
+	if err := s.write(); err != nil {
+		return nil, err
+	}
+	if err := s.unpackBuildContexts(ctx, deps); err != nil {
+		return nil, fmt.Errorf("error unpacking build contexts: %v", err)
+	}
+	return s, nil
+}
+
+func loadState(dir string) (*state, error) { // load previously persisted provisioning state from dir/state.json
+	s := &state{dir: dir}
+	if err := s.read(); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
diff --git a/src/pkg/provisioner/systemd.go b/src/pkg/provisioner/systemd.go
new file mode 100644
index 0000000..2997443
--- /dev/null
+++ b/src/pkg/provisioner/systemd.go
@@ -0,0 +1,83 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provisioner
+
+import (
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils"
+)
+
+type systemdClient struct { // thin wrapper around the systemctl binary
+	systemctl string // path to the systemctl executable
+}
+
+func (sc *systemdClient) isActive(unit string) bool { // true iff `systemctl is-active <unit>` exits 0
+	return exec.Command(sc.systemctl, "is-active", unit).Run() == nil
+}
+
+func (sc *systemdClient) reload() error { // `systemctl daemon-reload`: re-read unit files and drop-ins
+	return utils.RunCommand([]string{sc.systemctl, "daemon-reload"}, "", nil)
+}
+
+func (sc *systemdClient) start(unit string, flags []string) error { // `systemctl start <unit> <flags...>`; flags follow the unit name
+	return utils.RunCommand(append([]string{sc.systemctl, "start", unit}, flags...), "", nil)
+}
+
+func (sc *systemdClient) stop(unit string) error { // stop unit if active; inactive units are a logged no-op
+	if sc.isActive(unit) {
+		log.Printf("%q is active, stopping...", unit)
+		if err := utils.RunCommand([]string{sc.systemctl, "stop", unit}, "", nil); err != nil {
+			return err
+		}
+		log.Printf("%q stopped", unit)
+	} else {
+		log.Printf("%q is not active, ignoring", unit)
+	}
+	return nil
+}
+
+func (sc *systemdClient) stopJournald(rootDir string) error { // stop journald and all of its activation sockets under rootDir
+	configDirName := filepath.Join(rootDir, "etc/systemd/system/systemd-journald.service.d")
+	configName := filepath.Join(configDirName, "override.conf")
+	configData := `[Service]
+Restart=no
+`
+	if err := os.MkdirAll(configDirName, 0755); err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(configName, []byte(configData), 0644); err != nil { // drop-in sets Restart=no so systemd does not restart journald after we stop it
+		return err
+	}
+	if err := sc.reload(); err != nil { // daemon-reload so the drop-in takes effect before stopping
+		return err
+	}
+	for _, u := range []string{ // sockets first, service last, so socket activation cannot revive it
+		"systemd-journald.socket",
+		"systemd-journald-dev-log.socket",
+		"systemd-journald-audit.socket",
+		"syslog.socket",
+		"systemd-journald.service",
+	} {
+		if err := sc.stop(u); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/src/pkg/provisioner/testdata/test_ctx/run.sh b/src/pkg/provisioner/testdata/test_ctx/run.sh
new file mode 100644
index 0000000..31d6854
--- /dev/null
+++ b/src/pkg/provisioner/testdata/test_ctx/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+if [[ ! -f run.sh ]]; then  # the provisioner must start scripts with PWD set to the build context root
+  echo "Cannot find self in working directory (PWD is incorrect)"
+  exit 1
+fi
diff --git a/src/pkg/provisioner/testdata/test_ctx/run_env.sh b/src/pkg/provisioner/testdata/test_ctx/run_env.sh
new file mode 100644
index 0000000..40c2b4a
--- /dev/null
+++ b/src/pkg/provisioner/testdata/test_ctx/run_env.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if [[ -z "${TMPDIR}" ]]; then  # TMPDIR comes from the parent process environment
+  echo "TMPDIR is missing in environment (parent env is not propagated)"
+  exit 1
+fi
+if [[ -z "${TEST}" ]]; then  # TEST comes from the step's Env field (e.g. "TEST=t")
+  echo "TEST is missing in environment"
+  echo "(user provided env may not be propagated)"
+  exit 1
+fi
diff --git a/src/pkg/tools/BUILD.bazel b/src/pkg/tools/BUILD.bazel
new file mode 100644
index 0000000..90a5b3b
--- /dev/null
+++ b/src/pkg/tools/BUILD.bazel
@@ -0,0 +1,35 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "tools",
+    srcs = [
+        "disable_systemd_service.go",
+        "extend_oem_partition.go",
+        "handle_disk_layout.go",
+        "seal_oem_partition.go",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools",
+    visibility = ["//visibility:public"],
+    deps = ["//src/pkg/tools/partutil"],  # partition-table helpers used by the srcs above
+)
+
+go_test(
+    name = "tools_test",
+    srcs = ["handle_disk_layout_test.go"],
+    embed = [":tools"],  # compile the test into the tools package (internal test)
+    deps = ["//src/pkg/tools/partutil/partutiltest"],
+)
diff --git a/src/pkg/tools/disable_systemd_service.go b/src/pkg/tools/disable_systemd_service.go
new file mode 100644
index 0000000..184b8de
--- /dev/null
+++ b/src/pkg/tools/disable_systemd_service.go
@@ -0,0 +1,47 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil"
+)
+
+// DisableSystemdService disables the given systemd service by adding systemd.mask=<service> to the GRUB kernel command line.
+func DisableSystemdService(service string) error {
+	cmd := "systemd.mask=" + service
+	grubPath, err := partutil.MountEFIPartition()
+	if err != nil {
+		return fmt.Errorf("cannot mount EFI partition,"+
+			"error msg:(%v)", err)
+	}
+	defer partutil.UnmountEFIPartition()
+	contains, err := partutil.GRUBContains(grubPath, cmd)
+	if err != nil {
+		return fmt.Errorf("cannot read GRUB file at %q,"+
+			"error msg:(%v)", grubPath, err)
+	}
+	if contains { // already masked; nothing to do (idempotent)
+		return nil
+	}
+	if err := partutil.AddCmdToGRUB(grubPath, cmd); err != nil {
+		return fmt.Errorf("cannot add command to GRUB file at %q,"+
+			"cmd=%q, error msg:(%v)", grubPath, cmd, err)
+	}
+	log.Printf("%q service disabled.", service)
+	return nil
+}
diff --git a/src/pkg/tools/extend_oem_partition.go b/src/pkg/tools/extend_oem_partition.go
new file mode 100644
index 0000000..fa23906
--- /dev/null
+++ b/src/pkg/tools/extend_oem_partition.go
@@ -0,0 +1,116 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil"
+)
+
+// ExtendOEMPartition moves stateful partition towards the end of the disk
+// Then move OEM partition to the original place of the stateful partition
+// Finally resize the OEM partition to 1 sector before the new stateful partition
+// OEMSize can be the number of sectors (without unit) or size like "3G", "100M", "10000K" or "99999B"
+func ExtendOEMPartition(disk string, statePartNum, oemPartNum int, oemSize string) error {
+	if len(disk) <= 0 || statePartNum <= 0 || oemPartNum <= 0 || len(oemSize) <= 0 {
+		return fmt.Errorf("empty or non-positive input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q",
+			disk, statePartNum, oemPartNum, oemSize)
+	}
+
+	// read new size of OEM partition.
+	newOEMSizeBytes, err := partutil.ConvertSizeToBytes(oemSize)
+	if err != nil {
+		return fmt.Errorf("error in reading new OEM size, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, err)
+	}
+
+	// read original size of OEM partition.
+	oldOEMSize, err := partutil.ReadPartitionSize(disk, oemPartNum)
+	if err != nil {
+		return fmt.Errorf("error in reading old OEM size, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, err)
+	}
+	oldOEMSizeBytes := oldOEMSize << 9 // change unit to bytes (<<9 == *512; assumes 512-byte sectors — TODO confirm)
+
+	if newOEMSizeBytes <= oldOEMSizeBytes { // shrinking is not supported; only grow the OEM partition
+		log.Printf("\n!!!!!!!WARNING!!!!!!!\n"+
+			"oemSize: %d bytes is not larger than the original OEM partition size: %d bytes, "+
+			"nothing is done\n "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q",
+			newOEMSizeBytes, oldOEMSizeBytes, disk, statePartNum, oemPartNum, oemSize)
+		return nil
+	}
+
+	// print the old partition table.
+	table, err := partutil.ReadPartitionTable(disk)
+	if err != nil {
+		return fmt.Errorf("cannot read old partition table of %q, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, "+
+			"error msg: (%v)", disk, disk, statePartNum, oemPartNum, oemSize, err)
+	}
+	log.Printf("\nOld partition table:\n%s\n", table)
+
+	// record the original start sector of the stateful partition.
+	oldStateStartSector, err := partutil.ReadPartitionStart(disk, statePartNum)
+	if err != nil {
+		return fmt.Errorf("cannot read old stateful partition start, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, err)
+	}
+
+	// move the stateful partition forward by oemSize ("+" prefix = relative move).
+	if err := partutil.MovePartition(disk, statePartNum, "+"+oemSize); err != nil {
+		return fmt.Errorf("error in moving stateful partition, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, err)
+	}
+
+	// record the new start sector of the stateful partition.
+	newStateStartSector, err := partutil.ReadPartitionStart(disk, statePartNum)
+	if err != nil {
+		return fmt.Errorf("cannot read new stateful partition start, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, err)
+	}
+
+	// move OEM partition to the original start sector of the stateful partition.
+	if err := partutil.MovePartition(disk, oemPartNum, strconv.FormatUint(oldStateStartSector, 10)); err != nil {
+		return fmt.Errorf("error in moving OEM partition, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, err)
+	}
+
+	// extend the OEM partition to end 1 sector before the moved stateful partition.
+	if err = partutil.ExtendPartition(disk, oemPartNum, newStateStartSector-1); err != nil {
+		return fmt.Errorf("error in extending OEM partition, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, err)
+	}
+
+	// print the new partition table.
+	table, err = partutil.ReadPartitionTable(disk)
+	if err != nil {
+		return fmt.Errorf("cannot read new partition table of %q, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, "+
+			"error msg: (%v)", disk, disk, statePartNum, oemPartNum, oemSize, err)
+	}
+	log.Printf("\nCompleted extending OEM partition\n\n New partition table:\n%s\n", table)
+	return nil
+}
diff --git a/src/pkg/tools/handle_disk_layout.go b/src/pkg/tools/handle_disk_layout.go
new file mode 100644
index 0000000..92d7659
--- /dev/null
+++ b/src/pkg/tools/handle_disk_layout.go
@@ -0,0 +1,219 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil"
+)
+
+// HandleDiskLayout changes the partitions on a COS disk.
+// If auto-update is disabled, it will shrink sda3 to reclaim the space.
+// It also moves the stateful partition and the OEM partition (if extended) by
+// a distance relative to a start point.
+//
+// If sda3 is shrunk, the start point will be the start of sda3 + sda3Margin (2MB).
+// Otherwise, the start point will be the original start of the stateful partition.
+//
+// The stateful partition will be moved to leave enough space for the OEM partition,
+// and the OEM partition will be moved to the start point.
+// Finally the OEM partition will be resized to 1 sector before the new stateful partition.
+//
+// oemSize can be the number of sectors (without unit) or a size like "3G", "100M", "10000K" or "99999B".
+// If there's no need to extend the OEM partition, `oemSize` in the input will be "", a valid input.
+func HandleDiskLayout(disk string, statePartNum, oemPartNum int, oemSize string, reclaimSDA3 bool) error {
+	if len(disk) <= 0 || statePartNum <= 0 || oemPartNum <= 0 {
+		return fmt.Errorf("empty or non-positive input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q",
+			disk, statePartNum, oemPartNum, oemSize)
+	}
+
+	// An empty oemSize means "do not extend the OEM partition"; treat it as 0.
+	if len(oemSize) == 0 {
+		oemSize = "0"
+	}
+
+	// print the old partition table.
+	table, err := partutil.ReadPartitionTable(disk)
+	if err != nil {
+		return fmt.Errorf("cannot read old partition table of %q, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, reclaimSDA3=%t, "+
+			"error msg: (%v)", disk, disk, statePartNum, oemPartNum, oemSize, reclaimSDA3, err)
+	}
+	log.Printf("\nOld partition table:\n%s\n", table)
+
+	// read new size of OEM partition.
+	newOEMSizeBytes, err := partutil.ConvertSizeToBytes(oemSize)
+	if err != nil {
+		return fmt.Errorf("error in reading new OEM size, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, reclaimSDA3=%t, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, reclaimSDA3, err)
+	}
+
+	// read original size of OEM partition.
+	oldOEMSizeSector, err := partutil.ReadPartitionSize(disk, oemPartNum)
+	if err != nil {
+		return fmt.Errorf("error in reading old OEM size, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, reclaimSDA3=%t, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, reclaimSDA3, err)
+	}
+
+	oldOEMSizeBytes := oldOEMSizeSector << 9 // 512-byte sectors to bytes.
+	startPointSector, returnAndReboot, err := checkAndReclaimSDA3(disk, statePartNum, reclaimSDA3)
+	if err != nil {
+		return fmt.Errorf("error in reclaiming sda3, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, reclaimSDA3=%t, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, reclaimSDA3, err)
+	}
+	// need to reboot to reread disk partition table.
+	if returnAndReboot {
+		return nil
+	}
+
+	oemSmaller, err := checkNewOEMSizeSmaller(disk, statePartNum, reclaimSDA3, oldOEMSizeBytes,
+		newOEMSizeBytes, startPointSector)
+	if err != nil {
+		return fmt.Errorf("error in dealing with smaller OEM size, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, reclaimSDA3=%t, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, reclaimSDA3, err)
+	}
+	// No need to resize the OEM partition, everything else done.
+	if oemSmaller {
+		return nil
+	}
+
+	// leave enough space before the stateful partition for the OEM partition.
+	// and shrink the OEM partition to make the new start 4k aligned.
+	newStateStartSector := partutil.FindLast4KSector(startPointSector + (newOEMSizeBytes >> 9))
+
+	// Move the stateful partition.
+	if err := partutil.MovePartition(disk, statePartNum, strconv.FormatUint(newStateStartSector, 10)); err != nil {
+		return fmt.Errorf("error in moving stateful partition, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, reclaimSDA3=%t, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, reclaimSDA3, err)
+	}
+
+	// move OEM partition to the start point.
+	if err := partutil.MovePartition(disk, oemPartNum, strconv.FormatUint(startPointSector, 10)); err != nil {
+		return fmt.Errorf("error in moving OEM partition, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, reclaimSDA3=%t, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, reclaimSDA3, err)
+	}
+	// Only log the reclaim when sda3 was actually reclaimed; the previous
+	// unconditional log line was misleading when reclaimSDA3=false.
+	if reclaimSDA3 {
+		log.Println("Reclaimed /dev/sda3.")
+	}
+
+	// extend the OEM partition.
+	if err = partutil.ExtendPartition(disk, oemPartNum, newStateStartSector-1); err != nil {
+		return fmt.Errorf("error in extending OEM partition, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, reclaimSDA3=%t, "+
+			"error msg: (%v)", disk, statePartNum, oemPartNum, oemSize, reclaimSDA3, err)
+	}
+
+	// print the new partition table.
+	table, err = partutil.ReadPartitionTable(disk)
+	if err != nil {
+		return fmt.Errorf("cannot read new partition table of %q, "+
+			"input: disk=%q, statePartNum=%d, oemPartNum=%d, oemSize=%q, reclaimSDA3=%t, "+
+			"error msg: (%v)", disk, disk, statePartNum, oemPartNum, oemSize, reclaimSDA3, err)
+	}
+	log.Printf("\nCompleted extending OEM partition\n\n New partition table:\n%s\n", table)
+	return nil
+}
+
+// checkAndReclaimSDA3 checks whether sda3 needs to be reclaimed and
+// whether that has already been done.
+// If sda3 is shrunk, the start point will be the start of sda3 + sda3Margin (2MB).
+// Otherwise, the start point will be the original start of the stateful partition.
+// It returns (startPointSector, returnAndReboot, error); returnAndReboot=true
+// means sda3 was just minimized and the caller must reboot so the kernel
+// rereads the partition table before any further changes.
+func checkAndReclaimSDA3(disk string, statePartNum int, reclaimSDA3 bool) (uint64, bool, error) {
+	// In some situations, `sfdisk --move-data` requires 1MB free space
+	// in the moving direction. Therefore, leaving 2MB after the start of
+	// sda3 is a safe choice.
+	// Also, this will make sure the start point of the next partition 4K aligned.
+	const sda3Margin = 4096 // 2MB margin (4096 sectors * 512 bytes)
+	var startPointSector uint64
+	var err error
+	if reclaimSDA3 {
+		// check whether sda3 has already been shrunk.
+		minimal, err := partutil.IsPartitionMinimal("/dev/sda", 3)
+		if err != nil {
+			return 0, false, err
+		}
+		// not shrunk yet: minimize it now, then request a reboot.
+		if !minimal {
+			_, err = partutil.MinimizePartition("/dev/sda", 3)
+			if err != nil {
+				return 0, false, fmt.Errorf("error in shrinking sda3, "+
+					"error msg: (%v)", err)
+			}
+			log.Println("Shrinked /dev/sda3.")
+			// need to reboot to reload the partition table.
+			return 0, true, nil
+		}
+		// no need to shrink sda3 again; compute the start point from it.
+		sda3StartSector, err := partutil.ReadPartitionStart("/dev/sda", 3)
+		if err != nil {
+			return 0, false, fmt.Errorf("error in reading the start of sda3, "+
+				"error msg: (%v)", err)
+		}
+		startPointSector = sda3StartSector + sda3Margin // leave enough margin
+
+	} else {
+		// start point is the original start sector of the stateful partition.
+		startPointSector, err = partutil.ReadPartitionStart(disk, statePartNum)
+		if err != nil {
+			return 0, false, fmt.Errorf("error in reading the start of old stateful partition, "+
+				"error msg: (%v)", err)
+		}
+	}
+	return startPointSector, false, nil
+}
+
+// checkNewOEMSizeSmaller checks whether the new OEM size is smaller than
+// or equal to the old one. If so, there is nothing to extend: it only
+// moves the stateful partition to the start point when sda3 is being
+// reclaimed, and otherwise does nothing.
+// It returns (oemSmaller, error); oemSmaller=true tells the caller the
+// OEM partition does not need to be moved or resized.
+func checkNewOEMSizeSmaller(disk string, statePartNum int, reclaimSDA3 bool, oldOEMSizeBytes,
+	newOEMSizeBytes, startPointSector uint64) (bool, error) {
+	if newOEMSizeBytes <= oldOEMSizeBytes {
+		// newOEMSizeBytes == 0 means "no resize requested"; only warn when
+		// the caller explicitly asked for a size that is too small.
+		if newOEMSizeBytes != 0 {
+			log.Printf("\n!!!!!!!WARNING!!!!!!!\n"+
+				"oemSize: %d bytes is not larger than the original OEM partition size: %d bytes, "+
+				"nothing is done for the OEM partition.\n ", newOEMSizeBytes, oldOEMSizeBytes)
+		}
+		if !reclaimSDA3 {
+			return true, nil
+		}
+		// move the stateful partition to the start point.
+		if err := partutil.MovePartition(disk, statePartNum, strconv.FormatUint(startPointSector, 10)); err != nil {
+			return true, fmt.Errorf("error in moving stateful partition, "+
+				"input: disk=%q, statePartNum=%d,reclaimSDA3=%t,oldOEMSizeBytes=%d,newOEMSizeBytes=%d, "+
+				"startPointSector=%d, error msg: (%v)", disk, statePartNum, reclaimSDA3,
+				oldOEMSizeBytes, newOEMSizeBytes, startPointSector, err)
+		}
+		log.Println("Reclaimed /dev/sda3.")
+		// print the new partition table.
+		table, err := partutil.ReadPartitionTable(disk)
+		if err != nil {
+			return true, fmt.Errorf("cannot read new partition table of %q, "+
+				"input: disk=%q, statePartNum=%d,reclaimSDA3=%t,oldOEMSizeBytes=%d,newOEMSizeBytes=%d, "+
+				"startPointSector=%d, error msg: (%v)", disk, disk, statePartNum, reclaimSDA3,
+				oldOEMSizeBytes, newOEMSizeBytes, startPointSector, err)
+		}
+		log.Printf("New partition table:\n%s\n", table)
+		return true, nil
+	}
+	return false, nil
+}
diff --git a/src/pkg/tools/handle_disk_layout_test.go b/src/pkg/tools/handle_disk_layout_test.go
new file mode 100644
index 0000000..ae59ea3
--- /dev/null
+++ b/src/pkg/tools/handle_disk_layout_test.go
@@ -0,0 +1,263 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"os/exec"
+	"strconv"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil/partutiltest"
+)
+
+// A file in tools/partutil/testdata is used as the simulation of a disk.
+// When a test program starts, it will copy the file and work on it. Its size is 600K. It has four partitions as follows:
+// 1. partition 8, OEM partition, 100K
+// 2. partition 2, middle partition, 100K
+// 3. partition 1, stateful partition, 100K
+// 4. partition 12, small partition, 3.5K
+
+// TestHandleDiskLayoutFails verifies that invalid inputs (bad disk, bad
+// partition numbers, malformed or oversized sizes) all return an error.
+func TestHandleDiskLayoutFails(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_extend_oem_partition_fails", "partutil/", t, &testNames)
+
+	diskName := testNames.DiskName
+
+	testData := []struct {
+		testName     string
+		disk         string
+		statePartNum int
+		oemPartNum   int
+		size         string
+	}{
+		{
+			testName:     "InvalidDisk",
+			disk:         "./partutil/testdata/no_disk",
+			statePartNum: 1,
+			oemPartNum:   8,
+			size:         "200K",
+		}, {
+			testName:     "InvalidStatePartition",
+			disk:         diskName,
+			statePartNum: 100,
+			oemPartNum:   8,
+			size:         "200K",
+		}, {
+			testName:     "InvalidOEMPartition",
+			disk:         diskName,
+			statePartNum: 1,
+			oemPartNum:   800,
+			size:         "200K",
+		}, {
+			testName:     "InvalidSize1",
+			disk:         diskName,
+			statePartNum: 1,
+			oemPartNum:   8,
+			size:         "-200K",
+		}, {
+			testName:     "InvalidSize2",
+			disk:         diskName,
+			statePartNum: 1,
+			oemPartNum:   8,
+			size:         "200T",
+		}, {
+			testName:     "InvalidSize3",
+			disk:         diskName,
+			statePartNum: 1,
+			oemPartNum:   8,
+			size:         "A45M",
+		}, {
+			testName:     "InvalidSize4",
+			disk:         diskName,
+			statePartNum: 1,
+			oemPartNum:   8,
+			size:         "+200K",
+		}, {
+			testName:     "TooLarge",
+			disk:         diskName,
+			statePartNum: 1,
+			oemPartNum:   8,
+			size:         "800M",
+		}, {
+			testName:     "EmptyDiskName",
+			disk:         "",
+			statePartNum: 1,
+			oemPartNum:   8,
+			size:         "200K",
+		},
+	}
+
+	// Every case must make HandleDiskLayout return a non-nil error.
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			if err := HandleDiskLayout(input.disk, input.statePartNum, input.oemPartNum, input.size, false); err == nil {
+				t.Fatalf("error not found in test %s", input.testName)
+			}
+		})
+	}
+}
+
+// TestHandleDiskLayoutWarnings covers requested sizes that are not larger
+// than the current 100K OEM partition: HandleDiskLayout must succeed
+// (warning only, no resize performed).
+func TestHandleDiskLayoutWarnings(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_extend_oem_partition_warnings", "partutil/", t, &testNames)
+
+	diskName := testNames.DiskName
+
+	testData := []struct {
+		testName     string
+		disk         string
+		statePartNum int
+		oemPartNum   int
+		size         string
+	}{
+		{
+			"SmallerSize",
+			diskName,
+			1,
+			8,
+			"60K",
+		}, {
+			"SameSize",
+			diskName,
+			1,
+			8,
+			"100K",
+		},
+	}
+
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			if err := HandleDiskLayout(input.disk, input.statePartNum, input.oemPartNum, input.size, false); err != nil {
+				t.Fatalf("error in test %s, error msg: (%v)", input.testName, err)
+			}
+		})
+	}
+}
+
+// TestHandleDiskLayoutPasses extends the OEM partition to 200K and then
+// mounts each partition to verify file system size and data integrity.
+func TestHandleDiskLayoutPasses(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_extend_oem_partition_passes", "partutil/", t, &testNames)
+
+	diskName := testNames.DiskName
+
+	if err := HandleDiskLayout(diskName, 1, 8, "200K", false); err != nil {
+		t.Fatalf("error when extending OEM partition, error msg: (%v)", err)
+	}
+
+	if err := os.Mkdir("./mt", 0777); err != nil {
+		t.Fatalf("cannot create mount point, error msg: (%v)", err)
+	}
+	defer os.Remove("./mt")
+
+	// wantSize is a strict lower bound (in K) on the fs size reported by df.
+	testData := []struct {
+		partitionName string
+		wantContent   string
+		wantSize      int
+	}{
+		{
+			partitionName: diskName + "p8",
+			wantContent:   "This is partition 8 OEM partition",
+			wantSize:      180,
+		},
+		{
+			partitionName: diskName + "p1",
+			wantContent:   "This is partition 1 stateful partition",
+			wantSize:      80,
+		},
+		{
+			partitionName: diskName + "p2",
+			wantContent:   "This is partition 2 middle partition",
+			wantSize:      80,
+		},
+	}
+
+	// since need to mount at the same dir, tests need to be executed sequentially
+	for _, input := range testData {
+		mountAndCheck(input.partitionName, input.wantContent, t, input.wantSize)
+	}
+}
+
+// readSize reads partition fs size from df -h
+// a line looks like:
+// tmpfs           100K     0  100K   0% /dev/lxd
+// (Filesystem      Size  Used Avail Use% Mounted on)
+func readSize(out string) (int, error) {
+	pos := 0
+	res := -1
+	var err error
+	for pos < len(out) && out[pos] != 'K' {
+		pos++
+	}
+	if pos == len(out) {
+		return -1, errors.New("cannot find unit K")
+	}
+
+	if out[pos-3] != ' ' {
+		res, err = strconv.Atoi(out[pos-3 : pos])
+		if err != nil {
+			return -1, err
+		}
+	} else {
+		res, err = strconv.Atoi(out[pos-2 : pos])
+		if err != nil {
+			return -1, err
+		}
+	}
+
+	return res, nil
+}
+
+// mountAndCheck mounts a partition at ./mt, checks that its file system
+// size reported by df -h is strictly greater than size (in K), and checks
+// that the first line of the file "content" inside it equals wantLine.
+func mountAndCheck(partName, wantLine string, t *testing.T, size int) {
+	t.Helper()
+	if err := exec.Command("sudo", "mount", partName, "mt").Run(); err != nil {
+		t.Fatalf("error mounting %q, error msg: (%v)", partName, err)
+	}
+	defer exec.Command("sudo", "umount", "mt").Run()
+	cmdD := "df -h | grep mt"
+	out, err := exec.Command("bash", "-c", cmdD).Output()
+	if err != nil {
+		t.Fatalf("error reading df -h of %q, error msg: (%v)", partName, err)
+	}
+	if len(out) <= 0 {
+		t.Fatalf("cannot find partition %q", partName)
+	}
+	// Despite the name, oldSize is simply the measured fs size; it is
+	// compared against the lower bound `size`.
+	oldSize, err := readSize(string(out))
+	if err != nil {
+		t.Fatalf("cannot read fs size, the line in df -h: %q, error msg: (%v)", string(out), err)
+	}
+	if oldSize <= size {
+		t.Fatalf("wrong file system size of partition, fs info: %q, expected: %d", string(out), size)
+	}
+
+	// Verify the data survived the partition moves.
+	f, err := os.Open("mt/content")
+	if err != nil {
+		t.Fatalf("cannot open content file in %q, error msg: (%v)", partName, err)
+	}
+	defer f.Close()
+	rd := bufio.NewReader(f)
+	line, _, err := rd.ReadLine()
+	if err != nil {
+		t.Fatalf("cannot ReadLine in %q, error msg: (%v)", partName, err)
+	}
+	if string(line) != wantLine {
+		t.Fatalf("content in %q is corrupted, actual line: %q, wanted line: %q", partName, string(line), wantLine)
+	}
+}
diff --git a/src/pkg/tools/partutil/BUILD.bazel b/src/pkg/tools/partutil/BUILD.bazel
new file mode 100644
index 0000000..cd63161
--- /dev/null
+++ b/src/pkg/tools/partutil/BUILD.bazel
@@ -0,0 +1,41 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+# Library of low-level partition-manipulation helpers (sfdisk wrappers).
+go_library(
+    name = "partutil",
+    srcs = [
+        "extend_partition.go",
+        "grub_utils.go",
+        "handle_partition_table.go",
+        "helpers.go",
+        "move_partition.go",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil",
+    visibility = ["//visibility:public"],
+)
+
+# Tests operate on fake disk image files under testdata/.
+go_test(
+    name = "partutil_test",
+    srcs = [
+        "extend_partition_test.go",
+        "handle_partition_table_test.go",
+        "helpers_test.go",
+        "move_partition_test.go",
+    ],
+    data = glob(["testdata/**"]),
+    embed = [":partutil"],
+    deps = ["//src/pkg/tools/partutil/partutiltest"],
+)
diff --git a/src/pkg/tools/partutil/extend_partition.go b/src/pkg/tools/partutil/extend_partition.go
new file mode 100644
index 0000000..400dd52
--- /dev/null
+++ b/src/pkg/tools/partutil/extend_partition.go
@@ -0,0 +1,109 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutil
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+)
+
+// ExtendPartition extends a partition to a specific end sector (inclusive)
+// and then grows the ext file system inside it to fill the new size
+// (e2fsck -fp followed by resize2fs).
+func ExtendPartition(disk string, partNumInt int, end uint64) error {
+	if len(disk) <= 0 || partNumInt <= 0 || end <= 0 {
+		return fmt.Errorf("invalid disk name, partition number or end sector, "+
+			"input: disk=%q, partNumInt=%d, end sector=%d. ", disk, partNumInt, end)
+	}
+
+	// get partition number string
+	partNum, err := PartNumIntToString(disk, partNumInt)
+	if err != nil {
+		return fmt.Errorf("error in converting partition number, "+
+			"input: disk=%q, partNumInt=%d, end sector=%d, "+
+			"error msg: (%v)", disk, partNumInt, end, err)
+	}
+
+	partName := disk + partNum
+	var tableBuffer bytes.Buffer
+
+	// dump partition table.
+	table, err := ReadPartitionTable(disk)
+	if err != nil {
+		return fmt.Errorf("cannot read partition table of %q, "+
+			"input: disk=%q, partNumInt=%d, end sector=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, end, err)
+	}
+
+	var oldSize uint64 = 0
+	var newSize uint64 = 0
+
+	// edit partition table.
+	table, err = HandlePartitionTable(table, partName, true, func(p *PartContent) {
+		oldSize = p.Size
+		newSize = end - p.Start + 1
+		p.Size = newSize
+	})
+	if err != nil {
+		return fmt.Errorf("error when editing partition table of %q, "+
+			"input: disk=%q, partNumInt=%d, end sector=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, end, err)
+	}
+	if newSize <= oldSize {
+		// err is necessarily nil on this path (checked just above), so it
+		// is not formatted into the message; the original always printed
+		// "error msg: (<nil>)" here.
+		return fmt.Errorf("new size=%d is not larger than the old size=%d, "+
+			"input: disk=%q, partNumInt=%d, end sector=%d", newSize, oldSize, disk, partNumInt, end)
+	}
+
+	tableBuffer.WriteString(table)
+
+	// write partition table back.
+	writeTableCmd := exec.Command("sudo", "sfdisk", "--no-reread", disk)
+	writeTableCmd.Stdin = &tableBuffer
+	writeTableCmd.Stdout = os.Stdout
+	writeTableCmd.Stderr = os.Stderr
+	if err := writeTableCmd.Run(); err != nil {
+		return fmt.Errorf("error in writing partition table back to %q, "+
+			"input: disk=%q, partNumInt=%d, end sector=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, end, err)
+	}
+
+	log.Printf("\nCompleted extending %s\n\n", partName)
+
+	// check and repair file system in the partition.
+	cmd := exec.Command("sudo", "e2fsck", "-fp", partName)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("error in checking file system of %q, "+
+			"input: disk=%q, partNumInt=%d, end sector=%d, "+
+			"error msg: (%v)", partName, disk, partNumInt, end, err)
+	}
+	log.Printf("\nCompleted checking file system of %s\n\n", partName)
+
+	// resize file system in the partition.
+	cmd = exec.Command("sudo", "resize2fs", partName)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("error in resizing file system of %q, "+
+			"input: disk=%q, partNumInt=%d, end sector=%d, "+
+			"error msg: (%v)", partName, disk, partNumInt, end, err)
+	}
+
+	log.Printf("\nCompleted updating file system of %q\n\n", partName)
+	return nil
+}
diff --git a/src/pkg/tools/partutil/extend_partition_test.go b/src/pkg/tools/partutil/extend_partition_test.go
new file mode 100644
index 0000000..5ca8818
--- /dev/null
+++ b/src/pkg/tools/partutil/extend_partition_test.go
@@ -0,0 +1,150 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutil
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"strconv"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil/partutiltest"
+)
+
+// A file in tools/partutil/testdata is used as the simulation of a disk.
+// When a test program starts, it will copy the file and work on it. Its size is 600K. It has four partitions as follows:
+// 1. partition 8, OEM partition, 100K
+// 2. partition 2, middle partition, 100K
+// 3. partition 1, stateful partition, 100K
+// 4. partition 12, small partition, 3.5K
+
+// TestExtendPartitionFails verifies that invalid disks, partitions, and
+// end sectors all make ExtendPartition return an error.
+// NOTE(review): "SmallerSize" and "EmptyDiskName" use partNum 100; for
+// "SmallerSize" the failure likely comes from the nonexistent partition
+// rather than the smaller size — confirm intent.
+func TestExtendPartitionFails(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_extend_partition_fails", "", t, &testNames)
+
+	diskName := testNames.DiskName
+	testData := []struct {
+		testName string
+		disk     string
+		partNum  int
+		end      uint64
+	}{
+		{
+			testName: "SameSize",
+			disk:     diskName,
+			partNum:  1,
+			end:      633,
+		}, {
+			testName: "InvalidDisk",
+			disk:     "./testdata/no_disk",
+			partNum:  1,
+			end:      833,
+		}, {
+			testName: "InvalidPartition",
+			disk:     diskName,
+			partNum:  0,
+			end:      833,
+		}, {
+			testName: "NonexistPartition",
+			disk:     diskName,
+			partNum:  100,
+			end:      833,
+		}, {
+			testName: "SmallerSize",
+			disk:     diskName,
+			partNum:  100,
+			end:      500,
+		}, {
+			testName: "EmptyDiskName",
+			disk:     "",
+			partNum:  100,
+			end:      833,
+		}, {
+			testName: "TooLargeSize",
+			disk:     diskName,
+			partNum:  1,
+			end:      3000,
+		},
+	}
+
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			if err := ExtendPartition(input.disk, input.partNum, input.end); err == nil {
+				t.Fatalf("error not found in test %s", input.testName)
+			}
+		})
+	}
+}
+
+// TestExtendPartitionPasses extends partition 1 to end sector 833 and
+// verifies via df -h that the mounted file system grew beyond 180K.
+func TestExtendPartitionPasses(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_extend_partition_passes", "", t, &testNames)
+
+	diskName := testNames.DiskName
+
+	if err := ExtendPartition(diskName, 1, 833); err != nil {
+		t.Fatalf("error when extending partition 1 to 833, error msg: (%v)", err)
+	}
+
+	if err := os.Mkdir("./mt", 0777); err != nil {
+		t.Fatal("cannot create mount point ./mt")
+	}
+	defer os.Remove("./mt")
+
+	if err := exec.Command("sudo", "mount", diskName+"p1", "mt").Run(); err != nil {
+		t.Fatalf("error mounting disk file, partName: %q, error msg: (%v)", diskName+"p1", err)
+	}
+	defer exec.Command("sudo", "umount", "mt").Run()
+
+	cmdD := "df -h | grep mt"
+	out, err := exec.Command("bash", "-c", cmdD).Output()
+	if err != nil {
+		t.Fatalf("error reading df -h, error msg: (%v)", err)
+	}
+	size, err := readSize(string(out))
+	if err != nil {
+		t.Fatalf("cannot read fs size from df -h, "+
+			"df line: %q, error msg: (%v) ", string(out), err)
+	}
+	if size <= 180 {
+		t.Fatalf("wrong fs size of %q, "+
+			"actual size: %d, expected size: >180", diskName+"p1", size)
+	}
+}
+
+// readSize reads a file system size in K from a df -h output line,
+// looking for the first unit 'K' and parsing the run of digits
+// immediately before it. Scanning backwards handles any digit count
+// (the previous fixed out[pos-3] access rejected sizes with fewer than
+// 3 digits and could index before the start of the string).
+func readSize(out string) (int, error) {
+	pos := 0
+	for pos < len(out) && out[pos] != 'K' {
+		pos++
+	}
+	if pos == len(out) {
+		return 0, errors.New("cannot find unit K")
+	}
+	start := pos
+	for start > 0 && out[start-1] >= '0' && out[start-1] <= '9' {
+		start--
+	}
+	if start == pos {
+		return 0, errors.New("no digits found before unit K")
+	}
+	res, err := strconv.Atoi(out[start:pos])
+	if err != nil {
+		return 0, fmt.Errorf("cannot convert %q to int", out[start:pos])
+	}
+	return res, nil
+}
diff --git a/src/pkg/tools/partutil/grub_utils.go b/src/pkg/tools/partutil/grub_utils.go
new file mode 100644
index 0000000..5d95e24
--- /dev/null
+++ b/src/pkg/tools/partutil/grub_utils.go
@@ -0,0 +1,94 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutil
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"strings"
+)
+
+// MountEFIPartition mounts the EFI partition (/dev/sda12) on a freshly
+// created temporary directory and returns the expected path of grub.cfg
+// (<tempdir>/efi/boot/grub.cfg).
+// NOTE(review): the temp dir is not removed here; callers are expected
+// to unmount via UnmountEFIPartition — confirm cleanup responsibility.
+func MountEFIPartition() (string, error) {
+	// A unique temp dir serves as the mount point.
+	dir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return "", fmt.Errorf("error in creating tempDir, "+
+			"error msg: (%v)", err)
+	}
+	cmd := exec.Command("sudo", "mount", "/dev/sda12", dir)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return "", fmt.Errorf("error in mounting /dev/sda12 at %q, "+
+			"error msg: (%v)", dir, err)
+	}
+	return dir + "/efi/boot/grub.cfg", nil
+}
+
+// UnmountEFIPartition unmounts the EFI partition (/dev/sda12).
+func UnmountEFIPartition() error {
+	cmd := exec.Command("sudo", "umount", "/dev/sda12")
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		// The original concatenation produced "...sda12error msg: ...";
+		// a ", " separator is added between the two fragments.
+		return fmt.Errorf("error in unmounting /dev/sda12, "+
+			"error msg: (%v)", err)
+	}
+	return nil
+}
+
+// GRUBContains reports whether the GRUB config file at grubPath
+// contains the given command string.
+func GRUBContains(grubPath, cmd string) (bool, error) {
+	content, err := ioutil.ReadFile(grubPath)
+	if err != nil {
+		return false, fmt.Errorf("cannot read grub.cfg at %q, "+
+			"input: grubPath=%q, cmd=%q, error msg:(%v)", grubPath, grubPath, cmd, err)
+	}
+	// Return the containment check directly instead of branching.
+	return strings.Contains(string(content), cmd), nil
+}
+
+// AddCmdToGRUB inserts a command string immediately after every line's
+// first occurrence of `cros_efi` in the GRUB config at grubPath, then
+// writes the modified file back with mode 0755.
+func AddCmdToGRUB(grubPath, cmd string) error {
+	const appendPoint = "cros_efi"
+	// len of a constant string is itself a Go constant expression.
+	const appendOffset = len(appendPoint)
+	grubContent, err := ioutil.ReadFile(grubPath)
+	if err != nil {
+		return fmt.Errorf("cannot read grub.cfg at %q, "+
+			"input: grubPath=%q, cmd=%q, error msg:(%v)", grubPath, grubPath, cmd, err)
+	}
+	lines := strings.Split(string(grubContent), "\n")
+	for idx, line := range lines {
+		if !strings.Contains(line, appendPoint) {
+			continue
+		}
+		// Splice cmd right after "cros_efi", separated by single spaces.
+		startPos := strings.Index(line, appendPoint) + appendOffset
+		lines[idx] = fmt.Sprintf("%s %s %s", line[:startPos], cmd, line[startPos:])
+	}
+	// new content of grub.cfg
+	grubContent = []byte(strings.Join(lines, "\n"))
+	err = ioutil.WriteFile(grubPath, grubContent, 0755)
+	if err != nil {
+		return fmt.Errorf("cannot write to grub.cfg at %q, "+
+			"input: grubPath=%q, cmd=%q, error msg:(%v)", grubPath, grubPath, cmd, err)
+	}
+	return nil
+}
diff --git a/src/pkg/tools/partutil/handle_partition_table.go b/src/pkg/tools/partutil/handle_partition_table.go
new file mode 100644
index 0000000..594048e
--- /dev/null
+++ b/src/pkg/tools/partutil/handle_partition_table.go
@@ -0,0 +1,257 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutil
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"strconv"
+	"strings"
+)
+
+// minPartitionSizeSectors is the minimum partition size on COS in sectors. This
+// size was chosen to maintain 4K alignment of partition start sectors (8
+// sectors = 4K bytes).
+const minPartitionSizeSectors = uint64(8)
+
+// When we read disk information by dumping the partition table, we get output like the following:
+// sudo sfdisk --dump /dev/sdb
+// label: gpt
+// label-id: 8071096F-DA33-154D-A687-AE097B8252C5
+// device: /dev/sdb
+// unit: sectors
+// first-lba: 2048
+// last-lba: 20971486
+
+// /dev/sdb1 : start=     4401152, size=     2097152, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=3B41256B-E064-544A-9101-D2647C0B3A38
+// /dev/sdb2 : start=      206848, size=     4194304, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=60E55EA1-4EEA-9F44-A066-4720F0129089
+// /dev/sdb3 : start=     6498304, size=      204800, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=9479C34A-49A6-9442-A56F-956396DFAC20
+
+// PartContent contains the info of a partition as parsed out of an sfdisk
+// dump line (see example above); both fields are in 512-byte sectors.
+type PartContent struct {
+	// Start is the partition's first sector.
+	Start uint64
+	// Size is the partition's length in sectors.
+	Size  uint64
+}
+
+// HandlePartitionTable locates partName in an sfdisk-style partition table
+// dump and calls f with that partition's start sector and size (512-byte
+// sectors). If change is true, any modification f makes to the PartContent
+// is written back into the returned table text; otherwise the table is
+// returned unchanged. Returns an error when the partition is missing or
+// its numbers cannot be parsed.
+func HandlePartitionTable(table, partName string, change bool, f func(p *PartContent)) (string, error) {
+	foundPartition := false
+	lines := strings.Split(table, "\n")
+	for idx, line := range lines {
+		// a white space is needed to prevent cases like /dev/sda14 matches /dev/sda1
+		if !strings.HasPrefix(line, partName+" ") {
+			continue
+		}
+		foundPartition = true
+		// Line shape: "<part> : start= <n>, size= <n>, type=..., uuid=..."
+		content := strings.Split(line, ":")
+		partInfo := strings.Split(content[1], ",")
+		startSec := strings.Split(partInfo[0], "=")
+		sizeSec := strings.Split(partInfo[1], "=")
+		var p PartContent
+		var err error
+		p.Start, err = strconv.ParseUint(strings.TrimSpace(startSec[1]), 10, 64)
+		if err != nil {
+			return "", fmt.Errorf("cannot convert %q to int, "+
+				"partition info: %q, error msg: (%v)", strings.TrimSpace(startSec[1]), line, err)
+		}
+		p.Size, err = strconv.ParseUint(strings.TrimSpace(sizeSec[1]), 10, 64)
+		if err != nil {
+			// Bug fix: report the size field that failed to parse; the
+			// original printed the (already valid) start field here.
+			return "", fmt.Errorf("cannot convert %q to int, "+
+				"partition info: %q, error msg: (%v)", strings.TrimSpace(sizeSec[1]), line, err)
+		}
+		// run reading or changing function on the PartContent struct.
+		f(&p)
+
+		// need to rebuild the partition table.
+		if change {
+			startSec[1] = strconv.FormatUint(p.Start, 10)
+			partInfo[0] = strings.Join(startSec, "=")
+			sizeSec[1] = strconv.FormatUint(p.Size, 10)
+			partInfo[1] = strings.Join(sizeSec, "=")
+			content[1] = strings.Join(partInfo, ",")
+			lines[idx] = strings.Join(content, ":")
+		}
+		break
+	}
+	if !foundPartition {
+		return table, fmt.Errorf("cannot find the target partition %q, "+
+			"partition table: %s", partName, table)
+	}
+
+	if change {
+		table = strings.Join(lines, "\n")
+	}
+	return table, nil
+}
+
+// ReadPartitionTable returns the sfdisk dump of a disk's partition table
+// as text.
+func ReadPartitionTable(disk string) (string, error) {
+	dumpCmd := exec.Command("sudo", "sfdisk", "--dump", disk)
+	out, err := dumpCmd.Output()
+	if err != nil {
+		return "", fmt.Errorf("cannot dump partition table of %q, "+
+			"error msg: (%v)", disk, err)
+	}
+	return string(out), nil
+}
+
+// ReadPartitionSize returns the size of a partition in 512-byte sectors.
+func ReadPartitionSize(disk string, partNumInt int) (uint64, error) {
+	if len(disk) <= 0 || partNumInt <= 0 {
+		return 0, fmt.Errorf("invalid input: disk=%q, partNumInt=%d", disk, partNumInt)
+	}
+
+	// Build the partition device name, e.g. /dev/sda8 or /dev/loop5p1.
+	partNum, err := PartNumIntToString(disk, partNumInt)
+	if err != nil {
+		return 0, fmt.Errorf("error in converting partition number, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, partNumInt, err)
+	}
+
+	table, err := ReadPartitionTable(disk)
+	if err != nil {
+		return 0, fmt.Errorf("cannot read partition table of %q, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, err)
+	}
+
+	// Read-only pass over the table: capture the size, leave the table as-is.
+	var size uint64
+	readSize := func(p *PartContent) { size = p.Size }
+	if _, err := HandlePartitionTable(table, disk+partNum, false, readSize); err != nil {
+		return 0, fmt.Errorf("error parsing partition table of %q, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, err)
+	}
+	return size, nil
+}
+
+// ReadPartitionStart returns the start sector of a partition.
+func ReadPartitionStart(disk string, partNumInt int) (uint64, error) {
+	if len(disk) <= 0 || partNumInt <= 0 {
+		return 0, fmt.Errorf("invalid input: disk=%q, partNumInt=%d", disk, partNumInt)
+	}
+
+	// Build the partition device name, e.g. /dev/sda1 or /dev/loop5p1.
+	partNum, err := PartNumIntToString(disk, partNumInt)
+	if err != nil {
+		return 0, fmt.Errorf("error in converting partition number, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, partNumInt, err)
+	}
+
+	table, err := ReadPartitionTable(disk)
+	if err != nil {
+		return 0, fmt.Errorf("cannot read partition table of %q, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, err)
+	}
+
+	// Read-only pass over the table: capture the start sector only.
+	var start uint64
+	readStart := func(p *PartContent) { start = p.Start }
+	if _, err := HandlePartitionTable(table, disk+partNum, false, readStart); err != nil {
+		return 0, fmt.Errorf("error parsing partition table of %q, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, err)
+	}
+	return start, nil
+}
+
+// IsPartitionMinimal reports whether the partition's size is already at or
+// below minPartitionSizeSectors, i.e. the smallest size COS uses.
+func IsPartitionMinimal(disk string, partNumInt int) (bool, error) {
+	numSectors, err := ReadPartitionSize(disk, partNumInt)
+	if err != nil {
+		return false, err
+	}
+	return numSectors <= minPartitionSizeSectors, nil
+}
+
+// MinimizePartition shrinks the given partition to minPartitionSizeSectors
+// (8 sectors = 4K) by rewriting the sfdisk partition table, and returns
+// the sector just past the shrunken partition's end (start + minSize).
+// If the partition is already at or below the minimum size, the on-disk
+// table is left untouched and the same start + minSize value is returned.
+func MinimizePartition(disk string, partNumInt int) (uint64, error) {
+	minSize := minPartitionSizeSectors
+	if len(disk) == 0 || partNumInt <= 0 {
+		return 0, fmt.Errorf("empty disk name or nonpositive part number, "+
+			"input: disk=%q, partNumInt=%d", disk, partNumInt)
+	}
+
+	// get partition number string (a "p" separator is added when the disk
+	// name ends in a digit, e.g. /dev/loop5p1).
+	partNum, err := PartNumIntToString(disk, partNumInt)
+	if err != nil {
+		return 0, fmt.Errorf("error in converting partition number, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, partNumInt, err)
+	}
+
+	partName := disk + partNum
+
+	// tableBuffer later feeds the edited table text to sfdisk's stdin.
+	var tableBuffer bytes.Buffer
+	var oldSize uint64
+
+	// dump partition table.
+	table, err := ReadPartitionTable(disk)
+	if err != nil {
+		return 0, fmt.Errorf("cannot read partition table of %q, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, err)
+	}
+
+	var startSector uint64
+
+	// edit partition table in memory: record the old start/size and shrink
+	// the entry to minSize.
+	table, err = HandlePartitionTable(table, partName, true, func(p *PartContent) {
+		startSector = p.Start
+		oldSize = p.Size
+		p.Size = minSize
+	})
+	if err != nil {
+		return 0, fmt.Errorf("error when editing partition table of %q, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, err)
+	}
+
+	// Already minimal (or smaller): skip the write-back entirely.
+	if oldSize <= minSize {
+		log.Printf("warning: old partition size=%d is smaller than minSize=%d, "+
+			"nothing is done, "+
+			"return value is start sector + minSize (%d)", oldSize, minSize, minSize)
+		return startSector + minSize, nil
+	}
+
+	tableBuffer.WriteString(table)
+
+	// write partition table back.
+	writeTableCmd := exec.Command("sudo", "sfdisk", "--no-reread", disk)
+	writeTableCmd.Stdin = &tableBuffer
+	writeTableCmd.Stdout = os.Stdout
+	writeTableCmd.Stderr = os.Stderr
+	if err := writeTableCmd.Run(); err != nil {
+		return 0, fmt.Errorf("error in writing partition table back to %q, "+
+			"input: disk=%q, partNumInt=%d, "+
+			"error msg: (%v)", disk, disk, partNumInt, err)
+	}
+
+	log.Printf("\nCompleted minimizing %q\n\n", partName)
+	return startSector + minSize, nil
+}
diff --git a/src/pkg/tools/partutil/handle_partition_table_test.go b/src/pkg/tools/partutil/handle_partition_table_test.go
new file mode 100644
index 0000000..b670a18
--- /dev/null
+++ b/src/pkg/tools/partutil/handle_partition_table_test.go
@@ -0,0 +1,310 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutil
+
+import (
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil/partutiltest"
+)
+
+// A file in tools/partutil/testdata is used as the simulation of a disk.
+// Disk ori_disk: 600 KiB, 614400 bytes, 1200 sectors
+// Units: sectors of 1 * 512 = 512 bytes
+// Sector size (logical/physical): 512 bytes / 512 bytes
+// I/O size (minimum/optimal): 512 bytes / 512 bytes
+// Disklabel type: gpt
+// Disk identifier: 9CEB1C17-FCD7-8F4F-ADE7-097A2DB2F996
+
+// Device     Start   End Sectors  Size Type
+// ori_disk1    434   633     200  100K Linux filesystem
+// ori_disk2    234   433     200  100K Linux filesystem
+// ori_disk8     34   233     200  100K Linux filesystem
+// ori_disk12  1100  1106       7  3.5K Linux filesystem
+
+// Partition table entries are not in disk order.
+
+func TestHandlePartitionTableFails(t *testing.T) {
+
+	testData := struct {
+		testName string
+		table    string
+		partName string
+	}{
+
+		testName: "NoPartitionFound",
+		table:    "abc",
+		partName: "sda1",
+	}
+
+	if _, err := HandlePartitionTable(testData.table, testData.partName, false, func(p *PartContent) {}); err == nil {
+		t.Fatalf("error not found in %s", testData.testName)
+	}
+}
+
+func TestHandlePartitionTablePasses(t *testing.T) {
+
+	testData := struct {
+		testName string
+		table    string
+		partName string
+		start    uint64
+		size     uint64
+		want     string
+	}{
+
+		testName: "ValidChange",
+		table:    "/dev/sdb11 : start=     4401152, size=     2097152, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=3B41256B-E064-544A-9101-D2647C0B3A38\n/dev/sdb1 : start=     6498304, size=      204800, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=9479C34A-49A6-9442-A56F-956396DFAC20\n",
+		partName: "/dev/sdb1",
+		start:    5001,
+		size:     4096,
+		want:     "/dev/sdb11 : start=     4401152, size=     2097152, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=3B41256B-E064-544A-9101-D2647C0B3A38\n/dev/sdb1 : start=5001, size=4096, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=9479C34A-49A6-9442-A56F-956396DFAC20\n",
+	}
+
+	res, err := HandlePartitionTable(testData.table, testData.partName, true, func(p *PartContent) {
+		p.Start = testData.start
+		p.Size = testData.size
+	})
+	if err != nil {
+		t.Fatalf("error found in %s, error msg: (%v)", testData.testName, err)
+	}
+	if res != testData.want {
+		t.Fatalf("wrong result in %q, res: %q, expected: %q", testData.testName, res, testData.want)
+
+	}
+
+}
+
+// TestReadPartitionSizeFails checks that ReadPartitionSize rejects invalid
+// disks and partition numbers.
+func TestReadPartitionSizeFails(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_read_partition_size_fails", "", t, &testNames)
+
+	disk := testNames.DiskName
+
+	cases := []struct {
+		testName string
+		disk     string
+		partNum  int
+	}{
+		{testName: "InvalidDisk", disk: "./testdata/no_disk", partNum: 8},
+		{testName: "InvalidPartition", disk: disk, partNum: 0},
+		{testName: "NonexistPartition", disk: disk, partNum: 100},
+		{testName: "EmptyDiskName", disk: "", partNum: 1},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.testName, func(t *testing.T) {
+			if _, err := ReadPartitionSize(tc.disk, tc.partNum); err == nil {
+				t.Fatalf("error not found in test %s", tc.testName)
+			}
+		})
+	}
+}
+
+// TestReadPartitionSizePasses reads the size of partition 8 on the fake
+// disk and compares it to the known value (200 sectors).
+func TestReadPartitionSizePasses(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_read_partition_size_passes", "", t, &testNames)
+
+	diskName := testNames.DiskName
+
+	input := struct {
+		testName string
+		disk     string
+		partNum  int
+		want     uint64
+	}{
+		testName: "200KPart",
+		disk:     diskName,
+		partNum:  8,
+		want:     200,
+	}
+
+	res, err := ReadPartitionSize(input.disk, input.partNum)
+	if err != nil {
+		// Bug fix: this branch was empty, silently swallowing the error and
+		// falling through to the value comparison.
+		t.Fatalf("error in test %s, error msg: (%v)", input.testName, err)
+	}
+	if res != input.want {
+		t.Fatalf("wrong result: %q partition %d at %d, exp: %d", input.disk, input.partNum, res, input.want)
+	}
+}
+
+// TestReadPartitionStartFails checks that ReadPartitionStart rejects
+// invalid disks and partition numbers.
+func TestReadPartitionStartFails(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_read_partition_start_fails", "", t, &testNames)
+
+	disk := testNames.DiskName
+
+	cases := []struct {
+		testName string
+		disk     string
+		partNum  int
+	}{
+		{testName: "InvalidDisk", disk: "./testdata/no_disk", partNum: 8},
+		{testName: "InvalidPartition", disk: disk, partNum: 0},
+		{testName: "NonexistPartition", disk: disk, partNum: 1000},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.testName, func(t *testing.T) {
+			if _, err := ReadPartitionStart(tc.disk, tc.partNum); err == nil {
+				t.Fatalf("error not found in test %s", tc.testName)
+			}
+		})
+	}
+}
+
+// TestReadPartitionStartPasses verifies the start sector of partition 1 on
+// the fake disk, which is known to begin at sector 434.
+func TestReadPartitionStartPasses(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_read_partition_start_passes", "", t, &testNames)
+
+	const (
+		testName = "PartStartAt434"
+		partNum  = 1
+	)
+	var want uint64 = 434
+	disk := testNames.DiskName
+
+	start, err := ReadPartitionStart(disk, partNum)
+	if err != nil {
+		t.Fatalf("error in test %s, error msg: (%v)", testName, err)
+	}
+	if start != want {
+		t.Fatalf("wrong result in test %s, start: %d, expected: %d", testName, start, want)
+	}
+}
+
+// TestMinimizePartitionFails checks that MinimizePartition rejects invalid
+// disks and partition numbers.
+func TestMinimizePartitionFails(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_minimize_partition_fails", "", t, &testNames)
+
+	disk := testNames.DiskName
+	cases := []struct {
+		testName string
+		disk     string
+		partNum  int
+	}{
+		{testName: "InvalidDisk", disk: "./testdata/no_disk", partNum: 1},
+		{testName: "InvalidPartition", disk: disk, partNum: 0},
+		{testName: "NonexistPartition", disk: disk, partNum: 100},
+		{testName: "EmptyDiskName", disk: "", partNum: 100},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.testName, func(t *testing.T) {
+			if _, err := MinimizePartition(tc.disk, tc.partNum); err == nil {
+				t.Fatalf("error not found in test %s", tc.testName)
+			}
+		})
+	}
+}
+
+// TestMinimizePartitionPasses shrinks two partitions on the fake disk and
+// verifies the returned next-free sector and the resulting partition size.
+func TestMinimizePartitionPasses(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	// Bug fix: this test previously reused the fake-disk name of
+	// TestMinimizePartitionFails ("..._fails"), risking a collision between
+	// the two tests; give it its own name.
+	partutiltest.SetupFakeDisk("tmp_disk_minimize_partition_passes", "", t, &testNames)
+
+	diskName := testNames.DiskName
+	testData := []struct {
+		testName string
+		disk     string
+		partNum  int
+		want     uint64
+		wantSize uint64
+	}{
+		{
+			testName: "200KPart",
+			disk:     diskName,
+			partNum:  8,
+			want:     42,
+			wantSize: 8,
+		}, {
+			testName: "SmallPart",
+			disk:     diskName,
+			partNum:  12,
+			want:     1108,
+			wantSize: 7,
+		},
+	}
+
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			res, err := MinimizePartition(input.disk, input.partNum)
+			if err != nil {
+				t.Fatalf("error in test %s, error msg: (%v)", input.testName, err)
+			}
+			if res != input.want {
+				// Bug fix: %d (not %q) for uint64 values; %q on an integer
+				// prints "%!q(uint64=...)".
+				t.Fatalf("wrong result in %q, res: %d, expected: %d", input.testName, res, input.want)
+			}
+
+			size, err := ReadPartitionSize(input.disk, input.partNum)
+			if err != nil {
+				t.Fatalf("error in test %s, cannot read partition size, error msg: (%v)", input.testName, err)
+			}
+			if size != input.wantSize {
+				t.Fatalf("wrong result in %q, size: %d, expected size: %d", input.testName, size, input.wantSize)
+			}
+		})
+	}
+}
diff --git a/src/pkg/tools/partutil/helpers.go b/src/pkg/tools/partutil/helpers.go
new file mode 100644
index 0000000..7b644ef
--- /dev/null
+++ b/src/pkg/tools/partutil/helpers.go
@@ -0,0 +1,148 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutil
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"strconv"
+	"strings"
+)
+
+// ConvertSizeToBytes converts a size string to an integer number of bytes.
+// Accepted forms: a bare number (interpreted as 512-byte sectors) or a
+// number suffixed with B, K, M, or G.
+func ConvertSizeToBytes(size string) (uint64, error) {
+	const sectorBytes = 512
+
+	if len(size) == 0 {
+		return 0, errors.New("invalid oemSize: empty string")
+	}
+	if size[0] < '0' || size[0] > '9' {
+		return 0, fmt.Errorf("invalid oemSize, the first char should be digit, "+
+			"input size: %q", size)
+	}
+
+	last := size[len(size)-1]
+	if last >= '0' && last <= '9' {
+		// No unit suffix: the whole string is a sector count.
+		n, err := strconv.ParseUint(size, 10, 64)
+		if err != nil {
+			return 0, fmt.Errorf("cannot convert %q to int", size)
+		}
+		return n * sectorBytes, nil
+	}
+
+	n, err := strconv.ParseUint(size[0:len(size)-1], 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("cannot convert %q in input: %q to int", string(size[0:len(size)-1]), size)
+	}
+	var unit uint64
+	switch last {
+	case 'B':
+		unit = 1
+	case 'K':
+		unit = 1024
+	case 'M':
+		unit = 1024 * 1024
+	case 'G':
+		unit = 1024 * 1024 * 1024
+	default:
+		return 0, fmt.Errorf("wrong format for oemSize, input: %q, "+
+			"expecting input like 10G, 200M, 600K, 5000B or 1024", size)
+	}
+	return n * unit, nil
+}
+
+// ConvertSizeToGBRoundUp converts input size to GB unit.
+// Rounded up, since extend disk can only take GB unit.
+// Used by Daisy workflow to resize the disk.
+func ConvertSizeToGBRoundUp(size string) (uint64, error) {
+	sizeByte, err := ConvertSizeToBytes(size)
+	if err != nil {
+		return 0, err
+	}
+	sizeGB := sizeByte >> 30
+	if (sizeGB << 30) != sizeByte {
+		sizeGB++
+	}
+	return sizeGB, nil
+}
+
+// PartNumIntToString renders a partition number as the suffix appended to
+// a disk name. Disks whose name ends in a digit (e.g. /dev/loop5) need a
+// "p" separator, giving names like /dev/loop5p1.
+func PartNumIntToString(disk string, partNumInt int) (string, error) {
+	if disk == "" {
+		return "", errors.New("empty disk name")
+	}
+	suffix := strconv.Itoa(partNumInt)
+	lastChar := disk[len(disk)-1]
+	if lastChar >= '0' && lastChar <= '9' {
+		suffix = "p" + suffix
+	}
+	return suffix, nil
+}
+
+// GetPartUUID looks up the PARTUUID of a partition by scanning the output
+// of blkid for the line belonging to partName.
+func GetPartUUID(partName string) (string, error) {
+	var out bytes.Buffer
+	blkidCmd := exec.Command("sudo", "blkid")
+	blkidCmd.Stdout = &out
+	blkidCmd.Stderr = os.Stderr
+	if err := blkidCmd.Run(); err != nil {
+		return "", fmt.Errorf("error in running blkid, "+
+			"std output:%s, error msg: (%v)", out.String(), err)
+	}
+	// Each blkid line looks like:
+	// /dev/sda8: LABEL="OEM" UUID="..." TYPE="ext4" PARTLABEL="OEM" PARTUUID="9db2ae75-..."
+	// Some entries (e.g. /dev/dm-0) carry no PARTUUID at all.
+	linePrefix := partName + ":"
+	for _, line := range strings.Split(out.String(), "\n") {
+		if !strings.HasPrefix(line, linePrefix) {
+			continue
+		}
+		for _, field := range strings.Fields(line) {
+			if strings.HasPrefix(field, "PARTUUID") {
+				// Strip the surrounding double quotes from the value.
+				return strings.Trim(strings.Split(field, "=")[1], "\""), nil
+			}
+		}
+	}
+	return "", fmt.Errorf("partition UUID not found, input: partName=%q ,"+
+		"output of \"blkid\": %s", partName, out.String())
+}
+
+// FindLast4KSector returns the last 4K bytes aligned sector from start.
+// If input is a 4K aligned sector, return itself.
+func FindLast4KSector(start uint64) uint64 {
+	var mask uint64 = 7
+	return start & (^mask)
+}
diff --git a/src/pkg/tools/partutil/helpers_test.go b/src/pkg/tools/partutil/helpers_test.go
new file mode 100644
index 0000000..a9fdafd
--- /dev/null
+++ b/src/pkg/tools/partutil/helpers_test.go
@@ -0,0 +1,250 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutil
+
+import (
+	"testing"
+)
+
+// TestConvertSizeToBytesFails feeds malformed size strings to
+// ConvertSizeToBytes and expects an error for each.
+func TestConvertSizeToBytesFails(t *testing.T) {
+	cases := []struct {
+		testName string
+		input    string
+	}{
+		{testName: "InvalidSuffix", input: "10T"},
+		{testName: "InvalidNumber", input: "56AXM"},
+		{testName: "EmptyString", input: ""},
+		{testName: "IntOverflow", input: "654654654654654654654654654654654654654654654654321321654654654"},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.testName, func(t *testing.T) {
+			if _, err := ConvertSizeToBytes(tc.input); err == nil {
+				t.Fatalf("error not found in test %s", tc.testName)
+			}
+		})
+	}
+}
+
+// TestConvertSizeToBytesPasses checks unit-suffix handling (bare sectors,
+// B, K, M, G) against precomputed byte counts.
+func TestConvertSizeToBytesPasses(t *testing.T) {
+	testData := []struct {
+		testName string
+		input    string
+		want     uint64
+	}{
+		{
+			testName: "ValidInputSector",
+			input:    "4194304",
+			want:     2147483648,
+		}, {
+			testName: "ValidInputB",
+			input:    "4194304B",
+			want:     4194304,
+		}, {
+			testName: "ValidInputK",
+			input:    "500K",
+			want:     512000,
+		}, {
+			testName: "ValidInputM",
+			input:    "456M",
+			want:     478150656,
+		}, {
+			testName: "ValidInputG",
+			input:    "321G",
+			want:     344671125504,
+		}, {
+			testName: "Zero",
+			input:    "0",
+			want:     0,
+		},
+	}
+
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			res, err := ConvertSizeToBytes(input.input)
+			if err != nil {
+				// Bug fix: "errorin" -> "error in" in the failure message.
+				t.Fatalf("error in test %s, error msg: (%v)", input.testName, err)
+			}
+			if res != input.want {
+				t.Fatalf("wrong result: %q to %d, expect: %d", input.input, res, input.want)
+			}
+		})
+	}
+}
+
+// TestPartNumIntToStringFails checks that an empty disk name is rejected.
+func TestPartNumIntToStringFails(t *testing.T) {
+	if _, err := PartNumIntToString("", 1); err == nil {
+		t.Fatal("error not found in test EmptyDiskName")
+	}
+}
+
+// TestPartNumIntToStringPasses checks the partition-number suffix for both
+// letter-ending and digit-ending disk names.
+func TestPartNumIntToStringPasses(t *testing.T) {
+	cases := []struct {
+		testName   string
+		diskName   string
+		partNumInt int
+		want       string
+	}{
+		{testName: "LetterEndDisk", diskName: "/dev/sda", partNumInt: 1, want: "1"},
+		{testName: "NumberEndDisk", diskName: "/dev/loop5", partNumInt: 1, want: "p1"},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.testName, func(t *testing.T) {
+			got, err := PartNumIntToString(tc.diskName, tc.partNumInt)
+			if err != nil {
+				t.Fatalf("error in test %s, error msg: (%v)", tc.testName, err)
+			}
+			if got != tc.want {
+				t.Fatalf("error in test %s, wrong result: %q, expected: %q", tc.testName, got, tc.want)
+			}
+		})
+	}
+}
+
+// TestConvertSizeToGBRoundUpFails feeds malformed size strings to
+// ConvertSizeToGBRoundUp and expects an error for each.
+func TestConvertSizeToGBRoundUpFails(t *testing.T) {
+	cases := []struct {
+		testName string
+		input    string
+	}{
+		{testName: "InvalidSuffix", input: "10T"},
+		{testName: "InvalidNumber", input: "56AXM"},
+		{testName: "EmptyString", input: ""},
+		{testName: "IntOverflow", input: "654654654654654654654654654654654654654654654654321321654654654"},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.testName, func(t *testing.T) {
+			if _, err := ConvertSizeToGBRoundUp(tc.input); err == nil {
+				t.Fatalf("error not found in test %s", tc.testName)
+			}
+		})
+	}
+}
+
+// TestConvertSizeToGBRoundUpPasses checks round-up-to-GB conversion for
+// each accepted unit suffix, including values that round up.
+func TestConvertSizeToGBRoundUpPasses(t *testing.T) {
+	testData := []struct {
+		testName string
+		input    string
+		want     uint64
+	}{
+		{
+			testName: "ValidInputSector",
+			input:    "4194304",
+			want:     2,
+		}, {
+			testName: "ValidInputB",
+			input:    "4194304B",
+			want:     1,
+		}, {
+			testName: "ValidInputK",
+			input:    "500K",
+			want:     1,
+		}, {
+			testName: "ValidInputM",
+			input:    "456M",
+			want:     1,
+		}, {
+			testName: "ValidInputG",
+			input:    "321G",
+			want:     321,
+		},
+		{
+			testName: "ValidInputM2",
+			input:    "2096M",
+			want:     3,
+		},
+	}
+
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			res, err := ConvertSizeToGBRoundUp(input.input)
+			if err != nil {
+				// Bug fix: "errorin" -> "error in" in the failure message.
+				t.Fatalf("error in test %s, error msg: (%v)", input.testName, err)
+			}
+			if res != input.want {
+				t.Fatalf("wrong result: %q to %d, expect: %d", input.input, res, input.want)
+			}
+		})
+	}
+}
+
+// TestFindLast4KSectorPasses checks 4K (8-sector) round-down behavior,
+// including already-aligned input.
+func TestFindLast4KSectorPasses(t *testing.T) {
+	testData := []struct {
+		testName string
+		input    uint64
+		want     uint64
+	}{
+		{
+			testName: "ToZero",
+			input:    7,
+			want:     0,
+		}, {
+			testName: "SmallNum",
+			input:    14,
+			want:     8,
+		}, {
+			testName: "LargeNum",
+			input:    987654316,
+			want:     987654312,
+		}, {
+			testName: "Self",
+			input:    256,
+			want:     256,
+		},
+	}
+
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			res := FindLast4KSector(input.input)
+			if res != input.want {
+				// Bug fix: %d (not %q) for the uint64 input; %q on an
+				// integer prints "%!q(uint64=...)".
+				t.Fatalf("wrong result: %d to %d, expect: %d", input.input, res, input.want)
+			}
+		})
+	}
+}
+
+// cannot test GetPartUUID() because test disk file "ori_disk" only has UUID, but not PARTUUID
diff --git a/src/pkg/tools/partutil/move_partition.go b/src/pkg/tools/partutil/move_partition.go
new file mode 100644
index 0000000..72f28c5
--- /dev/null
+++ b/src/pkg/tools/partutil/move_partition.go
@@ -0,0 +1,46 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutil
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"strconv"
+)
+
+// MovePartition moves a partition to a start sector.
+// It takes destination input like 2048 (absolute sector number), +5G or -200M.
+func MovePartition(disk string, partNumInt int, dest string) error {
+	if len(disk) <= 0 || partNumInt <= 0 || len(dest) <= 0 {
+		return fmt.Errorf("invalid input: disk=%q, partNumInt=%d, dest=%q", disk, partNumInt, dest)
+	}
+
+	var destBuffer bytes.Buffer
+	destBuffer.WriteString(dest)
+	cmd := exec.Command("sudo", "sfdisk", "--no-reread", "--move-data=/dev/null", disk, "-N", strconv.Itoa(partNumInt))
+	cmd.Stdin = &destBuffer
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("error in executing sfdisk --move-data, "+
+			"input: disk=%q, partNumInt=%d, dest=%q, "+
+			"error msg: (%v)", disk, partNumInt, dest, err)
+	}
+	log.Printf("\nCompleted moving %s partition %d \n\n", disk, partNumInt)
+	return nil
+}
diff --git a/src/pkg/tools/partutil/move_partition_test.go b/src/pkg/tools/partutil/move_partition_test.go
new file mode 100644
index 0000000..1b36b7f
--- /dev/null
+++ b/src/pkg/tools/partutil/move_partition_test.go
@@ -0,0 +1,150 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutil
+
+import (
+	"testing"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil/partutiltest"
+)
+
+// A file in tools/partutil/testdata is used as the simulation of a disk.
+// When a test program starts, it will copy the file and work on it. Its size is 600K. It has four partitions as follows:
+// 1. partition 8, OEM partition, 100K
+// 2. partition 2, middle partition, 100K
+// 3. partition 1, stateful partition, 100K
+// 4. partition 12, small partition, 3.5K
+
+func TestMovePartitionFails(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_move_partition_fails", "", t, &testNames)
+
+	diskName := testNames.DiskName
+
+	testData := []struct {
+		testName string
+		disk     string
+		partNum  int
+		dest     string
+	}{
+		{
+			testName: "MoveByDistanceTooSmall",
+			disk:     diskName,
+			partNum:  1,
+			dest:     "-500K",
+		}, {
+			testName: "MoveByDistanceTooLarge",
+			disk:     diskName,
+			partNum:  1,
+			dest:     "+500K",
+		}, {
+			testName: "InvalidDisk",
+			disk:     "./testdata/no_disk",
+			partNum:  8,
+			dest:     "+100K",
+		}, {
+			testName: "InvalidPartition",
+			disk:     diskName,
+			partNum:  0,
+			dest:     "+100K",
+		}, {
+			testName: "NonexistPartition",
+			disk:     diskName,
+			partNum:  100,
+			dest:     "+100K",
+		}, {
+			testName: "MoveToInvalidPosSmall",
+			disk:     diskName,
+			partNum:  1,
+			dest:     "0",
+		}, {
+			testName: "MoveToInvalidPosLarge",
+			disk:     diskName,
+			partNum:  1,
+			dest:     "5000",
+		}, {
+			testName: "MoveCollision",
+			disk:     diskName,
+			partNum:  8,
+			dest:     "300",
+		}, {
+			testName: "EmptyDiskName",
+			disk:     "",
+			partNum:  1,
+			dest:     "+100K",
+		},
+	}
+
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			err := MovePartition(input.disk, input.partNum, input.dest)
+			if err == nil {
+				t.Fatalf("error not found in test %s", input.testName)
+			}
+		})
+	}
+}
+
+func TestMovePartitionPasses(t *testing.T) {
+	var testNames partutiltest.TestNames
+	t.Cleanup(func() { partutiltest.TearDown(&testNames) })
+	partutiltest.SetupFakeDisk("tmp_disk_move_partition_passes", "", t, &testNames)
+
+	diskName := testNames.DiskName
+
+	if err := MovePartition(diskName, 1, "+150K"); err != nil {
+		t.Fatalf("error in test MovePartitionByDistancePos, error msg: (%v)", err)
+	}
+
+	testData := []struct {
+		testName string
+		disk     string
+		partNum  int
+		dest     string
+		want     uint64
+	}{{
+		testName: "MovePartitionByDistanceNeg",
+		disk:     diskName,
+		partNum:  1,
+		dest:     "-40K",
+		want:     654,
+	}, {
+		testName: "MovePartitionToPosition",
+		disk:     diskName,
+		partNum:  8,
+		dest:     "434",
+		want:     434,
+	},
+	}
+
+	for _, input := range testData {
+		t.Run(input.testName, func(t *testing.T) {
+			err := MovePartition(input.disk, input.partNum, input.dest)
+			if err != nil {
+				t.Fatalf("error in test %s, error msg: (%v)", input.testName, err)
+			}
+			pos, err := ReadPartitionStart(input.disk, input.partNum)
+			if err != nil {
+				t.Fatalf("cannot read partition start of %q partition %d "+
+					"error msg: (%v)", input.disk, input.partNum, err)
+			}
+			if pos != input.want {
+				t.Fatalf("error result in test %s, pos: %d, expected: %d",
+					input.testName, pos, input.want)
+			}
+		})
+	}
+}
diff --git a/src/pkg/tools/partutil/partutiltest/BUILD.bazel b/src/pkg/tools/partutil/partutiltest/BUILD.bazel
new file mode 100644
index 0000000..7af3cc0
--- /dev/null
+++ b/src/pkg/tools/partutil/partutiltest/BUILD.bazel
@@ -0,0 +1,22 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "partutiltest",
+    srcs = ["set_test_env.go"],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil/partutiltest",
+    visibility = ["//visibility:public"],
+)
diff --git a/src/pkg/tools/partutil/partutiltest/set_test_env.go b/src/pkg/tools/partutil/partutiltest/set_test_env.go
new file mode 100644
index 0000000..0814dea
--- /dev/null
+++ b/src/pkg/tools/partutil/partutiltest/set_test_env.go
@@ -0,0 +1,75 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partutiltest
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"testing"
+)
+
+// TestNames is used for testing environment setup and teardown.
+type TestNames struct {
+	CopyFile string
+	DiskName string
+}
+
+// SetupFakeDisk copies a file to simulate the disk; tests work on the copy.
+func SetupFakeDisk(copyName, srcPrefix string, t *testing.T, testNames *TestNames) {
+	src, err := os.Open(fmt.Sprintf("./%stestdata/ori_disk", srcPrefix))
+	if err != nil {
+		t.Fatalf("setting up fake disk, cannot open test disk file: ori_disk, "+
+			"input: copyName=%q, srcPrefix=%q, "+
+			"error msg: (%v)", copyName, srcPrefix, err)
+	}
+	defer src.Close()
+
+	copyFile := fmt.Sprintf("./%stestdata/%s", srcPrefix, copyName)
+	testNames.CopyFile = copyFile
+	dest, err := os.Create(copyFile)
+	if err != nil {
+		t.Fatalf("setting up fake disk, cannot create tmp disk file, "+
+			"input: copyName=%q, srcPrefix=%q, "+
+			"error msg: (%v)", copyName, srcPrefix, err)
+	}
+
+	if _, err := io.Copy(dest, src); err != nil {
+		t.Fatalf("setting up fake disk, cannot copy tmp disk file, "+
+			"input: copyName=%q, srcPrefix=%q, "+
+			"error msg: (%v)", copyName, srcPrefix, err)
+	}
+	dest.Close()
+
+	out, err := exec.Command("sudo", "losetup", "-fP", "--show", copyFile).Output()
+	if err != nil {
+		t.Fatalf("setting up fake disk, cannot losetup fake disk file, "+
+			"input: copyName=%q, srcPrefix=%q, "+
+			"error msg: (%v)", copyName, srcPrefix, err)
+	}
+	diskName := string(out)
+	testNames.DiskName = diskName[:len(diskName)-1]
+}
+
+// TearDown deletes the loop device and the copied file for the testing environment.
+func TearDown(testNames *TestNames) {
+	if testNames.DiskName != "" {
+		exec.Command("sudo", "losetup", "-d", testNames.DiskName).Run()
+	}
+	if testNames.CopyFile != "" {
+		os.Remove(testNames.CopyFile)
+	}
+}
diff --git a/src/pkg/tools/partutil/testdata/ori_disk b/src/pkg/tools/partutil/testdata/ori_disk
new file mode 100644
index 0000000..cc593db
--- /dev/null
+++ b/src/pkg/tools/partutil/testdata/ori_disk
Binary files differ
diff --git a/src/pkg/tools/seal_oem_partition.go b/src/pkg/tools/seal_oem_partition.go
new file mode 100644
index 0000000..047d719
--- /dev/null
+++ b/src/pkg/tools/seal_oem_partition.go
@@ -0,0 +1,215 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"github.com/GoogleCloudPlatform/cos-customizer/src/pkg/tools/partutil"
+)
+
+// SealOEMPartition sets the hashtree of the OEM partition
+// with "veritysetup" and modifies the kernel command line to
+// verify the OEM partition at boot time.
+func SealOEMPartition(veritysetupImgPath string, oemFSSize4K uint64) error {
+	const devName = "oemroot"
+	imageID, err := loadVeritysetupImage(veritysetupImgPath)
+	if err != nil {
+		return fmt.Errorf("cannot load veritysetup image at %q, error msg:(%v)", veritysetupImgPath, err)
+	}
+	log.Println("docker image for veritysetup loaded.")
+	if err := unmountOEMPartition(); err != nil {
+		return fmt.Errorf("cannot umount OEM partition, error msg:(%v)", err)
+	}
+	log.Println("OEM partition unmounted.")
+	hash, salt, err := veritysetup(imageID, oemFSSize4K)
+	if err != nil {
+		return fmt.Errorf("cannot run veritysetup, input:oemFSSize4K=%d, "+
+			"error msg:(%v)", oemFSSize4K, err)
+	}
+	grubPath, err := partutil.MountEFIPartition()
+	log.Println("EFI partition mounted.")
+	if err != nil {
+		return fmt.Errorf("cannot mount EFI partition (/dev/sda12), error msg:(%v)", err)
+	}
+	defer partutil.UnmountEFIPartition()
+	partUUID, err := partutil.GetPartUUID("/dev/sda8")
+	if err != nil {
+		return fmt.Errorf("cannot read partUUID of /dev/sda8")
+	}
+	if err := appendDMEntryToGRUB(grubPath, devName, partUUID, hash, salt, oemFSSize4K); err != nil {
+		return fmt.Errorf("error in appending entry to grub.cfg, input:oemFSSize4K=%d, "+
+			"error msg:(%v)", oemFSSize4K, err)
+	}
+	log.Println("kernel command line modified.")
+	if err := removeVeritysetupImage(imageID); err != nil {
+		return fmt.Errorf("cannot remove veritysetup image, error msg:(%v)", err)
+	}
+	log.Println("docker image for veritysetup removed.")
+	log.Println("OEM partition sealed.")
+	return nil
+}
+
+// loadVeritysetupImage loads the docker image of veritysetup.
+// It returns the image ID.
+func loadVeritysetupImage(imgPath string) (string, error) {
+	cmd := exec.Command("sudo", "docker", "load", "-i", imgPath)
+	if err := cmd.Run(); err != nil {
+		return "", fmt.Errorf("error in loading docker image, "+
+			"input: imgPath=%q, error msg: (%v)", imgPath, err)
+	}
+	var idBuf bytes.Buffer
+	cmd = exec.Command("sudo", "docker", "images", "veritysetup:veritysetup", "-q")
+	cmd.Stdout = &idBuf
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return "", fmt.Errorf("error in reading image ID, "+
+			"cmd:%q,std output:%s, error msg: (%v)",
+			"sudo docker images veritysetup:veritysetup -q", idBuf.String(), err)
+	}
+	if idBuf.Len() == 0 {
+		return "", fmt.Errorf("image ID not found, "+
+			"input: imgPath=%q", imgPath)
+	}
+	imageID := idBuf.String()
+	return imageID[:len(imageID)-1], nil
+}
+
+// removeVeritysetupImage removes the docker image of veritysetup.
+func removeVeritysetupImage(imageID string) error {
+	cmd := exec.Command("sudo", "docker", "rmi", imageID)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("error in removing docker image, "+
+			"id=%q, error msg: (%v)", imageID, err)
+	}
+	return nil
+}
+
+// unmountOEMPartition checks whether the OEM partition (/dev/sda8)
+// is mounted and, if so, unmounts it.
+func unmountOEMPartition() error {
+	var buf bytes.Buffer
+	cmd := exec.Command("df")
+	cmd.Stdout = &buf
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("error in running df, "+
+			"std output:%s, error msg: (%v)", buf.String(), err)
+	}
+	if !strings.Contains(buf.String(), "/dev/sda8") {
+		return nil
+	}
+	cmd = exec.Command("sudo", "umount", "/dev/sda8")
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("error in unmounting /dev/sda8, "+
+			"error msg: (%v)", err)
+	}
+	return nil
+}
+
+// veritysetup runs the docker container command veritysetup to build the hash tree of the OEM partition
+// and generate the hash root value and salt value.
+func veritysetup(imageID string, oemFSSize4K uint64) (string, string, error) {
+	dataBlocks := "--data-blocks=" + strconv.FormatUint(oemFSSize4K, 10)
+	// --hash-offset is in Bytes
+	hashOffset := "--hash-offset=" + strconv.FormatUint(oemFSSize4K<<12, 10)
+	cmd := exec.Command("sudo", "docker", "run", "--rm", "--name", "veritysetup", "--privileged",
+		"-v", "/dev:/dev", imageID, "veritysetup", "format", "/dev/sda8", "/dev/sda8",
+		"--data-block-size=4096", "--hash-block-size=4096", dataBlocks, hashOffset,
+		"--no-superblock", "--format=0")
+	var verityBuf bytes.Buffer
+	cmd.Stdout = &verityBuf
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return "", "", fmt.Errorf("error in running docker veritysetup, "+
+			"input: oemFSSize4K=%d, std output:%s,error msg: (%v)",
+			oemFSSize4K, verityBuf.String(), err)
+	}
+	// Output of veritysetup is like:
+	// VERITY header information for /dev/sdb1
+	// UUID:
+	// Hash type:              0
+	// Data blocks:            2048
+	// Data block size:        4096
+	// Hash block size:        4096
+	// Hash algorithm:         sha256
+	// Salt:                   9cd7ba29a1771b2097a7d72be8c13b29766d7617c3b924eb0cf23ff5071fee47
+	// Root hash:              d6b862d01e01e6417a1b5e7eb0eed2a2189594b74325dd0749cd83bbf78f5dc8
+	hash := ""
+	salt := ""
+	for _, line := range strings.Split(verityBuf.String(), "\n") {
+		if strings.HasPrefix(line, "Root hash:") {
+			hash = strings.TrimSpace(strings.Split(line, ":")[1])
+		} else if strings.HasPrefix(line, "Salt:") {
+			salt = strings.TrimSpace(strings.Split(line, ":")[1])
+		}
+	}
+	if hash == "" || salt == "" {
+		return "", "", fmt.Errorf("error in veritsetup output format, cannot find \"Salt:\" or \"Root hash:\", "+
+			"input: oemFSSize4K=%d, veritysetup output: %s", oemFSSize4K, verityBuf.String())
+	}
+	return hash, salt, nil
+}
+
+// appendDMEntryToGRUB appends a dm-verity table entry to the kernel command line in grub.cfg.
+// A target line in grub.cfg looks like
+// ...... root=/dev/dm-0 dm="1 vroot none ro 1,0 4077568 verity
+// payload=PARTUUID=8AC60384-1187-9E49-91CE-3ABD8DA295A7
+// hashtree=PARTUUID=8AC60384-1187-9E49-91CE-3ABD8DA295A7 hashstart=4077568 alg=sha256
+// root_hexdigest=xxxxxxxx salt=xxxxxxxx"
+func appendDMEntryToGRUB(grubPath, name, partUUID, hash, salt string, oemFSSize4K uint64) error {
+	// from 4K blocks to 512B sectors
+	oemFSSizeSector := oemFSSize4K << 3
+	entryString := fmt.Sprintf("%s none ro 1, 0 %d verity payload=PARTUUID=%s hashtree=PARTUUID=%s "+
+		"hashstart=%d alg=sha256 root_hexdigest=%s salt=%s\"", name, oemFSSizeSector,
+		partUUID, partUUID, oemFSSizeSector, hash, salt)
+	grubContent, err := ioutil.ReadFile(grubPath)
+	if err != nil {
+		return fmt.Errorf("cannot read grub.cfg at %q, "+
+			"input: grubPath=%q, name=%q, partUUID=%q, oemFSSize4K=%d, hash=%q, salt=%q, "+
+			"error msg:(%v)", grubPath, grubPath, name, partUUID, oemFSSize4K, hash, salt, err)
+	}
+	lines := strings.Split(string(grubContent), "\n")
+	// add the entry to all kernel command lines containing "dm="
+	for idx, line := range lines {
+		if !strings.Contains(line, "dm=") {
+			continue
+		}
+		startPos := strings.Index(line, "dm=")
+		// remove the end quote.
+		lineBuf := []rune(line[:len(line)-1])
+		// add number of entries.
+		lineBuf[startPos+4] = '2'
+		lines[idx] = strings.Join(append(strings.Split(string(lineBuf), ","), entryString), ",")
+	}
+	// new content of grub.cfg
+	grubContent = []byte(strings.Join(lines, "\n"))
+	err = ioutil.WriteFile(grubPath, grubContent, 0755)
+	if err != nil {
+		return fmt.Errorf("cannot write to grub.cfg at %q, "+
+			"input: grubPath=%q, name=%q, partUUID=%q, oemFSSize4K=%d, hash=%q, salt=%q, "+
+			"error msg:(%v)", grubPath, grubPath, name, partUUID, oemFSSize4K, hash, salt, err)
+	}
+	return nil
+}
diff --git a/src/pkg/utils/BUILD.bazel b/src/pkg/utils/BUILD.bazel
new file mode 100644
index 0000000..267454c
--- /dev/null
+++ b/src/pkg/utils/BUILD.bazel
@@ -0,0 +1,26 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "utils",
+    srcs = ["utils.go"],
+    importpath = "github.com/GoogleCloudPlatform/cos-customizer/src/pkg/utils",
+    visibility = ["//visibility:public"],
+    deps = [
+      "@com_github_golang_glog//:glog",
+      "@com_github_pkg_errors//:go_default_library",
+    ],
+)
diff --git a/src/pkg/utils/utils.go b/src/pkg/utils/utils.go
index fbc831c..5fc3bd6 100644
--- a/src/pkg/utils/utils.go
+++ b/src/pkg/utils/utils.go
@@ -1,3 +1,17 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 // Package utils provides utility functions.
 package utils
 
@@ -8,6 +22,7 @@
 	"fmt"
 	"io"
 	"io/ioutil"
+	"log"
 	"net/http"
 	"os"
 	"os/exec"
@@ -18,7 +33,7 @@
 
 	"github.com/pkg/errors"
 
-	log "github.com/golang/glog"
+	"github.com/golang/glog"
 )
 
 var (
@@ -60,17 +75,17 @@
 	// TODO(mikewu): generalize Flock to make it useful for other use cases.
 	f, err := os.OpenFile(lockFile, os.O_RDONLY|os.O_CREATE, 0666)
 	if err != nil {
-		log.Exitf("Failed to open lock file: %v", err)
+		glog.Exitf("Failed to open lock file: %v", err)
 	}
 	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
-		log.Exitf("File %s is locked. Other process might be running.", lockFile)
+		glog.Exitf("File %s is locked. Other process might be running.", lockFile)
 	}
 }
 
 // DownloadContentFromURL downloads file from a given URL.
 func DownloadContentFromURL(url, outputPath, infoStr string) error {
 	url = strings.TrimSpace(url)
-	log.Infof("Downloading %s from %s", infoStr, url)
+	glog.Infof("Downloading %s from %s", infoStr, url)
 
 	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
@@ -99,10 +114,10 @@
 	for retries > 0 {
 		response, err = client.Do(req)
 		if err != nil {
-			log.Errorf("Failed to download %s: %v", infoStr, err)
+			glog.Errorf("Failed to download %s: %v", infoStr, err)
 			retries--
 			time.Sleep(time.Second)
-			log.Info("Retry...")
+			glog.Info("Retry...")
 		} else {
 			break
 		}
@@ -118,7 +133,7 @@
 		return errors.Wrapf(err, "failed to download %s", infoStr)
 	}
 
-	log.Infof("Successfully downloaded %s from %s", infoStr, url)
+	glog.Infof("Successfully downloaded %s from %s", infoStr, url)
 	return nil
 }
 
@@ -132,7 +147,7 @@
 
 // ListGCSBucket lists the objects whose names begin with the given prefix in the given GCS bucekt.
 func ListGCSBucket(bucket, prefix string) ([]string, error) {
-	log.Infof("Listing objects from GCS bucekt %s with prefix %s", bucket, prefix)
+	glog.Infof("Listing objects from GCS bucekt %s with prefix %s", bucket, prefix)
 
 	url := fmt.Sprintf("https://storage.googleapis.com/storage/v1/b/%s/o?prefix=%s", bucket, prefix)
 	dir, err := ioutil.TempDir("", "bucketlist")
@@ -268,17 +283,17 @@
 
 // RunCommandAndLogOutput runs the given command and logs the stdout and stderr in parallel.
 func RunCommandAndLogOutput(cmd *exec.Cmd, expectError bool) error {
-	errLogger := log.Error
+	errLogger := glog.Error
 	if expectError {
-		errLogger = log.V(1).Info
+		errLogger = glog.V(1).Info
 	}
 
-	cmd.Stdout = &loggingWriter{logger: log.Info}
+	cmd.Stdout = &loggingWriter{logger: glog.Info}
 	cmd.Stderr = &loggingWriter{logger: errLogger}
 
 	err := cmd.Run()
 	if _, ok := err.(*exec.ExitError); ok && expectError {
-		log.Warningf("command %s didn't complete successfully: %v", cmd.Path, err)
+		glog.Warningf("command %s didn't complete successfully: %v", cmd.Path, err)
 		return nil
 	}
 	return err
@@ -345,3 +360,40 @@
 	}
 	return len(p), nil
 }
+
+// CheckClose closes an io.Closer and checks its error. Useful for checking the
+// errors on deferred Close() behaviors.
+func CheckClose(closer io.Closer, errMsgOnClose string, err *error) {
+	if closeErr := closer.Close(); closeErr != nil {
+		var fullErr error
+		if errMsgOnClose != "" {
+			fullErr = fmt.Errorf("%s: %v", errMsgOnClose, closeErr)
+		} else {
+			fullErr = closeErr
+		}
+		if *err == nil {
+			*err = fullErr
+		} else {
+			log.Println(fullErr)
+		}
+	}
+}
+
+// RunCommand runs a command using exec.Command. The command runs in the working
+// directory "dir" with environment "env" and outputs to stdout and stderr.
+func RunCommand(args []string, dir string, env []string) error {
+	cmd := exec.Command(args[0], args[1:]...)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	cmd.Dir = dir
+	cmd.Env = env
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf(`error in cmd "%v", see stderr for details: %v`, args, err)
+	}
+	return nil
+}
+
+// QuoteForShell quotes a string for use in a bash shell.
+func QuoteForShell(str string) string {
+	return fmt.Sprintf("'%s'", strings.Replace(str, "'", "'\"'\"'", -1))
+}
diff --git a/src/third_party/dosfstools/BUILD.bazel b/src/third_party/dosfstools/BUILD.bazel
new file mode 100644
index 0000000..8e71575
--- /dev/null
+++ b/src/third_party/dosfstools/BUILD.bazel
@@ -0,0 +1,13 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/third_party/dosfstools/BUILD.dosfstools.bazel b/src/third_party/dosfstools/BUILD.dosfstools.bazel
new file mode 100644
index 0000000..98ee6e8
--- /dev/null
+++ b/src/third_party/dosfstools/BUILD.dosfstools.bazel
@@ -0,0 +1,49 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@rules_foreign_cc//tools/build_defs:configure.bzl", "configure_make")
+
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+    name = "all_srcs",
+    srcs = glob(["**"]),
+)
+
+configure_make(
+    name = "dosfstools_pkg",
+    lib_source = ":all_srcs",
+    configure_options = ["--disable-dependency-tracking"],
+    make_commands = [
+        # dosfstools requires the path to 'make' be present in the MAKE
+        # variable. Since the configure_make() rule doesn't do this for us, some
+        # magic is needed.
+        # 1. Set MAKE to the output of a 'shell' function that reads the path of
+        # the parent process. The parent process of the 'shell' function is the
+        # make process, run by configure_make() using the correct make program.
+        # 2. Write "$$PID" as "$$$PID$$". We do this because the
+        # configure_make() rule has a substitution that does "$$PID$$" ->
+        # "$PID".
+        "make MAKE='$(shell realpath /proc/$$$PPID$$/exe)'",
+        "make MAKE='$(shell realpath /proc/$$$PPID$$/exe)' install",
+    ],
+    out_bin_dir = "sbin",
+    binaries = ["mkfs.fat"],
+)
+
+filegroup(
+    name = "mkfs.fat",
+    srcs = [":dosfstools_pkg"],
+    output_group = "mkfs.fat",
+)
diff --git a/src/third_party/dosfstools/dosfstools_repositories.bzl b/src/third_party/dosfstools/dosfstools_repositories.bzl
new file mode 100644
index 0000000..1c839ca
--- /dev/null
+++ b/src/third_party/dosfstools/dosfstools_repositories.bzl
@@ -0,0 +1,28 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
+
+def dosfstools_repositories():
+    """Load all repositories needed for dosfstools."""
+
+    maybe(
+        http_archive,
+        name = "dosfstools",
+        build_file = Label("//src/third_party/dosfstools:BUILD.dosfstools.bazel"),
+        strip_prefix = "dosfstools-4.2",
+        urls = ["https://github.com/dosfstools/dosfstools/releases/download/v4.2/dosfstools-4.2.tar.gz"],
+        sha256 = "64926eebf90092dca21b14259a5301b7b98e7b1943e8a201c7d726084809b527",
+    )
diff --git a/src/third_party/mtools/BUILD.bazel b/src/third_party/mtools/BUILD.bazel
new file mode 100644
index 0000000..8e71575
--- /dev/null
+++ b/src/third_party/mtools/BUILD.bazel
@@ -0,0 +1,13 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/third_party/mtools/BUILD.mtools.bazel b/src/third_party/mtools/BUILD.mtools.bazel
new file mode 100644
index 0000000..9b0f74d
--- /dev/null
+++ b/src/third_party/mtools/BUILD.mtools.bazel
@@ -0,0 +1,34 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@rules_foreign_cc//tools/build_defs:configure.bzl", "configure_make")
+
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+    name = "all_srcs",
+    srcs = glob(["**"]),
+)
+
+configure_make(
+    name = "mtools_pkg",
+    lib_source = ":all_srcs",
+    binaries = ["mcopy"],
+)
+
+filegroup(
+    name = "mcopy",
+    srcs = [":mtools_pkg"],
+    output_group = "mcopy",
+)
diff --git a/src/third_party/mtools/mtools_repositories.bzl b/src/third_party/mtools/mtools_repositories.bzl
new file mode 100644
index 0000000..79064d9
--- /dev/null
+++ b/src/third_party/mtools/mtools_repositories.bzl
@@ -0,0 +1,31 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
+
+def mtools_repositories():
+    """Load all repositories needed for mtools."""
+
+    maybe(
+        http_archive,
+        name = "mtools",
+        build_file = Label("//src/third_party/mtools:BUILD.mtools.bazel"),
+        strip_prefix = "mtools-4.0.26",
+        urls = [
+            "https://mirror.bazel.build/ftp.gnu.org/gnu/mtools/mtools-4.0.26.tar.gz",
+            "http://ftp.gnu.org/gnu/mtools/mtools-4.0.26.tar.gz",
+        ],
+        sha256 = "b1adb6973d52b3b70b16047e682f96ef1b669d6b16894c9056a55f407e71cd0f",
+    )
diff --git a/testing/README b/testing/README
new file mode 100644
index 0000000..593e817
--- /dev/null
+++ b/testing/README
@@ -0,0 +1,20 @@
+The testing directory contains COS customizer integration tests.
+
+Each *.yaml file in the top level of the directory corresponds to exactly one
+COS customizer integration test. These *.yaml files are Cloud Build workflow
+definitions, and are intended to be run with the whole COS customizer source.
+For example, to run `smoke_test.yaml`, run
+`gcloud builds submit --config=testing/smoke_test.yaml .` from the COS
+customizer source root.
+
+Directories contain data needed for tests; this usually includes scripts and
+cloud-configs. For the most part, test assertions are made in cloud-configs
+that run on instances made from generated images.
+
+The util directory contains utilities for all tests. For example,
+run_test.wf.json is a Daisy workflow that facilitates running a test.
+vm.wf.json and gpu_vm.wf.json are used by run_test.wf.json for creating a
+normal VM and GPU VM respectively.
+
+To run all tests, use the run_tests.sh script in the COS customizer source
+root.
diff --git a/testing/deprecate_test.yaml b/testing/deprecate_test.yaml
new file mode 100644
index 0000000..93dfa34
--- /dev/null
+++ b/testing/deprecate_test.yaml
@@ -0,0 +1,50 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "deprecate_test"
+  "_INPUT_IMAGE": "cos-dev-69-10895-0-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'gcr.io/cloud-builders/gcloud'
+  args: ["compute", "images", "create", "preload-test-$BUILD_ID-old",
+         "--source-image=${_INPUT_IMAGE}", "--source-image-project=${_INPUT_PROJECT}",
+         "--family=test-family"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-family=test-family",
+         "-deprecate-old-images",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/cloud-builders/gcloud'
+  entrypoint: '/bin/bash'
+  env:
+  - "OLD_IMAGE=preload-test-$BUILD_ID-old"
+  - "IMAGE=preload-test-$BUILD_ID"
+  - "PROJECT=$PROJECT_ID"
+  args: ["/workspace/testing/${_TEST}/run_test.sh"]
+options:
+  machineType: 'N1_HIGHCPU_8'
+timeout: "7200s"
diff --git a/testing/deprecate_test/run_test.sh b/testing/deprecate_test/run_test.sh
new file mode 100644
index 0000000..2fb77d4
--- /dev/null
+++ b/testing/deprecate_test/run_test.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+RESULT="pass"
+old_deprecation_state="$(gcloud compute images describe "${OLD_IMAGE}" --project="${PROJECT}" --format='value(deprecated)')"
+if [[ -z "${old_deprecation_state}" ]]; then
+  echo "Old image isn't deprecated"
+  echo "Deprecation state: ${old_deprecation_state}"
+  RESULT="fail"
+fi
+
+new_deprecation_state="$(gcloud compute images describe "${IMAGE}" --project="${PROJECT}" --format='value(deprecated)')"
+if [[ -n "${new_deprecation_state}" ]]; then
+  echo "New image appears to be deprecated"
+  echo "Deprecation state: ${new_deprecation_state}"
+  RESULT="fail"
+fi
+
+gcloud compute images delete "${OLD_IMAGE}" --project="${PROJECT}"
+gcloud compute images delete "${IMAGE}" --project="${PROJECT}"
+if [[ "${RESULT}" == "fail" ]]; then
+  echo "Tests failed"
+  exit 1
+fi
+echo "Tests passed"
diff --git a/testing/disable_auto_update_test.yaml b/testing/disable_auto_update_test.yaml
new file mode 100644
index 0000000..4231a0e
--- /dev/null
+++ b/testing/disable_auto_update_test.yaml
@@ -0,0 +1,46 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "disable_auto_update_test"
+  "_INPUT_IMAGE": "cos-81-12871-148-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["disable-auto-update"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/disable_auto_update_test/preload.sh b/testing/disable_auto_update_test/preload.sh
new file mode 100644
index 0000000..05bc817
--- /dev/null
+++ b/testing/disable_auto_update_test/preload.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "hello" > /mnt/stateful_partition/hello
+docker_code=0
+i=1
+while [[ $i -le 10 ]]; do
+  echo "Pulling ubuntu container image... [${i}/10]"
+  docker pull ubuntu && break || docker_code="$?"
+  i=$((i+1))
+done
+if [[ $i -eq 11 ]]; then
+  echo "Pulling ubuntu failed."
+  echo "Docker journal logs:"
+  journalctl -u docker.service --no-pager
+  exit "${docker_code}"
+fi
+echo "Successfully pulled ubuntu container image."
diff --git a/testing/disable_auto_update_test/preload_test.cfg b/testing/disable_auto_update_test/preload_test.cfg
new file mode 100644
index 0000000..fed1c9a
--- /dev/null
+++ b/testing/disable_auto_update_test/preload_test.cfg
@@ -0,0 +1,146 @@
+#cloud-config
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /tmp/preloader-test/test.sh
+    permissions: 0644
+    owner: root
+    content: |
+      set -o errexit
+      set -o pipefail
+
+      trap 'fail exiting due to errors' EXIT
+
+      fail() {
+        echo "TestFail: $@"
+      }
+
+      testHello() {
+        if [[ ! -f /mnt/stateful_partition/hello ]]; then
+          echo "/mnt/stateful_partition/hello is not a valid file (does it exist?)"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        expected="hello"
+        actual=$(cat /mnt/stateful_partition/hello)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "/mnt/stateful_partition/hello contains ${actual} instead of ${expected}"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHello pass"
+      }
+
+      testUbuntuImage() {
+        expected="ubuntu"
+        actual=$(docker images --format {{.Repository}})
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected docker images: ${expected}"
+          echo "actual docker images: ${actual}"
+          echo "testUbuntuImage fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testUbuntuImage pass"
+      }
+
+      testHomeDir() {
+        expected="chronos"
+        actual=$(ls /home)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected home contents: ${expected}"
+          echo "actual home contents: ${actual}"
+          echo "testHomeDir fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHomeDir pass"
+      }
+
+      testWorkdirClean() {
+        if [[ -d "/var/lib/.cos-customizer" ]]; then
+          echo "/var/lib/.cos-customizer exists"
+          echo "testWorkdirClean fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testWorkdirClean pass"
+      }
+
+      testAutoUpdateService(){
+        if (sudo systemctl -q status update-engine.service); then
+          echo "auto-update service is still on"
+          echo "testAutoUpdateService fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testAutoUpdateService pass"
+      }
+
+      testStatefulSize(){
+        local -r stateful_size_th=7168
+        actual=$(df --output=size /mnt/stateful_partition --block-size=$((1<<20)) | tail -n 1)
+        if [[ "${actual}" -le "${stateful_size_th}" ]]; then
+          echo "STATEFUL size 80% threshold: ${stateful_size_th} MB"
+          echo "actual STATEFUL size: ${actual} MB"
+          echo "testStatefulSize fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testStatefulSize pass"
+      }
+
+
+      main() {
+        RESULT="pass"
+        testHello
+        testUbuntuImage
+        testHomeDir
+        testWorkdirClean
+        testAutoUpdateService
+        testStatefulSize
+        if [[ "${RESULT}" == "fail" ]]; then
+          exit 1
+        fi
+      }
+
+      main 2>&1 | sed "s/^/TestStatus: /"
+      trap - EXIT
+      echo "TestPass: all tests passed"
+
+  - path: /etc/systemd/system/preloader-test.service
+    permissions: 0644
+    owner: root
+    content: |
+      [Unit]
+      Description=Preloader test
+      Wants=network-online.target gcr-online.target docker.service
+      After=network-online.target gcr-online.target docker.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      User=root
+      ExecStart=/bin/bash /tmp/preloader-test/test.sh
+      StandardOutput=tty
+      StandardError=tty
+      TTYPath=/dev/ttyS1
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl --no-block start preloader-test.service
diff --git a/testing/env_test.yaml b/testing/env_test.yaml
new file mode 100644
index 0000000..dad5b19
--- /dev/null
+++ b/testing/env_test.yaml
@@ -0,0 +1,45 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "env_test"
+  "_INPUT_IMAGE": "cos-dev-69-10895-0-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh",
+         "-env=HELLO=hello"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_8'
+timeout: "7200s"
diff --git a/testing/env_test/preload.sh b/testing/env_test/preload.sh
new file mode 100644
index 0000000..6d85a20
--- /dev/null
+++ b/testing/env_test/preload.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "${HELLO}" > /var/lib/hello
diff --git a/testing/env_test/preload_test.cfg b/testing/env_test/preload_test.cfg
new file mode 100644
index 0000000..1b89bf2
--- /dev/null
+++ b/testing/env_test/preload_test.cfg
@@ -0,0 +1,82 @@
+#cloud-config
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /tmp/preloader-test/test.sh
+    permissions: 0644
+    owner: root
+    content: |
+      set -o errexit
+      set -o pipefail
+      set -o nounset
+
+      trap 'fail exiting due to errors' EXIT
+
+      fail() {
+        echo "TestFail: $@"
+      }
+
+      testHello() {
+        if [[ ! -f /var/lib/hello ]]; then
+          echo "/var/lib/hello is not a valid file (does it exist?)"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        expected="hello"
+        actual=$(cat /var/lib/hello)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "/var/lib/hello contains ${actual} instead of ${expected}"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHello pass"
+      }
+
+      main() {
+        RESULT="pass"
+        testHello
+        if [[ "${RESULT}" == "fail" ]]; then
+          exit 1
+        fi
+      }
+
+      main 2>&1 | sed "s/^/TestStatus: /"
+      trap - EXIT
+      echo "TestPass: all tests passed"
+
+  - path: /etc/systemd/system/preloader-test.service
+    permissions: 0644
+    owner: root
+    content: |
+      [Unit]
+      Description=Preloader test
+      Wants=network-online.target gcr-online.target docker.service
+      After=network-online.target gcr-online.target docker.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      User=root
+      ExecStart=/bin/bash /tmp/preloader-test/test.sh
+      StandardOutput=tty
+      StandardError=tty
+      TTYPath=/dev/ttyS1
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl --no-block start preloader-test.service
diff --git a/testing/extend_oem_test.yaml b/testing/extend_oem_test.yaml
new file mode 100644
index 0000000..152ebed
--- /dev/null
+++ b/testing/extend_oem_test.yaml
@@ -0,0 +1,69 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+steps:
+
+- name: 'gcr.io/cloud-builders/gcloud'
+  entrypoint: '/bin/bash'
+  args:
+  - '-c'
+  - |
+    test_list=("gcloud builds submit --config=testing/extend_oem_test/extend_oem_test.yaml\
+                  --substitutions=_OEM_SIZE=1G,_DISK_SIZE=11,_OEM_SIZE_TH=900 --async --format='value(ID)' ." 
+               "gcloud builds submit --config=testing/extend_oem_test/extend_oem_test.yaml\
+                  --substitutions=_OEM_SIZE=2G,_DISK_SIZE=12,_OEM_SIZE_TH=1700 --async --format='value(ID)' ."   
+               "gcloud builds submit --config=testing/extend_oem_test/extend_oem_test.yaml\
+                  --substitutions=_OEM_SIZE=10G,_DISK_SIZE=20,_OEM_SIZE_TH=8192 --async --format='value(ID)' ." 
+                "gcloud builds submit --config=testing/extend_oem_test/extend_oem_test.yaml\
+                  --substitutions=_OEM_SIZE=1024G,_DISK_SIZE=1034,_OEM_SIZE_TH=838861 --async --format='value(ID)' .")
+    build_ids=()
+    exit_code=0
+    for test in "${test_list[@]}"; do
+      build_ids+=("$(eval "${test}")")
+    done
+    for build_id in "${build_ids[@]}"; do
+      status=""
+      while true; do
+        status="$(gcloud builds describe "${build_id}" --format='value(status)')"
+        case "${status}" in
+          "SUCCESS"|"FAILURE"|"INTERNAL_ERROR"|"TIMEOUT"|"CANCELLED")
+            echo "${status}"
+            break
+            ;;
+          "QUEUED"|"WORKING")
+            sleep 5
+            ;;
+          "STATUS_UNKNOWN")
+            echo "Received STATUS_UNKNOWN for build ${build_id}" 1>&2
+            sleep 5
+            ;;
+          *)
+            echo "Unknown status for build ${build_id}: ${status}" 1>&2
+            break
+            ;;
+        esac
+      done
+      if [[ "${status}" == "SUCCESS" ]]; then
+        echo "Build ${build_id} succeeded"
+      else
+        log_url="$(gcloud builds describe "${build_id}" --format='value(logUrl)')"
+        echo "Build ${build_id} failed"
+        echo "Logs: ${log_url}"
+        exit_code=1
+      fi
+    done
+    exit "${exit_code}"
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/extend_oem_test/extend_oem_test.yaml b/testing/extend_oem_test/extend_oem_test.yaml
new file mode 100644
index 0000000..5516179
--- /dev/null
+++ b/testing/extend_oem_test/extend_oem_test.yaml
@@ -0,0 +1,52 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "extend_oem_test"
+  "_INPUT_IMAGE": "cos-81-12871-148-0"
+  "_INPUT_PROJECT": "cos-cloud"
+  "_OEM_SIZE": ""
+  "_DISK_SIZE": ""
+  "_OEM_SIZE_TH": ""
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'busybox'
+  args: ["sed", "-i", "-e", "s|%s|'${_OEM_SIZE_TH}'|",
+         "testing/${_TEST}/preload_test.cfg"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID",
+         "-disk-size-gb=${_DISK_SIZE}",
+         "-oem-size=${_OEM_SIZE}"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/extend_oem_test/preload.sh b/testing/extend_oem_test/preload.sh
new file mode 100644
index 0000000..e3204fd
--- /dev/null
+++ b/testing/extend_oem_test/preload.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "hello" > /mnt/stateful_partition/hello
+sudo mount -o remount,rw /usr/share/oem
+echo "hello" > /usr/share/oem/hello
+docker_code=0
+i=1
+while [[ $i -le 10 ]]; do
+  echo "Pulling ubuntu container image... [${i}/10]"
+  docker pull ubuntu && break || docker_code="$?"
+  i=$((i+1))
+done
+if [[ $i -eq 11 ]]; then
+  echo "Pulling ubuntu failed."
+  echo "Docker journal logs:"
+  journalctl -u docker.service --no-pager
+  exit "${docker_code}"
+fi
+echo "Successfully pulled ubuntu container image."
diff --git a/testing/extend_oem_test/preload_test.cfg b/testing/extend_oem_test/preload_test.cfg
new file mode 100644
index 0000000..fcbfc45
--- /dev/null
+++ b/testing/extend_oem_test/preload_test.cfg
@@ -0,0 +1,155 @@
+#cloud-config
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /tmp/preloader-test/test.sh
+    permissions: 0644
+    owner: root
+    content: |
+      set -o errexit
+      set -o pipefail
+
+      # Templated by cloudbuild config that runs this test.
+      OEM_SIZE_TH=%s
+
+      trap 'fail exiting due to errors' EXIT
+
+      fail() {
+        echo "TestFail: $@"
+      }
+
+      testHello() {
+        if [[ ! -f /mnt/stateful_partition/hello ]]; then
+          echo "/mnt/stateful_partition/hello is not a valid file (does it exist?)"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        expected="hello"
+        actual=$(cat /mnt/stateful_partition/hello)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "/mnt/stateful_partition/hello contains ${actual} instead of ${expected}"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHello pass"
+      }
+
+      testOEMHello(){
+        if [[ ! -f /usr/share/oem/hello ]]; then
+          echo "/usr/share/oem/hello is not a valid file (does it exist?)"
+          echo "testOEMHello fail"
+          RESULT="fail"
+          return
+        fi
+        expected="hello"
+        actual=$(cat /usr/share/oem/hello)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "/usr/share/oem/hello contains ${actual} instead of ${expected}"
+          echo "testOEMHello fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testOEMHello pass"
+      }
+
+      testUbuntuImage() {
+        expected="ubuntu"
+        actual=$(docker images --format {{.Repository}})
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected docker images: ${expected}"
+          echo "actual docker images: ${actual}"
+          echo "testUbuntuImage fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testUbuntuImage pass"
+      }
+
+      testHomeDir() {
+        expected="chronos"
+        actual=$(ls /home)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected home contents: ${expected}"
+          echo "actual home contents: ${actual}"
+          echo "testHomeDir fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHomeDir pass"
+      }
+
+      testWorkdirClean() {
+        if [[ -d "/var/lib/.cos-customizer" ]]; then
+          echo "/var/lib/.cos-customizer exists"
+          echo "testWorkdirClean fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testWorkdirClean pass"
+      }
+
+      testOEMSize(){
+        actual=$(df --output=size /usr/share/oem --block-size=$((1<<20)) | tail -n 1)
+        if [[ "${actual}" -le "${OEM_SIZE_TH}" ]]; then
+          echo "OEM size 80% threshold: ${OEM_SIZE_TH} MB"
+          echo "actual OEM size: ${actual} MB"
+          echo "testOEMSize fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testOEMSize pass"
+      }
+
+      main() {
+        RESULT="pass"
+        testHello
+        testUbuntuImage
+        testHomeDir
+        testWorkdirClean
+        testOEMSize
+        testOEMHello
+        if [[ "${RESULT}" == "fail" ]]; then
+          exit 1
+        fi
+      }
+
+      main 2>&1 | sed "s/^/TestStatus: /"
+      trap - EXIT
+      echo "TestPass: all tests passed"
+
+  - path: /etc/systemd/system/preloader-test.service
+    permissions: 0644
+    owner: root
+    content: |
+      [Unit]
+      Description=Preloader test
+      Wants=network-online.target gcr-online.target docker.service
+      After=network-online.target gcr-online.target docker.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      User=root
+      ExecStart=/bin/bash /tmp/preloader-test/test.sh
+      StandardOutput=tty
+      StandardError=tty
+      TTYPath=/dev/ttyS1
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl --no-block start preloader-test.service
diff --git a/testing/gpu_test.yaml b/testing/gpu_test.yaml
new file mode 100644
index 0000000..a52da64
--- /dev/null
+++ b/testing/gpu_test.yaml
@@ -0,0 +1,81 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+steps:
+
+- name: 'gcr.io/cloud-builders/gcloud'
+  entrypoint: '/bin/bash'
+  args:
+  - '-c'
+  - |
+    test_list=("gcloud builds submit --config=testing/gpu_test/gpu_test.yaml\
+                  --substitutions=_DRIVER_VERSION=390.46 --async --format='value(ID)' ." 
+               "gcloud builds submit --config=testing/gpu_test/gpu_test.yaml\
+                  --substitutions=_DRIVER_VERSION=396.26 --async --format='value(ID)' ."   
+               "gcloud builds submit --config=testing/gpu_test/gpu_test.yaml\
+                  --substitutions=_DRIVER_VERSION=396.37 --async --format='value(ID)' ." 
+               "gcloud builds submit --config=testing/gpu_test/gpu_test.yaml\
+                  --substitutions=_DRIVER_VERSION=396.44 --async --format='value(ID)' ."
+               "gcloud builds submit --config=testing/gpu_test/gpu_test.yaml\
+                  --substitutions=_DRIVER_VERSION=418.67,_INPUT_IMAGE=cos-73-11647-656-0\
+                  --async --format='value(ID)' ."
+               "gcloud builds submit --config=testing/gpu_test/gpu_test.yaml\
+                  --substitutions=_DRIVER_VERSION=418.67,_INPUT_IMAGE=cos-77-12371-1079-0\
+                  --async --format='value(ID)' ."
+               "gcloud builds submit --config=testing/gpu_test/gpu_test.yaml\
+                  --substitutions=_DRIVER_VERSION=450.51.06,_INPUT_IMAGE=cos-81-12871-1196-0\
+                  --async --format='value(ID)' ."
+               "gcloud builds submit --config=testing/gpu_test/gpu_test.yaml\
+                  --substitutions=_DRIVER_VERSION=450.51.06,_INPUT_IMAGE=cos-rc-85-13310-1040-0\
+                  --async --format='value(ID)' .")
+    build_ids=()
+    exit_code=0
+    for test in "${test_list[@]}"; do
+      build_ids+=("$(eval "${test}")")
+    done
+    for build_id in "${build_ids[@]}"; do
+      status=""
+      while true; do
+        status="$(gcloud builds describe "${build_id}" --format='value(status)')"
+        case "${status}" in
+          "SUCCESS"|"FAILURE"|"INTERNAL_ERROR"|"TIMEOUT"|"CANCELLED")
+            echo "${status}"
+            break
+            ;;
+          "QUEUED"|"WORKING")
+            sleep 5
+            ;;
+          "STATUS_UNKNOWN")
+            echo "Received STATUS_UNKNOWN for build ${build_id}" 1>&2
+            sleep 5
+            ;;
+          *)
+            echo "Unknown status for build ${build_id}: ${status}" 1>&2
+            break
+            ;;
+        esac
+      done
+      if [[ "${status}" == "SUCCESS" ]]; then
+        echo "Build ${build_id} succeeded"
+      else
+        log_url="$(gcloud builds describe "${build_id}" --format='value(logUrl)')"
+        echo "Build ${build_id} failed"
+        echo "Logs: ${log_url}"
+        exit_code=1
+      fi
+    done
+    exit "${exit_code}"
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/gpu_test/gpu_test.yaml b/testing/gpu_test/gpu_test.yaml
new file mode 100644
index 0000000..110a2e0
--- /dev/null
+++ b/testing/gpu_test/gpu_test.yaml
@@ -0,0 +1,64 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "gpu_test"
+  "_INPUT_IMAGE": "cos-69-10895-71-0"
+  "_INPUT_PROJECT": "cos-cloud"
+  "_DRIVER_VERSION": ""
+  "_DEPS_DIR": ""
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'ubuntu'
+  args:
+  - bash
+  - -c
+  - |
+    ver="${_DRIVER_VERSION}"
+    if [[ "${ver}" == *.run ]]; then
+      sub="$(echo "${ver%.run}" | cut -d '-' -f 4)"
+    else
+      sub="${ver}"
+    fi
+    sed -i -e "s|%s|'${sub}'|" testing/${_TEST}/preload_test.cfg
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["install-gpu",
+         "-version=${_DRIVER_VERSION}",
+         "-gpu-type=nvidia-tesla-k80",
+         "-deps-dir=${_DEPS_DIR}"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "-var:vm_workflow",
+         "./gpu_vm.wf.json", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/gpu_test/preload.sh b/testing/gpu_test/preload.sh
new file mode 100644
index 0000000..dede88b
--- /dev/null
+++ b/testing/gpu_test/preload.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_code=0
+i=1
+while [[ $i -le 10 ]]; do
+  echo "Pulling ubuntu container image... [${i}/10]"
+  docker pull ubuntu && break || docker_code="$?"
+  i=$((i+1))
+done
+if [[ $i -eq 11 ]]; then
+  echo "Pulling ubuntu failed."
+  echo "Docker journal logs:"
+  journalctl -u docker.service --no-pager
+  exit "${docker_code}"
+fi
+echo "Successfully pulled ubuntu container image."
diff --git a/testing/gpu_test/preload_test.cfg b/testing/gpu_test/preload_test.cfg
new file mode 100644
index 0000000..1edff1e
--- /dev/null
+++ b/testing/gpu_test/preload_test.cfg
@@ -0,0 +1,145 @@
+#cloud-config
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /tmp/preloader-test/test.sh
+    permissions: 0644
+    owner: root
+    content: |
+      set -o errexit
+      set -o pipefail
+
+      # Templated by cloudbuild config that runs this test.
+      EXPECTED_DRIVER_VERSION=%s
+
+      trap 'fail exiting due to errors' EXIT
+
+      fail() {
+        echo "TestFail: $@"
+      }
+
+      testGpuDirExists() {
+        if [[ ! -d /var/lib/nvidia ]]; then
+          echo "/var/lib/nvidia is not a valid directory (does it exist?)"
+          echo "testGpuDirExists fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testGpuDirExists pass"
+      }
+
+      testDockerImageCached() {
+        expected=$'gcr.io/cos-cloud/cos-gpu-installer\nubuntu'
+        actual="$(docker images --format {{.Repository}} | sort)"
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected docker images: ${expected}"
+          echo "actual docker images: ${actual}"
+          echo "testDockerImageCached fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testDockerImageCached pass"
+      }
+
+      testActivateDrivers() {
+        local activate_drivers="pass"
+        /bin/bash /var/lib/nvidia/setup_gpu.sh
+        if [[ ! -c "/dev/nvidia-uvm" ]]; then
+          echo "Cannot find character device /dev/nvidia-uvm"
+          activate_drivers="fail"
+          RESULT="fail"
+        fi
+        if [[ ! -c "/dev/nvidia-uvm-tools" ]]; then
+          echo "Cannot find character device /dev/nvidia-uvm-tools"
+          activate_drivers="fail"
+          RESULT="fail"
+        fi
+        if [[ ! -c "/dev/nvidia0" ]]; then
+          echo "Cannot find character device /dev/nvidia0"
+          activate_drivers="fail"
+          RESULT="fail"
+        fi
+        if [[ ! -c "/dev/nvidiactl" ]]; then
+          echo "Cannot find character device /dev/nvidiactl"
+          activate_drivers="fail"
+          RESULT="fail"
+        fi
+        if ! pgrep -f nvidia-persistenced > /dev/null; then
+          echo "nvidia-persistenced is not running"
+          activate_drivers="fail"
+          RESULT="fail"
+        fi
+        if [[ "$(cat /proc/sys/kernel/softlockup_panic)" != "1" ]]; then
+          echo "softlockup_panic is not set"
+          activate_drivers="fail"
+          RESULT="fail"
+        fi
+        if [[ "${activate_drivers}" == "fail" ]]; then
+          echo "testActivateDrivers fail"
+          return
+        fi
+        echo "testActivateDrivers pass"
+      }
+
+      testDriverVersion() {
+        source /var/lib/nvidia/.cache
+        if [[ "${EXPECTED_DRIVER_VERSION}" != "${CACHE_NVIDIA_DRIVER_VERSION}" ]]; then
+          echo "Wrong driver version"
+          echo "Expected: ${EXPECTED_DRIVER_VERSION}"
+          echo "Actual: ${CACHE_NVIDIA_DRIVER_VERSION}"
+          echo "testDriverVersion fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testDriverVersion pass"
+      }
+
+      main() {
+        RESULT="pass"
+        testGpuDirExists
+        testDockerImageCached
+        testActivateDrivers
+        testDriverVersion
+        if [[ "${RESULT}" == "fail" ]]; then
+          exit 1
+        fi
+      }
+
+      main 2>&1 | sed "s/^/TestStatus: /"
+      trap - EXIT
+      echo "TestPass: all tests passed"
+
+  - path: /etc/systemd/system/preloader-test.service
+    permissions: 0644
+    owner: root
+    content: |
+      [Unit]
+      Description=Preloader test
+      Wants=network-online.target gcr-online.target docker.service
+      After=network-online.target gcr-online.target docker.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      User=root
+      ExecStart=/bin/bash /tmp/preloader-test/test.sh
+      StandardOutput=tty
+      StandardError=tty
+      TTYPath=/dev/ttyS1
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl --no-block start preloader-test.service
diff --git a/testing/gpu_test_deps_dir.yaml b/testing/gpu_test_deps_dir.yaml
new file mode 100644
index 0000000..27acdf4
--- /dev/null
+++ b/testing/gpu_test_deps_dir.yaml
@@ -0,0 +1,27 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+steps:
+- name: 'gcr.io/cloud-builders/gsutil'
+  entrypoint: '/bin/bash'
+  args: ["-c", "mkdir deps_dir"]
+- name: 'gcr.io/cloud-builders/gsutil'
+  args: ["-m", "cp", "-r", "gs://cos-tools/12998.0.0/*", "deps_dir"]
+- name: 'gcr.io/cloud-builders/gsutil'
+  args: ["-m", "cp", "gs://nvidia-drivers-us-public/tesla/418.67/NVIDIA-Linux-x86_64-418.67.run", "deps_dir"]
+- name: 'gcr.io/cloud-builders/gcloud'
+  args: ["builds", "submit", "--config=testing/gpu_test/gpu_test.yaml",
+         "--substitutions=_DRIVER_VERSION=NVIDIA-Linux-x86_64-418.67.run,_INPUT_IMAGE=cos-dev-83-12998-0-0,_DEPS_DIR=deps_dir",
+         "."]
+timeout: "7200s"
diff --git a/testing/image_test.yaml b/testing/image_test.yaml
new file mode 100644
index 0000000..6eaeb9e
--- /dev/null
+++ b/testing/image_test.yaml
@@ -0,0 +1,51 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "image_test"
+  "_INPUT_IMAGE": "cos-dev-69-10895-0-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-labels=hello=world,test_key=test_value",
+         "-licenses=projects/cos-cloud/global/licenses/cos-gpu",
+         "-disk-size-gb=50",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-family=test-family",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/cloud-builders/gcloud'
+  entrypoint: '/bin/bash'
+  env:
+  - "IMAGE=preload-test-$BUILD_ID"
+  - "PROJECT=$PROJECT_ID"
+  - "LABELS=hello=world;test_key=test_value"
+  - "FAMILY=test-family"
+  - "DISK_SIZE_GB=50"
+  - "LICENSES=https://www.googleapis.com/compute/v1/projects/cos-cloud/global/licenses/cos-gpu;https://www.googleapis.com/compute/v1/projects/cos-cloud/global/licenses/cos"
+  args: ["/workspace/testing/${_TEST}/run_test.sh"]
+options:
+  machineType: 'N1_HIGHCPU_8'
+timeout: "7200s"
diff --git a/testing/image_test/run_test.sh b/testing/image_test/run_test.sh
new file mode 100644
index 0000000..7d915bc
--- /dev/null
+++ b/testing/image_test/run_test.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Image to test
+IMAGE="${IMAGE:-}"
+PROJECT="${PROJECT:-}"
+
+# Expected values to test against
+LICENSES="${LICENSES:-}"
+LABELS="${LABELS:-}"
+FAMILY="${FAMILY:-}"
+DISK_SIZE_GB="${DISK_SIZE_GB:-}"
+
+sort_licenses() {
+  local licenses="$1"
+  echo "${licenses}" | tr ';' '\n' | sort | tr '\n' ';'
+}
+
+RESULT="pass"
+actual_licenses="$(gcloud compute images describe "${IMAGE}" --project="${PROJECT}" --format='value(licenses)')"
+if [[ "$(sort_licenses "${LICENSES}")" != "$(sort_licenses "${actual_licenses}")" ]]; then
+  echo "Licenses differ."
+  echo "Expected: ${LICENSES}"
+  echo "Actual: ${actual_licenses}"
+  RESULT="fail"
+fi
+
+actual_labels="$(gcloud compute images describe "${IMAGE}" --project="${PROJECT}" --format='value(labels)')"
+if [[ "${LABELS}" != "${actual_labels}" ]]; then
+  echo "Labels differ."
+  echo "Expected: ${LABELS}"
+  echo "Actual: ${actual_labels}"
+  RESULT="fail"
+fi
+
+actual_family="$(gcloud compute images describe "${IMAGE}" --project="${PROJECT}" --format='value(family)')"
+if [[ "${FAMILY}" != "${actual_family}" ]]; then
+  echo "Family differs."
+  echo "Expected: ${FAMILY}"
+  echo "Actual: ${actual_family}"
+  RESULT="fail"
+fi
+
+actual_disk_size="$(gcloud compute images describe "${IMAGE}" --project="${PROJECT}" --format='value(diskSizeGb)')"
+if [[ "${DISK_SIZE_GB}" != "${actual_disk_size}" ]]; then
+  echo "Disk size differs."
+  echo "Expected: ${DISK_SIZE_GB}"
+  echo "Actual: ${actual_disk_size}"
+  RESULT="fail"
+fi
+
+gcloud compute images delete "${IMAGE}" --project="${PROJECT}"
+if [[ "${RESULT}" == "fail" ]]; then
+  echo "Tests failed"
+  exit 1
+fi
+echo "Tests passed"
diff --git a/testing/milestone_test.yaml b/testing/milestone_test.yaml
new file mode 100644
index 0000000..c1e4dbf
--- /dev/null
+++ b/testing/milestone_test.yaml
@@ -0,0 +1,41 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "milestone_test"
+  "_INPUT_IMAGE": "69"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-milestone=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_8'
+timeout: "7200s"
diff --git a/testing/milestone_test/preload_test.cfg b/testing/milestone_test/preload_test.cfg
new file mode 100644
index 0000000..a7a8c0b
--- /dev/null
+++ b/testing/milestone_test/preload_test.cfg
@@ -0,0 +1,76 @@
+#cloud-config
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /tmp/preloader-test/test.sh
+    permissions: 0644
+    owner: root
+    content: |
+      set -o errexit
+      set -o pipefail
+
+      trap 'fail exiting due to errors' EXIT
+
+      fail() {
+        echo "TestFail: $@"
+      }
+
+      testVersion() {
+        expected="69"
+        actual=$(. /etc/os-release; echo "${VERSION}")
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected milestone: ${expected}"
+          echo "actual milestone: ${actual}"
+          echo "testVersion fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testVersion pass"
+      }
+
+      main() {
+        RESULT="pass"
+        testVersion
+        if [[ "${RESULT}" == "fail" ]]; then
+          exit 1
+        fi
+      }
+
+      main 2>&1 | sed "s/^/TestStatus: /"
+      trap - EXIT
+      echo "TestPass: all tests passed"
+
+  - path: /etc/systemd/system/preloader-test.service
+    permissions: 0644
+    owner: root
+    content: |
+      [Unit]
+      Description=Preloader test
+      Wants=network-online.target gcr-online.target docker.service
+      After=network-online.target gcr-online.target docker.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      User=root
+      ExecStart=/bin/bash /tmp/preloader-test/test.sh
+      StandardOutput=tty
+      StandardError=tty
+      TTYPath=/dev/ttyS1
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl --no-block start preloader-test.service
diff --git a/testing/multi_script/preload_1.sh b/testing/multi_script/preload_1.sh
new file mode 100644
index 0000000..91bde9a
--- /dev/null
+++ b/testing/multi_script/preload_1.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "hello" > /var/lib/hello
diff --git a/testing/multi_script/preload_2.sh b/testing/multi_script/preload_2.sh
new file mode 100644
index 0000000..dede88b
--- /dev/null
+++ b/testing/multi_script/preload_2.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_code=0
+i=1
+while [[ $i -le 10 ]]; do
+  echo "Pulling ubuntu container image... [${i}/10]"
+  docker pull ubuntu && break || docker_code="$?"
+  i=$((i+1))
+done
+if [[ $i -eq 11 ]]; then
+  echo "Pulling ubuntu failed."
+  echo "Docker journal logs:"
+  journalctl -u docker.service --no-pager
+  exit "${docker_code}"
+fi
+echo "Successfully pulled ubuntu container image."
diff --git a/testing/multi_script/preload_test.cfg b/testing/multi_script/preload_test.cfg
new file mode 100644
index 0000000..203cd15
--- /dev/null
+++ b/testing/multi_script/preload_test.cfg
@@ -0,0 +1,134 @@
+#cloud-config
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /tmp/preloader-test/test.sh
+    permissions: 0644
+    owner: root
+    content: |
+      set -o errexit
+      set -o pipefail
+
+      trap 'fail exiting due to errors' EXIT
+
+      fail() {
+        echo "TestFail: $@"
+      }
+
+      testHello() {
+        if [[ ! -f /var/lib/hello ]]; then
+          echo "/var/lib/hello is not a valid file (does it exist?)"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        expected="hello"
+        actual=$(cat /var/lib/hello)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "/var/lib/hello contains ${actual} instead of ${expected}"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHello pass"
+      }
+
+      testUbuntuImage() {
+        expected=$(docker images --format {{.Repository}})
+        actual="ubuntu"
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected docker images: ${expected}"
+          echo "actual docker images: ${actual}"
+          echo "testUbuntuImage fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testUbuntuImage pass"
+      }
+
+      testVersion() {
+        expected="10895.0.0"
+        actual=$(. /etc/os-release; echo "${BUILD_ID}")
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected version: ${expected}"
+          echo "actual version: ${actual}"
+          echo "testVersion fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testVersion pass"
+      }
+
+      testHomeDir() {
+        expected="chronos"
+        actual=$(ls /home)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected home contents: ${expected}"
+          echo "actual home contents: ${actual}"
+          echo "testHomeDir fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHomeDir pass"
+      }
+
+      testWorkdirClean() {
+        if [[ -d "/var/lib/.cos-customizer" ]]; then
+          echo "/var/lib/.cos-customizer exists"
+          echo "testWorkdirClean fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testWorkdirClean pass"
+      }
+
+      main() {
+        RESULT="pass"
+        testHello
+        testUbuntuImage
+        testVersion
+        testHomeDir
+        testWorkdirClean
+        if [[ "${RESULT}" == "fail" ]]; then
+          exit 1
+        fi
+      }
+
+      main 2>&1 | sed "s/^/TestStatus: /"
+      trap - EXIT
+      echo "TestPass: all tests passed"
+
+  - path: /etc/systemd/system/preloader-test.service
+    permissions: 0644
+    owner: root
+    content: |
+      [Unit]
+      Description=Preloader test
+      Wants=network-online.target gcr-online.target docker.service
+      After=network-online.target gcr-online.target docker.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      User=root
+      ExecStart=/bin/bash /tmp/preloader-test/test.sh
+      StandardOutput=tty
+      StandardError=tty
+      TTYPath=/dev/ttyS1
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl --no-block start preloader-test.service
diff --git a/testing/multi_script_test.yaml b/testing/multi_script_test.yaml
new file mode 100644
index 0000000..10afdeb
--- /dev/null
+++ b/testing/multi_script_test.yaml
@@ -0,0 +1,47 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "multi_script"
+  "_INPUT_IMAGE": "cos-dev-69-10895-0-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload_1.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload_2.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_8'
+timeout: "7200s"
diff --git a/testing/parallel_test.yaml b/testing/parallel_test.yaml
new file mode 100644
index 0000000..16406c5
--- /dev/null
+++ b/testing/parallel_test.yaml
@@ -0,0 +1,103 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST_1": "smoke_test"
+  "_INPUT_IMAGE_1": "cos-dev-69-10895-0-0"
+  "_TEST_2": "gpu_test"
+  "_INPUT_IMAGE_2": "cos-69-10895-71-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  id: "docker-build"
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+# TEST_1 workflow
+- name: 'bazel:cos_customizer'
+  waitFor: "docker-build"
+  id: "test-1-start-build"
+  args: ["-local-state-workdir=.${_TEST_1}",
+         "start-image-build",
+         "-build-context=testing/${_TEST_1}",
+         "-image-name=${_INPUT_IMAGE_1}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-${_TEST_1}-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  waitFor: "test-1-start-build"
+  id: "test-1-run-script"
+  args: ["-local-state-workdir=.${_TEST_1}",
+         "run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  waitFor: "test-1-run-script"
+  id: "test-1-finish-build"
+  args: ["-local-state-workdir=.${_TEST_1}",
+         "finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-t1-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  waitFor: "test-1-finish-build"
+  id: "test-1-test-build"
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-t1-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST_1}/preload_test.cfg", "testing/util/run_test.wf.json"]
+# TEST_2 workflow
+- name: 'busybox'
+  args: ["sed", "-i", "-e", "s|%s|'396.26'|",
+         "testing/${_TEST_2}/preload_test.cfg"]
+- name: 'bazel:cos_customizer'
+  waitFor: "docker-build"
+  id: "test-2-start-build"
+  args: ["-local-state-workdir=.${_TEST_2}",
+         "start-image-build",
+         "-build-context=testing/${_TEST_2}",
+         "-image-name=${_INPUT_IMAGE_2}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-${_TEST_2}-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  waitFor: "test-2-start-build"
+  id: "test-2-install-gpu"
+  args: ["-local-state-workdir=.${_TEST_2}",
+         "install-gpu",
+         "-version=396.26",
+         "-md5sum=4c4b23467495338ef2612769ebeadb3d",
+         "-gpu-type=nvidia-tesla-k80"]
+- name: 'bazel:cos_customizer'
+  waitFor: "test-2-install-gpu"
+  id: "test-2-run-script"
+  args: ["-local-state-workdir=.${_TEST_2}",
+         "run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  waitFor: "test-2-run-script"
+  id: "test-2-finish-build"
+  args: ["-local-state-workdir=.${_TEST_2}",
+         "finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-t2-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  waitFor: "test-2-finish-build"
+  id: "test-2-test-build"
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-t2-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST_2}/preload_test.cfg", "-var:vm_workflow",
+         "./gpu_vm.wf.json", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/seal_oem_test.yaml b/testing/seal_oem_test.yaml
new file mode 100644
index 0000000..5a41d7d
--- /dev/null
+++ b/testing/seal_oem_test.yaml
@@ -0,0 +1,67 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+steps:
+
+- name: 'gcr.io/cloud-builders/gcloud'
+  entrypoint: '/bin/bash'
+  args:
+  - '-c'
+  - |
+    test_list=("gcloud builds submit --config=testing/seal_oem_test/seal_oem_test.yaml\
+                  --substitutions=_OEM_SIZE=50M,_DISK_SIZE=10,_OEM_SIZE_TH=40 --async --format='value(ID)' ." 
+               "gcloud builds submit --config=testing/seal_oem_test/seal_oem_test.yaml\
+                  --substitutions=_OEM_SIZE=1G,_DISK_SIZE=12,_OEM_SIZE_TH=900 --async --format='value(ID)' ."   
+               "gcloud builds submit --config=testing/seal_oem_test/seal_oem_test.yaml\
+                  --substitutions=_OEM_SIZE=2G,_DISK_SIZE=14,_OEM_SIZE_TH=1700 --async --format='value(ID)' ." )
+    build_ids=()
+    exit_code=0
+    for test in "${test_list[@]}"; do
+      build_ids+=("$(eval "${test}")")
+    done
+    for build_id in "${build_ids[@]}"; do
+      status=""
+      while true; do
+        status="$(gcloud builds describe "${build_id}" --format='value(status)')"
+        case "${status}" in
+          "SUCCESS"|"FAILURE"|"INTERNAL_ERROR"|"TIMEOUT"|"CANCELLED")
+            echo "${status}"
+            break
+            ;;
+          "QUEUED"|"WORKING")
+            sleep 5
+            ;;
+          "STATUS_UNKNOWN")
+            echo "Received STATUS_UNKNOWN for build ${build_id}" 1>&2
+            sleep 5
+            ;;
+          *)
+            echo "Unknown status for build ${build_id}: ${status}" 1>&2
+            break
+            ;;
+        esac
+      done
+      if [[ "${status}" == "SUCCESS" ]]; then
+        echo "Build ${build_id} succeeded"
+      else
+        log_url="$(gcloud builds describe "${build_id}" --format='value(logUrl)')"
+        echo "Build ${build_id} failed"
+        echo "Logs: ${log_url}"
+        exit_code=1
+      fi
+    done
+    exit "${exit_code}"
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/seal_oem_test/Dockerfile b/testing/seal_oem_test/Dockerfile
new file mode 100644
index 0000000..ec39642
--- /dev/null
+++ b/testing/seal_oem_test/Dockerfile
@@ -0,0 +1,21 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from gcr.io/compute-image-tools/daisy as daisy
+from ubuntu
+run apt-get update && apt-get install -y ca-certificates
+copy --from=daisy /daisy /daisy
+copy --from=daisy /workflows /workflows
+copy ./run_test.wf.json /
+copy ./vm.wf.json /
+copy  ./preload_test.cfg /
diff --git a/testing/seal_oem_test/modify_oem.sh b/testing/seal_oem_test/modify_oem.sh
new file mode 100644
index 0000000..52c7557
--- /dev/null
+++ b/testing/seal_oem_test/modify_oem.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+sudo mount -o rw /dev/sda8 /usr/share/oem
+echo "hello" > /usr/share/oem/hello
diff --git a/testing/seal_oem_test/preload.sh b/testing/seal_oem_test/preload.sh
new file mode 100644
index 0000000..e3204fd
--- /dev/null
+++ b/testing/seal_oem_test/preload.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "hello" > /mnt/stateful_partition/hello
+sudo mount -o remount,rw /usr/share/oem
+echo "hello" > /usr/share/oem/hello
+docker_code=0
+i=1
+while [[ $i -le 10 ]]; do
+  echo "Pulling ubuntu container image... [${i}/10]"
+  docker pull ubuntu && break || docker_code="$?"
+  i=$((i+1))
+done
+if [[ $i -eq 11 ]]; then
+  echo "Pulling ubuntu failed."
+  echo "Docker journal logs:"
+  journalctl -u docker.service --no-pager
+  exit "${docker_code}"
+fi
+echo "Successfully pulled ubuntu container image."
diff --git a/testing/seal_oem_test/preload_test.cfg b/testing/seal_oem_test/preload_test.cfg
new file mode 100644
index 0000000..5645702
--- /dev/null
+++ b/testing/seal_oem_test/preload_test.cfg
@@ -0,0 +1,192 @@
+#cloud-config
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /tmp/preloader-test/test.sh
+    permissions: 0644
+    owner: root
+    content: |
+      set -o errexit
+      set -o pipefail
+
+      # Templated by cloudbuild config that runs this test.
+      OEM_SIZE_TH=%s
+
+      trap 'fail exiting due to errors' EXIT
+
+      fail() {
+        echo "TestFail: $@"
+      }
+
+      testHello() {
+        if [[ ! -f /mnt/stateful_partition/hello ]]; then
+          echo "/mnt/stateful_partition/hello is not a valid file (does it exist?)"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        expected="hello"
+        actual=$(cat /mnt/stateful_partition/hello)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "/mnt/stateful_partition/hello contains ${actual} instead of ${expected}"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHello pass"
+      }
+
+      testOEMHello(){
+        if [[ ! -f /usr/share/oem/hello ]]; then
+          echo "/usr/share/oem/hello is not a valid file (does it exist?)"
+          echo "testOEMHello fail"
+          RESULT="fail"
+          return
+        fi
+        expected="hello"
+        actual=$(cat /usr/share/oem/hello)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "/usr/share/oem/hello contains ${actual} instead of ${expected}"
+          echo "testOEMHello fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testOEMHello pass"        
+      }
+
+      testUbuntuImage() {
+        expected="ubuntu"
+        actual=$(docker images --format {{.Repository}})
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected docker images: ${expected}"
+          echo "actual docker images: ${actual}"
+          echo "testUbuntuImage fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testUbuntuImage pass"
+      }
+
+      testHomeDir() {
+        expected="chronos"
+        actual=$(ls /home)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected home contents: ${expected}"
+          echo "actual home contents: ${actual}"
+          echo "testHomeDir fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHomeDir pass"
+      }
+
+      testWorkdirClean() {
+        if [[ -d "/var/lib/.cos-customizer" ]]; then
+          echo "/var/lib/.cos-customizer exists"
+          echo "testWorkdirClean fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testWorkdirClean pass"
+      }
+
+      testOEMSize(){
+        actual=$(df --output=size /usr/share/oem --block-size=$((1<<20)) | tail -n 1)
+        if [[ "${actual}" -le "${OEM_SIZE_TH}" ]]; then
+          echo "OEM size 80% threshold: ${OEM_SIZE_TH} MB"
+          echo "actual OEM size: ${actual} MB"
+          echo "testOEMSize fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testOEMSize pass"
+      }
+
+      testStatefulSize(){
+        local -r stateful_size_th=7168
+        actual=$(df --output=size /mnt/stateful_partition --block-size=$((1<<20)) | tail -n 1)
+        if [[ "${actual}" -le "${stateful_size_th}" ]]; then
+          echo "STATEFUL size 80% threshold: ${stateful_size_th} MB"
+          echo "actual STATEFUL size: ${actual} MB"
+          echo "testStatefulSize fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testStatefulSize pass"
+      }
+
+      mountOEM(){
+        if [[ ! -e /dev/dm-1 ]]; then
+          echo "/dev/dm-1 doesn't exist"
+          echo "mountOEM fail"
+          RESULT="fail"
+          return
+        fi
+        sudo mount -o ro /dev/dm-1 /usr/share/oem/
+        echo "mountOEM pass"
+      }
+
+      testOEMMountService(){
+        if (! sudo systemctl show -p LoadState usr-share-oem.mount | grep "masked" > /dev/null ); then
+          echo "usr-share-oem.mount not masked"
+          echo "testOEMMountService fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testOEMMountService pass"
+      }
+
+      main() {
+        RESULT="pass"
+        mountOEM
+        testHello
+        testUbuntuImage
+        testHomeDir
+        testWorkdirClean
+        testOEMSize
+        testOEMHello
+        testOEMMountService
+        testStatefulSize
+        if [[ "${RESULT}" == "fail" ]]; then
+          exit 1
+        fi
+      }
+
+      main 2>&1 | sed "s/^/TestStatus: /"
+      trap - EXIT
+      echo "TestPass: all tests passed"
+
+  - path: /etc/systemd/system/preloader-test.service
+    permissions: 0644
+    owner: root
+    content: |
+      [Unit]
+      Description=Preloader test
+      Wants=network-online.target gcr-online.target docker.service
+      After=network-online.target gcr-online.target docker.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      User=root
+      ExecStart=/bin/bash /tmp/preloader-test/test.sh
+      StandardOutput=tty
+      StandardError=tty
+      TTYPath=/dev/ttyS1
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl --no-block start preloader-test.service
diff --git a/testing/seal_oem_test/run_test.wf.json b/testing/seal_oem_test/run_test.wf.json
new file mode 100644
index 0000000..b4ebdbd
--- /dev/null
+++ b/testing/seal_oem_test/run_test.wf.json
@@ -0,0 +1,50 @@
+{
+  "Name": "run-test",
+  "Vars": {
+    "image_name": { "Required": true, "Description": "Name of image to test" },
+    "image_project": { "Required": true, "Description": "Project of image to test" },
+    "test_cfg": { "Required": true, "Description": "Cloud-config to use for the test" },
+    "vm_workflow": { "Value": "./vm.wf.json", "Description": "Workflow to use for creating the test VM" }
+  },
+  "Steps": {
+    "setup-disk": {
+      "CreateDisks": [
+        {
+          "Name": "boot-disk",
+          "SourceImage": "projects/${image_project}/global/images/${image_name}"
+        }
+      ]
+    },
+    "delete-image": {
+      "DeleteResources": {
+        "Images": ["projects/${image_project}/global/images/${image_name}"]
+      }
+    },
+    "create-vm": {
+      "IncludeWorkflow": {
+        "Path": "${vm_workflow}",
+        "Vars": {
+          "test_cfg": "${test_cfg}"
+        }
+      }
+    },
+    "wait": {
+      "WaitForInstancesSignal": [
+        {
+          "Name": "preload-test",
+          "SerialOutput": {
+            "Port": 2,
+            "SuccessMatch": "TestPass:",
+            "FailureMatch": "TestFail:",
+            "StatusMatch": "TestStatus:"
+          }
+        }
+      ]
+    }
+  },
+  "Dependencies": {
+    "create-vm": ["setup-disk"],
+    "delete-image": ["setup-disk"],
+    "wait": ["create-vm"]
+  }
+}
diff --git a/testing/seal_oem_test/seal_oem_test.yaml b/testing/seal_oem_test/seal_oem_test.yaml
new file mode 100644
index 0000000..373f90c
--- /dev/null
+++ b/testing/seal_oem_test/seal_oem_test.yaml
@@ -0,0 +1,54 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "seal_oem_test"
+  "_INPUT_IMAGE": "cos-81-12871-148-0"
+  "_INPUT_PROJECT": "cos-cloud"
+  "_OEM_SIZE": ""
+  "_DISK_SIZE": ""
+  "_OEM_SIZE_TH": ""
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'busybox'
+  args: ["sed", "-i", "-e", "s|%s|'${_OEM_SIZE_TH}'|",
+         "testing/${_TEST}/preload_test.cfg"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["seal-oem"]        
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID",
+         "-disk-size-gb=${_DISK_SIZE}",
+         "-oem-size=${_OEM_SIZE}"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-default_timeout=15m", "-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/seal_oem_test/vm.wf.json b/testing/seal_oem_test/vm.wf.json
new file mode 100644
index 0000000..9721c00
--- /dev/null
+++ b/testing/seal_oem_test/vm.wf.json
@@ -0,0 +1,23 @@
+{
+  "Name": "vm",
+  "Vars": {
+    "test_cfg": { "Required": true, "Description": "Cloud-config to use for the test" }
+  },
+  "Sources": {
+    "cloud-config": "${test_cfg}"
+  },
+  "Steps": {
+    "create-vm": {
+      "CreateInstances": [
+        {
+          "Name": "preload-test",
+          "Disks": [{"Source": "boot-disk"}],
+          "Metadata": {
+            "user-data": "${SOURCE:cloud-config}",
+            "block-project-ssh-keys": "TRUE"
+          }
+        }
+      ]
+    }
+  }
+}
diff --git a/testing/seal_oem_test_no_oem_size.yaml b/testing/seal_oem_test_no_oem_size.yaml
new file mode 100644
index 0000000..1974487
--- /dev/null
+++ b/testing/seal_oem_test_no_oem_size.yaml
@@ -0,0 +1,46 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "seal_oem_test"
+  "_INPUT_IMAGE": "cos-81-12871-148-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["seal-oem"]        
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-default_timeout=15m", "-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/seal_oem_test_no_size.yaml b/testing/seal_oem_test_no_size.yaml
new file mode 100644
index 0000000..051180b
--- /dev/null
+++ b/testing/seal_oem_test_no_size.yaml
@@ -0,0 +1,47 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "seal_oem_test"
+  "_INPUT_IMAGE": "cos-81-12871-148-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["seal-oem"]        
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID",
+         "-disk-size-gb=11"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-default_timeout=15m", "-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/seal_oem_test_timeout.yaml b/testing/seal_oem_test_timeout.yaml
new file mode 100644
index 0000000..2b6dcca
--- /dev/null
+++ b/testing/seal_oem_test_timeout.yaml
@@ -0,0 +1,58 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "seal_oem_test"
+  "_INPUT_IMAGE": "cos-81-12871-148-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["seal-oem"]   
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=modify_oem.sh"]       
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID",
+         "-disk-size-gb=11"]
+- name: 'gcr.io/cloud-builders/docker'    
+  args: [ 'build', '-t', 'seal-oem-test-timeout-image', 'testing/${_TEST}' ]    
+- name: 'seal-oem-test-timeout-image'     
+  entrypoint: '/bin/bash'    
+  args:    
+  - '-c'    
+  - |    
+    /daisy -default_timeout=5m -project=$PROJECT_ID -zone=us-west1-b \
+    -var:image_name preload-test-$BUILD_ID -var:image_project $PROJECT_ID \
+    -var:test_cfg /preload_test.cfg /run_test.wf.json | tee /build.log 
+    if ( grep "did not complete within the specified timeout" /build.log > /dev/null ); \
+    then exit 0; else echo "error: timeout expected"; exit 1; fi
+options:
+  machineType: 'N1_HIGHCPU_32'
+timeout: "7200s"
diff --git a/testing/smoke_test.yaml b/testing/smoke_test.yaml
new file mode 100644
index 0000000..59950c6
--- /dev/null
+++ b/testing/smoke_test.yaml
@@ -0,0 +1,44 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "smoke_test"
+  "_INPUT_IMAGE": "cos-dev-69-10895-0-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_8'
+timeout: "7200s"
diff --git a/testing/smoke_test/preload.sh b/testing/smoke_test/preload.sh
new file mode 100644
index 0000000..ba25e10
--- /dev/null
+++ b/testing/smoke_test/preload.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "hello" > /var/lib/hello
+docker_code=0
+i=1
+while [[ $i -le 10 ]]; do
+  echo "Pulling ubuntu container image... [${i}/10]"
+  docker pull ubuntu && break || docker_code="$?"
+  i=$((i+1))
+done
+if [[ $i -eq 11 ]]; then
+  echo "Pulling ubuntu failed."
+  echo "Docker journal logs:"
+  journalctl -u docker.service --no-pager
+  exit "${docker_code}"
+fi
+echo "Successfully pulled ubuntu container image."
diff --git a/testing/smoke_test/preload_test.cfg b/testing/smoke_test/preload_test.cfg
new file mode 100644
index 0000000..c943032
--- /dev/null
+++ b/testing/smoke_test/preload_test.cfg
@@ -0,0 +1,134 @@
+#cloud-config
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /tmp/preloader-test/test.sh
+    permissions: 0644
+    owner: root
+    content: |
+      set -o errexit
+      set -o pipefail
+
+      trap 'fail exiting due to errors' EXIT
+
+      fail() {
+        echo "TestFail: $@"
+      }
+
+      testHello() {
+        if [[ ! -f /var/lib/hello ]]; then
+          echo "/var/lib/hello is not a valid file (does it exist?)"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        expected="hello"
+        actual=$(cat /var/lib/hello)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "/var/lib/hello contains ${actual} instead of ${expected}"
+          echo "testHello fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHello pass"
+      }
+
+      testUbuntuImage() {
+        expected="ubuntu"
+        actual=$(docker images --format {{.Repository}})
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected docker images: ${expected}"
+          echo "actual docker images: ${actual}"
+          echo "testUbuntuImage fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testUbuntuImage pass"
+      }
+
+      testVersion() {
+        expected="10895.0.0"
+        actual=$(. /etc/os-release; echo "${BUILD_ID}")
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected version: ${expected}"
+          echo "actual version: ${actual}"
+          echo "testVersion fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testVersion pass"
+      }
+
+      testHomeDir() {
+        expected="chronos"
+        actual=$(ls /home)
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected home contents: ${expected}"
+          echo "actual home contents: ${actual}"
+          echo "testHomeDir fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testHomeDir pass"
+      }
+
+      testWorkdirClean() {
+        if [[ -d "/var/lib/.cos-customizer" ]]; then
+          echo "/var/lib/.cos-customizer exists"
+          echo "testWorkdirClean fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testWorkdirClean pass"
+      }
+
+      main() {
+        RESULT="pass"
+        testHello
+        testUbuntuImage
+        testVersion
+        testHomeDir
+        testWorkdirClean
+        if [[ "${RESULT}" == "fail" ]]; then
+          exit 1
+        fi
+      }
+
+      main 2>&1 | sed "s/^/TestStatus: /"
+      trap - EXIT
+      echo "TestPass: all tests passed"
+
+  - path: /etc/systemd/system/preloader-test.service
+    permissions: 0644
+    owner: root
+    content: |
+      [Unit]
+      Description=Preloader test
+      Wants=network-online.target gcr-online.target docker.service
+      After=network-online.target gcr-online.target docker.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      User=root
+      ExecStart=/bin/bash /tmp/preloader-test/test.sh
+      StandardOutput=tty
+      StandardError=tty
+      TTYPath=/dev/ttyS1
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl --no-block start preloader-test.service
diff --git a/testing/timeout_test.yaml b/testing/timeout_test.yaml
new file mode 100644
index 0000000..8f9f05e
--- /dev/null
+++ b/testing/timeout_test.yaml
@@ -0,0 +1,23 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+steps:
+- name: 'gcr.io/cloud-builders/gcloud'
+  entrypoint: '/bin/bash'
+  args:
+  - '-c'
+  - |
+    gcloud builds submit --config=testing/timeout_test/timeout_test.yaml . | tee /build.log
+    grep "did not complete within the specified timeout" /build.log > /dev/null
+timeout: '7200s'
diff --git a/testing/timeout_test/preload.sh b/testing/timeout_test/preload.sh
new file mode 100644
index 0000000..85f83c0
--- /dev/null
+++ b/testing/timeout_test/preload.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+sleep 300
diff --git a/testing/timeout_test/timeout_test.yaml b/testing/timeout_test/timeout_test.yaml
new file mode 100644
index 0000000..0c5bc3f
--- /dev/null
+++ b/testing/timeout_test/timeout_test.yaml
@@ -0,0 +1,41 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  '_TEST': 'timeout_test'
+  '_INPUT_IMAGE': 'cos-dev-69-10895-0-0'
+  '_INPUT_PROJECT': 'cos-cloud'
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ['run', '--spawn_strategy=standalone', ':cos_customizer', '--', '--norun']
+- name: 'bazel:cos_customizer'
+  args: ['start-image-build',
+         '-build-context=testing/${_TEST}',
+         '-image-name=${_INPUT_IMAGE}',
+         '-image-project=${_INPUT_PROJECT}',
+         '-gcs-bucket=${PROJECT_ID}_cloudbuild',
+         '-gcs-workdir=customizer-$BUILD_ID']
+- name: 'bazel:cos_customizer'
+  args: ['run-script',
+         '-script=preload.sh']
+- name: 'bazel:cos_customizer'
+  args: ['finish-image-build',
+         '-zone=us-west1-b',
+         '-project=$PROJECT_ID',
+         '-image-name=preload-test-$BUILD_ID',
+         '-image-project=$PROJECT_ID',
+         '-timeout=1m']
+options:
+  machineType: 'N1_HIGHCPU_8'
+timeout: '1500s'
diff --git a/testing/toolbox_test.yaml b/testing/toolbox_test.yaml
new file mode 100644
index 0000000..0b89950
--- /dev/null
+++ b/testing/toolbox_test.yaml
@@ -0,0 +1,44 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+substitutions:
+  "_TEST": "toolbox_test"
+  "_INPUT_IMAGE": "cos-dev-69-10895-0-0"
+  "_INPUT_PROJECT": "cos-cloud"
+steps:
+- name: 'gcr.io/cloud-builders/bazel'
+  args: ["run", "--spawn_strategy=standalone", ":cos_customizer", "--", "--norun"]
+- name: 'bazel:cos_customizer'
+  args: ["start-image-build",
+         "-build-context=testing/${_TEST}",
+         "-image-name=${_INPUT_IMAGE}",
+         "-image-project=${_INPUT_PROJECT}",
+         "-gcs-bucket=${PROJECT_ID}_cloudbuild",
+         "-gcs-workdir=customizer-$BUILD_ID"]
+- name: 'bazel:cos_customizer'
+  args: ["run-script",
+         "-script=preload.sh"]
+- name: 'bazel:cos_customizer'
+  args: ["finish-image-build",
+         "-zone=us-west1-b",
+         "-project=$PROJECT_ID",
+         "-image-name=preload-test-$BUILD_ID",
+         "-image-project=$PROJECT_ID"]
+- name: 'gcr.io/compute-image-tools/daisy'
+  args: ["-project=$PROJECT_ID", "-zone=us-west1-b", "-var:image_name",
+         "preload-test-$BUILD_ID", "-var:image_project", "$PROJECT_ID",
+         "-var:test_cfg", "../${_TEST}/preload_test.cfg", "testing/util/run_test.wf.json"]
+options:
+  machineType: 'N1_HIGHCPU_8'
+timeout: "7200s"
diff --git a/testing/toolbox_test/preload.sh b/testing/toolbox_test/preload.sh
new file mode 100644
index 0000000..fee0210
--- /dev/null
+++ b/testing/toolbox_test/preload.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+toolbox
diff --git a/testing/toolbox_test/preload_test.cfg b/testing/toolbox_test/preload_test.cfg
new file mode 100644
index 0000000..da3f4f6
--- /dev/null
+++ b/testing/toolbox_test/preload_test.cfg
@@ -0,0 +1,77 @@
+#cloud-config
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+write_files:
+  - path: /tmp/preloader-test/test.sh
+    permissions: 0644
+    owner: root
+    content: |
+      set -o errexit
+      set -o pipefail
+      set -o nounset
+
+      trap 'fail exiting due to errors' EXIT
+
+      fail() {
+        echo "TestFail: $@"
+      }
+
+      testToolbox() {
+        expected="gcr.io/google-containers/toolbox"
+        actual=$(docker images --format {{.Repository}})
+        if [[ "${expected}" != "${actual}" ]]; then
+          echo "expected: ${expected}"
+          echo "actual: ${actual}"
+          echo "testToolbox fail"
+          RESULT="fail"
+          return
+        fi
+        echo "testToolbox pass"
+      }
+
+      main() {
+        RESULT="pass"
+        testToolbox
+        if [[ "${RESULT}" == "fail" ]]; then
+          exit 1
+        fi
+      }
+
+      main 2>&1 | sed "s/^/TestStatus: /"
+      trap - EXIT
+      echo "TestPass: all tests passed"
+
+  - path: /etc/systemd/system/preloader-test.service
+    permissions: 0644
+    owner: root
+    content: |
+      [Unit]
+      Description=Preloader test
+      Wants=network-online.target gcr-online.target docker.service
+      After=network-online.target gcr-online.target docker.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      User=root
+      ExecStart=/bin/bash /tmp/preloader-test/test.sh
+      StandardOutput=tty
+      StandardError=tty
+      TTYPath=/dev/ttyS1
+
+runcmd:
+  - systemctl daemon-reload
+  - systemctl --no-block start preloader-test.service
diff --git a/testing/util/gpu_vm.wf.json b/testing/util/gpu_vm.wf.json
new file mode 100644
index 0000000..a497f1c
--- /dev/null
+++ b/testing/util/gpu_vm.wf.json
@@ -0,0 +1,32 @@
+{
+  "Name": "gpu-vm",
+  "Vars": {
+    "test_cfg": { "Required": true, "Description": "Cloud-config to use for the test" }
+  },
+  "Sources": {
+    "cloud-config": "${test_cfg}"
+  },
+  "Steps": {
+    "create-vm": {
+      "CreateInstances": [
+        {
+          "Name": "preload-test",
+          "Disks": [{"Source": "boot-disk"}],
+          "Metadata": {
+            "user-data": "${SOURCE:cloud-config}",
+            "block-project-ssh-keys": "TRUE"
+          },
+          "guestAccelerators": [
+            {
+              "acceleratorCount": 1,
+              "acceleratorType": "https://www.googleapis.com/compute/v1/projects/${PROJECT}/zones/${ZONE}/acceleratorTypes/nvidia-tesla-k80"
+            }
+          ],
+          "scheduling": {
+            "onHostMaintenance": "TERMINATE"
+          }
+        }
+      ]
+    }
+  }
+}
diff --git a/testing/util/run_test.wf.json b/testing/util/run_test.wf.json
new file mode 100644
index 0000000..b4ebdbd
--- /dev/null
+++ b/testing/util/run_test.wf.json
@@ -0,0 +1,50 @@
+{
+  "Name": "run-test",
+  "Vars": {
+    "image_name": { "Required": true, "Description": "Name of image to test" },
+    "image_project": { "Required": true, "Description": "Project of image to test" },
+    "test_cfg": { "Required": true, "Description": "Cloud-config to use for the test" },
+    "vm_workflow": { "Value": "./vm.wf.json", "Description": "Workflow to use for creating the test VM" }
+  },
+  "Steps": {
+    "setup-disk": {
+      "CreateDisks": [
+        {
+          "Name": "boot-disk",
+          "SourceImage": "projects/${image_project}/global/images/${image_name}"
+        }
+      ]
+    },
+    "delete-image": {
+      "DeleteResources": {
+        "Images": ["projects/${image_project}/global/images/${image_name}"]
+      }
+    },
+    "create-vm": {
+      "IncludeWorkflow": {
+        "Path": "${vm_workflow}",
+        "Vars": {
+          "test_cfg": "${test_cfg}"
+        }
+      }
+    },
+    "wait": {
+      "WaitForInstancesSignal": [
+        {
+          "Name": "preload-test",
+          "SerialOutput": {
+            "Port": 2,
+            "SuccessMatch": "TestPass:",
+            "FailureMatch": "TestFail:",
+            "StatusMatch": "TestStatus:"
+          }
+        }
+      ]
+    }
+  },
+  "Dependencies": {
+    "create-vm": ["setup-disk"],
+    "delete-image": ["setup-disk"],
+    "wait": ["create-vm"]
+  }
+}
diff --git a/testing/util/vm.wf.json b/testing/util/vm.wf.json
new file mode 100644
index 0000000..9721c00
--- /dev/null
+++ b/testing/util/vm.wf.json
@@ -0,0 +1,23 @@
+{
+  "Name": "vm",
+  "Vars": {
+    "test_cfg": { "Required": true, "Description": "Cloud-config to use for the test" }
+  },
+  "Sources": {
+    "cloud-config": "${test_cfg}"
+  },
+  "Steps": {
+    "create-vm": {
+      "CreateInstances": [
+        {
+          "Name": "preload-test",
+          "Disks": [{"Source": "boot-disk"}],
+          "Metadata": {
+            "user-data": "${SOURCE:cloud-config}",
+            "block-project-ssh-keys": "TRUE"
+          }
+        }
+      ]
+    }
+  }
+}